[tools][cts] Add test coverage loss command

Adds the expectation-coverage command to "tools/run cts". The command
fetches recent, unique config/test name combinations from CI builders
that are affected by expectations in some way and determines which
expectations are suppressing test failures. This information is then
surfaced to the user, ordered by which chunks of expectations affect
the most tests, so that fixes can be prioritized to win back test
coverage.

By default, the command:
  - Runs against the core expectations file
  - Includes temporary Skip expectations
  - Groups expectations by the whitespace-delimited chunks as they
    appear in the file (see the example below)
  - Surfaces only the top 25 chunks contributing to test coverage loss
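
A "chunk" here is a comment plus the expectations that immediately
follow it, separated from other chunks by blank lines. For example (a
hypothetical entry, in the same expectation file format used by the
tests below):

  # Fails on Android
  crbug.com/0000 [ android ] foo [ Failure ]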

However, command line flags exist to change any of the following
behaviors (see the example invocation after this list):
  - Run against the compat expectations file
  - Exclude temporary Skip expectations
  - Check for coverage loss on a per-expectation basis
  - Surface the top X chunks/individual expectations contributing to
    test coverage loss (or surface all results)
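
For example, a hypothetical invocation that checks the compat
expectations file on a per-expectation basis and surfaces the top 50
offenders:

  tools/run cts expectation-coverage --check-compat-expectations \
      --check-individual-expectations --max-output=50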

Bug: 390105593
Change-Id: Ifabfedc3baa7a8950ddf4f05a4d03486ed27e30b
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/223335
Auto-Submit: Brian Sheedy <bsheedy@google.com>
Reviewed-by: dan sinclair <dsinclair@chromium.org>
Commit-Queue: Brian Sheedy <bsheedy@google.com>
diff --git a/tools/src/cmd/cts/common/config.go b/tools/src/cmd/cts/common/config.go
index 2b4e1ad..4cf4ee4 100644
--- a/tools/src/cmd/cts/common/config.go
+++ b/tools/src/cmd/cts/common/config.go
@@ -35,6 +35,8 @@
 
 	"dawn.googlesource.com/dawn/tools/src/buildbucket"
 	"dawn.googlesource.com/dawn/tools/src/cts/result"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
+	"dawn.googlesource.com/dawn/tools/src/resultsdb"
 	"github.com/tidwall/jsonc"
 )
 
@@ -72,6 +74,8 @@
 	Sheets struct {
 		ID string
 	}
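+	// OsWrapper and Querier abstract filesystem access and ResultDB queries,
+	// respectively, so that commands can be exercised with in-memory fakes
+	// in tests.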
+	OsWrapper oswrapper.OSWrapper
+	Querier   resultsdb.Querier
 }
 
 // TestConfig holds configuration data for a single test type.
diff --git a/tools/src/cmd/cts/common/results.go b/tools/src/cmd/cts/common/results.go
index 710312f..fa9c641 100644
--- a/tools/src/cmd/cts/common/results.go
+++ b/tools/src/cmd/cts/common/results.go
@@ -45,6 +45,7 @@
 	"dawn.googlesource.com/dawn/tools/src/cts/result"
 	"dawn.googlesource.com/dawn/tools/src/fileutils"
 	"dawn.googlesource.com/dawn/tools/src/gerrit"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
 	"dawn.googlesource.com/dawn/tools/src/resultsdb"
 	"dawn.googlesource.com/dawn/tools/src/subcmd"
 	"go.chromium.org/luci/auth"
@@ -336,23 +337,6 @@
 
 	lastPrintedDot := time.Now()
 
-	toStatus := func(s string) result.Status {
-		switch s {
-		default:
-			return result.Unknown
-		case rdbpb.TestStatus_PASS.String():
-			return result.Pass
-		case rdbpb.TestStatus_FAIL.String():
-			return result.Failure
-		case rdbpb.TestStatus_CRASH.String():
-			return result.Crash
-		case rdbpb.TestStatus_ABORT.String():
-			return result.Abort
-		case rdbpb.TestStatus_SKIP.String():
-			return result.Skip
-		}
-	}
-
 	resultsByExecutionMode := result.ResultsByExecutionMode{}
 	for _, test := range cfg.Tests {
 		results := result.List{}
@@ -369,7 +353,7 @@
 				}
 
 				testName := r.TestId[len(prefix):]
-				status := toStatus(r.Status)
+				status := convertRdbStatus(r.Status)
 				tags := result.NewTags()
 				duration := time.Duration(r.Duration * float64(time.Second))
 				mayExonerate := false
@@ -415,6 +399,23 @@
 	return resultsByExecutionMode, nil
 }
 
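+// convertRdbStatus maps a ResultDB TestStatus string to the equivalent
+// result.Status, defaulting to result.Unknown for unrecognized values.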
+func convertRdbStatus(rdbStatus string) result.Status {
+	switch rdbStatus {
+	default:
+		return result.Unknown
+	case rdbpb.TestStatus_PASS.String():
+		return result.Pass
+	case rdbpb.TestStatus_FAIL.String():
+		return result.Failure
+	case rdbpb.TestStatus_CRASH.String():
+		return result.Crash
+	case rdbpb.TestStatus_ABORT.String():
+		return result.Abort
+	case rdbpb.TestStatus_SKIP.String():
+		return result.Skip
+	}
+}
+
 // LatestCTSRoll returns the latest merged CTS roll that landed in the past
 // month. If no roll can be found, then an error is returned.
 func LatestCTSRoll(g *gerrit.Gerrit) (gerrit.ChangeInfo, error) {
@@ -546,3 +547,151 @@
 		return result.Failure
 	})
 }
+
+// CacheRecentUniqueSuppressedCoreResults is a helper function to only get the
+// core results from CacheRecentUniqueSuppressedResults. This allows other
+// unused results to be garbage collected.
+func CacheRecentUniqueSuppressedCoreResults(
+	ctx context.Context,
+	cfg Config,
+	cacheDir string,
+	client resultsdb.Querier,
+	osWrapper oswrapper.OSWrapper) (result.List, error) {
+
+	resultsByExecutionMode, err := CacheRecentUniqueSuppressedResults(
+		ctx, cfg, cacheDir, client, osWrapper)
+	if err != nil {
+		return nil, err
+	}
+
+	return resultsByExecutionMode["core"], nil
+}
+
+// CacheRecentUniqueSuppressedCompatResults is a helper function to only get the
+// compat results from CacheRecentUniqueSuppressedResults. This allows other
+// unused results to be garbage collected.
+func CacheRecentUniqueSuppressedCompatResults(
+	ctx context.Context,
+	cfg Config,
+	cacheDir string,
+	client resultsdb.Querier,
+	osWrapper oswrapper.OSWrapper) (result.List, error) {
+
+	resultsByExecutionMode, err := CacheRecentUniqueSuppressedResults(
+		ctx, cfg, cacheDir, client, osWrapper)
+	if err != nil {
+		return nil, err
+	}
+
+	return resultsByExecutionMode["compat"], nil
+}
+
+// CacheRecentUniqueSuppressedResults looks in the cache at 'cacheDir' for
+// CI results from the recent history. If the cache contains the results, they
+// are loaded and returned. If the cache does not contain the results, they are
+// fetched, cleaned with CleanResults(), saved to the cache directory, and
+// returned.
+// TODO(crbug.com/390105593): Consider sharing code with the other cache
+// functions.
+func CacheRecentUniqueSuppressedResults(
+	ctx context.Context,
+	cfg Config,
+	cacheDir string,
+	client resultsdb.Querier,
+	osWrapper oswrapper.OSWrapper) (result.ResultsByExecutionMode, error) {
+
+	// Load cached results if they are available.
+	var cachePath string
+	if cacheDir != "" {
+		dir := fileutils.ExpandHomeWithWrapper(cacheDir, osWrapper)
+		year, month, day := time.Now().Date()
+		path := filepath.Join(dir, "expectation-affected-ci-results", fmt.Sprintf("%d-%d-%d.txt", year, month, day))
+		if _, err := osWrapper.Stat(path); err == nil {
+			log.Println("loading cached results for today")
+			return result.LoadWithWrapper(path, osWrapper)
+		}
+		cachePath = path
+	}
+
+	// Retrieve, clean, and cache results.
+	log.Println("fetching results from recent CI builds")
+	resultsByExecutionMode, err := getRecentUniqueSuppressedResults(ctx, cfg, client)
+	if err != nil {
+		return nil, err
+	}
+
+	for i, results := range resultsByExecutionMode {
+		CleanResults(cfg, &results)
+		results.Sort()
+		resultsByExecutionMode[i] = results
+	}
+
+	if cachePath != "" {
+		if err := result.SaveWithWrapper(cachePath, resultsByExecutionMode, osWrapper); err != nil {
+			log.Printf("failed to save results to cache: %v", err)
+		}
+	}
+
+	return resultsByExecutionMode, nil
+}
+
+// getRecentUniqueSuppressedResults fetches recent CI results from ResultDB
+// that were affected by an expectation, without applying CleanResults(). The
+// returned results are unique for each combination of test name and typ
+// tags; other fields do not contain real data.
+// TODO(crbug.com/390105593): Consider sharing code with the other
+// GetRawResults functions.
+func getRecentUniqueSuppressedResults(
+	ctx context.Context,
+	cfg Config,
+	client resultsdb.Querier) (result.ResultsByExecutionMode, error) {
+
+	log.Println("fetching results from resultdb...")
+
+	resultsByExecutionMode := result.ResultsByExecutionMode{}
+	for _, test := range cfg.Tests {
+		results := result.List{}
+		for _, prefix := range test.Prefixes {
+
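+			// rowHandler validates each returned row and converts it into a
+			// result.Result that only carries the test query and typ tags.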
+			rowHandler := func(r *resultsdb.QueryResult) error {
+				if !strings.HasPrefix(r.TestId, prefix) {
+					return fmt.Errorf(
+						"Test ID %s did not start with %s even though query should have filtered.",
+						r.TestId, prefix)
+				}
+
+				testName := r.TestId[len(prefix):]
+				tags := result.NewTags()
+
+				for _, tagPair := range r.Tags {
+					if tagPair.Key != "typ_tag" {
+						return fmt.Errorf("Got tag key %v when only typ_tag should be present", tagPair.Key)
+					}
+					tags.Add(tagPair.Value)
+				}
+
+				results = append(results, result.Result{
+					// We don't actually care about anything other than the query and
+					// tags.
+					Query:        query.Parse(testName),
+					Status:       result.Pass,
+					Tags:         tags,
+					Duration:     0,
+					MayExonerate: false,
+				})
+
+				return nil
+			}
+
+			err := client.QueryRecentUniqueSuppressedTestResults(ctx, prefix, rowHandler)
+			if err != nil {
+				return nil, err
+			}
+
+			results.Sort()
+		}
+		resultsByExecutionMode[test.ExecutionMode] = results
+	}
+
+	fmt.Println(" done")
+
+	return resultsByExecutionMode, nil
+}
diff --git a/tools/src/cmd/cts/common/results_test.go b/tools/src/cmd/cts/common/results_test.go
index e696cfd..75d1c2c 100644
--- a/tools/src/cmd/cts/common/results_test.go
+++ b/tools/src/cmd/cts/common/results_test.go
@@ -29,14 +29,17 @@
 
 import (
 	"context"
+	"fmt"
+	"path/filepath"
 	"testing"
 	"time"
 
-	"dawn.googlesource.com/dawn/tools/src/buildbucket"
 	"dawn.googlesource.com/dawn/tools/src/cts/query"
 	"dawn.googlesource.com/dawn/tools/src/cts/result"
+	"dawn.googlesource.com/dawn/tools/src/fileutils"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
 	"dawn.googlesource.com/dawn/tools/src/resultsdb"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // TODO(crbug.com/342554800): Add test coverage for:
@@ -50,41 +53,12 @@
 //   MostRecentUnsuppressedFailingResultsForChange (ditto)
 
 /*******************************************************************************
- * Fake implementations
- ******************************************************************************/
-
-// A fake version of dawn/tools/src/resultsdb's BigQueryClient.
-type mockedBigQueryClient struct {
-	returnValues                    []resultsdb.QueryResult
-	unsuppressedFailureReturnValues []resultsdb.QueryResult
-}
-
-func (bq mockedBigQueryClient) QueryTestResults(
-	ctx context.Context, builds []buildbucket.BuildID, testPrefix string, f func(*resultsdb.QueryResult) error) error {
-	for _, result := range bq.returnValues {
-		if err := f(&result); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (bq mockedBigQueryClient) QueryUnsuppressedFailingTestResults(
-	ctx context.Context, builds []buildbucket.BuildID, testPrefix string, f func(*resultsdb.QueryResult) error) error {
-
-	for _, result := range bq.unsuppressedFailureReturnValues {
-		if err := f(&result); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-/*******************************************************************************
  * GetResults tests
  ******************************************************************************/
 
-func generateGoodGetResultsInputs() (context.Context, Config, *mockedBigQueryClient, BuildsByName) {
+func generateGoodGetResultsInputs() (
+	context.Context, Config, *resultsdb.MockBigQueryClient, BuildsByName) {
+
 	ctx := context.Background()
 
 	cfg := Config{
@@ -96,13 +70,15 @@
 		},
 	}
 
-	client := &mockedBigQueryClient{
-		returnValues: []resultsdb.QueryResult{
-			resultsdb.QueryResult{
-				TestId:   "prefix_test",
-				Status:   "PASS",
-				Tags:     []resultsdb.TagPair{},
-				Duration: 1.0,
+	client := &resultsdb.MockBigQueryClient{
+		ReturnValues: resultsdb.PrefixGroupedQueryResults{
+			"prefix": []resultsdb.QueryResult{
+				resultsdb.QueryResult{
+					TestId:   "prefix_test",
+					Status:   "PASS",
+					Tags:     []resultsdb.TagPair{},
+					Duration: 1.0,
+				},
 			},
 		},
 	}
@@ -120,32 +96,34 @@
 		"remove_me",
 	}
 
-	client.returnValues = []resultsdb.QueryResult{
-		resultsdb.QueryResult{
-			TestId: "prefix_test_2",
-			Status: "PASS",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "remove_me",
+	client.ReturnValues = resultsdb.PrefixGroupedQueryResults{
+		"prefix": []resultsdb.QueryResult{
+			resultsdb.QueryResult{
+				TestId: "prefix_test_2",
+				Status: "PASS",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "remove_me",
+					},
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "win",
+					},
 				},
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "win",
-				},
+				Duration: 2.0,
 			},
-			Duration: 2.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_1",
-			Status: "PASS",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "linux",
+			resultsdb.QueryResult{
+				TestId: "prefix_test_1",
+				Status: "PASS",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "linux",
+					},
 				},
+				Duration: 1.0,
 			},
-			Duration: 1.0,
 		},
 	}
 
@@ -170,25 +148,27 @@
 	expectedResults["execution_mode"] = expectedResultsList
 
 	results, err := GetResults(ctx, cfg, client, builds)
-	assert.Nil(t, err)
-	assert.Equal(t, results, expectedResults)
+	require.Nil(t, err)
+	require.Equal(t, results, expectedResults)
 }
 
 // Tests that errors from GetRawResults are properly surfaced.
 func TestGetResultsGetRawResultsErrorSurfaced(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetResultsInputs()
-	client.returnValues[0].TestId = "bad_test"
+	client.ReturnValues["prefix"][0].TestId = "bad_test"
 
 	results, err := GetResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
+	require.Nil(t, results)
+	require.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
 }
 
 /*******************************************************************************
  * GetUnsuppressedFailingResults tests
  ******************************************************************************/
 
-func generateGoodGetUnsuppressedFailingResultsInputs() (context.Context, Config, *mockedBigQueryClient, BuildsByName) {
+func generateGoodGetUnsuppressedFailingResultsInputs() (
+	context.Context, Config, *resultsdb.MockBigQueryClient, BuildsByName) {
+
 	ctx := context.Background()
 
 	cfg := Config{
@@ -200,13 +180,15 @@
 		},
 	}
 
-	client := &mockedBigQueryClient{
-		unsuppressedFailureReturnValues: []resultsdb.QueryResult{
-			resultsdb.QueryResult{
-				TestId:   "prefix_test",
-				Status:   "FAIL",
-				Tags:     []resultsdb.TagPair{},
-				Duration: 1.0,
+	client := &resultsdb.MockBigQueryClient{
+		UnsuppressedFailureReturnValues: resultsdb.PrefixGroupedQueryResults{
+			"prefix": []resultsdb.QueryResult{
+				resultsdb.QueryResult{
+					TestId:   "prefix_test",
+					Status:   "FAIL",
+					Tags:     []resultsdb.TagPair{},
+					Duration: 1.0,
+				},
 			},
 		},
 	}
@@ -224,32 +206,34 @@
 		"remove_me",
 	}
 
-	client.unsuppressedFailureReturnValues = []resultsdb.QueryResult{
-		resultsdb.QueryResult{
-			TestId: "prefix_test_2",
-			Status: "FAIL",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "remove_me",
+	client.UnsuppressedFailureReturnValues = resultsdb.PrefixGroupedQueryResults{
+		"prefix": []resultsdb.QueryResult{
+			resultsdb.QueryResult{
+				TestId: "prefix_test_2",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "remove_me",
+					},
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "win",
+					},
 				},
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "win",
-				},
+				Duration: 2.0,
 			},
-			Duration: 2.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_1",
-			Status: "FAIL",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "linux",
+			resultsdb.QueryResult{
+				TestId: "prefix_test_1",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "linux",
+					},
 				},
+				Duration: 1.0,
 			},
-			Duration: 1.0,
 		},
 	}
 
@@ -274,18 +258,18 @@
 	expectedResults["execution_mode"] = expectedResultsList
 
 	results, err := GetUnsuppressedFailingResults(ctx, cfg, client, builds)
-	assert.Nil(t, err)
-	assert.Equal(t, results, expectedResults)
+	require.Nil(t, err)
+	require.Equal(t, results, expectedResults)
 }
 
 // Tests that errors from GetRawResults are properly surfaced.
 func TestGetUnsuppressedFailingResultsGetRawResultsErrorSurfaced(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetUnsuppressedFailingResultsInputs()
-	client.unsuppressedFailureReturnValues[0].TestId = "bad_test"
+	client.UnsuppressedFailureReturnValues["prefix"][0].TestId = "bad_test"
 
 	results, err := GetUnsuppressedFailingResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
+	require.Nil(t, results)
+	require.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
 }
 
 /*******************************************************************************
@@ -295,49 +279,51 @@
 // Tests that valid results are properly parsed and returned.
 func TestGetRawResultsHappyPath(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetResultsInputs()
-	client.returnValues = []resultsdb.QueryResult{
-		resultsdb.QueryResult{
-			TestId:   "prefix_test_1",
-			Status:   "PASS",
-			Tags:     []resultsdb.TagPair{},
-			Duration: 1.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_2",
-			Status: "FAIL",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "javascript_duration",
-					Value: "0.5s",
-				},
+	client.ReturnValues = resultsdb.PrefixGroupedQueryResults{
+		"prefix": []resultsdb.QueryResult{
+			resultsdb.QueryResult{
+				TestId:   "prefix_test_1",
+				Status:   "PASS",
+				Tags:     []resultsdb.TagPair{},
+				Duration: 1.0,
 			},
-			Duration: 2.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_3",
-			Status: "SKIP",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "may_exonerate",
-					Value: "true",
+			resultsdb.QueryResult{
+				TestId: "prefix_test_2",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "javascript_duration",
+						Value: "0.5s",
+					},
 				},
+				Duration: 2.0,
 			},
-			Duration: 3.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_4",
-			Status: "SomeStatus",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "linux",
+			resultsdb.QueryResult{
+				TestId: "prefix_test_3",
+				Status: "SKIP",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "may_exonerate",
+						Value: "true",
+					},
 				},
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "intel",
-				},
+				Duration: 3.0,
 			},
-			Duration: 4.0,
+			resultsdb.QueryResult{
+				TestId: "prefix_test_4",
+				Status: "SomeStatus",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "linux",
+					},
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "intel",
+					},
+				},
+				Duration: 4.0,
+			},
 		},
 	}
 
@@ -376,24 +362,24 @@
 	expectedResults["execution_mode"] = expectedResultsList
 
 	results, err := GetRawResults(ctx, cfg, client, builds)
-	assert.Nil(t, err)
-	assert.Equal(t, results, expectedResults)
+	require.Nil(t, err)
+	require.Equal(t, results, expectedResults)
 }
 
 // Tests that a mismatched prefix results in an error.
 func TestGetRawResultsPrefixMismatch(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetResultsInputs()
-	client.returnValues[0].TestId = "bad_test"
+	client.ReturnValues["prefix"][0].TestId = "bad_test"
 
 	results, err := GetRawResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
+	require.Nil(t, results)
+	require.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
 }
 
 // Tests that a JavaScript duration that cannot be parsed results in an error.
 func TestGetRawResultsBadJavaScriptDuration(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetResultsInputs()
-	client.returnValues[0].Tags = []resultsdb.TagPair{
+	client.ReturnValues["prefix"][0].Tags = []resultsdb.TagPair{
 		resultsdb.TagPair{
 			Key:   "javascript_duration",
 			Value: "1000foo",
@@ -401,14 +387,14 @@
 	}
 
 	results, err := GetRawResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, `time: unknown unit "foo" in duration "1000foo"`)
+	require.Nil(t, results)
+	require.ErrorContains(t, err, `time: unknown unit "foo" in duration "1000foo"`)
 }
 
 // Tests that a non-boolean may_exonerate value results in an error.
 func TestGetRawResultsBadMayExonerate(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetResultsInputs()
-	client.returnValues[0].Tags = []resultsdb.TagPair{
+	client.ReturnValues["prefix"][0].Tags = []resultsdb.TagPair{
 		resultsdb.TagPair{
 			Key:   "may_exonerate",
 			Value: "yesnt",
@@ -416,8 +402,58 @@
 	}
 
 	results, err := GetRawResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, `strconv.ParseBool: parsing "yesnt": invalid syntax`)
+	require.Nil(t, results)
+	require.ErrorContains(t, err, `strconv.ParseBool: parsing "yesnt": invalid syntax`)
+}
+
+/*******************************************************************************
+ * convertRdbStatus tests
+ ******************************************************************************/
+
+func TestConvertRdbStatus(t *testing.T) {
+	tests := []struct {
+		name  string
+		input string
+		want  result.Status
+	}{
+		{
+			name:  "Unknown",
+			input: "asdf",
+			want:  result.Unknown,
+		},
+		{
+			name:  "Pass",
+			input: "PASS",
+			want:  result.Pass,
+		},
+		{
+			name:  "Failure",
+			input: "FAIL",
+			want:  result.Failure,
+		},
+		{
+			name:  "Crash",
+			input: "CRASH",
+			want:  result.Crash,
+		},
+		{
+			name:  "Abort",
+			input: "ABORT",
+			want:  result.Abort,
+		},
+		{
+			name:  "Skip",
+			input: "SKIP",
+			want:  result.Skip,
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			status := convertRdbStatus(testCase.input)
+			require.Equal(t, testCase.want, status)
+		})
+	}
 }
 
 /*******************************************************************************
@@ -427,49 +463,51 @@
 // Tests that valid results are properly parsed and returned.
 func TestGetRawUnsuppressedFailingResultsHappyPath(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetUnsuppressedFailingResultsInputs()
-	client.unsuppressedFailureReturnValues = []resultsdb.QueryResult{
-		resultsdb.QueryResult{
-			TestId:   "prefix_test_1",
-			Status:   "FAIL",
-			Tags:     []resultsdb.TagPair{},
-			Duration: 1.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_2",
-			Status: "FAIL",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "javascript_duration",
-					Value: "0.5s",
-				},
+	client.UnsuppressedFailureReturnValues = resultsdb.PrefixGroupedQueryResults{
+		"prefix": []resultsdb.QueryResult{
+			resultsdb.QueryResult{
+				TestId:   "prefix_test_1",
+				Status:   "FAIL",
+				Tags:     []resultsdb.TagPair{},
+				Duration: 1.0,
 			},
-			Duration: 2.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_3",
-			Status: "FAIL",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "may_exonerate",
-					Value: "true",
+			resultsdb.QueryResult{
+				TestId: "prefix_test_2",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "javascript_duration",
+						Value: "0.5s",
+					},
 				},
+				Duration: 2.0,
 			},
-			Duration: 3.0,
-		},
-		resultsdb.QueryResult{
-			TestId: "prefix_test_4",
-			Status: "SomeStatus",
-			Tags: []resultsdb.TagPair{
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "linux",
+			resultsdb.QueryResult{
+				TestId: "prefix_test_3",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "may_exonerate",
+						Value: "true",
+					},
 				},
-				resultsdb.TagPair{
-					Key:   "typ_tag",
-					Value: "intel",
-				},
+				Duration: 3.0,
 			},
-			Duration: 4.0,
+			resultsdb.QueryResult{
+				TestId: "prefix_test_4",
+				Status: "SomeStatus",
+				Tags: []resultsdb.TagPair{
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "linux",
+					},
+					resultsdb.TagPair{
+						Key:   "typ_tag",
+						Value: "intel",
+					},
+				},
+				Duration: 4.0,
+			},
 		},
 	}
 
@@ -508,24 +546,24 @@
 	expectedResults["execution_mode"] = expectedResultsList
 
 	results, err := GetRawUnsuppressedFailingResults(ctx, cfg, client, builds)
-	assert.Nil(t, err)
-	assert.Equal(t, results, expectedResults)
+	require.Nil(t, err)
+	require.Equal(t, results, expectedResults)
 }
 
 // Tests that a mismatched prefix results in an error.
 func TestGetRawUnsuppressedFailingResultsPrefixMismatch(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetUnsuppressedFailingResultsInputs()
-	client.unsuppressedFailureReturnValues[0].TestId = "bad_test"
+	client.UnsuppressedFailureReturnValues["prefix"][0].TestId = "bad_test"
 
 	results, err := GetRawUnsuppressedFailingResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
+	require.Nil(t, results)
+	require.ErrorContains(t, err, "Test ID bad_test did not start with prefix even though query should have filtered.")
 }
 
 // Tests that a JavaScript duration that cannot be parsed results in an error.
 func TestGetRawUnsuppressedFailingResultsBadJavaScriptDuration(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetUnsuppressedFailingResultsInputs()
-	client.unsuppressedFailureReturnValues[0].Tags = []resultsdb.TagPair{
+	client.UnsuppressedFailureReturnValues["prefix"][0].Tags = []resultsdb.TagPair{
 		resultsdb.TagPair{
 			Key:   "javascript_duration",
 			Value: "1000foo",
@@ -533,14 +571,14 @@
 	}
 
 	results, err := GetRawUnsuppressedFailingResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, `time: unknown unit "foo" in duration "1000foo"`)
+	require.Nil(t, results)
+	require.ErrorContains(t, err, `time: unknown unit "foo" in duration "1000foo"`)
 }
 
 // Tests that a non-boolean may_exonerate value results in an error.
 func TestGetRawUnsuppressedFailingResultsBadMayExonerate(t *testing.T) {
 	ctx, cfg, client, builds := generateGoodGetUnsuppressedFailingResultsInputs()
-	client.unsuppressedFailureReturnValues[0].Tags = []resultsdb.TagPair{
+	client.UnsuppressedFailureReturnValues["prefix"][0].Tags = []resultsdb.TagPair{
 		resultsdb.TagPair{
 			Key:   "may_exonerate",
 			Value: "yesnt",
@@ -548,8 +586,8 @@
 	}
 
 	results, err := GetRawUnsuppressedFailingResults(ctx, cfg, client, builds)
-	assert.Nil(t, results)
-	assert.ErrorContains(t, err, `strconv.ParseBool: parsing "yesnt": invalid syntax`)
+	require.Nil(t, results)
+	require.ErrorContains(t, err, `strconv.ParseBool: parsing "yesnt": invalid syntax`)
 }
 
 /*******************************************************************************
@@ -601,7 +639,7 @@
 	}
 
 	CleanResults(cfg, &results)
-	assert.Equal(t, results, expectedResults)
+	require.Equal(t, results, expectedResults)
 }
 
 // Tests that duplicate results with the same status always use that status.
@@ -637,7 +675,7 @@
 		}
 
 		CleanResults(cfg, &results)
-		assert.Equal(t, results, expectedResults)
+		require.Equal(t, results, expectedResults)
 	}
 }
 
@@ -672,7 +710,7 @@
 		}
 
 		CleanResults(cfg, &results)
-		assert.Equal(t, results, expectedResults)
+		require.Equal(t, results, expectedResults)
 	}
 }
 
@@ -738,7 +776,443 @@
 			}
 
 			CleanResults(cfg, &results)
-			assert.Equal(t, results, expectedResults)
+			require.Equal(t, results, expectedResults)
 		}
 	}
 }
+
+/*******************************************************************************
+ * CacheRecentUniqueSuppressed tests
+ ******************************************************************************/
+
+func getMultiPrefixConfig() Config {
+	return Config{
+		Tests: []TestConfig{
+			TestConfig{
+				ExecutionMode: result.ExecutionMode("core"),
+				Prefixes:      []string{"core_prefix"},
+			},
+			TestConfig{
+				ExecutionMode: result.ExecutionMode("compat"),
+				Prefixes:      []string{"compat_prefix"},
+			},
+		},
+	}
+}
+
+func getMultiPrefixQueryResults() resultsdb.PrefixGroupedQueryResults {
+	return resultsdb.PrefixGroupedQueryResults{
+		"core_prefix": []resultsdb.QueryResult{
+			{
+				TestId: "core_prefix_test_1",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "tag_1",
+					},
+				},
+				Duration: 1.0,
+			},
+			{
+				TestId: "core_prefix_test_2",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "tag_2",
+					},
+				},
+				Duration: 1.0,
+			},
+		},
+		"compat_prefix": []resultsdb.QueryResult{
+			{
+				TestId: "compat_prefix_test_3",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "tag_3",
+					},
+				},
+				Duration: 1.0,
+			},
+			{
+				TestId: "compat_prefix_test_4",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "tag_4",
+					},
+				},
+				Duration: 1.0,
+			},
+		},
+	}
+}
+
+func getExpectedMultiPrefixResults() result.ResultsByExecutionMode {
+	return result.ResultsByExecutionMode{
+		"core": result.List{
+			{
+				Query:        query.Parse("_test_1"),
+				Tags:         result.NewTags("tag_1"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+			{
+				Query:        query.Parse("_test_2"),
+				Tags:         result.NewTags("tag_2"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+		},
+		"compat": result.List{
+			{
+				Query:        query.Parse("_test_3"),
+				Tags:         result.NewTags("tag_3"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+			{
+				Query:        query.Parse("_test_4"),
+				Tags:         result.NewTags("tag_4"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+		},
+	}
+}
+
+func TestCacheRecentUniqueSuppressedCoreResults_ErrorSurfaced(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	results := getMultiPrefixQueryResults()
+	results["core_prefix"][0].TestId = "bad_test"
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: results,
+	}
+
+	resultsList, err := CacheRecentUniqueSuppressedCoreResults(
+		ctx, cfg, fileutils.ThisDir(), client, wrapper)
+	require.Nil(t, resultsList)
+	require.ErrorContains(t, err,
+		"Test ID bad_test did not start with core_prefix even though query should have filtered.")
+}
+
+func TestCacheRecentUniqueSuppressedCoreResults_Success(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: getMultiPrefixQueryResults(),
+	}
+
+	resultsList, err := CacheRecentUniqueSuppressedCoreResults(
+		ctx, cfg, fileutils.ThisDir(), client, wrapper)
+	require.NoErrorf(t, err, "Error getting results: %v", err)
+	require.Equal(t, resultsList, getExpectedMultiPrefixResults()["core"])
+}
+
+func TestCacheRecentUniqueSuppressedCompatResults_ErrorSurfaced(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	results := getMultiPrefixQueryResults()
+	results["compat_prefix"][0].TestId = "bad_test"
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: results,
+	}
+
+	resultsList, err := CacheRecentUniqueSuppressedCompatResults(
+		ctx, cfg, fileutils.ThisDir(), client, wrapper)
+	require.Nil(t, resultsList)
+	require.ErrorContains(t, err,
+		"Test ID bad_test did not start with compat_prefix even though query should have filtered.")
+}
+
+func TestCacheRecentUniqueSuppressedCompatResults_Success(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: getMultiPrefixQueryResults(),
+	}
+
+	resultsList, err := CacheRecentUniqueSuppressedCompatResults(
+		ctx, cfg, fileutils.ThisDir(), client, wrapper)
+	require.NoErrorf(t, err, "Error getting results: %v", err)
+	require.Equal(t, resultsList, getExpectedMultiPrefixResults()["compat"])
+}
+
+func TestCacheRecentUniqueSuppressedResults_CacheHit(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	// Technically this could hit a race condition if the test runs at the
+	// exact moment the day changes, so that the file is created on a
+	// different day than it is read, but that seems exceedingly unlikely
+	// in practice.
+	year, month, day := time.Now().Date()
+	require.NoError(t, result.SaveWithWrapper(
+		filepath.Join(
+			fileutils.ThisDir(),
+			"expectation-affected-ci-results",
+			fmt.Sprintf("%d-%d-%d.txt", year, month, day)),
+		getExpectedMultiPrefixResults(),
+		wrapper))
+
+	client := resultsdb.MockBigQueryClient{}
+
+	resultsByExecutionMode, err := CacheRecentUniqueSuppressedResults(
+		ctx, cfg, fileutils.ThisDir(), client, wrapper)
+	require.NoErrorf(t, err, "Error getting results: %v", err)
+	require.Equal(t, getExpectedMultiPrefixResults(), resultsByExecutionMode)
+}
+
+func TestCacheRecentUniqueSuppressedResults_CacheSkippedIfUnspecified(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	modifiedResults := getExpectedMultiPrefixResults()
+	modifiedResults["core"] = append(modifiedResults["core"], result.Result{
+		Query:        query.Parse("_test_5"),
+		Tags:         result.NewTags("tag_5"),
+		Status:       result.Pass,
+		Duration:     0,
+		MayExonerate: false,
+	})
+
+	year, month, day := time.Now().Date()
+	require.NoError(t, result.SaveWithWrapper(
+		filepath.Join(
+			"expectation-affected-ci-results",
+			fmt.Sprintf("%d-%d-%d.txt", year, month, day)),
+		modifiedResults,
+		wrapper))
+
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: getMultiPrefixQueryResults(),
+	}
+
+	resultsByExecutionMode, err := CacheRecentUniqueSuppressedResults(
+		ctx, cfg, "", client, wrapper)
+	require.NoErrorf(t, err, "Error getting results: %v", err)
+	require.Equal(t, getExpectedMultiPrefixResults(), resultsByExecutionMode)
+}
+
+func TestCacheRecentUniqueSuppressedResults_GetResultsError(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	modifiedQueryResults := getMultiPrefixQueryResults()
+	modifiedQueryResults["core_prefix"][0].Tags[0].Key = "non_typ_tag"
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: modifiedQueryResults,
+	}
+
+	resultsByExecutionMode, err := CacheRecentUniqueSuppressedResults(
+		ctx, cfg, fileutils.ThisDir(), client, wrapper)
+	require.Nil(t, resultsByExecutionMode)
+	require.ErrorContains(t, err,
+		"Got tag key non_typ_tag when only typ_tag should be present")
+}
+
+func TestCacheRecentUniqueSuppressedResults_Success(t *testing.T) {
+	ctx := context.Background()
+	cfg := getMultiPrefixConfig()
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: getMultiPrefixQueryResults(),
+	}
+
+	resultsByExecutionMode, err := CacheRecentUniqueSuppressedResults(
+		ctx, cfg, fileutils.ThisDir(), client, wrapper)
+	require.NoErrorf(t, err, "Error getting results: %v", err)
+	require.Equal(t, getExpectedMultiPrefixResults(), resultsByExecutionMode)
+}
+
+/*******************************************************************************
+ * getRecentUniqueSuppressedResults tests
+ ******************************************************************************/
+
+func TestGetRecentUniqueSuppressedResults_PrefixMismatch(t *testing.T) {
+	ctx := context.Background()
+
+	cfg := Config{
+		Tests: []TestConfig{
+			TestConfig{
+				ExecutionMode: result.ExecutionMode("execution_mode"),
+				Prefixes:      []string{"prefix"},
+			},
+		},
+	}
+
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: resultsdb.PrefixGroupedQueryResults{
+			"prefix": []resultsdb.QueryResult{
+				{
+					TestId:   "bad_test",
+					Status:   "FAIL",
+					Tags:     []resultsdb.TagPair{},
+					Duration: 1.0,
+				},
+			},
+		},
+	}
+
+	resultsByExecutionMode, err := getRecentUniqueSuppressedResults(ctx, cfg, client)
+	require.Nil(t, resultsByExecutionMode)
+	require.ErrorContains(t, err,
+		"Test ID bad_test did not start with prefix even though query should have filtered.")
+}
+
+func TestGetRecentUniqueSuppressedResults_NonTypTag(t *testing.T) {
+	ctx := context.Background()
+
+	cfg := Config{
+		Tests: []TestConfig{
+			TestConfig{
+				ExecutionMode: result.ExecutionMode("execution_mode"),
+				Prefixes:      []string{"prefix"},
+			},
+		},
+	}
+
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: resultsdb.PrefixGroupedQueryResults{
+			"prefix": []resultsdb.QueryResult{
+				{
+					TestId: "prefix_test",
+					Status: "FAIL",
+					Tags: []resultsdb.TagPair{
+						{
+							Key:   "non_typ_tag",
+							Value: "value",
+						},
+					},
+					Duration: 1.0,
+				},
+			},
+		},
+	}
+
+	resultsByExecutionMode, err := getRecentUniqueSuppressedResults(ctx, cfg, client)
+	require.Nil(t, resultsByExecutionMode)
+	require.ErrorContains(t, err,
+		"Got tag key non_typ_tag when only typ_tag should be present")
+}
+
+func TestGetRecentUniqueSuppressedResults_Success(t *testing.T) {
+	ctx := context.Background()
+
+	cfg := Config{
+		Tests: []TestConfig{
+			TestConfig{
+				ExecutionMode: result.ExecutionMode("execution_mode"),
+				Prefixes:      []string{"prefix"},
+			},
+		},
+	}
+
+	client := resultsdb.MockBigQueryClient{
+		RecentUniqueSuppressedReturnValues: resultsdb.PrefixGroupedQueryResults{
+			"prefix": []resultsdb.QueryResult{
+				{
+					TestId: "prefix_test_1",
+					Status: "FAIL",
+					Tags: []resultsdb.TagPair{
+						{
+							Key:   "typ_tag",
+							Value: "tag_1",
+						},
+					},
+					Duration: 1.0,
+				},
+				{
+					TestId: "prefix_test_2",
+					Status: "FAIL",
+					Tags: []resultsdb.TagPair{
+						{
+							Key:   "typ_tag",
+							Value: "tag_1",
+						},
+					},
+					Duration: 1.0,
+				},
+				{
+					TestId: "prefix_test_1",
+					Status: "FAIL",
+					Tags: []resultsdb.TagPair{
+						{
+							Key:   "typ_tag",
+							Value: "tag_2",
+						},
+					},
+					Duration: 1.0,
+				},
+				{
+					TestId: "prefix_test_2",
+					Status: "FAIL",
+					Tags: []resultsdb.TagPair{
+						{
+							Key:   "typ_tag",
+							Value: "tag_2",
+						},
+					},
+					Duration: 1.0,
+				},
+			},
+		},
+	}
+
+	expectedResults := result.ResultsByExecutionMode{
+		"execution_mode": result.List{
+			{
+				Query:        query.Parse("_test_1"),
+				Tags:         result.NewTags("tag_1"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+			{
+				Query:        query.Parse("_test_1"),
+				Tags:         result.NewTags("tag_2"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+			{
+				Query:        query.Parse("_test_2"),
+				Tags:         result.NewTags("tag_1"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+			{
+				Query:        query.Parse("_test_2"),
+				Tags:         result.NewTags("tag_2"),
+				Status:       result.Pass,
+				Duration:     0,
+				MayExonerate: false,
+			},
+		},
+	}
+
+	resultsByExecutionMode, err := getRecentUniqueSuppressedResults(ctx, cfg, client)
+	require.NoErrorf(t, err, "Got error getting results: %v", err)
+	require.Equal(t, resultsByExecutionMode, expectedResults)
+}
diff --git a/tools/src/cmd/cts/expectationcoverage/expectationcoverage.go b/tools/src/cmd/cts/expectationcoverage/expectationcoverage.go
new file mode 100644
index 0000000..dea86d2
--- /dev/null
+++ b/tools/src/cmd/cts/expectationcoverage/expectationcoverage.go
@@ -0,0 +1,480 @@
+// Copyright 2025 The Dawn & Tint Authors
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its
+//    contributors may be used to endorse or promote products derived from
+//    this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package expectationcoverage
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"slices"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+	"dawn.googlesource.com/dawn/tools/src/cts/expectations"
+	"dawn.googlesource.com/dawn/tools/src/cts/result"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
+)
+
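+// maxResultsPerWorker caps how many results a single sub-worker goroutine
+// scans per chunk; processChunk spawns len(results)/maxResultsPerWorker + 1
+// sub-workers to split large result lists across goroutines.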
+const maxResultsPerWorker = 200000
+
+func init() {
+	common.Register(&cmd{})
+}
+
+type arrayFlags []string
+
+func (i *arrayFlags) String() string {
+	return strings.Join((*i), " ")
+}
+
+func (i *arrayFlags) Set(value string) error {
+	*i = append(*i, value)
+	return nil
+}
+
+type cmd struct {
+	flags struct {
+		maxOutput               int
+		checkCompatExpectations bool
+		individualExpectations  bool
+		ignoreSkipExpectations  bool
+		cacheDir                string
+		verbose                 bool
+	}
+}
+
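+// ChunkWithCounter pairs a chunk from the expectations file with the number
+// of unique suppressed test results its expectations apply to.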
+type ChunkWithCounter struct {
+	Chunk *expectations.Chunk
+	Count int
+}
+
+func (cmd) Name() string {
+	return "expectation-coverage"
+}
+
+func (cmd) Desc() string {
+	return "checks how much test coverage is lost due to expectations"
+}
+
+func (c *cmd) RegisterFlags(ctx context.Context, cfg common.Config) ([]string, error) {
+	flag.IntVar(
+		&c.flags.maxOutput,
+		"max-output",
+		25,
+		"limit output to the top X expectation groups, set to 0 for unlimited")
+	flag.BoolVar(
+		&c.flags.checkCompatExpectations,
+		"check-compat-expectations",
+		false,
+		"check compat expectations instead of regular expectations")
+	flag.BoolVar(
+		&c.flags.individualExpectations,
+		"check-individual-expectations",
+		false,
+		"check individual expectations instead of groups")
+	flag.BoolVar(
+		&c.flags.ignoreSkipExpectations,
+		"ignore-skip-expectations",
+		false,
+		"do not check the impact of Skip expectations")
+	flag.StringVar(
+		&c.flags.cacheDir,
+		"cache",
+		common.DefaultCacheDir,
+		"path to the results cache")
+	flag.BoolVar(
+		&c.flags.verbose,
+		"verbose",
+		false,
+		"emit additional logging")
+	return nil, nil
+}
+
+func (c *cmd) Run(ctx context.Context, cfg common.Config) error {
+	individualExpectations := c.flags.individualExpectations
+
+	// Parse expectation file
+	fmt.Println("Getting trimmed expectation file content")
+	startTime := time.Now()
+	var expectationPath string
+	if c.flags.checkCompatExpectations {
+		expectationPath = common.DefaultCompatExpectationsPath()
+	} else {
+		expectationPath = common.DefaultExpectationsPath()
+	}
+	content, err := getTrimmedContent(expectationPath,
+		individualExpectations,
+		c.flags.ignoreSkipExpectations,
+		c.flags.verbose,
+		cfg.OsWrapper)
+	if err != nil {
+		return err
+	}
+	if c.flags.verbose {
+		fmt.Printf("Took %s\n", time.Now().Sub(startTime).String())
+		fmt.Printf("Got %d chunks/individual expectations\n", len(content.Chunks))
+	}
+
+	// Get ResultDB data
+	fmt.Println("Getting results")
+	startTime = time.Now()
+	var uniqueResults result.List
+	if c.flags.checkCompatExpectations {
+		uniqueResults, err = common.CacheRecentUniqueSuppressedCompatResults(
+			ctx, cfg, c.flags.cacheDir, cfg.Querier, cfg.OsWrapper)
+	} else {
+		uniqueResults, err = common.CacheRecentUniqueSuppressedCoreResults(
+			ctx, cfg, c.flags.cacheDir, cfg.Querier, cfg.OsWrapper)
+	}
+	if err != nil {
+		return err
+	}
+	if c.flags.verbose {
+		fmt.Printf("Took %s\n", time.Now().Sub(startTime).String())
+		fmt.Printf("Got %d unique results\n", len(uniqueResults))
+	}
+
+	// Process ResultDB data
+	fmt.Println("Processing results")
+	startTime = time.Now()
+	orderedChunks := getChunksOrderedByCoverageLoss(&content, &uniqueResults)
+	if c.flags.verbose {
+		fmt.Printf("Took %s\n", time.Now().Sub(startTime).String())
+	}
+
+	// Output results.
+	outputResults(orderedChunks, c.flags.maxOutput, individualExpectations, os.Stdout)
+
+	return nil
+}
+
+// getTrimmedContent returns a Content with certain Chunks removed or modified
+// based on the provided arguments.
+func getTrimmedContent(
+	expectationPath string,
+	individualExpectations bool,
+	ignoreSkipExpectations bool,
+	verbose bool,
+	fsReader oswrapper.FilesystemReader) (expectations.Content, error) {
+	rawFileContentBytes, err := fsReader.ReadFile(expectationPath)
+	if err != nil {
+		return expectations.Content{}, err
+	}
+	rawFileContent := string(rawFileContentBytes)
+
+	content, err := expectations.Parse(expectationPath, rawFileContent)
+	if err != nil {
+		return expectations.Content{}, err
+	}
+
+	// Remove any permanent Skip expectations since they are never expected to
+	// be removed from the file. Also remove any pure comment chunks.
+	permanentSkipContent, err := getPermanentSkipContent(expectationPath, rawFileContent)
+	if err != nil {
+		return expectations.Content{}, err
+	}
+
+	// Get a copy of all relevant chunks.
+	var trimmedChunks []expectations.Chunk
+	for _, chunk := range content.Chunks {
+		if chunk.IsCommentOnly() {
+			continue
+		}
+		if chunk.ContainedWithinList(&permanentSkipContent.Chunks) {
+			continue
+		}
+
+		var maybeSkiplessChunk expectations.Chunk
+		if ignoreSkipExpectations {
+			maybeSkiplessChunk = expectations.Chunk{
+				Comments:     chunk.Comments,
+				Expectations: expectations.Expectations{},
+			}
+			for _, e := range chunk.Expectations {
+				if slices.Contains(e.Status, string(result.Skip)) {
+					continue
+				}
+				maybeSkiplessChunk.Expectations = append(maybeSkiplessChunk.Expectations, e)
+			}
+		} else {
+			maybeSkiplessChunk = chunk
+		}
+
+		if maybeSkiplessChunk.IsCommentOnly() {
+			continue
+		}
+		trimmedChunks = append(trimmedChunks, maybeSkiplessChunk)
+	}
+
+	// Split chunks into individual expectations if requested.
+	var maybeSplitChunks []expectations.Chunk
+	if individualExpectations {
+		for _, chunk := range trimmedChunks {
+			for _, e := range chunk.Expectations {
+				individualChunk := expectations.Chunk{
+					Comments:     chunk.Comments,
+					Expectations: expectations.Expectations{e},
+				}
+				maybeSplitChunks = append(maybeSplitChunks, individualChunk)
+			}
+		}
+	} else {
+		maybeSplitChunks = trimmedChunks
+	}
+	content.Chunks = maybeSplitChunks
+
+	return content, nil
+}
+
+// getPermanentSkipContent returns a Content only containing Chunks for
+// permanent Skip expectations.
+func getPermanentSkipContent(
+	expectationPath string,
+	rawFileContent string) (expectations.Content, error) {
+	// The standard format for expectation files is:
+	//  - Permanent Skip expectations
+	//  - Temporary Skip expectations
+	//  - Triaged flakes/failure expectations
+	//  - Untriaged auto-generated expectations
+	// so assume that everything before the temporary Skip expectation
+	// section consists of permanent Skip expectations.
+	targetLine := "# Temporary Skip Expectations"
+	var keptLines []string
+	brokeEarly := false
+	for _, line := range strings.Split(rawFileContent, "\n") {
+		if strings.HasPrefix(line, targetLine) {
+			brokeEarly = true
+			break
+		}
+		keptLines = append(keptLines, line)
+	}
+
+	if !brokeEarly {
+		fmt.Println("Unable to find permanent Skip expectations, assuming none exist")
+		return expectations.Content{}, nil
+	}
+
+	permanentSkipRawContent := strings.Join(keptLines, "\n")
+	permanentSkipContent, err := expectations.Parse(expectationPath, permanentSkipRawContent)
+	if err != nil {
+		return expectations.Content{}, err
+	}
+
+	// Omit any pure comment chunks.
+	var trimmedChunks []expectations.Chunk
+	for _, chunk := range permanentSkipContent.Chunks {
+		if !chunk.IsCommentOnly() {
+			trimmedChunks = append(trimmedChunks, chunk)
+		}
+	}
+	permanentSkipContent.Chunks = trimmedChunks
+
+	return permanentSkipContent, nil
+}
+
+// math.Min only works on floats, and the built-in min is not available until
+// Go 1.21.
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// getChunksOrderedByCoverageLoss returns the Chunks contained within 'content'
+// ordered by how many results from 'uniqueResults' are affected by expectations
+// within the Chunk.
+//
+// Under the hood, actual processing is farmed out to goroutines to better
+// handle large amounts of results.
+func getChunksOrderedByCoverageLoss(
+	content *expectations.Content,
+	uniqueResults *result.List) []ChunkWithCounter {
+
+	affectedChunks := make([]ChunkWithCounter, len(content.Chunks))
+	for i := range content.Chunks {
+		affectedChunks[i].Chunk = &content.Chunks[i]
+	}
+
+	// Create a goroutine pool. Each worker pulls a single ChunkWithCounter from
+	// the queue at a time and handles all of the processing for it.
+	numWorkers := minInt(len(affectedChunks), runtime.NumCPU())
+	workQueue := make(chan *ChunkWithCounter)
+	waitGroup := new(sync.WaitGroup)
+	waitGroup.Add(numWorkers)
+	for i := 0; i < numWorkers; i++ {
+		go processChunk(workQueue, uniqueResults, waitGroup)
+	}
+
+	// Each of the ChunkWithCounter will have its Count filled in place by a
+	// worker when picked up.
+	for i := range affectedChunks {
+		workQueue <- &affectedChunks[i]
+	}
+	close(workQueue)
+	waitGroup.Wait()
+
+	// Sort based on the final tally.
+	sortFunc := func(i, j int) bool {
+		return affectedChunks[i].Count > affectedChunks[j].Count
+	}
+	sort.SliceStable(affectedChunks, sortFunc)
+
+	return affectedChunks
+}
+
+// processChunk counts how many Results in 'uniqueResults' apply to
+// Expectations in each ChunkWithCounter received via 'workQueue'. The function
+// will continue to pull work from 'workQueue' until it is closed and empty, at
+// which point the function will exit and signal to 'waitGroup' that it is
+// finished.
+//
+// Under the hood, actual processing is farmed out to additional goroutines to
+// better handle large amounts of results.
+func processChunk(
+	workQueue chan *ChunkWithCounter,
+	uniqueResults *result.List,
+	waitGroup *sync.WaitGroup) {
+
+	defer waitGroup.Done()
+
+	// Create a pool of workers to handle processing of subsets of results. Each
+	// worker handles every ith result and returns the number of those results
+	// that applied to an expectation within the given ChunkWithCounter.
+	numWorkers := len(*uniqueResults)/maxResultsPerWorker + 1
+	subWorkQueues := []chan *ChunkWithCounter{}
+	subResultQueues := []chan int{}
+	for i := 0; i < numWorkers; i++ {
+		subWorkQueues = append(subWorkQueues, make(chan *ChunkWithCounter))
+		subResultQueues = append(subResultQueues, make(chan int))
+		go processChunkForResultSubset(
+			subWorkQueues[i],
+			subResultQueues[i],
+			uniqueResults,
+			i,
+			numWorkers)
+	}
+
+	for {
+		chunkWithCounter, queueOpen := <-workQueue
+		if !queueOpen {
+			for _, swq := range subWorkQueues {
+				close(swq)
+			}
+			return
+		}
+
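+		// Fan the chunk out to every sub-worker, then sum the partial counts
+		// they send back to produce the chunk's total.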
+		for i := 0; i < numWorkers; i++ {
+			subWorkQueues[i] <- chunkWithCounter
+		}
+		for i := 0; i < numWorkers; i++ {
+			chunkWithCounter.Count += <-subResultQueues[i]
+		}
+	}
+}
+
+// processChunkForResultSubset counts how many Results in 'uniqueResults' apply
+// to Expectations in each ChunkWithCounter received via 'workQueue'. Only
+// every 'numWorkers'-th element of 'uniqueResults' is processed, starting at
+// the 'workerNumber'-th element. The count for each ChunkWithCounter is
+// returned via 'resultQueue' in the same order that the work was provided.
+//
+// The function will continue to pull work from 'workQueue' until it is closed
+// and empty.
+func processChunkForResultSubset(
+	workQueue chan *ChunkWithCounter,
+	resultQueue chan int,
+	uniqueResults *result.List,
+	workerNumber, numWorkers int) {
+
+	for {
+		chunkWithCounter, queueOpen := <-workQueue
+		if !queueOpen {
+			return
+		}
+
+		numApplicableResults := 0
+		for i := workerNumber; i < len(*uniqueResults); i += numWorkers {
+			r := (*uniqueResults)[i]
+			for _, expectation := range chunkWithCounter.Chunk.Expectations {
+				if expectation.AppliesToResult(r) {
+					numApplicableResults++
+					break
+				}
+			}
+		}
+
+		resultQueue <- numApplicableResults
+	}
+}
+
+func outputResults(
+	orderedChunks []ChunkWithCounter,
+	maxChunksToOutput int,
+	individualExpectations bool,
+	writer io.Writer) {
+
+	var expectationPrefix, chunkType string
+	if individualExpectations {
+		chunkType = "individual expectation"
+		expectationPrefix = "Expectation: "
+	} else {
+		chunkType = "chunk"
+		expectationPrefix = "First expectation: "
+	}
+
+	if maxChunksToOutput == 0 {
+		fmt.Fprintln(writer, "\nComplete output:")
+	} else {
+		fmt.Fprintf(
+			writer,
+			"\nTop %d %ss contributing to test coverage loss:\n",
+			maxChunksToOutput,
+			chunkType)
+	}
+
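+	// Each surfaced chunk prints as a small block: the chunk's comment, its
+	// first (or only) expectation, that expectation's line number in the
+	// expectations file, and how many test results it affected.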
+	for i, chunkWithCounter := range orderedChunks {
+		if maxChunksToOutput != 0 && i == maxChunksToOutput {
+			break
+		}
+
+		chunk := chunkWithCounter.Chunk
+		firstExpectation := chunk.Expectations[0]
+		fmt.Fprintln(writer, "")
+		fmt.Fprintf(writer, "Comment: %s\n", strings.Join(chunk.Comments, "\n"))
+		fmt.Fprintf(writer, "%s%s\n", expectationPrefix, firstExpectation.AsExpectationFileString())
+		fmt.Fprintf(writer, "Line number: %d\n", firstExpectation.Line)
+		fmt.Fprintf(writer, "Affected %d test results\n", chunkWithCounter.Count)
+	}
+}
diff --git a/tools/src/cmd/cts/expectationcoverage/expectationcoverage_test.go b/tools/src/cmd/cts/expectationcoverage/expectationcoverage_test.go
new file mode 100644
index 0000000..e98f68a
--- /dev/null
+++ b/tools/src/cmd/cts/expectationcoverage/expectationcoverage_test.go
@@ -0,0 +1,1017 @@
+// Copyright 2025 The Dawn & Tint Authors
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its
+//    contributors may be used to endorse or promote products derived from
+//    this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package expectationcoverage
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"reflect"
+	"testing"
+
+	"dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
+	"dawn.googlesource.com/dawn/tools/src/cts/expectations"
+	"dawn.googlesource.com/dawn/tools/src/cts/query"
+	"dawn.googlesource.com/dawn/tools/src/cts/result"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
+	"dawn.googlesource.com/dawn/tools/src/resultsdb"
+
+	"github.com/stretchr/testify/require"
+)
+
+/*******************************************************************************
+ * Run tests
+ ******************************************************************************/
+
+func createConfig(wrapper oswrapper.OSWrapper, client resultsdb.Querier) common.Config {
+	return common.Config{
+		Tests: []common.TestConfig{
+			{
+				ExecutionMode: "core",
+				Prefixes:      []string{"core_prefix"},
+			},
+			{
+				ExecutionMode: "compat",
+				Prefixes:      []string{"compat_prefix"},
+			},
+		},
+		OsWrapper: wrapper,
+		Querier:   client,
+	}
+}
+
+func createRunSampleQueryResults() resultsdb.PrefixGroupedQueryResults {
+	return resultsdb.PrefixGroupedQueryResults{
+		"core_prefix": []resultsdb.QueryResult{
+			{
+				TestId: "core_prefix_foo",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "linux",
+					},
+				},
+			},
+			{
+				TestId: "core_prefix_bar",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "mac",
+					},
+				},
+			},
+		},
+		"compat_prefix": []resultsdb.QueryResult{
+			{
+				TestId: "compat_prefix_bar",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "linux",
+					},
+				},
+			},
+			{
+				TestId: "compat_prefix_foo",
+				Status: "FAIL",
+				Tags: []resultsdb.TagPair{
+					{
+						Key:   "typ_tag",
+						Value: "win10",
+					},
+				},
+			},
+		},
+	}
+}
+
+func TestRun_GetTrimmedContentFailure(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	client := resultsdb.MockBigQueryClient{}
+
+	expectationFileContent := `# BEGIN TAG HEADER
+# OS
+# tags: [ android linux mac win10 ]
+# END TAG HEADER
+
+crbug.com/0000 [ android ] foo
+`
+	wrapper.WriteFile(common.DefaultExpectationsPath(), []byte(expectationFileContent), 0o700)
+
+	ctx := context.Background()
+	cfg := createConfig(wrapper, client)
+	c := cmd{}
+	err := c.Run(ctx, cfg)
+	require.ErrorContains(t, err, "/expectations.txt:6:31 error: expected status")
+}
+
+func TestRun_GetResultsFailure(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	client := resultsdb.MockBigQueryClient{}
+
+	expectationFileContent := getExpectationContentForTrimmedContentTest()
+	wrapper.WriteFile(common.DefaultExpectationsPath(), []byte(expectationFileContent), 0o700)
+
+	client.RecentUniqueSuppressedReturnValues = resultsdb.PrefixGroupedQueryResults{
+		"core_prefix": []resultsdb.QueryResult{
+			{
+				TestId: "invalid_prefix_test",
+				Status: "FAIL",
+			},
+		},
+	}
+
+	ctx := context.Background()
+	cfg := createConfig(wrapper, client)
+	c := cmd{}
+	err := c.Run(ctx, cfg)
+	require.ErrorContains(t, err,
+		"Test ID invalid_prefix_test did not start with core_prefix even though query should have filtered.")
+}
+
+func TestRun_SuccessCore(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	client := resultsdb.MockBigQueryClient{}
+
+	expectationFileContent := getExpectationContentForTrimmedContentTest()
+	wrapper.WriteFile(common.DefaultExpectationsPath(), []byte(expectationFileContent), 0o700)
+
+	client.RecentUniqueSuppressedReturnValues = createRunSampleQueryResults()
+
+	ctx := context.Background()
+	cfg := createConfig(wrapper, client)
+	c := cmd{}
+	err := c.Run(ctx, cfg)
+	require.NoErrorf(t, err, "Got error: %v", err)
+}
+
+func TestRun_SuccessCompat(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	client := resultsdb.MockBigQueryClient{}
+
+	expectationFileContent := getExpectationContentForTrimmedContentTest()
+	wrapper.WriteFile(common.DefaultCompatExpectationsPath(), []byte(expectationFileContent), 0o700)
+
+	client.RecentUniqueSuppressedReturnValues = createRunSampleQueryResults()
+
+	ctx := context.Background()
+	cfg := createConfig(wrapper, client)
+	c := cmd{}
+	c.flags.checkCompatExpectations = true
+	err := c.Run(ctx, cfg)
+	require.NoErrorf(t, err, "Got error: %v", err)
+}
+
+/*******************************************************************************
+ * getTrimmedContent tests
+ ******************************************************************************/
+
+func TestGetTrimmedContent_NonExistentFile(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	content, err := getTrimmedContent(
+		"/expectations.txt",
+		/*individualExpectations=*/ false,
+		/*ignoreSkipExpectations=*/ false,
+		/*verbose=*/ false,
+		wrapper)
+
+	require.Equal(t, expectations.Content{}, content)
+	require.ErrorContains(t, err, "open /expectations.txt: file does not exist")
+}
+
+func TestGetTrimmedContent_InvalidFile(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	expectationFileContent := `# BEGIN TAG HEADER
+# OS
+# tags: [ android linux mac win10 ]
+# END TAG HEADER
+
+crbug.com/0000 [ android ] foo
+`
+	wrapper.WriteFile("/expectations.txt", []byte(expectationFileContent), 0o700)
+
+	content, err := getTrimmedContent(
+		"/expectations.txt",
+		/*individualExpectations=*/ false,
+		/*ignoreSkipExpectations=*/ false,
+		/*verbose=*/ false,
+		wrapper)
+
+	require.Equal(t, expectations.Content{}, content)
+	require.ErrorContains(t, err, "/expectations.txt:6:31 error: expected status")
+}
+
+// The error path in getPermanentSkipContent is not tested since there is no
+// way to trigger an error there without also triggering a parse error in
+// getTrimmedContent first.
+
+func getExpectationContentForTrimmedContentTest() string {
+	return `# BEGIN TAG HEADER
+# OS
+# tags: [ android linux mac win10 ]
+# END TAG HEADER
+
+################################################################################
+# Permanent Skip Expectations
+################################################################################
+
+# Permanent skips
+crbug.com/0000 [ android ] foo [ Skip ]
+crbug.com/0000 [ android ] bar [ Skip ]
+
+################################################################################
+# Temporary Skip Expectations
+################################################################################
+
+# Temporary skips
+crbug.com/0000 [ linux ] foo [ Skip ]
+crbug.com/0000 [ linux ] bar [ Skip ]
+
+################################################################################
+# Triaged/Manually Added Failures
+################################################################################
+
+# Failures 1
+crbug.com/0000 [ mac ] foo [ Failure ]
+crbug.com/0000 [ mac ] bar [ RetryOnFailure ]
+
+# Failures 2
+# Second line
+crbug.com/0000 [ win10 ] foo [ Failure ]
+crbug.com/0000 [ win10 ] bar [ Failure ]
+`
+}
+
+func TestGetTrimmedContent_GroupedWithSkips(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	expectationFileContent := getExpectationContentForTrimmedContentTest()
+	wrapper.WriteFile("/expectations.txt", []byte(expectationFileContent), 0o700)
+
+	content, err := getTrimmedContent(
+		"/expectations.txt",
+		/*individualExpectations=*/ false,
+		/*ignoreSkipExpectations=*/ false,
+		/*verbose=*/ false,
+		wrapper)
+
+	expectedChunks := []expectations.Chunk{
+		{
+			Comments: []string{"# Temporary skips"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   19,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("linux"),
+					Query:  "foo",
+					Status: []string{"Skip"},
+				},
+				{
+					Line:   20,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("linux"),
+					Query:  "bar",
+					Status: []string{"Skip"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 1"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   27,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+				{
+					Line:   28,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "bar",
+					Status: []string{"RetryOnFailure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 2", "# Second line"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   32,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+				{
+					Line:   33,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "bar",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+	}
+
+	require.NoErrorf(t, err, "Got error getting trimmed content: %v", err)
+	require.Equal(t, expectedChunks, content.Chunks)
+}
+
+func TestGetTrimmedContent_GroupedWithoutSkips(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	expectationFileContent := getExpectationContentForTrimmedContentTest()
+	wrapper.WriteFile("/expectations.txt", []byte(expectationFileContent), 0o700)
+
+	content, err := getTrimmedContent(
+		"/expectations.txt",
+		/*individualExpectations=*/ false,
+		/*ignoreSkipExpectations=*/ true,
+		/*verbose=*/ false,
+		wrapper)
+
+	expectedChunks := []expectations.Chunk{
+		{
+			Comments: []string{"# Failures 1"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   27,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+				{
+					Line:   28,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "bar",
+					Status: []string{"RetryOnFailure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 2", "# Second line"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   32,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+				{
+					Line:   33,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "bar",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+	}
+
+	require.NoErrorf(t, err, "Got error getting trimmed content: %v", err)
+	require.Equal(t, expectedChunks, content.Chunks)
+}
+
+func TestGetTrimmedContent_IndividualWithSkips(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	expectationFileContent := getExpectationContentForTrimmedContentTest()
+	wrapper.WriteFile("/expectations.txt", []byte(expectationFileContent), 0o700)
+
+	content, err := getTrimmedContent(
+		"/expectations.txt",
+		/*individualExpectations=*/ true,
+		/*ignoreSkipExpectations=*/ false,
+		/*verbose=*/ false,
+		wrapper)
+
+	expectedChunks := []expectations.Chunk{
+		{
+			Comments: []string{"# Temporary skips"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   19,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("linux"),
+					Query:  "foo",
+					Status: []string{"Skip"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Temporary skips"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   20,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("linux"),
+					Query:  "bar",
+					Status: []string{"Skip"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 1"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   27,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 1"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   28,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "bar",
+					Status: []string{"RetryOnFailure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 2", "# Second line"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   32,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 2", "# Second line"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   33,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "bar",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+	}
+
+	require.NoErrorf(t, err, "Got error getting trimmed content: %v", err)
+	require.Equal(t, expectedChunks, content.Chunks)
+}
+
+func TestGetTrimmedContent_IndividualWithoutSkips(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+	expectationFileContent := getExpectationContentForTrimmedContentTest()
+	wrapper.WriteFile("/expectations.txt", []byte(expectationFileContent), 0o700)
+
+	content, err := getTrimmedContent(
+		"/expectations.txt",
+		/*individualExpectations=*/ true,
+		/*ignoreSkipExpectations=*/ true,
+		/*verbose=*/ false,
+		wrapper)
+
+	expectedChunks := []expectations.Chunk{
+		{
+			Comments: []string{"# Failures 1"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   27,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 1"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   28,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("mac"),
+					Query:  "bar",
+					Status: []string{"RetryOnFailure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 2", "# Second line"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   32,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "foo",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Failures 2", "# Second line"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   33,
+					Bug:    "crbug.com/0000",
+					Tags:   result.NewTags("win10"),
+					Query:  "bar",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+	}
+
+	require.NoErrorf(t, err, "Got error getting trimmed content: %v", err)
+	require.Equal(t, expectedChunks, content.Chunks)
+}
+
+/*******************************************************************************
+ * getPermanentSkipContent tests
+ ******************************************************************************/
+
+func TestGetPermanentSkipContent(t *testing.T) {
+	tests := []struct {
+		name       string
+		content    string
+		want       []expectations.Chunk
+		wantErr    bool
+		wantErrMsg string
+	}{
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Success",
+			content: `# BEGIN TAG HEADER
+# OS
+# tags: [ android linux mac win10 ]
+# END TAG HEADER
+
+# Comment
+crbug.com/1234 [ android ] foo [ Skip ]
+
+################################################################################
+# Temporary Skip Expectations
+################################################################################
+
+crbug.com/2345 [ linux ] bar [ Skip ]
+`,
+			want: []expectations.Chunk{
+				{
+					Comments: []string{
+						"# Comment",
+					},
+					Expectations: []expectations.Expectation{
+						{
+							Line:  7,
+							Bug:   "crbug.com/1234",
+							Tags:  result.NewTags("android"),
+							Query: "foo",
+							Status: []string{
+								"Skip",
+							},
+						},
+					},
+				},
+			},
+			wantErr:    false,
+			wantErrMsg: "",
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "No temporary skip header",
+			content: `# BEGIN TAG HEADER
+# OS
+# tags: [ android linux mac win10 ]
+# END TAG HEADER
+
+# Comment
+crbug.com/1234 [ android ] foo [ Skip ]
+
+crbug.com/2345 [ linux ] bar [ Skip ]
+`,
+			want:       nil,
+			wantErr:    false,
+			wantErrMsg: "",
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Parse error",
+			content: `# BEGIN TAG HEADER
+# OS
+# tags: [ android linux mac win10 ]
+# END TAG HEADER
+
+# Comment
+crbug.com/1234 [ android ] foo
+
+################################################################################
+# Temporary Skip Expectations
+################################################################################
+
+crbug.com/2345 [ linux ] bar [ Skip ]
+`,
+			want:       nil,
+			wantErr:    true,
+			wantErrMsg: "expectations.txt:7:31 error: expected status",
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			content, err := getPermanentSkipContent("expectations.txt", testCase.content)
+			if (err != nil) != testCase.wantErr {
+				t.Errorf("getPermanentSkipContent() error = %v, wantErr %v", err, testCase.wantErr)
+				return
+			}
+			if testCase.wantErr && err.Error() != testCase.wantErrMsg {
+				t.Errorf("getPermanentSkipContent() errorMsg = %v, wantErrMsg %v", err.Error(), testCase.wantErrMsg)
+				return
+			}
+			if !reflect.DeepEqual(content.Chunks, testCase.want) {
+				t.Errorf("getPermanentSkipContent() = %v, want %v", content.Chunks, testCase.want)
+				return
+			}
+		})
+	}
+}
+
+/*******************************************************************************
+ * getChunksOrderedByCoverageLoss tests
+ ******************************************************************************/
+
+func TestGetChunksOrderedByCoverageLoss_FewResults(t *testing.T) {
+	content := expectations.Content{
+		Chunks: []expectations.Chunk{
+			// Should not apply to anything.
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "fake_test",
+						Tags:  result.NewTags("android"),
+					},
+					{
+						Query: "real_test",
+						Tags:  result.NewTags("fake_tag"),
+					},
+				},
+			},
+			// Should apply to everything due to the first expectation.
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "*",
+						Tags:  result.NewTags(),
+					},
+					{
+						Query: "fake_test",
+						Tags:  result.NewTags("android"),
+					},
+				},
+			},
+			// Should apply to everything due to the second expectation.
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "fake_test",
+						Tags:  result.NewTags("android"),
+					},
+					{
+						Query: "*",
+						Tags:  result.NewTags(),
+					},
+				},
+			},
+			// Should only apply to a single result.
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "real_test_2",
+						Tags:  result.NewTags("android"),
+					},
+				},
+			},
+			// Should apply to all Android results, and thus be fairly high up.
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "*",
+						Tags:  result.NewTags("android"),
+					},
+				},
+			},
+			// Should apply to all Linux results, and thus should be below the
+			// Android one.
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "*",
+						Tags:  result.NewTags("linux"),
+					},
+				},
+			},
+			// Should also apply to all Linux results, but should not end up double
+			// counting.
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "real_*",
+						Tags:  result.NewTags("linux"),
+					},
+					{
+						Query: "real_test_1",
+						Tags:  result.NewTags("linux"),
+					},
+					{
+						Query: "real_test_2",
+						Tags:  result.NewTags("linux"),
+					},
+				},
+			},
+		},
+	}
+
+	uniqueResults := result.List{
+		{
+			Query: query.Parse("real_test_1"),
+			Tags:  result.NewTags("android"),
+		},
+		{
+			Query: query.Parse("real_test_2"),
+			Tags:  result.NewTags("android"),
+		},
+		{
+			Query: query.Parse("real_test_3"),
+			Tags:  result.NewTags("android"),
+		},
+		{
+			Query: query.Parse("real_test_1"),
+			Tags:  result.NewTags("linux"),
+		},
+		{
+			Query: query.Parse("real_test_2"),
+			Tags:  result.NewTags("linux"),
+		},
+	}
+
+	expectedChunkCounts := []ChunkWithCounter{
+		{
+			Chunk: &content.Chunks[1],
+			Count: 5,
+		},
+		{
+			Chunk: &content.Chunks[2],
+			Count: 5,
+		},
+		{
+			Chunk: &content.Chunks[4],
+			Count: 3,
+		},
+		{
+			Chunk: &content.Chunks[5],
+			Count: 2,
+		},
+		{
+			Chunk: &content.Chunks[6],
+			Count: 2,
+		},
+		{
+			Chunk: &content.Chunks[3],
+			Count: 1,
+		},
+		{
+			Chunk: &content.Chunks[0],
+			Count: 0,
+		},
+	}
+
+	actualChunkCounts := getChunksOrderedByCoverageLoss(&content, &uniqueResults)
+	require.Equal(t, expectedChunkCounts, actualChunkCounts)
+}
+
+func TestGetChunksOrderedByCoverageLoss_ManyResults(t *testing.T) {
+	// This primarily exercises the code path where multiple sub-workers are
+	// used, which requires a large number of results.
+	content := expectations.Content{
+		Chunks: []expectations.Chunk{
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "*",
+						Tags:  result.NewTags("android"),
+					},
+				},
+			},
+			{
+				Expectations: expectations.Expectations{
+					{
+						Query: "*",
+						Tags:  result.NewTags("linux"),
+					},
+				},
+			},
+		},
+	}
+
+	uniqueResults := result.List{}
+	for i := 0; i < maxResultsPerWorker*2; i++ {
+		r := result.Result{
+			Query: query.Parse(fmt.Sprintf("real_test_%d", i)),
+			Tags:  result.NewTags("android"),
+		}
+		uniqueResults = append(uniqueResults, r)
+
+		if i%2 == 0 {
+			r = result.Result{
+				Query: query.Parse(fmt.Sprintf("real_test_%d", i)),
+				Tags:  result.NewTags("linux"),
+			}
+			uniqueResults = append(uniqueResults, r)
+		}
+	}
+
+	expectedChunkCounts := []ChunkWithCounter{
+		{
+			Chunk: &content.Chunks[0],
+			Count: maxResultsPerWorker * 2,
+		},
+		{
+			Chunk: &content.Chunks[1],
+			Count: maxResultsPerWorker,
+		},
+	}
+
+	actualChunkCounts := getChunksOrderedByCoverageLoss(&content, &uniqueResults)
+	require.Equal(t, expectedChunkCounts, actualChunkCounts)
+}
+
+/*******************************************************************************
+ * outputResults tests
+ ******************************************************************************/
+
+func getOutputResultsOrderedChunks() []ChunkWithCounter {
+	chunks := []expectations.Chunk{
+		{
+			Comments: []string{"# Linux"},
+			Expectations: expectations.Expectations{
+				{
+					Line:    5,
+					Bug:     "crbug.com/1234",
+					Tags:    result.NewTags("linux"),
+					Query:   "real_test_1",
+					Status:  []string{"Failure"},
+					Comment: "# trailing comment",
+				},
+				{
+					Line:   6,
+					Bug:    "crbug.com/2345",
+					Tags:   result.NewTags("linux"),
+					Query:  "real_test_2",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+		{
+			Comments: []string{"# Android", "# Second comment line"},
+			Expectations: expectations.Expectations{
+				{
+					Line:   10,
+					Bug:    "crbug.com/3456",
+					Tags:   result.NewTags("android"),
+					Query:  "real_test_*",
+					Status: []string{"Failure"},
+				},
+			},
+		},
+	}
+
+	return []ChunkWithCounter{
+		{
+			Chunk: &chunks[1],
+			Count: 10,
+		},
+		{
+			Chunk: &chunks[0],
+			Count: 5,
+		},
+	}
+}
+
+func TestOutputResults(t *testing.T) {
+	tests := []struct {
+		name                   string
+		maxChunksToOutput      int
+		individualExpectations bool
+		expectedOutput         string
+	}{
+		{
+			name:                   "Output all as chunks",
+			maxChunksToOutput:      0,
+			individualExpectations: false,
+			expectedOutput: `
+Complete output:
+
+Comment: # Android
+# Second comment line
+First expectation: crbug.com/3456 [ android ] real_test_* [ Failure ]
+Line number: 10
+Affected 10 test results
+
+Comment: # Linux
+First expectation: crbug.com/1234 [ linux ] real_test_1 [ Failure ] # trailing comment
+Line number: 5
+Affected 5 test results
+`,
+		},
+		{
+			name:                   "Output all as individual expectations",
+			maxChunksToOutput:      0,
+			individualExpectations: true,
+			expectedOutput: `
+Complete output:
+
+Comment: # Android
+# Second comment line
+Expectation: crbug.com/3456 [ android ] real_test_* [ Failure ]
+Line number: 10
+Affected 10 test results
+
+Comment: # Linux
+Expectation: crbug.com/1234 [ linux ] real_test_1 [ Failure ] # trailing comment
+Line number: 5
+Affected 5 test results
+`,
+		},
+		{
+			name:                   "Output one as chunk",
+			maxChunksToOutput:      1,
+			individualExpectations: false,
+			expectedOutput: `
+Top 1 chunks contributing to test coverage loss:
+
+Comment: # Android
+# Second comment line
+First expectation: crbug.com/3456 [ android ] real_test_* [ Failure ]
+Line number: 10
+Affected 10 test results
+`,
+		},
+		{
+			name:                   "Output one as individual expectations",
+			maxChunksToOutput:      1,
+			individualExpectations: true,
+			expectedOutput: `
+Top 1 individual expectations contributing to test coverage loss:
+
+Comment: # Android
+# Second comment line
+Expectation: crbug.com/3456 [ android ] real_test_* [ Failure ]
+Line number: 10
+Affected 10 test results
+`,
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			buffer := bytes.Buffer{}
+			outputResults(
+				getOutputResultsOrderedChunks(),
+				testCase.maxChunksToOutput,
+				testCase.individualExpectations,
+				&buffer)
+			require.Equal(t, testCase.expectedOutput, buffer.String())
+		})
+	}
+}
diff --git a/tools/src/cmd/cts/main.go b/tools/src/cmd/cts/main.go
index 8b4f01c..234ba91 100644
--- a/tools/src/cmd/cts/main.go
+++ b/tools/src/cmd/cts/main.go
@@ -38,9 +38,12 @@
 
 	"dawn.googlesource.com/dawn/tools/src/cmd/cts/common"
 	"dawn.googlesource.com/dawn/tools/src/fileutils"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
+	"dawn.googlesource.com/dawn/tools/src/resultsdb"
 	"dawn.googlesource.com/dawn/tools/src/subcmd"
 
 	// Register sub-commands
+	_ "dawn.googlesource.com/dawn/tools/src/cmd/cts/expectationcoverage"
 	_ "dawn.googlesource.com/dawn/tools/src/cmd/cts/export"
 	_ "dawn.googlesource.com/dawn/tools/src/cmd/cts/format"
 	_ "dawn.googlesource.com/dawn/tools/src/cmd/cts/merge"
@@ -62,6 +65,13 @@
 		os.Exit(1)
 	}
 
+	cfg.OsWrapper = oswrapper.GetRealOSWrapper()
+	cfg.Querier, err = resultsdb.NewBigQueryClient(ctx, resultsdb.DefaultQueryProject)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
 	if err := subcmd.Run(ctx, *cfg, common.Commands()...); err != nil {
 		if err != subcmd.ErrInvalidCLA {
 			fmt.Fprintln(os.Stderr, err)
diff --git a/tools/src/cts/expectations/expectations.go b/tools/src/cts/expectations/expectations.go
index acf5aa2..daba32c 100644
--- a/tools/src/cts/expectations/expectations.go
+++ b/tools/src/cts/expectations/expectations.go
@@ -35,6 +35,7 @@
 	"fmt"
 	"io"
 	"os"
+	"reflect"
 	"sort"
 	"strings"
 
@@ -160,6 +161,39 @@
 	return Chunk{comments, expectations}
 }
 
+// ContainedWithinList returns whether the Chunk is deeply equal to any Chunk
+// in 'chunkList'.
+func (c Chunk) ContainedWithinList(chunkList *[]Chunk) bool {
+	for _, otherChunk := range *chunkList {
+		if reflect.DeepEqual(c, otherChunk) {
+			return true
+		}
+	}
+	return false
+}
+
+// AppliesToResult returns whether the Expectation applies to the test + config
+// represented by the Result.
+func (e Expectation) AppliesToResult(r result.Result) bool {
+	// Tags apply as long as the Expectation's tags are a subset of the Result's
+	// tags.
+	tagsApply := r.Tags.ContainsAll(e.Tags)
+
+	// The query applies if it's an exact match, or if the Expectation ends in
+	// an unescaped wildcard and the Result's query starts with everything
+	// before that wildcard.
+	var queryApplies bool
+	if strings.HasSuffix(e.Query, "*") && !strings.HasSuffix(e.Query, "\\*") {
+		// The expectation file format currently guarantees that wildcards are only
+		// ever at the end. If more generic wildcards are added in for WebGPU usage,
+		// this will need to be changed to a proper fnmatch check.
+		queryApplies = strings.HasPrefix(
+			r.Query.ExpectationFileString(),
+			e.Query[:len(e.Query)-1])
+	} else {
+		queryApplies = e.Query == r.Query.ExpectationFileString()
+	}
+
+	return tagsApply && queryApplies
+}
+
 // AsExpectationFileString returns the human-readable form of the expectation
 // that matches the syntax of the expectation files.
 func (e Expectation) AsExpectationFileString() string {
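As a usage sketch of the two helpers added above (imports as used by the
tests; the concrete values are hypothetical):

package main

import (
	"fmt"

	"dawn.googlesource.com/dawn/tools/src/cts/expectations"
	"dawn.googlesource.com/dawn/tools/src/cts/query"
	"dawn.googlesource.com/dawn/tools/src/cts/result"
)

func main() {
	e := expectations.Expectation{
		Query: "foo*",
		Tags:  result.NewTags("android"),
	}

	// The trailing wildcard makes the query a prefix match, and the
	// expectation's tags only need to be a subset of the result's tags.
	r := result.Result{
		Query: query.Parse("foobar"),
		Tags:  result.NewTags("android", "release"),
	}
	fmt.Println(e.AppliesToResult(r)) // true

	// A linux-tagged result does not satisfy the android tag requirement.
	r.Tags = result.NewTags("linux", "release")
	fmt.Println(e.AppliesToResult(r)) // false

	// ContainedWithinList is a deep-equality membership check.
	c := expectations.Chunk{Expectations: expectations.Expectations{e}}
	fmt.Println(c.ContainedWithinList(&[]expectations.Chunk{c})) // true
}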
diff --git a/tools/src/cts/expectations/expectations_test.go b/tools/src/cts/expectations/expectations_test.go
index e49f974..ff50576 100644
--- a/tools/src/cts/expectations/expectations_test.go
+++ b/tools/src/cts/expectations/expectations_test.go
@@ -30,10 +30,11 @@
 import (
 	"testing"
 
+	"dawn.googlesource.com/dawn/tools/src/cts/query"
 	"dawn.googlesource.com/dawn/tools/src/cts/result"
 
 	"github.com/google/go-cmp/cmp"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // Tests behavior of Content.Format()
@@ -136,7 +137,7 @@
 		Status:  []string{"Failure", "Slow"},
 		Comment: "# comment",
 	}
-	assert.Equal(t, e.AsExpectationFileString(), "crbug.com/1234 [ linux nvidia ] query [ Failure Slow ] # comment")
+	require.Equal(t, e.AsExpectationFileString(), "crbug.com/1234 [ linux nvidia ] query [ Failure Slow ] # comment")
 
 	// No bug.
 	e = Expectation{
@@ -145,7 +146,7 @@
 		Status:  []string{"Failure", "Slow"},
 		Comment: "# comment",
 	}
-	assert.Equal(t, e.AsExpectationFileString(), "[ linux nvidia ] query [ Failure Slow ] # comment")
+	require.Equal(t, e.AsExpectationFileString(), "[ linux nvidia ] query [ Failure Slow ] # comment")
 
 	// No tags.
 	e = Expectation{
@@ -155,7 +156,7 @@
 		Status:  []string{"Failure", "Slow"},
 		Comment: "# comment",
 	}
-	assert.Equal(t, e.AsExpectationFileString(), "crbug.com/1234 query [ Failure Slow ] # comment")
+	require.Equal(t, e.AsExpectationFileString(), "crbug.com/1234 query [ Failure Slow ] # comment")
 
 	// No comment.
 	e = Expectation{
@@ -164,14 +165,14 @@
 		Query:  "query",
 		Status: []string{"Failure", "Slow"},
 	}
-	assert.Equal(t, e.AsExpectationFileString(), "crbug.com/1234 [ linux nvidia ] query [ Failure Slow ]")
+	require.Equal(t, e.AsExpectationFileString(), "crbug.com/1234 [ linux nvidia ] query [ Failure Slow ]")
 
 	// Minimal expectation.
 	e = Expectation{
 		Query:  "query",
 		Status: []string{"Failure", "Slow"},
 	}
-	assert.Equal(t, e.AsExpectationFileString(), "query [ Failure Slow ]")
+	require.Equal(t, e.AsExpectationFileString(), "query [ Failure Slow ]")
 }
 
 func TestSort(t *testing.T) {
@@ -255,7 +256,7 @@
 		secondLinuxTwo,
 	}
 
-	assert.Equal(t, expectationsList, expectedList)
+	require.Equal(t, expectationsList, expectedList)
 }
 
 func TestSortPrioritizeQuery(t *testing.T) {
@@ -339,5 +340,488 @@
 		secondLinuxTwo,
 	}
 
-	assert.Equal(t, expectationsList, expectedList)
+	require.Equal(t, expectationsList, expectedList)
+}
+
+func TestChunk_ContainedWithinList(t *testing.T) {
+	tests := []struct {
+		name      string
+		chunk     Chunk
+		chunkList []Chunk
+		want      bool
+	}{
+		{ /////////////////////////////////////////////////////////////////////////
+			name:      "Empty list",
+			chunk:     Chunk{},
+			chunkList: []Chunk{},
+			want:      false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name:  "Empty chunk",
+			chunk: Chunk{},
+			chunkList: []Chunk{
+				Chunk{},
+			},
+			want: true,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk success",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: true,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk success multiple in list",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c3"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: true,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk comments differ",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk success expectations differ",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+						{
+							Line:    3,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk expectation line differs",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    3,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk expectation bug differs",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug2",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk expectation tags differ",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2", "t3"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk expectation query differs",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q2",
+							Status:  []string{"status"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk expectation status differs",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status2"},
+							Comment: "comment",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Complex chunk expectation comment differs",
+			chunk: Chunk{
+				Comments: []string{"c1", "c2"},
+				Expectations: []Expectation{
+					{
+						Line:    2,
+						Bug:     "bug",
+						Tags:    result.NewTags("t1", "t2"),
+						Query:   "q",
+						Status:  []string{"status"},
+						Comment: "comment",
+					},
+				},
+			},
+			chunkList: []Chunk{
+				Chunk{
+					Comments: []string{"c1", "c2"},
+					Expectations: []Expectation{
+						{
+							Line:    2,
+							Bug:     "bug",
+							Tags:    result.NewTags("t1", "t2"),
+							Query:   "q",
+							Status:  []string{"status"},
+							Comment: "comment2",
+						},
+					},
+				},
+			},
+			want: false,
+		},
+	}
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			got := testCase.chunk.ContainedWithinList(&testCase.chunkList)
+			require.Equal(t, testCase.want, got)
+		})
+	}
+}
+
+func TestAppliesToResult(t *testing.T) {
+	tests := []struct {
+		name        string
+		e           Expectation
+		r           result.Result
+		shouldMatch bool
+	}{
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Exact match",
+			e: Expectation{
+				Query: "foo",
+				Tags:  result.NewTags("android"),
+			},
+			r: result.Result{
+				Query: query.Parse("foo"),
+				Tags:  result.NewTags("android", "release"),
+			},
+			shouldMatch: true,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Wildcard match",
+			e: Expectation{
+				Query: "foo*",
+				Tags:  result.NewTags("android"),
+			},
+			r: result.Result{
+				Query: query.Parse("foobar"),
+				Tags:  result.NewTags("android", "release"),
+			},
+			shouldMatch: true,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Wildcard match everything",
+			e: Expectation{
+				Query: "*",
+				Tags:  result.NewTags("android"),
+			},
+			r: result.Result{
+				Query: query.Parse("foobar"),
+				Tags:  result.NewTags("android", "release"),
+			},
+			shouldMatch: true,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Name mismatch",
+			e: Expectation{
+				Query: "foo",
+				Tags:  result.NewTags("android"),
+			},
+			r: result.Result{
+				Query: query.Parse("bar"),
+				Tags:  result.NewTags("android", "release"),
+			},
+			shouldMatch: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Wildcard mismatch",
+			e: Expectation{
+				Query: "foo*",
+				Tags:  result.NewTags("android"),
+			},
+			r: result.Result{
+				Query: query.Parse("bar"),
+				Tags:  result.NewTags("android", "release"),
+			},
+			shouldMatch: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Escaped wildcard mismatch",
+			e: Expectation{
+				Query: "foo\\*",
+				Tags:  result.NewTags("android"),
+			},
+			r: result.Result{
+				Query: query.Parse("foobar"),
+				Tags:  result.NewTags("android", "release"),
+			},
+			shouldMatch: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Tag mismatch",
+			e: Expectation{
+				Query: "foo",
+				Tags:  result.NewTags("android"),
+			},
+			r: result.Result{
+				Query: query.Parse("foo"),
+				Tags:  result.NewTags("linux", "release"),
+			},
+			shouldMatch: false,
+		},
+		{ /////////////////////////////////////////////////////////////////////////
+			name: "Tag not subset",
+			e: Expectation{
+				Query: "foo",
+				Tags:  result.NewTags("android", "release"),
+			},
+			r: result.Result{
+				Query: query.Parse("foo"),
+				Tags:  result.NewTags("linux", "release"),
+			},
+			shouldMatch: false,
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			require.Equal(t, testCase.shouldMatch, testCase.e.AppliesToResult(testCase.r))
+		})
+	}
 }
diff --git a/tools/src/cts/result/result.go b/tools/src/cts/result/result.go
index a3a9c7d..3877a26 100644
--- a/tools/src/cts/result/result.go
+++ b/tools/src/cts/result/result.go
@@ -41,6 +41,7 @@
 
 	"dawn.googlesource.com/dawn/tools/src/container"
 	"dawn.googlesource.com/dawn/tools/src/cts/query"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
 )
 
 // Result holds the result of a CTS test
@@ -352,6 +353,27 @@
 	return results, nil
 }
 
+// LoadWithWrapper is identical to Load, but uses the provided fsReader instead
+// of using os directly.
+// TODO(crbug.com/344014313): Merge this with Load once all uses have switched
+// to the wrapper version.
+func LoadWithWrapper(
+	path string,
+	fsReader oswrapper.FilesystemReader) (ResultsByExecutionMode, error) {
+
+	file, err := fsReader.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	results, err := Read(file)
+	if err != nil {
+		return nil, fmt.Errorf("while reading '%v': %w", path, err)
+	}
+	return results, nil
+}
+
 // Save saves the result list to the file with the given path
 func Save(path string, results ResultsByExecutionMode) error {
 	dir := filepath.Dir(path)
@@ -366,6 +388,27 @@
 	return Write(file, results)
 }
 
+// SaveWithWrapper is identical to Save, but uses the provided fsWriter instead
+// of using os directly.
+// TODO(crbug.com/344014313): Merge this with Save once all uses have switched
+// to the wrapper version.
+func SaveWithWrapper(
+	path string,
+	results ResultsByExecutionMode,
+	fsWriter oswrapper.FilesystemWriter) error {
+
+	dir := filepath.Dir(path)
+	if err := fsWriter.MkdirAll(dir, 0777); err != nil {
+		return err
+	}
+	file, err := fsWriter.Create(path)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	return Write(file, results)
+}
+
 // Read reads a result list from the given reader
 func Read(r io.Reader) (ResultsByExecutionMode, error) {
 	scanner := bufio.NewScanner(r)
diff --git a/tools/src/cts/result/result_test.go b/tools/src/cts/result/result_test.go
index 456348f..dec17c4 100644
--- a/tools/src/cts/result/result_test.go
+++ b/tools/src/cts/result/result_test.go
@@ -29,6 +29,7 @@
 
 import (
 	"bytes"
+	"path/filepath"
 	"testing"
 	"time"
 
@@ -36,7 +37,9 @@
 	"dawn.googlesource.com/dawn/tools/src/cts/query"
 	"dawn.googlesource.com/dawn/tools/src/cts/result"
 	"dawn.googlesource.com/dawn/tools/src/fileutils"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
 	"github.com/google/go-cmp/cmp"
+	"github.com/stretchr/testify/require"
 )
 
 var Q = query.Parse
@@ -788,6 +791,33 @@
 	}
 }
 
+func TestSaveLoad(t *testing.T) {
+	wrapper := oswrapper.CreateMemMapOSWrapper()
+
+	in := result.ResultsByExecutionMode{
+		"bar": result.List{
+			{Query: Q(`suite:a:*`), Tags: T(`x`), Status: result.Pass},
+			{Query: Q(`suite:b,*`), Tags: T(`y`), Status: result.Failure},
+			{Query: Q(`suite:a:b:*`), Tags: T(`x`, `y`), Status: result.Skip},
+			{Query: Q(`suite:a:c,*`), Tags: T(`y`, `x`), Status: result.Failure},
+			{Query: Q(`suite:a,b:c,*`), Tags: T(`y`, `x`), Status: result.Crash},
+			{Query: Q(`suite:a,b:c:*`), Status: result.Slow},
+		},
+		"foo": result.List{
+			{Query: Q(`suite:d:*`), Tags: T(`x`), Status: result.Pass},
+			{Query: Q(`suite:e,*`), Tags: T(`y`), Status: result.Failure},
+		},
+	}
+
+	saveLocation := filepath.Join(fileutils.ThisDir(), "cache.txt")
+	err := result.SaveWithWrapper(saveLocation, in, wrapper)
+	require.NoErrorf(t, err, "Error saving results: %v", err)
+
+	out, err := result.LoadWithWrapper(saveLocation, wrapper)
+	require.NoErrorf(t, err, "Error loading results: %v", err)
+	require.Equal(t, in, out)
+}
+
 func TestReadWrite(t *testing.T) {
 	in := result.ResultsByExecutionMode{
 		"bar": result.List{
diff --git a/tools/src/fileutils/paths.go b/tools/src/fileutils/paths.go
index 5070b56..5fb22a9 100644
--- a/tools/src/fileutils/paths.go
+++ b/tools/src/fileutils/paths.go
@@ -34,6 +34,8 @@
 	"path/filepath"
 	"runtime"
 	"strings"
+
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
 )
 
 // ThisLine returns the filepath and line number of the calling function
@@ -90,6 +92,19 @@
 	return path
 }
 
+// ExpandHomeWithWrapper is a copy of ExpandHome that uses the provided
+// EnvironProvider instead of calling os directly.
+// TODO(crbug.com/344014313): Merge this into ExpandHome once all uses are using
+// dependency injection.
+func ExpandHomeWithWrapper(path string, environProvider oswrapper.EnvironProvider) string {
+	if strings.ContainsRune(path, '~') {
+		if home, err := environProvider.UserHomeDir(); err == nil {
+			return strings.ReplaceAll(path, "~", home)
+		}
+	}
+	return path
+}
+
 // NodePath looks for the node binary, first in dawn's third_party directory,
 // falling back to PATH.
 func NodePath() string {
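A short usage sketch with the in-memory wrapper exercised by the tests below
(the path value is hypothetical):

wrapper := oswrapper.CreateMemMapOSWrapper()
wrapper.Environment = map[string]string{"HOME": "/home/user"}

expanded := fileutils.ExpandHomeWithWrapper("~/dawn/out", wrapper)
// expanded == "/home/user/dawn/out"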
diff --git a/tools/src/fileutils/paths_test.go b/tools/src/fileutils/paths_test.go
index ec0f1d3..dc25fe0 100644
--- a/tools/src/fileutils/paths_test.go
+++ b/tools/src/fileutils/paths_test.go
@@ -33,12 +33,14 @@
 	"testing"
 
 	"dawn.googlesource.com/dawn/tools/src/fileutils"
+	"dawn.googlesource.com/dawn/tools/src/oswrapper"
 	"github.com/google/go-cmp/cmp"
+	"github.com/stretchr/testify/require"
 )
 
 func TestThisLine(t *testing.T) {
 	td := fileutils.ThisLine()
-	if !strings.HasSuffix(td, "paths_test.go:40") {
+	if !strings.HasSuffix(td, "paths_test.go:42") {
 		t.Errorf("TestThisLine() returned %v", td)
 	}
 }
@@ -63,6 +65,52 @@
 	}
 }
 
+func TestExpandHomeWithWrapper(t *testing.T) {
+	tests := []struct {
+		name  string
+		input string
+		want  string
+	}{
+		{
+			name:  "No substitution",
+			input: "/foo/bar",
+			want:  "/foo/bar",
+		},
+		{
+			name:  "Single substitution",
+			input: "~/foo",
+			want:  "/home/foo",
+		},
+		{
+			name:  "Multi substitution",
+			input: "~/foo/~",
+			want:  "/home/foo//home",
+		},
+		{
+			name:  "Trailing slash",
+			input: "~/foo/",
+			want:  "/home/foo/",
+		},
+		{
+			name:  "Only home",
+			input: "~",
+			want:  "/home",
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			wrapper := oswrapper.CreateMemMapOSWrapper()
+			wrapper.Environment = map[string]string{
+				"HOME": "/home",
+			}
+
+			expandedPath := fileutils.ExpandHomeWithWrapper(testCase.input, wrapper)
+			require.Equal(t, testCase.want, expandedPath)
+		})
+	}
+}
+
 func TestCommonRootDir(t *testing.T) {
 	for _, test := range []struct {
 		a, b   string
diff --git a/tools/src/resultsdb/mockquerier.go b/tools/src/resultsdb/mockquerier.go
new file mode 100644
index 0000000..47c3ccd
--- /dev/null
+++ b/tools/src/resultsdb/mockquerier.go
@@ -0,0 +1,75 @@
+// Copyright 2025 The Dawn & Tint Authors
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its
+//    contributors may be used to endorse or promote products derived from
+//    this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package resultsdb
+
+import (
+	"context"
+
+	"dawn.googlesource.com/dawn/tools/src/buildbucket"
+)
+
+// PrefixGroupedQueryResults maps a test prefix to the QueryResults that should
+// be returned for it.
+type PrefixGroupedQueryResults map[string][]QueryResult
+
+// MockBigQueryClient is a fake version of dawn/tools/src/resultsdb's
+// BigQueryClient that serves canned, prefix-grouped results.
+type MockBigQueryClient struct {
+	ReturnValues                       PrefixGroupedQueryResults
+	UnsuppressedFailureReturnValues    PrefixGroupedQueryResults
+	RecentUniqueSuppressedReturnValues PrefixGroupedQueryResults
+}
+
+func (bq MockBigQueryClient) QueryTestResults(
+	ctx context.Context, builds []buildbucket.BuildID, testPrefix string, f RowHandler) error {
+	for _, result := range bq.ReturnValues[testPrefix] {
+		if err := f(&result); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (bq MockBigQueryClient) QueryUnsuppressedFailingTestResults(
+	ctx context.Context, builds []buildbucket.BuildID, testPrefix string, f RowHandler) error {
+
+	for _, result := range bq.UnsuppressedFailureReturnValues[testPrefix] {
+		if err := f(&result); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (bq MockBigQueryClient) QueryRecentUniqueSuppressedTestResults(
+	ctx context.Context, testPrefix string, f RowHandler) error {
+
+	for _, result := range bq.RecentUniqueSuppressedReturnValues[testPrefix] {
+		if err := f(&result); err != nil {
+			return err
+		}
+	}
+	return nil
+}
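For illustration, a minimal sketch of driving a Querier with this mock. The
RowHandler signature (func(*QueryResult) error) is inferred from how the mock
invokes 'f' above:

package main

import (
	"context"
	"fmt"

	"dawn.googlesource.com/dawn/tools/src/resultsdb"
)

func main() {
	client := resultsdb.MockBigQueryClient{
		RecentUniqueSuppressedReturnValues: resultsdb.PrefixGroupedQueryResults{
			"core_prefix": {{TestId: "core_prefix_foo", Status: "FAIL"}},
		},
	}

	// Collect every canned row handed to the RowHandler.
	var rows []resultsdb.QueryResult
	err := client.QueryRecentUniqueSuppressedTestResults(
		context.Background(), "core_prefix",
		func(r *resultsdb.QueryResult) error {
			rows = append(rows, *r)
			return nil
		})
	fmt.Println(err, len(rows)) // <nil> 1
}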
diff --git a/tools/src/resultsdb/resultsdb.go b/tools/src/resultsdb/resultsdb.go
index 91c48a4..74ac031 100644
--- a/tools/src/resultsdb/resultsdb.go
+++ b/tools/src/resultsdb/resultsdb.go
@@ -44,6 +44,7 @@
 type Querier interface {
 	QueryTestResults(ctx context.Context, builds []buildbucket.BuildID, testPrefix string, f RowHandler) error
 	QueryUnsuppressedFailingTestResults(ctx context.Context, builds []buildbucket.BuildID, testPrefix string, f RowHandler) error
+	QueryRecentUniqueSuppressedTestResults(ctx context.Context, testPrefix string, f RowHandler) error
 }
 
 // BigQueryClient is a wrapper around bigquery.Client so that we can define new
@@ -159,6 +160,64 @@
 	return bq.runQueryForBuilds(ctx, baseQuery, builds, testPrefix, f)
 }
 
+// QueryRecentUniqueSuppressedTestResults fetches the test results for the given
+// 'testPrefix' that:
+//  1. Were produced within the last 6 hours
+//  2. Had some sort of test suppression in place (i.e. raw typ expectations
+//     other than a lone "Pass"), regardless of whether the test passed.
+//
+// Results are grouped by unique test ID and typ tags, with other information
+// removed.
+//
+// f is called once per result and is expected to handle any processing or
+// storage of results.
+func (bq BigQueryClient) QueryRecentUniqueSuppressedTestResults(
+	ctx context.Context,
+	testPrefix string,
+	f RowHandler) error {
+
+	baseQuery := `
+		WITH
+			recent_results AS (
+				SELECT
+					test_id AS testid,
+					ARRAY(
+						SELECT t
+						FROM tr.tags t
+						WHERE key = "typ_tag"
+					) AS tags,
+					ARRAY(
+						SELECT value
+						FROM tr.tags
+						WHERE key = "raw_typ_expectation"
+					) AS typ_expectations
+				FROM ` + "`chrome-luci-data.chromium.gpu_ci_test_results`" + ` tr
+				WHERE
+					TIME(partition_time) > TIME_SUB(CURRENT_TIME(), INTERVAL 6 HOUR)
+					AND STARTS_WITH(tr.test_id, "%v")
+			)
+		SELECT
+			*
+		EXCEPT
+			(typ_expectations)
+		FROM
+			recent_results
+		WHERE
+			(
+				ARRAY_LENGTH(typ_expectations) = 1
+				AND typ_expectations[0] != "Pass"
+			)
+			OR
+			(
+				ARRAY_LENGTH(typ_expectations) > 1
+			)
+		GROUP BY testid, tags
+`
+
+	query := fmt.Sprintf(baseQuery, testPrefix)
+
+	return bq.runQuery(ctx, query, f)
+}
+
 // runQueryForBuilds is a helper function for running queries limited to a set
 // of builds and prefix. See callers of this function for additional information.
 func (bq BigQueryClient) runQueryForBuilds(
@@ -169,6 +228,12 @@
 	}
 	query := fmt.Sprintf(baseQuery, strings.Join(buildIds, ","), testPrefix)
 
+	return bq.runQuery(ctx, query, f)
+}
+
+// runQuery is a helper function to run the provided 'query' and call 'f' for
+// each resulting row.
+func (bq BigQueryClient) runQuery(ctx context.Context, query string, f RowHandler) error {
 	q := bq.client.Query(query)
 	iter, err := q.Read(ctx)
 	if err != nil {