[tools][cts] Substantial roller changes

Avoid collisions by processing 'KEEP' blocks before non-KEEP blocks. KEEP blocks cannot be changed by the tool, so if new expectations were emitted before a KEEP block was seen, we could end up with a collision.
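
Roughly, the split looks like this (a simplified sketch; Chunk, the token list and the strings.Contains check mirror the real code in update.go below, the helper itself is illustrative and assumes the "strings" import):

    // splitChunks separates chunks the tool must not rewrite from the rest.
    func splitChunks(chunks []Chunk) (immutable, mutable []Chunk) {
        immutableTokens := []string{"KEEP", "BEGIN TAG HEADER", "Last rolled"}
        for _, chunk := range chunks {
            keep := false
            for _, line := range chunk.Comments {
                for _, token := range immutableTokens {
                    if strings.Contains(line, token) {
                        keep = true
                    }
                }
            }
            if keep {
                immutable = append(immutable, chunk)
            } else {
                mutable = append(mutable, chunk)
            }
        }
        return immutable, mutable
    }

build() then updates the immutable chunks before the mutable ones, so the queries covered by KEEP chunks are already marked as consumed by the time new expectations are generated.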

Simplify the expectation parser by removing the concept of an 'empty' expectation chunk. As a result, double blank lines in the expectations file are folded to a single blank line, but it simplifies the code a bunch.
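
For example, this input:

    # comment 1


    # comment 2

now round-trips as:

    # comment 1

    # comment 2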

Remove tag validation of `expectations.txt`. This produced false positives: you simply cannot statically know whether two tag sets collide, as proper collision detection requires knowledge of the variants being run. We now rely solely on typ's collision detection.
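
For example, whether these two lines collide (hypothetical bug numbers and tags):

    crbug.com/a/111 [ os-a ] a:b,c:d,* [ Failure ]
    crbug.com/a/222 [ gpu-b ] a:b,c:d,* [ Failure ]

depends on whether any variant is actually run with both the os-a and gpu-b tags, which the file alone cannot tell you.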

Remove emission of 'Slow' expectations. These now live in a separate expectation file, and will fail validation if emitted into `expectations.txt`.

Make the pattern matching of 'untriaged failures' comments more lenient, which is required to match the new style in the expectations file. Also wrap these comments between horizontal `###` rule lines.
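
The untriaged blocks now look like:

    ################################################################################
    # New failures. Please triage:
    ################################################################################
    crbug.com/dawn/0000 a:b,c:0:* [ Failure ]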

More aggressively collapse nodes with partially failing children to reduce expectation spam: a node is now collapsed when more than 50% (was 75%) of its children fail, or when more than 10 (was 20) children fail.
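
For example, when 6 of the 10 tests under `a:` fail (60% > 50%), the new failures are collapsed to a single line:

    crbug.com/dawn/0000 a:* [ Failure ]

instead of six individual `a:b,c:N:*` expectations.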

Add a bunch more logging, and progress bars for the expensive operations.

Change-Id: Ie14cb89790ed32f0e3f3721be5b573113611edc0
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/151921
Reviewed-by: Antonio Maiorano <amaiorano@google.com>
Auto-Submit: Ben Clayton <bclayton@google.com>
Kokoro: Kokoro <noreply+kokoro@google.com>
Commit-Queue: Ben Clayton <bclayton@google.com>
diff --git a/tools/src/cmd/cts/common/config.go b/tools/src/cmd/cts/common/config.go
index 1811c82..026e103 100644
--- a/tools/src/cmd/cts/common/config.go
+++ b/tools/src/cmd/cts/common/config.go
@@ -19,7 +19,6 @@
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
-	"time"
 
 	"dawn.googlesource.com/dawn/tools/src/buildbucket"
 	"github.com/tidwall/jsonc"
@@ -33,8 +32,6 @@
 	Test struct {
 		// The ResultDB string prefix for CTS tests.
 		Prefixes []string
-		// The time threshold used to classify tests as slow.
-		SlowThreshold time.Duration
 	}
 	// Gerrit holds configuration for Dawn's Gerrit server.
 	Gerrit struct {
diff --git a/tools/src/cmd/cts/common/results.go b/tools/src/cmd/cts/common/results.go
index e4396bd..e85b020 100644
--- a/tools/src/cmd/cts/common/results.go
+++ b/tools/src/cmd/cts/common/results.go
@@ -126,7 +126,6 @@
 
 	// Obtain the patchset's results, kicking a build if there are no results
 	// already available.
-	log.Printf("fetching results from cl %v ps %v...", ps.Change, ps.Patchset)
 	builds, err := GetOrStartBuildsAndWait(ctx, cfg, *ps, bb, "", false)
 	if err != nil {
 		return nil, err
@@ -157,11 +156,13 @@
 		dir := fileutils.ExpandHome(cacheDir)
 		path := filepath.Join(dir, strconv.Itoa(ps.Change), fmt.Sprintf("ps-%v.txt", ps.Patchset))
 		if _, err := os.Stat(path); err == nil {
+			log.Printf("loading cached results from cl %v ps %v...", ps.Change, ps.Patchset)
 			return result.Load(path)
 		}
 		cachePath = path
 	}
 
+	log.Printf("fetching results from cl %v ps %v...", ps.Change, ps.Patchset)
 	results, err := GetResults(ctx, cfg, rdb, builds)
 	if err != nil {
 		return nil, err
@@ -241,10 +242,6 @@
 				}
 			}
 
-			if status == result.Pass && duration > cfg.Test.SlowThreshold {
-				status = result.Slow
-			}
-
 			results = append(results, result.Result{
 				Query:        query.Parse(testName),
 				Status:       status,
diff --git a/tools/src/cmd/cts/roll/roll.go b/tools/src/cmd/cts/roll/roll.go
index 0737dc9..a013873 100644
--- a/tools/src/cmd/cts/roll/roll.go
+++ b/tools/src/cmd/cts/roll/roll.go
@@ -77,7 +77,8 @@
 	rebuild             bool // Rebuild the expectations file from scratch
 	preserve            bool // If false, abandon past roll changes
 	sendToGardener      bool // If true, automatically send to the gardener for review
-	parentSwarmingRunId string
+	parentSwarmingRunID string
+	maxAttempts         int
 }
 
 type cmd struct {
@@ -105,7 +106,8 @@
 	flag.BoolVar(&c.flags.rebuild, "rebuild", false, "rebuild the expectation file from scratch")
 	flag.BoolVar(&c.flags.preserve, "preserve", false, "do not abandon existing rolls")
 	flag.BoolVar(&c.flags.sendToGardener, "send-to-gardener", false, "send the CL to the WebGPU gardener for review")
-	flag.StringVar(&c.flags.parentSwarmingRunId, "parent-swarming-run-id", "", "parent swarming run id. All triggered tasks will be children of this task and will be canceled if the parent is canceled.")
+	flag.StringVar(&c.flags.parentSwarmingRunID, "parent-swarming-run-id", "", "parent swarming run id. All triggered tasks will be children of this task and will be canceled if the parent is canceled.")
+	flag.IntVar(&c.flags.maxAttempts, "max-attempts", 3, "number of update attempts before giving up")
 	return nil, nil
 }
 
@@ -169,7 +171,7 @@
 		flags:               c.flags,
 		auth:                auth,
 		bb:                  bb,
-		parentSwarmingRunId: c.flags.parentSwarmingRunId,
+		parentSwarmingRunID: c.flags.parentSwarmingRunID,
 		rdb:                 rdb,
 		git:                 git,
 		gerrit:              gerrit,
@@ -185,7 +187,7 @@
 	flags               rollerFlags
 	auth                auth.Options
 	bb                  *buildbucket.Buildbucket
-	parentSwarmingRunId string
+	parentSwarmingRunID string
 	rdb                 *resultsdb.ResultsDB
 	git                 *git.Git
 	gerrit              *gerrit.Gerrit
@@ -237,16 +239,13 @@
 		return fmt.Errorf("failed to load expectations: %v", err)
 	}
 
-	// If the user requested a full rebuild of the expecations, strip out
+	// If the user requested a full rebuild of the expectations, strip out
 	// everything but comment chunks.
 	if r.flags.rebuild {
 		rebuilt := ex.Clone()
 		rebuilt.Chunks = rebuilt.Chunks[:0]
 		for _, c := range ex.Chunks {
-			switch {
-			case c.IsBlankLine():
-				rebuilt.MaybeAddBlankLine()
-			case c.IsCommentOnly():
+			if c.IsCommentOnly() {
 				rebuilt.Chunks = append(rebuilt.Chunks, c)
 			}
 		}
@@ -333,12 +332,11 @@
 	}
 
 	// Begin main roll loop
-	const maxAttempts = 3
 	results := result.List{}
 	for attempt := 0; ; attempt++ {
 		// Kick builds
 		log.Printf("building (attempt %v)...\n", attempt)
-		builds, err := common.GetOrStartBuildsAndWait(ctx, r.cfg, ps, r.bb, r.parentSwarmingRunId, false)
+		builds, err := common.GetOrStartBuildsAndWait(ctx, r.cfg, ps, r.bb, r.parentSwarmingRunID, false)
 		if err != nil {
 			return err
 		}
@@ -397,7 +395,7 @@
 			return fmt.Errorf("failed to update change '%v': %v", changeID, err)
 		}
 
-		if attempt >= maxAttempts {
+		if attempt >= r.flags.maxAttempts {
 			err := fmt.Errorf("CTS failed after %v attempts.\nGiving up", attempt)
 			r.gerrit.Comment(ps, err.Error(), nil)
 			return err
@@ -417,10 +415,10 @@
 			return err
 		}
 
-		type StructuredJsonResponse struct {
+		type StructuredJSONResponse struct {
 			Emails []string
 		}
-		var jsonRes StructuredJsonResponse
+		var jsonRes StructuredJSONResponse
 		if err := json.Unmarshal(jsonResponse, &jsonRes); err != nil {
 			return err
 		}
diff --git a/tools/src/cmd/cts/update/update.go b/tools/src/cmd/cts/update/update.go
index 65ba37a..73d7ac8 100644
--- a/tools/src/cmd/cts/update/update.go
+++ b/tools/src/cmd/cts/update/update.go
@@ -19,6 +19,7 @@
 	"flag"
 	"fmt"
 	"io/ioutil"
+	"log"
 	"os"
 	"strings"
 
@@ -79,31 +80,37 @@
 	}
 
 	// Fetch the results
+	log.Println("fetching results...")
 	results, err := c.flags.results.GetResults(ctx, cfg, auth)
 	if err != nil {
 		return err
 	}
 
 	// Merge to remove duplicates
+	log.Println("removing duplicate results...")
 	results = result.Merge(results)
 
 	// Load the expectations file
+	log.Println("loading expectations...")
 	ex, err := expectations.Load(c.flags.expectations)
 	if err != nil {
 		return err
 	}
 
+	log.Println("loading test list...")
 	testlist, err := loadTestList(common.DefaultTestListPath())
 	if err != nil {
 		return err
 	}
 
+	log.Println("validating...")
 	if diag := ex.Validate(); diag.NumErrors() > 0 {
 		diag.Print(os.Stdout, c.flags.expectations)
 		return fmt.Errorf("validation failed")
 	}
 
 	// Update the expectations file with the results
+	log.Println("updating expectations...")
 	diag, err := ex.Update(results, testlist)
 	if err != nil {
 		return err
diff --git a/tools/src/cts/expectations/expectations.go b/tools/src/cts/expectations/expectations.go
index 93edccf..a2e9796 100644
--- a/tools/src/cts/expectations/expectations.go
+++ b/tools/src/cts/expectations/expectations.go
@@ -21,7 +21,6 @@
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"sort"
 	"strings"
@@ -86,7 +85,7 @@
 
 // Load loads the expectation file at 'path', returning a Content.
 func Load(path string) (Content, error) {
-	content, err := ioutil.ReadFile(path)
+	content, err := os.ReadFile(path)
 	if err != nil {
 		return Content{}, err
 	}
@@ -122,27 +121,13 @@
 	return len(c.Chunks) == 0
 }
 
-// EndsInBlankLine returns true if the Content ends with a blank line
-func (c Content) EndsInBlankLine() bool {
-	return !c.Empty() && c.Chunks[len(c.Chunks)-1].IsBlankLine()
-}
-
-// MaybeAddBlankLine appends a new blank line to the content, if the content
-// does not already end in a blank line.
-func (c *Content) MaybeAddBlankLine() {
-	if !c.Empty() && !c.EndsInBlankLine() {
-		c.Chunks = append(c.Chunks, Chunk{})
-	}
-}
-
 // Write writes the Content, in textual form, to the writer w.
 func (c Content) Write(w io.Writer) error {
-	for _, chunk := range c.Chunks {
-		if len(chunk.Comments) == 0 && len(chunk.Expectations) == 0 {
+	for i, chunk := range c.Chunks {
+		if i > 0 {
 			if _, err := fmt.Fprintln(w); err != nil {
 				return err
 			}
-			continue
 		}
 		for _, comment := range chunk.Comments {
 			if _, err := fmt.Fprintln(w, comment); err != nil {
@@ -182,11 +167,6 @@
 	return len(c.Comments) > 0 && len(c.Expectations) == 0
 }
 
-// IsBlankLine returns true if the Chunk has no comments or expectations.
-func (c Chunk) IsBlankLine() bool {
-	return len(c.Comments) == 0 && len(c.Expectations) == 0
-}
-
 // Clone returns a deep-copy of the Chunk
 func (c Chunk) Clone() Chunk {
 	comments := make([]string, len(c.Comments))
diff --git a/tools/src/cts/expectations/parse.go b/tools/src/cts/expectations/parse.go
index 579b26d..78e59cc 100644
--- a/tools/src/cts/expectations/parse.go
+++ b/tools/src/cts/expectations/parse.go
@@ -73,7 +73,6 @@
 		if i > 0 {
 			switch {
 			case
-				lastLineType == blank && lineType != blank,             // blank -> !blank
 				lastLineType != blank && lineType == blank,             // !blank -> blank
 				lastLineType == expectation && lineType != expectation: // expectation -> comment
 				flush()
diff --git a/tools/src/cts/expectations/parse_test.go b/tools/src/cts/expectations/parse_test.go
index e206bcc..7bb97f9 100644
--- a/tools/src/cts/expectations/parse_test.go
+++ b/tools/src/cts/expectations/parse_test.go
@@ -60,7 +60,6 @@
 # a comment`,
 			expect: expectations.Content{
 				Chunks: []expectations.Chunk{
-					{},
 					{Comments: []string{`# a comment`}},
 				},
 			},
@@ -70,7 +69,6 @@
 			in:   "\r\n# a comment",
 			expect: expectations.Content{
 				Chunks: []expectations.Chunk{
-					{},
 					{Comments: []string{`# a comment`}},
 				},
 			},
@@ -98,7 +96,6 @@
 			expect: expectations.Content{
 				Chunks: []expectations.Chunk{
 					{Comments: []string{`# comment 1`}},
-					{},
 					{Comments: []string{`# comment 2`}},
 				},
 			},
@@ -113,7 +110,6 @@
 			expect: expectations.Content{
 				Chunks: []expectations.Chunk{
 					{Comments: []string{`# comment 1`}},
-					{},
 					{Comments: []string{`# comment 2`}},
 				},
 			},
@@ -334,7 +330,6 @@
 							},
 						},
 					},
-					{},
 					{Comments: []string{`### comment 2`}},
 				},
 			},
@@ -358,9 +353,7 @@
 			expect: expectations.Content{
 				Chunks: []expectations.Chunk{
 					{Comments: []string{`# comment 1`}},
-					{},
 					{Comments: []string{`# comment 2`, `# comment 3`}},
-					{},
 					{
 						Expectations: []expectations.Expectation{
 							{
@@ -372,7 +365,6 @@
 							},
 						},
 					},
-					{},
 					{
 						Comments: []string{`# comment 4`, `# comment 5`},
 						Expectations: []expectations.Expectation{
@@ -386,7 +378,6 @@
 						},
 					},
 					{Comments: []string{`# comment 6`}},
-					{},
 					{Comments: []string{`# comment 7`}},
 				},
 			},
@@ -409,7 +400,6 @@
 `,
 			expect: expectations.Content{
 				Chunks: []expectations.Chunk{
-					{},
 					{Comments: []string{
 						`# BEGIN TAG HEADER (autogenerated, see validate_tag_consistency.py)`,
 						`# Devices`,
diff --git a/tools/src/cts/expectations/update.go b/tools/src/cts/expectations/update.go
index acc705f..73e1969 100644
--- a/tools/src/cts/expectations/update.go
+++ b/tools/src/cts/expectations/update.go
@@ -17,12 +17,15 @@
 import (
 	"errors"
 	"fmt"
+	"log"
+	"os"
 	"strings"
-	"time"
 
 	"dawn.googlesource.com/dawn/tools/src/container"
 	"dawn.googlesource.com/dawn/tools/src/cts/query"
 	"dawn.googlesource.com/dawn/tools/src/cts/result"
+	"dawn.googlesource.com/dawn/tools/src/progressbar"
+	"github.com/mattn/go-isatty"
 )
 
 // Update performs an incremental update on the expectations using the provided
@@ -65,12 +68,19 @@
 	// This ensures that skipped results are not included in reduced trees.
 	results = c.appendConsumedResultsForSkippedTests(results, testlist, variants)
 
+	var pb *progressbar.ProgressBar
+	if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd()) {
+		pb = progressbar.New(os.Stdout, nil)
+		defer pb.Stop()
+	}
+
 	u := updater{
 		in:       *c,
 		out:      Content{},
 		qt:       newQueryTree(results),
 		variants: variants,
 		tagSets:  tagSets,
+		pb:       pb,
 	}
 
 	if err := u.preserveRetryOnFailures(); err != nil {
@@ -92,8 +102,9 @@
 	out      Content   // newly built expectations Content
 	qt       queryTree // the query tree
 	variants []container.Set[string]
-	diags    []Diagnostic  // diagnostics raised during update
-	tagSets  []result.Tags // reverse-ordered tag-sets of 'in'
+	diags    []Diagnostic             // diagnostics raised during update
+	tagSets  []result.Tags            // reverse-ordered tag-sets of 'in'
+	pb       *progressbar.ProgressBar // Progress bar, may be nil
 }
 
 // Returns 'results' with additional 'consumed' results for tests that have
@@ -189,6 +200,8 @@
 
 // newQueryTree builds the queryTree from the list of results.
 func newQueryTree(results result.List) queryTree {
+	log.Println("building query tree...")
+
 	// Build a map of query to result indices
 	queryToIndices := map[query.Query][]int{}
 	for i, r := range results {
@@ -302,23 +315,72 @@
 	return nil
 }
 
+type Progress struct {
+	totalExpectations  int
+	currentExpectation int
+}
+
 // build is the updater top-level function.
 // build first appends to u.out all chunks from 'u.in' with expectations updated
 // using the new results, and then appends any new expectations to u.out.
 func (u *updater) build() error {
-	// Update all the existing chunks
-	for _, in := range u.in.Chunks {
-		out := u.chunk(in)
+	progress := Progress{}
 
-		// If all chunk had expectations, but now they've gone, remove the chunk
-		if len(in.Expectations) > 0 && len(out.Expectations) == 0 {
-			continue
+	immutableTokens := []string{
+		"KEEP",
+		"BEGIN TAG HEADER",
+		"Last rolled",
+	}
+
+	// Bin the chunks into those that contain any of the strings in
+	// immutableTokens in the comments and those that do not have these strings.
+	immutableChunks, mutableChunks := []Chunk{}, []Chunk{}
+	for _, chunk := range u.in.Chunks {
+		// Does the chunk comment contain 'KEEP' or 'BEGIN TAG HEADER' ?
+		keep := false
+
+	comments:
+		for _, l := range chunk.Comments {
+			for _, s := range immutableTokens {
+				if strings.Contains(l, s) {
+					keep = true
+					break comments
+				}
+			}
 		}
-		if out.IsBlankLine() {
-			u.out.MaybeAddBlankLine()
-			continue
+
+		if keep {
+			immutableChunks = append(immutableChunks, chunk)
+		} else {
+			mutableChunks = append(mutableChunks, chunk)
 		}
-		u.out.Chunks = append(u.out.Chunks, out)
+
+		progress.totalExpectations += len(chunk.Expectations)
+	}
+
+	log.Println("updating expectation chunks...")
+
+	// Update all the existing chunks in two passes - those that are immutable
+	// then those that are mutable. We do this because the former can't be
+	// altered and may declare expectations that may collide with later
+	// expectations.
+	for _, group := range []struct {
+		chunks      []Chunk
+		isImmutable bool
+	}{
+		{immutableChunks, true},
+		{mutableChunks, false},
+	} {
+		for _, in := range group.chunks {
+			out := u.chunk(in, group.isImmutable, &progress)
+
+			// If the chunk had expectations, but they have now all gone, remove the chunk
+			if len(in.Expectations) > 0 && len(out.Expectations) == 0 {
+				continue
+			}
+
+			u.out.Chunks = append(u.out.Chunks, out)
+		}
 	}
 
 	// Emit new expectations (flaky, failing)
@@ -330,35 +392,35 @@
 }
 
 // chunk returns a new Chunk, based on 'in', with the expectations updated.
-func (u *updater) chunk(in Chunk) Chunk {
+// isImmutable is true if the chunk is labelled with 'KEEP' and can't be changed.
+func (u *updater) chunk(in Chunk, isImmutable bool, progress *Progress) Chunk {
 	if len(in.Expectations) == 0 {
 		return in // Just a comment / blank line
 	}
 
 	// Skip over any untriaged failures / flake chunks.
 	// We'll just rebuild them at the end.
-	if len(in.Comments) > 0 {
-		switch in.Comments[0] {
-		case newFailuresComment, newFlakesComment:
+	for _, line := range in.Comments {
+		if strings.HasPrefix(line, newFailuresComment) ||
+			strings.HasPrefix(line, newFlakesComment) {
 			return Chunk{}
 		}
 	}
 
-	keep := false // Does the chunk comment contain 'KEEP' ?
-	for _, l := range in.Comments {
-		if strings.Contains(l, "KEEP") {
-			keep = true
-			break
-		}
-	}
-
 	// Begin building the output chunk.
 	// Copy over the chunk's comments.
 	out := Chunk{Comments: in.Comments}
 
 	// Build the new chunk's expectations
 	for _, exIn := range in.Expectations {
-		exOut := u.expectation(exIn, keep)
+		if u.pb != nil {
+			u.pb.Update(progressbar.Status{Total: progress.totalExpectations, Segments: []progressbar.Segment{
+				{Count: 1 + progress.currentExpectation},
+			}})
+			progress.currentExpectation++
+		}
+
+		exOut := u.expectation(exIn, isImmutable)
 		out.Expectations = append(out.Expectations, exOut...)
 	}
 
@@ -369,7 +431,7 @@
 
 // expectation returns a new list of Expectations, based on the Expectation 'in',
 // using the new result data.
-func (u *updater) expectation(in Expectation, keep bool) []Expectation {
+func (u *updater) expectation(in Expectation, immutable bool) []Expectation {
 	// noResults is a helper for returning when the expectation has no test
 	// results.
 	noResults := func() []Expectation {
@@ -404,30 +466,13 @@
 	// expectationsForRoot()
 	defer u.qt.markAsConsumed(q, in.Tags, in.Line)
 
-	if keep { // Expectation chunk was marked with 'KEEP'
+	if immutable { // Expectation chunk was marked with 'KEEP'
 		// Add a diagnostic if all tests of the expectation were 'Pass'
 		if s := results.Statuses(); len(s) == 1 && s.One() == result.Pass {
-			if ex := container.NewSet(in.Status...); len(ex) == 1 && ex.One() == string(result.Slow) {
-				// Expectation was 'Slow'. Give feedback on actual time taken.
-				var longest, average time.Duration
-				for _, r := range results {
-					if r.Duration > longest {
-						longest = r.Duration
-					}
-					average += r.Duration
-				}
-				if c := len(results); c > 1 {
-					average /= time.Duration(c)
-					u.diag(Note, in.Line, "longest test took %v (average %v)", longest, average)
-				} else {
-					u.diag(Note, in.Line, "test took %v", longest)
-				}
+			if c := len(results); c > 1 {
+				u.diag(Note, in.Line, "all %d tests now pass", len(results))
 			} else {
-				if c := len(results); c > 1 {
-					u.diag(Note, in.Line, "all %d tests now pass", len(results))
-				} else {
-					u.diag(Note, in.Line, "test now passes")
-				}
+				u.diag(Note, in.Line, "test now passes")
 			}
 		}
 		return []Expectation{in}
@@ -446,8 +491,15 @@
 	// • Take all the reduced-tree leaf nodes, and add these to 'roots'.
 	// Once we've collected all the roots, we'll use these to build the
 	// expectations across the reduced set of tags.
+	log.Println("determining new expectation roots...")
 	roots := query.Tree[bool]{}
-	for _, variant := range u.variants {
+	for i, variant := range u.variants {
+		if u.pb != nil {
+			u.pb.Update(progressbar.Status{Total: len(u.variants), Segments: []progressbar.Segment{
+				{Count: 1 + i},
+			}})
+		}
+
 		// Build a tree from the results matching the given variant.
 		tree, err := u.qt.results.FilterByVariant(variant).StatusTree()
 		if err != nil {
@@ -463,8 +515,15 @@
 	}
 
 	// Build all the expectations for each of the roots.
+	log.Println("building new expectations...")
+	rootsList := roots.List()
 	expectations := []Expectation{}
-	for _, root := range roots.List() {
+	for i, root := range rootsList {
+		if u.pb != nil {
+			u.pb.Update(progressbar.Status{Total: len(rootsList), Segments: []progressbar.Segment{
+				{Count: 1 + i},
+			}})
+		}
 		expectations = append(expectations, u.expectationsForRoot(
 			root.Query,            // Root query
 			0,                     // Line number
@@ -492,9 +551,12 @@
 		{failures, newFailuresComment},
 	} {
 		if len(group.results) > 0 {
-			u.out.MaybeAddBlankLine()
 			u.out.Chunks = append(u.out.Chunks, Chunk{
-				Comments:     []string{group.comment},
+				Comments: []string{
+					"################################################################################",
+					group.comment,
+					"################################################################################",
+				},
 				Expectations: group.results,
 			})
 		}
@@ -627,9 +689,9 @@
 // tree nodes with the same status.
 // treeReducer will collapse trees nodes if any of the following are true:
 //   - All child nodes have the same status
-//   - More than 75% of the child nodes have a non-pass status, and none of the
+//   - More than 50% of the child nodes have a non-pass status, and none of the
 //     children are consumed.
-//   - There are more than 20 child nodes with a non-pass status, and none of the
+//   - There are more than 10 child nodes with a non-pass status, and none of the
 //     children are consumed.
 func treeReducer(statuses []result.Status) *result.Status {
 	counts := map[result.Status]int{}
@@ -646,8 +708,8 @@
 	highestNonPassStatus := result.Failure
 	for s, n := range counts {
 		if s != result.Pass {
-			if percent := (100 * n) / len(statuses); percent > 75 {
-				// Over 75% of all the children are of non-pass status s.
+			if percent := (100 * n) / len(statuses); percent > 50 {
+				// Over 50% of all the children are of non-pass status s.
 				return &s
 			}
 			if n > highestNonPassCount {
@@ -657,8 +719,8 @@
 		}
 	}
 
-	if highestNonPassCount > 20 {
-		// Over 20 child node failed.
+	if highestNonPassCount > 10 {
+		// More than 10 child nodes failed.
 		return &highestNonPassStatus
 	}
 
diff --git a/tools/src/cts/expectations/update_test.go b/tools/src/cts/expectations/update_test.go
index b291fdc..dcd768f 100644
--- a/tools/src/cts/expectations/update_test.go
+++ b/tools/src/cts/expectations/update_test.go
@@ -287,11 +287,15 @@
 			},
 			updated: `# A comment
 
+################################################################################
 # New flakes. Please triage:
+################################################################################
 crbug.com/dawn/0000 suite:dir_a,dir_b:test_c:case=5;* [ RetryOnFailure ]
 crbug.com/dawn/0000 suite:dir_a,dir_b:test_c:case=6;* [ RetryOnFailure ]
 
+################################################################################
 # New failures. Please triage:
+################################################################################
 crbug.com/dawn/0000 suite:dir_a,dir_b:test_a:* [ Failure ]
 crbug.com/dawn/0000 [ gpu-a os-a ] suite:dir_a,dir_b:test_b:* [ Slow ]
 crbug.com/dawn/0000 suite:dir_a,dir_b:test_c:case=4;* [ Failure ]
@@ -328,7 +332,9 @@
 				},
 			},
 			updated: `
+################################################################################
 # New failures. Please triage:
+################################################################################
 crbug.com/dawn/0000 [ gpu-a ] a:* [ Failure ]
 crbug.com/dawn/0000 [ gpu-b ] a:* [ Failure ]
 crbug.com/dawn/0000 [ os-a ] a:* [ Failure ]
@@ -356,154 +362,152 @@
 				},
 			},
 			updated: `
+################################################################################
 # New failures. Please triage:
+################################################################################
 crbug.com/dawn/0000 [ gpu-b os-c ] a:* [ Failure ]
 crbug.com/dawn/0000 [ gpu-c os-b ] a:* [ Failure ]
 `,
 		},
 		{ //////////////////////////////////////////////////////////////////////
-			name:         "merge when over 75% of children fail",
+			name:         "merge when more than 50% of children fail",
 			expectations: ``,
-			results: result.List{
-				result.Result{Query: Q("a:b,c:t0:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t1:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t2:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t3:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t4:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t5:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t6:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t7:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t8:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t9:*"), Status: result.Failure},
+			results: result.List{ // 4 pass, 6 fail (60%)
+				result.Result{Query: Q("a:b,c:0:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:1:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:2:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:3:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:4:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:5:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:6:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:7:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:8:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:9:*"), Status: result.Pass},
 			},
 			updated: `
+################################################################################
 # New failures. Please triage:
+################################################################################
 crbug.com/dawn/0000 a:* [ Failure ]
 `,
 		},
 		{ //////////////////////////////////////////////////////////////////////
-			name:         "don't merge when under 75% of children fail",
+			name:         "don't merge when 50% or fewer children fail",
 			expectations: ``,
-			results: result.List{
-				result.Result{Query: Q("a:b,c:t0:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t1:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t2:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t3:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t4:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t5:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t6:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t7:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t8:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t9:*"), Status: result.Failure},
+			results: result.List{ // 5 pass, 5 fail (50%)
+				result.Result{Query: Q("a:b,c:0:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:1:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:2:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:3:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:4:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:5:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:6:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:7:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:8:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:9:*"), Status: result.Pass},
 			},
 			updated: `
+################################################################################
 # New failures. Please triage:
-crbug.com/dawn/0000 a:b,c:t0:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t2:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t3:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t5:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t6:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t8:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t9:* [ Failure ]
+################################################################################
+crbug.com/dawn/0000 a:b,c:0:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:2:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:5:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:6:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:8:* [ Failure ]
 `,
 		},
 		{ //////////////////////////////////////////////////////////////////////
-			name:         "merge when over 20 children fail",
+			name:         "merge when more than 10 children fail",
 			expectations: ``,
-			results: result.List{ // 21 failures, 70% fail
-				result.Result{Query: Q("a:b,c:t00:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t01:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t02:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t03:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t04:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t05:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t06:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t07:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t08:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t09:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t10:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t11:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t12:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t13:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t14:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t15:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t16:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t17:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t18:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t19:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t20:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t21:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t22:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t23:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t24:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t25:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t26:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t27:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t28:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t29:*"), Status: result.Failure},
+			results: result.List{ // 19 pass, 11 fail (37%)
+				result.Result{Query: Q("a:b,c:00:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:01:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:02:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:03:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:04:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:05:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:06:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:07:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:08:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:09:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:10:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:11:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:12:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:13:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:14:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:15:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:16:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:17:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:18:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:19:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:20:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:21:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:22:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:23:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:24:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:25:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:26:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:27:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:28:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:29:*"), Status: result.Failure},
 			},
 			updated: `
+################################################################################
 # New failures. Please triage:
+################################################################################
 crbug.com/dawn/0000 a:* [ Failure ]
 `,
 		},
 		{ //////////////////////////////////////////////////////////////////////
-			name:         "dont merge when under 21 children fail",
+			name:         "don't merge when 10 or fewer children fail",
 			expectations: ``,
-			results: result.List{ // 20 failures, 66% fail
-				result.Result{Query: Q("a:b,c:t00:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t01:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t02:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t03:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t04:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t05:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t06:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t07:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t08:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t09:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t10:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t11:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t12:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t13:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t14:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t15:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t16:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t17:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t18:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t19:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t20:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t21:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t22:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t23:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t24:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t25:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t26:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t27:*"), Status: result.Pass},
-				result.Result{Query: Q("a:b,c:t28:*"), Status: result.Failure},
-				result.Result{Query: Q("a:b,c:t29:*"), Status: result.Failure},
+			results: result.List{ // 20 pass, 10 fail (33%)
+				result.Result{Query: Q("a:b,c:00:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:01:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:02:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:03:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:04:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:05:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:06:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:07:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:08:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:09:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:10:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:11:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:12:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:13:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:14:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:15:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:16:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:17:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:18:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:19:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:20:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:21:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:22:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:23:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:24:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:25:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:26:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:27:*"), Status: result.Pass},
+				result.Result{Query: Q("a:b,c:28:*"), Status: result.Failure},
+				result.Result{Query: Q("a:b,c:29:*"), Status: result.Failure},
 			},
 			updated: `
+################################################################################
 # New failures. Please triage:
-crbug.com/dawn/0000 a:b,c:t00:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t02:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t04:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t05:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t06:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t08:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t09:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t10:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t13:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t15:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t16:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t18:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t19:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t20:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t22:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t23:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t25:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t26:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t28:* [ Failure ]
-crbug.com/dawn/0000 a:b,c:t29:* [ Failure ]
+################################################################################
+crbug.com/dawn/0000 a:b,c:00:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:05:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:08:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:13:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:15:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:20:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:23:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:26:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:28:* [ Failure ]
+crbug.com/dawn/0000 a:b,c:29:* [ Failure ]
 `,
 		},
 	} {
diff --git a/tools/src/cts/expectations/validate.go b/tools/src/cts/expectations/validate.go
index 8571d55..87dd018 100644
--- a/tools/src/cts/expectations/validate.go
+++ b/tools/src/cts/expectations/validate.go
@@ -21,22 +21,10 @@
 import (
 	"fmt"
 
-	"dawn.googlesource.com/dawn/tools/src/container"
 	"dawn.googlesource.com/dawn/tools/src/cts/query"
 	"github.com/google/go-cmp/cmp"
 )
 
-func (c Content) tagsCollide(a, b container.Set[string]) bool {
-	for _, set := range c.Tags.Sets {
-		aSet := a.Intersection(set.Tags)
-		bSet := b.Intersection(set.Tags)
-		if len(aSet) != 0 && len(bSet) != 0 && len(aSet.Intersection(bSet)) == 0 {
-			return false
-		}
-	}
-	return true
-}
-
 // Validate checks that the expectations do not contain errors
 func (c Content) Validate() Diagnostics {
 	tree, _ := query.NewTree[Expectations]()
@@ -60,7 +48,7 @@
 					})
 				}
 			}
-			glob, err := tree.Glob(query.Parse(ex.Query))
+			_, err := tree.Glob(query.Parse(ex.Query))
 			if err != nil {
 				out = append(out, Diagnostic{
 					Severity: Error,
@@ -69,18 +57,6 @@
 				})
 				continue
 			}
-			for _, qd := range glob {
-				expectations := qd.Data
-				for _, other := range expectations {
-					if other.Line != ex.Line && c.tagsCollide(ex.Tags, other.Tags) {
-						out = append(out, Diagnostic{
-							Severity: Error,
-							Line:     ex.Line,
-							Message:  fmt.Sprintf("expectation collides with expectation on line %v", other.Line),
-						})
-					}
-				}
-			}
 		}
 	}
 	return out
diff --git a/tools/src/cts/expectations/validate_test.go b/tools/src/cts/expectations/validate_test.go
index 55234e1..f1e353b 100644
--- a/tools/src/cts/expectations/validate_test.go
+++ b/tools/src/cts/expectations/validate_test.go
@@ -47,76 +47,6 @@
 `,
 		},
 		{ //////////////////////////////////////////////////////////////////////
-			name: "no-tag collision",
-			expectations: `
-crbug.com/a/123 a:b,c:d,* [ Failure ]
-crbug.com/a/123 a:x,x:d,* [ Failure ]
-crbug.com/a/123 a:b,c:d,* [ Failure ]
-`,
-			diagnostics: expectations.Diagnostics{
-				{
-					Line:     8,
-					Severity: expectations.Error,
-					Message:  "expectation collides with expectation on line 10",
-				},
-				{
-					Line:     10,
-					Severity: expectations.Error,
-					Message:  "expectation collides with expectation on line 8",
-				},
-			},
-		},
-		{ //////////////////////////////////////////////////////////////////////
-			name: "tag collision",
-			expectations: `
-crbug.com/a/123 [ os-a ] a:b,c:d,* [ Failure ]
-crbug.com/a/123 a:x,x:d,* [ Failure ]
-crbug.com/a/123 [ os-a ] a:b,c:d,* [ Failure ]
-`,
-			diagnostics: expectations.Diagnostics{
-				{
-					Line:     8,
-					Severity: expectations.Error,
-					Message:  "expectation collides with expectation on line 10",
-				},
-				{
-					Line:     10,
-					Severity: expectations.Error,
-					Message:  "expectation collides with expectation on line 8",
-				},
-			},
-		},
-		{ //////////////////////////////////////////////////////////////////////
-			name: "nested no-tag collision",
-			expectations: `
-crbug.com/a/123 a:b,c:d,e:* [ Failure ]
-crbug.com/a/123 a:x,x:d,* [ Failure ]
-crbug.com/a/123 a:b,c:d,* [ Failure ]
-`,
-			diagnostics: expectations.Diagnostics{
-				{
-					Line:     10,
-					Severity: expectations.Error,
-					Message:  "expectation collides with expectation on line 8",
-				},
-			},
-		},
-		{ //////////////////////////////////////////////////////////////////////
-			name: "tag collision",
-			expectations: `
-crbug.com/a/123 [ os-a ] a:b,c:d,e:* [ Failure ]
-crbug.com/a/123 a:x,x:d,* [ Failure ]
-crbug.com/a/123 [ os-a ] a:b,c:d,* [ Failure ]
-`,
-			diagnostics: expectations.Diagnostics{
-				{
-					Line:     10,
-					Severity: expectations.Error,
-					Message:  "expectation collides with expectation on line 8",
-				},
-			},
-		},
-		{ //////////////////////////////////////////////////////////////////////
 			name: "slow invalid",
 			expectations: `
 crbug.com/a/123 a:b,c:d,* [ Slow ]
diff --git a/tools/src/progressbar/progressbar.go b/tools/src/progressbar/progressbar.go
index 4668560..2fd13751 100644
--- a/tools/src/progressbar/progressbar.go
+++ b/tools/src/progressbar/progressbar.go
@@ -79,7 +79,7 @@
 // New returns a new ProgressBar that streams output to out.
 // Call ProgressBar.Stop() once finished.
 func New(out io.Writer, cfg *Config) *ProgressBar {
-	p := &ProgressBar{out: out, c: make(chan Status)}
+	p := &ProgressBar{out: out, c: make(chan Status, 64)}
 	if cfg != nil {
 		p.Config = *cfg
 	} else {