@@ -8,27 +8,24 @@ package structlogging_test
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"regexp"
+	"slices"
 	"testing"
 	"time"
 
 	"github.com/cockroachdb/cockroach/pkg/base"
-	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/plan"
 	"github.com/cockroachdb/cockroach/pkg/server/structlogging"
-	"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
-	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
 	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
 	"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
 	"github.com/cockroachdb/cockroach/pkg/util/log/logpb"
 	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
-	"github.com/cockroachdb/errors"
-	"github.com/stretchr/testify/assert"
 )
 
 type hotRangesLogSpy struct {
@@ -74,21 +71,12 @@ func (spy *hotRangesLogSpy) Reset() {
 	spy.mu.logs = nil
 }
 
-// TestHotRangesStatsTenants tests that hot ranges stats are logged per node.
-// The test will ensure each node contains 5 distinct range replicas for hot
-// ranges logging. Each node should thus log 5 distinct range ids.
-func TestHotRangesStats(t *testing.T) {
-	defer leaktest.AfterTest(t)()
+func setupHotRangesLogTest(
+	t *testing.T, ctx context.Context,
+) (serverutils.ApplicationLayerInterface, *hotRangesLogSpy, func()) {
 	sc := log.ScopeWithoutShowLogs(t)
-	defer sc.Close(t)
-
-	skip.UnderRace(t)
-
-	ctx := context.Background()
-	spy := hotRangesLogSpy{t: t}
-	defer log.InterceptWith(ctx, &spy)()
-
-	tc := serverutils.StartCluster(t, 3, base.TestClusterArgs{
+	spy := &hotRangesLogSpy{t: t}
+	tc := serverutils.StartCluster(t, 1, base.TestClusterArgs{
 		ReplicationMode: base.ReplicationManual,
 		ServerArgs: base.TestServerArgs{
 			DefaultTestTenant: base.TestControlsTenantsExplicitly,
@@ -101,72 +89,50 @@ func TestHotRangesStats(t *testing.T) {
 			},
 		},
 	})
-	defer tc.Stopper().Stop(ctx)
-
-	db := tc.ServerConn(0)
-	sqlutils.CreateTable(
-		t, db, "foo",
-		"k INT PRIMARY KEY, v INT",
-		300,
-		sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(2)),
-	)
-
-	// Ensure both of node 1 and 2 have 5 distinct replicas from the table.
-	tableDesc := desctestutils.TestingGetPublicTableDescriptor(
-		tc.Server(0).DB(), keys.SystemSQLCodec, "test", "foo")
-	tc.SplitTable(t, tableDesc, []serverutils.SplitPoint{
-		{TargetNodeIdx: 1, Vals: []interface{}{100}},
-		{TargetNodeIdx: 1, Vals: []interface{}{120}},
-		{TargetNodeIdx: 1, Vals: []interface{}{140}},
-		{TargetNodeIdx: 1, Vals: []interface{}{160}},
-		{TargetNodeIdx: 1, Vals: []interface{}{180}},
-		{TargetNodeIdx: 2, Vals: []interface{}{200}},
-		{TargetNodeIdx: 2, Vals: []interface{}{220}},
-		{TargetNodeIdx: 2, Vals: []interface{}{240}},
-		{TargetNodeIdx: 2, Vals: []interface{}{260}},
-		{TargetNodeIdx: 2, Vals: []interface{}{280}},
-	})
 
-	// query table
-	for i := 0; i < 300; i++ {
-		db := tc.ServerConn(0)
-		sqlutils.MakeSQLRunner(db).Query(t, `SELECT * FROM test.foo`)
+	leakChecker := leaktest.AfterTest(t)
+	logInterceptor := log.InterceptWith(ctx, spy)
+	stopper := tc.Stopper()
+	teardown := func() {
+		stopper.Stop(ctx)
+		sc.Close(t)
+		logInterceptor()
+		leakChecker()
 	}
 
-	// Skip node 1 since it will contain many more replicas.
-	// We only need to check nodes 2 and 3 to see that the nodes are logging their local hot ranges.
-	rangeIDs := make(map[int64]struct{})
-	for _, i := range []int{1, 2} {
-		spy.Reset()
-		ts := tc.ApplicationLayer(i)
-		structlogging.TelemetryHotRangesStatsEnabled.Override(ctx, &ts.ClusterSettings().SV, true)
-		structlogging.TelemetryHotRangesStatsInterval.Override(ctx, &ts.ClusterSettings().SV, time.Second)
-		structlogging.TelemetryHotRangesStatsLoggingDelay.Override(ctx, &ts.ClusterSettings().SV, 0*time.Millisecond)
-
-		testutils.SucceedsSoon(t, func() error {
-			logs := spy.Logs()
-			if len(logs) < 5 {
-				return errors.New("waiting for hot ranges to be logged")
-			}
+	ts := tc.ApplicationLayer(0)
+	return ts, spy, teardown
+}
 
-			return nil
-		})
-		structlogging.TelemetryHotRangesStatsInterval.Override(ctx, &ts.ClusterSettings().SV, 1*time.Hour)
+// TestHotRangeLogger tests that hot ranges stats are logged per node.
+// The test waits for the hot ranges logging loop to emit stats for a
+// range known to exist on every node: the system.sqlliveness range.
+func TestHotRangeLogger(t *testing.T) {
+	skip.UnderRace(t)
+	ctx := context.Background()
+	ts, spy, teardown := setupHotRangesLogTest(t, ctx)
+	defer teardown()
 
-	// Get first 5 logs since the logging loop may have fired multiple times.
-	// We should have gotten 5 distinct range ids, one for each split point above.
-	logs := spy.Logs()[:5]
+	structlogging.TelemetryHotRangesStatsEnabled.Override(ctx, &ts.ClusterSettings().SV, true)
+	structlogging.TelemetryHotRangesStatsInterval.Override(ctx, &ts.ClusterSettings().SV, time.Millisecond)
+	structlogging.TelemetryHotRangesStatsLoggingDelay.Override(ctx, &ts.ClusterSettings().SV, 0*time.Millisecond)
+
+	testutils.SucceedsSoon(t, func() error {
+		logs := spy.Logs()
+
+ // Depend on a range which we don't exist to go anywhere.
159
124
for _ , l := range logs {
160
- assert .Equal (t , l .Databases , []string {"‹test›" })
161
- assert .Equal (t , l .Tables , []string {"‹foo›" })
162
- assert .Equal (t , l .Indexes , []string {"‹foo_pkey›" })
163
- _ , ok := rangeIDs [l .RangeID ]
164
- if ok {
165
- t .Fatalf (`Logged ranges should be unique per node for this test.
166
- found range on node %d and node %d: %s %s %s %s %d` , i , l .LeaseholderNodeID , l .Databases , l .SchemaName , l .Tables , l .Indexes , l .RangeID )
125
+ if ! slices .Equal (l .Databases , []string {"‹system›" }) {
126
+ continue
127
+ }
128
+ if ! slices .Equal (l .Tables , []string {"‹sqlliveness›" }) {
129
+ continue
167
130
}
168
- rangeIDs [l .RangeID ] = struct {}{}
131
+ if ! slices .Equal (l .Indexes , []string {"‹primary›" }) {
132
+ continue
133
+ }
134
+ return nil
169
135
}
170
-
171
- }
136
+ return errors . New ( "waited too long for the synthetic data" )
137
+ })
172
138
}
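
For context, the `hotRangesLogSpy` type whose `Intercept`, `Logs`, and `Reset` methods this diff relies on is declared just above these hunks and is unchanged by the PR. The following is only a minimal sketch of what such a `log.Interceptor` typically looks like, inferred from the imports (`encoding/json`, `regexp`, `logpb`, `eventpb`, `syncutil`) and the calls in the test; the exact field layout and the event-matching regexp are assumptions, not the repository's verbatim code:

```go
// Sketch only: a log.Interceptor that collects structured hot-ranges events.
// Field names and the regexp below are assumed, not copied from the repo.
type hotRangesLogSpy struct {
	t  *testing.T
	mu struct {
		syncutil.RWMutex
		logs []eventpb.HotRangesStats
	}
}

// Intercept receives each log entry as JSON, keeps only TELEMETRY-channel
// entries carrying a hot_ranges_stats event, and stores the decoded payload.
func (spy *hotRangesLogSpy) Intercept(e []byte) {
	var entry logpb.Entry
	if err := json.Unmarshal(e, &entry); err != nil {
		spy.t.Fatal(err)
	}

	re := regexp.MustCompile(`"EventType":"hot_ranges_stats"`)
	if entry.Channel != logpb.Channel_TELEMETRY || !re.MatchString(entry.Message) {
		return
	}

	spy.mu.Lock()
	defer spy.mu.Unlock()
	var rangesLog eventpb.HotRangesStats
	if err := json.Unmarshal(
		[]byte(entry.Message[entry.StructuredStart:entry.StructuredEnd]), &rangesLog,
	); err != nil {
		spy.t.Fatal(err)
	}
	spy.mu.logs = append(spy.mu.logs, rangesLog)
}

// Logs returns a copy of the collected events so callers can poll safely.
func (spy *hotRangesLogSpy) Logs() []eventpb.HotRangesStats {
	spy.mu.RLock()
	defer spy.mu.RUnlock()
	return append([]eventpb.HotRangesStats(nil), spy.mu.logs...)
}
```

With a collector of that shape, the new `TestHotRangeLogger` above only has to poll `spy.Logs()` inside `testutils.SucceedsSoon` until an entry for the system.sqlliveness range shows up.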