
Commit ab2ca95

Michael Andreas Dagitses authored and pytorchmergebot committed
turn on -Werror=unused-variable in our Bazel CPU build
Summary: We also fix any existing issues. Note that we only do this for the CPU build: nvcc is considered a C++ toolchain, but it does not have the same flag support, so adding these flags to the GPU build would cause nvcc errors.

Test Plan: Built locally; rely on CI to confirm.

Reviewers: malfet
Subscribers:
Tasks:
Tags:

Pull Request resolved: pytorch#79156
Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD
1 parent e727539 commit ab2ca95

25 files changed (+49, -129 lines)
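Most of the per-file changes below are mechanical fixes for the newly enforced warning, and they fall into three patterns: delete a variable that was never needed, drop the name from a call made only for its side effects, or keep the named computation and discard the value explicitly. A minimal, self-contained sketch of the three patterns (names here are hypothetical, not code from this commit):

#include <vector>

int expensive(const std::vector<int>& v) {
  return static_cast<int>(v.size());
}

int main() {
  std::vector<int> v{1, 2, 3};

  // Pattern 1: the value is never needed -> delete the declaration entirely.
  //   double eps = 1e-05;   // would now fail with -Werror=unused-variable

  // Pattern 2: the call is wanted only for its side effects -> drop the name.
  expensive(v);              // instead of `int r = expensive(v);`

  // Pattern 3: keep the named computation (e.g. inside a profiled scope) and
  // mark the value as intentionally discarded.
  int z = expensive(v) / 2;
  (void)z;

  return 0;
}

The `(void)` cast is the narrowest of the three fixes: the expression is still evaluated and named, but the compiler treats the value as deliberately discarded.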

.bazelrc (+3, -1)

@@ -52,7 +52,9 @@ build \
   --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
   --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits \
   --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=unused-function \
-  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=unused-function
+  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=unused-function \
+  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=unused-variable \
+  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=unused-variable

 build \
   --per_file_copt=//:aten/src/ATen/RegisterCompositeExplicitAutograd.cpp@-Wno-error=unused-function \

c10/core/DispatchKeySet.h (+2)

@@ -504,6 +504,8 @@ class DispatchKeySet final {
     using iterator_category = std::input_iterator_tag;
     using value_type = DispatchKey;
     using difference_type = ptrdiff_t;
+    using reference = value_type&;
+    using pointer = value_type*;
     // final mask value should mask out the entire keyset
     static const uint8_t end_iter_mask_val =
         num_backends + num_functionality_keys;
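The two added typedefs are what allow the updated test below to count keys with std::distance: before C++20, std::iterator_traits only exposes its member types when the iterator defines all five of iterator_category, value_type, difference_type, reference, and pointer, and std::distance's return type is iterator_traits<It>::difference_type. A toy sketch of that requirement with a made-up counting iterator (not the real DispatchKeySet iterator):

#include <cstddef>
#include <iostream>
#include <iterator>

// Hypothetical input iterator over the integers [0, n); illustrative only.
struct CountingIterator {
  // Pre-C++20, std::iterator_traits<CountingIterator> is populated only if
  // all five member types are present; without reference and pointer,
  // the std::distance call below would not compile.
  using iterator_category = std::input_iterator_tag;
  using value_type = int;
  using difference_type = std::ptrdiff_t;
  using reference = value_type&;
  using pointer = value_type*;

  int i;
  int operator*() const { return i; }
  CountingIterator& operator++() { ++i; return *this; }
  bool operator==(const CountingIterator& other) const { return i == other.i; }
  bool operator!=(const CountingIterator& other) const { return i != other.i; }
};

int main() {
  CountingIterator begin{0};
  CountingIterator end{8};
  // For input iterators, std::distance walks the range and returns
  // iterator_traits<CountingIterator>::difference_type.
  std::ptrdiff_t n = std::distance(begin, end);
  std::cout << n << "\n";  // prints 8
  return 0;
}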

c10/test/core/DispatchKeySet_test.cpp (+4, -24)

@@ -1,5 +1,7 @@
 #include <gtest/gtest.h>

+#include <cstddef>
+#include <iterator>
 #include <unordered_set>

 #include <c10/core/DispatchKeySet.h>
@@ -14,7 +16,6 @@ TEST(DispatchKeySet, ShowSemantics) {
   // It corresponds to "dense" functionality, "CPU" backend.
   // This means that it gets a dense functionality bit, and a cpu backend bit
   // set.
-  auto undefined_set = DispatchKeySet();
   auto dense_cpu_set = DispatchKeySet(DispatchKey::CPU);
   ASSERT_TRUE(dense_cpu_set.has(DispatchKey::Dense));
   ASSERT_TRUE(dense_cpu_set.has_backend(BackendComponent::CPUBit));
@@ -48,8 +49,6 @@ TEST(DispatchKeySet, ShowSemantics) {
       DispatchKey::Dense,
       DispatchKey::CUDA,
       DispatchKey::CPU});
-  auto fpga = DispatchKeySet(DispatchKey::FPGA);
-  auto fpga_and_cpu = DispatchKeySet({DispatchKey::FPGA, DispatchKey::CPU});
   // this keyset has all of the building block keys:
   ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::AutogradFunctionality));
   ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::Dense));
@@ -86,8 +85,6 @@ TEST(DispatchKeySet, ShowSemantics) {
   // Iterators allow you to iterate individually through the DispatchKey's in a
   // DispatchKeySet
   auto empty_set = DispatchKeySet();
-  auto t1 = empty_set.begin();
-  auto t2 = empty_set.end();
   ASSERT_EQ(*empty_set.begin(), *empty_set.end());

   // However, only keys that correspond to actual runtime indices of kernels in
@@ -370,29 +367,12 @@ TEST(DispatchKeySet, IteratorCrossProduct) {

 TEST(DispatchKeySet, IteratorFull) {
   DispatchKeySet full_set(DispatchKeySet::FULL);
-  uint8_t i = 0;
-
-  for (const auto& it : full_set) {
-    i++;
-  }
-  // Total # of runtime entries includes an entry for DispatchKey::Undefined,
-  // which is not included when iterating through the DispatchKeySet.
-  ASSERT_EQ(i, num_runtime_entries - 1);
-}
-
-TEST(DispatchKeySet, IteratorRangeFull) {
-  DispatchKeySet full_set(DispatchKeySet::FULL);
-  uint8_t i = 0;
-
-  for (DispatchKey dispatch_key : full_set) {
-    i++;
-  }
+  std::ptrdiff_t count = std::distance(full_set.begin(), full_set.end());

   // Total # of runtime entries includes an entry for DispatchKey::Undefined,
   // which is not included when iterating through the DispatchKeySet.
-  ASSERT_EQ(i, num_runtime_entries - 1);
+  ASSERT_EQ(count, std::ptrdiff_t{num_runtime_entries} - 1);
 }
-
 TEST(DispatchKeySet, FailAtEndIterator) {
   DispatchKeySet full_set(DispatchKeySet::FULL);
   uint64_t raw_repr = full_set.raw_repr();

c10/test/util/intrusive_ptr_test.cpp (+10, -28)

@@ -219,8 +219,6 @@ TEST(
     givenInvalidPtr_whenMoveAssigning_thenNewInstanceIsValid) {
   intrusive_ptr<SomeClass> obj1 = make_intrusive<SomeClass>();
   intrusive_ptr<SomeClass> obj2;
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.get();
   obj2 = std::move(obj1);
   EXPECT_TRUE(obj2.defined());
 }
@@ -271,8 +269,6 @@ TEST(
     givenInvalidPtr_whenMoveAssigningToBaseClass_thenNewInstanceIsValid) {
   intrusive_ptr<SomeChildClass> obj1 = make_intrusive<SomeChildClass>(5);
   intrusive_ptr<SomeBaseClass> obj2;
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeBaseClass* obj1ptr = obj1.get();
   obj2 = std::move(obj1);
   EXPECT_TRUE(obj2.defined());
 }
@@ -358,8 +354,6 @@ TEST(
     givenInvalidPtr_whenCopyAssigning_thenNewInstanceIsValid) {
   intrusive_ptr<SomeClass> obj1 = make_intrusive<SomeClass>();
   intrusive_ptr<SomeClass> obj2;
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.get();
   obj2 = obj1;
   EXPECT_TRUE(obj2.defined());
 }
@@ -387,8 +381,6 @@ TEST(
     givenInvalidPtr_whenCopyAssigningToBaseClass_thenNewInstanceIsValid) {
   intrusive_ptr<SomeChildClass> obj1 = make_intrusive<SomeChildClass>(5);
   intrusive_ptr<SomeBaseClass> obj2;
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeBaseClass* obj1ptr = obj1.get();
   obj2 = obj1;
   EXPECT_TRUE(obj2.defined());
 }
@@ -1779,8 +1771,7 @@ TEST(
     givenInvalidPtr_whenMoveAssigning_thenNewInstanceIsValid) {
   IntrusiveAndWeak<SomeClass> obj1 = make_weak_intrusive<SomeClass>();
   weak_intrusive_ptr<SomeClass> obj2 = make_invalid_weak<SomeClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = std::move(obj1.weak);
   EXPECT_FALSE(obj2.expired());
 }
@@ -1826,8 +1817,7 @@ TEST(
     givenWeakOnlyPtr_whenMoveAssigning_thenNewInstanceIsValid) {
   IntrusiveAndWeak<SomeClass> obj1 = make_weak_intrusive<SomeClass>();
   weak_intrusive_ptr<SomeClass> obj2 = make_weak_only<SomeClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = std::move(obj1.weak);
   EXPECT_FALSE(obj2.expired());
 }
@@ -1846,8 +1836,7 @@ TEST(
     WeakIntrusivePtrTest,
     givenWeakOnlyPtr_whenMoveAssigningToSelf_thenStaysInvalid) {
   weak_intrusive_ptr<SomeClass> obj1 = make_weak_only<SomeClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.lock().get();
+  obj1.lock().get();
   obj1 = std::move(obj1);
   // NOLINTNEXTLINE(bugprone-use-after-move)
   EXPECT_TRUE(obj1.expired());
@@ -1911,8 +1900,7 @@ TEST(
   IntrusiveAndWeak<SomeChildClass> obj1 =
       make_weak_intrusive<SomeChildClass>(5);
   weak_intrusive_ptr<SomeBaseClass> obj2 = make_invalid_weak<SomeBaseClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeBaseClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = std::move(obj1.weak);
   EXPECT_FALSE(obj2.expired());
 }
@@ -1945,8 +1933,7 @@ TEST(
   IntrusiveAndWeak<SomeChildClass> obj1 =
       make_weak_intrusive<SomeChildClass>(5);
   weak_intrusive_ptr<SomeBaseClass> obj2 = make_weak_only<SomeBaseClass>(2);
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeBaseClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = std::move(obj1.weak);
   EXPECT_FALSE(obj2.expired());
 }
@@ -2028,8 +2015,7 @@ TEST(
     givenInvalidPtr_whenCopyAssigning_thenNewInstanceIsValid) {
   IntrusiveAndWeak<SomeClass> obj1 = make_weak_intrusive<SomeClass>();
   weak_intrusive_ptr<SomeClass> obj2 = make_invalid_weak<SomeClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = obj1.weak;
   EXPECT_FALSE(obj2.expired());
 }
@@ -2048,8 +2034,7 @@ TEST(
     givenWeakOnlyPtr_whenCopyAssigning_thenNewInstanceIsValid) {
   IntrusiveAndWeak<SomeClass> obj1 = make_weak_intrusive<SomeClass>();
   weak_intrusive_ptr<SomeClass> obj2 = make_weak_only<SomeClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = obj1.weak;
   EXPECT_FALSE(obj2.expired());
 }
@@ -2068,8 +2053,7 @@ TEST(
     WeakIntrusivePtrTest,
     givenWeakOnlyPtr_whenCopyAssigningToSelf_thenStaysInvalid) {
   weak_intrusive_ptr<SomeClass> obj1 = make_weak_only<SomeClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeClass* obj1ptr = obj1.lock().get();
+  obj1.lock().get();
   // NOLINTNEXTLINE(clang-diagnostic-self-assign-overloaded)
   obj1 = obj1;
   EXPECT_TRUE(obj1.expired());
@@ -2111,8 +2095,7 @@ TEST(
   IntrusiveAndWeak<SomeChildClass> obj1 =
       make_weak_intrusive<SomeChildClass>(5);
   weak_intrusive_ptr<SomeBaseClass> obj2 = make_invalid_weak<SomeBaseClass>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeBaseClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = obj1.weak;
   EXPECT_FALSE(obj2.expired());
 }
@@ -2145,8 +2128,7 @@ TEST(
   IntrusiveAndWeak<SomeChildClass> obj1 =
       make_weak_intrusive<SomeChildClass>(5);
   weak_intrusive_ptr<SomeBaseClass> obj2 = make_weak_only<SomeBaseClass>(2);
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  SomeBaseClass* obj1ptr = obj1.weak.lock().get();
+  obj1.weak.lock().get();
   obj2 = obj1.weak;
   EXPECT_FALSE(obj2.expired());
 }

test/cpp/api/dataloader.cpp (-5)

@@ -1900,8 +1900,6 @@ TEST(DataLoaderTest, ChunkDatasetDoesNotHang) {
       datasets::ChunkDatasetOptions(
           prefetch_count, batch_size, cache_size));

-  samplers::SequentialSampler& chunk_sampler = dataset->chunk_sampler();
-
   auto data_loader = torch::data::make_data_loader(
       dataset.map(transforms::BatchLambda<DummyChunkDataReader::BatchType, DummyChunkDataReader::DataType>(
           [](DummyChunkDataReader::BatchType batch) {
@@ -2043,7 +2041,6 @@ TEST(DataLoaderTest, ChunkDatasetLoad) {
   const size_t prefetch_count = 1;
   const size_t batch_size = 10;
   const size_t dataloader_worker_count = 0;
-  const size_t save_interval = 2;

   DummyChunkDataReader data_reader;
   samplers::SequentialSampler sampler(0);
@@ -2223,8 +2220,6 @@ TEST(DataLoaderTest, ChunkDatasetCrossChunkShuffle) {
   std::vector<int> expected_result;
   {
     // construct expected result
-    int offset = 0;
-
     for (const auto i : c10::irange((chunk_count + cross_chunk_shuffle_count - 1) /
                                     cross_chunk_shuffle_count)) {
       for (const auto j : c10::irange(chunk_size)) {

test/cpp/api/functional.cpp (-2)

@@ -1940,7 +1940,6 @@ TEST_F(FunctionalTest, InstanceNorm2d) {

 TEST_F(FunctionalTest, InstanceNorm2dDefaultOptions) {
   int num_features = 5;
-  double eps = 1e-05;

   auto input = torch::arange(2. * num_features * 2 * 2).view({2, num_features, 2, 2});
   auto output = F::instance_norm(input);
@@ -2031,7 +2030,6 @@ TEST_F(FunctionalTest, InstanceNorm3d) {

 TEST_F(FunctionalTest, InstanceNorm3dDefaultOptions) {
   int num_features = 5;
-  double eps = 1e-05;

   auto input = torch::arange(2. * num_features * 2 * 2 * 2).view({2, num_features, 2, 2, 2});
   auto output = F::instance_norm(input);

test/cpp/api/nn_utils.cpp (-1)

@@ -613,7 +613,6 @@ TEST_F(NNUtilsTest, PackPaddedSequence) {
     }
     batch_sizes[i-1] = total;
   }
-  int64_t offset = 0;
   std::vector<torch::Tensor> tensors_to_be_cat;
   for (int64_t i = 1; i < static_cast<int64_t>(sorted_lengths.size() + 1); i++) {
     int64_t l = sorted_lengths.at(i-1);

test/cpp/api/optim.cpp (+1, -1)

@@ -173,7 +173,7 @@ TEST(OptimTest, OptimizerAccessors) {
   const auto& optimizer_ = Adagrad(params, options);
   optimizer_.defaults();
   // test for param_groups() with const reference return
-  const auto& params_2 = optimizer_.param_groups();
+  (void)optimizer_.param_groups();
   // test for state() with const reference return
   optimizer_.state();
 }

test/cpp/jit/test_backend_compiler_lib.cpp (-3)

@@ -119,7 +119,6 @@ class BackendWithCompiler : public PyTorchBackendInterface {
       IValue val = token;
       auto instruction = val.toTupleRef().elements()[0].toStringRef();
       auto debug_handle = val.toTupleRef().elements()[1].toInt();
-      double const_val = 1.0;
 #ifndef NO_PROFILING
       auto start_time_us = torch::profiler::impl::getTime() / 1000;
 #endif
@@ -132,8 +131,6 @@ class BackendWithCompiler : public PyTorchBackendInterface {
           instruction);
       // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       auto sub = instruction.substr(15);
-      // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-      const_val = stod(sub);
     } else if (instruction == "aten::add" || instruction == "aten::sub") {
       TORCH_CHECK(x.sizes() == h.sizes());
       if (x.dim() > 1 || (x.dim() == 1 && x.size(0) > 1)) {

test/cpp/jit/test_fuser.cpp (+6, -5)

@@ -122,11 +122,12 @@ TEST_F(FuserTest, TestOne_CUDA) {
   // with different internal strides. To do this, we generate a tensor
   // with the "wrong" dimensions, and then use transpose to get an
   // appropriately sized view.
-  for (const auto i : c10::irange(graph.inputs().size())) {
-    std::vector<int64_t> dims = {128, 128, 32};
-    std::swap(dims[ti], dims[tj]);
-    inputs.push_back(at::rand(dims, at::kCUDA).transpose(ti, tj));
-  }
+  std::generate_n(
+      std::back_inserter(inputs), graph.inputs().size(), [ti, tj] {
+        std::array<int64_t, 3> dims = {128, 128, 32};
+        std::swap(dims[ti], dims[tj]);
+        return at::rand(dims, at::kCUDA).transpose(ti, tj);
+      });

   auto t22 = inputs[4].sigmoid();
   auto t20 = inputs[3].sigmoid();
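The std::generate_n rewrite above keeps the "build N inputs with shuffled dims" logic but drops the loop index that the old body never read, which is what tripped -Werror=unused-variable. A standalone sketch of the same idiom, using plain std::array values instead of CUDA tensors (sizes and names are illustrative):

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <utility>
#include <vector>

int main() {
  std::vector<std::array<std::int64_t, 3>> inputs;
  const std::size_t ti = 0;
  const std::size_t tj = 2;

  // Append 5 elements without a `for (const auto i : irange(n))` loop whose
  // index would go unused inside the body.
  std::generate_n(std::back_inserter(inputs), 5, [ti, tj] {
    std::array<std::int64_t, 3> dims = {128, 128, 32};
    std::swap(dims[ti], dims[tj]);  // same dimension shuffle as the loop body
    return dims;
  });

  std::cout << inputs.size() << "\n";  // prints 5
  return 0;
}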

test/cpp/jit/test_irparser.cpp (-1)

@@ -301,7 +301,6 @@ graph(%a : Float(4, 5),
 TEST(IRParserTest, MalformedStrides) {
   auto graph = std::make_shared<Graph>();
   std::unordered_map<std::string, Value*> vmap;
-  bool error_thrown = false;
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
   EXPECT_ANY_THROW(parseIR(
       R"IR(

test/cpp/jit/test_lite_interpreter.cpp (-2)

@@ -1182,7 +1182,6 @@ TEST(RunTimeTest, ParseOperator) {
   std::vector<IValue> constants{
       to_tuple({1}),
   };
-  int64_t model_version = caffe2::serialize::kProducedBytecodeVersion;
   // 2. Parse the function
   std::string function_name("test_function");
   auto function = std::unique_ptr<mobile::Function>(
@@ -1566,7 +1565,6 @@ TEST(RunTimeTest, RuntimeCall) {
   std::vector<IValue> constantsCall{
       1,
   };
-  int64_t model_version = caffe2::serialize::kProducedBytecodeVersion;

   auto foo = std::make_unique<mobile::Function>(c10::QualifiedName("foo"));
   c10::ivalue::TupleElements debug_handles_m_tuple;

test/cpp/jit/test_misc.cpp (+3, -19)

@@ -1049,8 +1049,7 @@ TEST(RecordFunctionTest, Callbacks) {
   GraphOptimizerEnabledGuard opt_guard(false);

   auto h1 = add_remove_test_add_cb<1>();
-  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-  auto h2 = add_remove_test_add_cb<2>();
+  add_remove_test_add_cb<2>();
   auto h3 = add_remove_test_add_cb<3>();

   { RECORD_USER_SCOPE("test"); }
@@ -1144,7 +1143,6 @@ TEST(RecordFunctionTest, Callbacks) {
   } // END: global test
   { // START: thread local test
     auto ctx_th = std::thread([]() {
-      const int test_val = 234;
       const std::string test_str = "test thread str";
       addThreadLocalCallback(RecordFunctionCallback(
           [](const RecordFunction&
@@ -1417,22 +1415,6 @@ TEST(TestSymInt, AddSymbolicInt) {
 }

 TEST(FallbackGraphsTest, Basic) {
-  static const auto nestGraphIntoFallbackGraph =
-      [](const std::shared_ptr<Graph>& graph) {
-        ProfilingRecord::removeProfileCounter(graph->block());
-        auto fallback =
-            replaceBlockWithFallbackGraph(graph->block(), graph->inputs());
-        for (size_t i = 0; i < graph->outputs().size(); i++) {
-          graph->outputs()[i]->replaceAllUsesWith(fallback->output(i));
-          fallback->output(i)->copyMetadata(graph->outputs()[i]);
-        }
-        for (auto it = graph->block()->nodes().rbegin();
-             it != fallback->iterator();
-             it++) {
-          it.destroyCurrent();
-        }
-      };
-
   auto x = at::randn({1}, at::kCPU);
   auto y = at::randn({1}, at::kCPU);
   auto stack = createStack({x.clone(), y.clone()});
@@ -2656,11 +2638,13 @@ TEST(RecordDebugHandles, Basic) {
     RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS("my_function", 42, {});
     float x{5.9999}, y{2.1212};
     float z = x / y;
+    (void)z;
   }
   {
     RECORD_USER_SCOPE_WITH_INPUTS("not_my_function", {});
     float x{5.9999}, y{2.1212};
     float z = x / y;
+    (void)z;
   }
   auto profiler_results_ptr = torch::autograd::profiler::disableProfiler();
   const auto& kineto_events = profiler_results_ptr->events();
