Commit ec3f52d

cyyever authored and pytorchmergebot committed
[21/N] Fix clang-tidy warnings in jit (pytorch#134537)
Follows pytorch#133399
Pull Request resolved: pytorch#134537
Approved by: https://github.com/Skylion007
1 parent 5beb859 commit ec3f52d

12 files changed: +24 -35 lines

torch/csrc/jit/api/function_impl.h (-1)

@@ -7,7 +7,6 @@
namespace torch::jit {

struct TORCH_API GraphFunction : public Function {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  GraphFunction(
      c10::QualifiedName name,
      std::shared_ptr<Graph> graph,

torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp (+4 -12)

@@ -16,13 +16,9 @@
#include <cmath>
#include <sstream>
#include <stdexcept>
-#include <tuple>
#include <vector>

-namespace torch {
-namespace jit {
-namespace fuser {
-namespace cuda {
+namespace torch::jit::fuser::cuda {

// See NOTE [ USE OF NVRTC AND DRIVER API ]
const at::cuda::NVRTC& nvrtc() {

@@ -85,7 +81,6 @@ void codegenOutputQuery(
}

// Compiles the specified kernel and stores the metadata required to run it
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
FusedKernelCUDA::FusedKernelCUDA(
    at::DeviceIndex device,
    std::string name,

@@ -114,6 +109,7 @@ FusedKernelCUDA::FusedKernelCUDA(

  // Acquires device and NVRTC properties (for compile arch and occupancy
  // calculations)
+  // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
  prop_ = at::cuda::getCurrentDeviceProperties();
  int major = 0, minor = 0;
  bool compile_to_sass = false;

@@ -197,8 +193,7 @@ static int ceilDiv(const int a, const int b) {
void FusedKernelCUDA::launch_raw(
    const uint32_t numel,
    std::vector<void*>& arguments) const {
-  // NOLINTNEXTLINE(bugprone-unused-raii)
-  at::cuda::CUDAGuard{device_};
+  at::cuda::CUDAGuard guard{device_};
  // Hacked at::DeviceGuard (see note above)
  const auto prior_device = at::cuda::current_device();
  at::cuda::set_device(device_);

@@ -269,7 +264,4 @@ static std::shared_ptr<FusedKernel> createFusionKernel(

RegisterFusionBackend reg(DeviceType::CUDA, createFusionKernel);

-} // namespace cuda
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser::cuda
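
The launch_raw change fixes a real bug rather than just silencing a diagnostic: at::cuda::CUDAGuard{device_}; constructs an unnamed temporary that is destroyed at the end of the statement, so the device guard never actually covers the launch; naming it keeps it alive for the whole scope. A minimal sketch of the same pitfall (bugprone-unused-raii) using a standard lock guard in place of the CUDA guard:

#include <mutex>

std::mutex m;
int counter = 0;

void bump_broken() {
  std::lock_guard<std::mutex>{m}; // temporary: locks and unlocks immediately
  ++counter;                      // runs WITHOUT the lock held
}

void bump_fixed() {
  std::lock_guard<std::mutex> guard{m}; // named: held until end of scope
  ++counter;                            // protected
}

The same file also collapses the four nested namespace blocks into a single C++17 nested namespace definition, the fix suggested by modernize-concat-nested-namespaces.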

torch/csrc/jit/codegen/fuser/cuda/fused_kernel.h (+4 -5)

@@ -1,6 +1,5 @@
#pragma once

-#include <ATen/ATen.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/codegen/fuser/fused_kernel.h>

@@ -50,11 +49,11 @@ struct TORCH_CUDA_CU_API FusedKernelCUDA
  // Note: per device to store device properties and compute launch heuristics
  // Acquiring these values at launch time would be too slow
  at::DeviceIndex device_;
-  int maxBlocks_;
-  cudaDeviceProp* prop_;
+  int maxBlocks_{};
+  cudaDeviceProp* prop_{};
  std::vector<char> ptx_;
-  CUmodule module_;
-  CUfunction function_;
+  CUmodule module_{};
+  CUfunction function_{};
};

} // namespace torch::jit::fuser::cuda
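
Empty braces give each member a well-defined zero value at construction, which satisfies cppcoreguidelines-pro-type-member-init without a NOLINT or a constructor body. A minimal sketch of the idiom, with plain pointers standing in for the CUDA handle types:

struct KernelHandles {
  int maxBlocks{};  // value-initialized to 0
  void* module{};   // value-initialized to nullptr
  void* function{}; // likewise nullptr
};
// Members read before the real values are assigned now yield defined
// zeros instead of indeterminate garbage.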

torch/csrc/jit/codegen/fuser/partition_desc.h (+1 -2)

@@ -28,8 +28,7 @@ struct TORCH_API PartitionDesc {
      // so dim - 1 is no longer contiguous
      cont[dim_ - 1] = false;
    }
-    // NOLINTNEXTLINE(modernize-make-shared)
-    subTensorDesc_.reset(new TensorDesc(_desc.scalar_type, cont));
+    subTensorDesc_ = std::make_shared<TensorDesc>(_desc.scalar_type, cont);
  }

  bool isNoop() const {
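
std::make_shared allocates the object and its reference-count control block together, where reset(new T(...)) performs two separate allocations; it is also safer when anything in the surrounding expression can throw. A sketch of the difference modernize-make-shared points at, with an illustrative payload type:

#include <memory>
#include <string>

void example() {
  std::shared_ptr<std::string> desc;
  desc.reset(new std::string("cont"));          // two allocations
  desc = std::make_shared<std::string>("cont"); // one combined allocation
}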

torch/csrc/jit/runtime/jit_trace.cpp (+6 -4)

@@ -56,8 +56,8 @@ Node* traceNode(Node* node, TracingData& td, Stack& stack) {
}

void eraseAllOutputs(Node* opt_pn) {
-  // NOLINTNEXTLINE
-  for (int i = opt_pn->outputs().size() - 1; i >= 0; i--) {
+  for (auto i = static_cast<int64_t>(opt_pn->outputs().size()) - 1; i >= 0;
+       i--) {
    opt_pn->eraseOutput(i);
  }
}

@@ -275,10 +275,12 @@ void insertTracingNodes(Block* block, ProfilingRecord* pr, TracingData& td) {
// nodes and the outputs of the node in the scripted graph.
// There are a few subtleties with tracing Ifs and Loops
// discussed above
-std::shared_ptr<Graph> TraceGraph(std::shared_ptr<Graph> graph, Stack& stack) {
+std::shared_ptr<Graph> TraceGraph(
+    const std::shared_ptr<Graph>& graph,
+    Stack& stack) {
  TracingData td;
  GRAPH_DUMP("Before Inline:", graph);
-  Inline(*graph.get());
+  Inline(*graph);
  EliminateDeadCode(graph);
  GRAPH_DUMP("After Inline:", graph);
  auto pr = ProfilingRecord::instrumentGraph(graph);
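
The old loop stored the unsigned outputs().size() in an int, a narrowing conversion that the deleted blanket NOLINT was hiding. Casting to a signed 64-bit type before subtracting keeps the i >= 0 termination test well defined even when there are no outputs; with an unsigned counter, 0 - 1 would wrap to a huge positive value. The same pattern on a plain vector:

#include <cstdint>
#include <vector>

void eraseBackToFront(std::vector<int>& v) {
  // Cast before subtracting: v.size() is unsigned, so v.size() - 1 on an
  // empty vector would wrap around rather than producing -1.
  for (auto i = static_cast<int64_t>(v.size()) - 1; i >= 0; i--) {
    v.erase(v.begin() + i);
  }
}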

torch/csrc/jit/runtime/jit_trace.h (+1 -1)

@@ -3,6 +3,6 @@

namespace torch::jit {
TORCH_API std::shared_ptr<Graph> TraceGraph(
-    std::shared_ptr<Graph> graph,
+    const std::shared_ptr<Graph>& graph,
    Stack& stack);
} // namespace torch::jit
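
Passing std::shared_ptr by value bumps and drops the atomic reference count on every call even though TraceGraph only reads the graph; a const reference observes the pointer with no refcount traffic (the pattern performance-unnecessary-value-param flags). The signature changes in symbolic_shape_registry below apply the same rule. A sketch with an illustrative type:

#include <memory>

struct Graph {
  int nodes = 0;
};

// By value: one atomic increment and decrement per call.
int countCopy(std::shared_ptr<Graph> g) { return g->nodes; }

// By const reference: no refcount traffic; fine whenever the callee only
// observes the graph and does not keep or share ownership.
int countRef(const std::shared_ptr<Graph>& g) { return g->nodes; }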

torch/csrc/jit/runtime/register_ops_utils.cpp (+3 -3)

@@ -287,20 +287,20 @@ void listAdd(Stack& stack) {
  c10::List<IValue> ret = make_result_list<IValue>(a.elementType());

  if (a.use_count() == 1) {
-    ret = std::move(a);
+    ret = a;
  } else {
    ret = a.copy();
  }

-  ret.append(std::move(b));
+  ret.append(b);

  push(stack, std::move(ret));
}

void listInplaceAdd(Stack& stack) {
  c10::List<IValue> b = pop(stack).to<c10::List<IValue>>();
  c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
-  a.append(std::move(b));
+  a.append(b);
  push(stack, std::move(a));
}
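
Dropping std::move here is cheap because c10::List is a reference-counted handle (an intrusive_ptr to the underlying list), so ret = a shares storage in constant time; the explicit a.copy() branch is what performs the deep copy. A stand-in sketch using std::shared_ptr, illustrative rather than the actual c10 implementation:

#include <memory>
#include <vector>

// Stand-in for a refcounted list handle such as c10::List.
using Handle = std::shared_ptr<std::vector<int>>;

void example(const Handle& a) {
  // Shallow: copies the pointer and bumps the refcount; elements shared.
  Handle ret = a;
  // Deep: duplicates the elements, analogous to c10::List::copy().
  Handle deep = std::make_shared<std::vector<int>>(*a);
  (void)ret;
  (void)deep;
}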

torch/csrc/jit/runtime/register_prim_ops.cpp (+1 -1)

@@ -830,7 +830,7 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs{
            ss << i;
          }
          drop(stack, num_inputs);
-          ss << std::endl;
+          ss << '\n';
          auto* handler = getPrintHandler();
          TORCH_INTERNAL_ASSERT(handler);
          handler(ss.str());
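
std::endl writes a newline and then flushes the stream; on a stringstream the flush is a no-op that still costs a call, and on real output streams it can be expensive in hot paths, which is why performance-avoid-endl prefers a plain '\n':

#include <iostream>
#include <sstream>

int main() {
  std::ostringstream ss;
  ss << "result: " << 42 << '\n';      // newline only
  ss << "result: " << 42 << std::endl; // newline plus a pointless flush
  std::cout << ss.str();
}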

torch/csrc/jit/runtime/script_profile.cpp (+1 -1)

@@ -147,7 +147,7 @@ const ScriptProfile::SourceMap& ScriptProfile::dumpStats() {
  for (const auto& datapoint : datapoints_) {
    if (const auto& source = datapoint->sourceRange.source()) {
      if (auto fileLineCol = datapoint->sourceRange.file_line_col()) {
-        auto it = sourceMap_.find(*source.get());
+        auto it = sourceMap_.find(*source);
        if (it == sourceMap_.end()) {
          it = sourceMap_.emplace(SourceRef{source}, LineMap{}).first;
        }
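
*source.get() dereferences the raw pointer returned by get(), which is exactly what the smart pointer's own operator* already does; readability-redundant-smartptr-get flags the detour. A sketch with an illustrative payload type:

#include <cassert>
#include <memory>
#include <string>

int main() {
  auto source = std::make_shared<std::string>("graph.py");
  assert(*source.get() == *source); // identical results; .get() is noise
  return 0;
}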

torch/csrc/jit/runtime/symbolic_shape_registry.cpp (+2 -2)

@@ -219,7 +219,7 @@ void checkInputAndOutputTypes(

void transformShapeFunction(
    const FunctionSchema* schema_string,
-    std::shared_ptr<Graph> graph) {
+    const std::shared_ptr<Graph>& graph) {
  Inline(*graph);

  // ATEN operators can return multiple unboxed values, this in contrast to

@@ -411,7 +411,7 @@ TORCH_API std::optional<BoundedShapeGraphs> boundedGraphsForSchema(

void RegisterShapeComputeGraphForSchema(
    const FunctionSchema& schema,
-    std::shared_ptr<Graph> g) {
+    const std::shared_ptr<Graph>& g) {
  std::lock_guard<std::mutex> guard(lock);
  if (cached_schema_to_graph.empty()) {
    loadFunctions();
loadFunctions();

torch/csrc/jit/runtime/symbolic_shape_registry.h (+1 -1)

@@ -52,7 +52,7 @@ struct BoundedShapeGraphs {

TORCH_API void RegisterShapeComputeGraphForSchema(
    const FunctionSchema& schema,
-    std::shared_ptr<Graph> g);
+    const std::shared_ptr<Graph>& g);

TORCH_API std::optional<std::shared_ptr<Graph>> shapeComputeGraphForSchema(
    const FunctionSchema& schema);

torch/csrc/jit/serialization/pickler.h (-2)

@@ -94,7 +94,6 @@ enum class PickleOpCode : char {

using ::c10::IValue;

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct WriteableTensorData {
  const char* data() const {
    return static_cast<const char*>(tensor_.storage().data());

@@ -140,7 +139,6 @@ class TORCH_API Pickler {
        memoized_class_types_(memoized_class_types),
        get_tensor_id_(std::move(get_tensor_id)),
        tag_aggregates_(tag_aggregates) {}
-  // NOLINTNEXTLINE(bugprone-exception-escape)
  ~Pickler();

  // Push protocol onto the stack
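
The NOLINT above ~Pickler() suppressed bugprone-exception-escape, which warns when an exception can propagate out of a destructor: destructors are implicitly noexcept, so an escaping throw calls std::terminate. A minimal sketch of what the check guards against, with an illustrative type:

#include <iostream>
#include <stdexcept>

struct Flusher {
  ~Flusher() {
    try {
      flush(); // may throw
    } catch (const std::exception& e) {
      // Swallow (or log); letting this escape a noexcept destructor
      // would call std::terminate.
      std::cerr << "flush failed: " << e.what() << '\n';
    }
  }
  void flush() { throw std::runtime_error("disk full"); }
};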
