Commit 322e4b4

mikey dagitses authored and pytorchmergebot committed
set -Wsuggest-override for builds (pytorch#89852)
Stack created with [Sapling](https://sapling-scm.com). Best reviewed with [ReviewStack](https://reviewstack.dev/pytorch/pytorch/pull/89852).

* __->__ pytorch#89852
* pytorch#89851

set -Wsuggest-override for builds

Summary: This was flagged by a Meta internal build.

Test Plan: Rely on CI.

Pull Request resolved: pytorch#89852
Approved by: https://github.com/malfet
1 parent 8ecb49b commit 322e4b4
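
For context, a minimal sketch (not taken from the PR; `Base`, `Derived`, and `f` are illustrative names) of what `-Wsuggest-override` reports and the fix this commit applies across the tree:

```cpp
struct Base {
  virtual ~Base() = default;
  virtual void f();
};

struct Derived : Base {
  // Without `override`, g++/clang++ -Wsuggest-override warns:
  //   'virtual void Derived::f()' can be marked override [-Wsuggest-override]
  void f() override;  // explicit override, as applied throughout this commit
};
```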

Showing 24 changed files with 96 additions and 46 deletions.


CMakeLists.txt (+6)

@@ -848,6 +848,12 @@ if(NOT MSVC)
   # Suppress "The ABI for passing parameters with 64-byte alignment has changed in GCC 4.6"
   string(APPEND CMAKE_CXX_FLAGS " -Wno-psabi")
 endif()
+if(NOT CMAKE_COMPILER_IS_GNUCXX OR GCC_VERSION VERSION_GREATER_EQUAL 9.2)
+  # Prior to GCC 9.2, this warning misfires when a method is
+  # labeled "final".
+  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010
+  append_cxx_flag_if_supported("-Wsuggest-override" CMAKE_CXX_FLAGS)
+endif()
 
 # Use ld.gold if available, fall back to ld.bfd (the default ld) if not
 if(USE_GOLD_LINKER)
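
The version guard exists because of GCC bug 78010, referenced in the comment above: prior to 9.2, a case like the following (a hypothetical reconstruction, not from the PR) would draw a spurious suggestion even though `final` already makes the intent explicit:

```cpp
struct Base {
  virtual ~Base() = default;
  virtual void f();
};

struct Derived : Base {
  // GCC < 9.2: spurious "can be marked override" warning here, despite `final`.
  // GCC >= 9.2 accepts `final` as sufficient, hence the CMake version check.
  void f() final;
};
```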

aten/src/ATen/native/cudnn/Conv_v8.cpp (+7)

@@ -1,4 +1,5 @@
 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+
 #include <ATen/cuda/CUDAConfig.h> // for the definition of AT_CUDNN_ENABLED
 
 #if AT_CUDNN_ENABLED()
@@ -8,7 +9,13 @@
 #if HAS_CUDNN_V8()
 
 #include <ATen/cudnn/cudnn-wrapper.h>
+
+#include <c10/macros/Macros.h>
+
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
 #include <cudnn_frontend.h>
+C10_DIAGNOSTIC_POP()
+
 #include <cudnn_frontend_find_plan.h>
 #include <cudnn_frontend_get_plan.h>
 #include <ATen/core/Tensor.h>
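
The same wrap-and-pop pattern recurs for each third-party header below. As a generic sketch (the header name here is a placeholder for cudnn_frontend.h, the onnx headers, etc.):

```cpp
// Suppress -Wsuggest-override only across the offending third-party header,
// then restore the previous diagnostic state so first-party code stays checked.
#include <c10/macros/Macros.h>

C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <some/thirdparty/header.h>  // placeholder, not a real include
C10_DIAGNOSTIC_POP()
```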

aten/src/ATen/native/quantized/cudnn/utils.h (+4 −1)

@@ -17,7 +17,10 @@ This file contains some of the auxiliary functions used by both Conv.cpp & Linea
 #include <ATen/native/quantized/PackedParams.h>
 #include <c10/core/QScheme.h>
 #include <c10/util/ArrayRef.h>
+
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
 #include <cudnn_frontend.h>
+C10_DIAGNOSTIC_POP()
 
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
@@ -118,7 +121,7 @@ struct PackedConvWeightCudnn : public ConvPackedParamsBase<kSpatialDim> {
 
   at::Tensor apply_dynamic(
       const at::Tensor& input,
-      bool reduce_range) {
+      bool reduce_range) override {
     TORCH_CHECK(false, "apply_dynamic is currently not reported");
   }
 

c10/macros/Macros.h (+27 −1)

@@ -512,9 +512,10 @@ __device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
 #endif
 #endif // HAS_DEMANGLE
 
-#ifdef __clang__
 #define _C10_PRAGMA__(string) _Pragma(#string)
 #define _C10_PRAGMA_(string) _C10_PRAGMA__(string)
+
+#ifdef __clang__
 #define C10_CLANG_DIAGNOSTIC_PUSH() _Pragma("clang diagnostic push")
 #define C10_CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop")
 #define C10_CLANG_DIAGNOSTIC_IGNORE(flag) \
@@ -527,4 +528,29 @@ __device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
 #define C10_CLANG_HAS_WARNING(flag) 0
 #endif
 
+#ifdef __clang__
+
+#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning)         \
+  _C10_PRAGMA_(clang diagnostic push)                               \
+  _C10_PRAGMA_(clang diagnostic ignored "-Wunknown-warning-option") \
+  _C10_PRAGMA_(clang diagnostic ignored warning)
+
+#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(clang diagnostic pop)
+
+#elif __GNUC__
+
+#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \
+  _C10_PRAGMA_(GCC diagnostic push)                         \
+  _C10_PRAGMA_(GCC diagnostic ignored "-Wpragmas")          \
+  _C10_PRAGMA_(GCC diagnostic ignored warning)
+
+#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(GCC diagnostic pop)
+
+#else
+
+#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning)
+#define C10_DIAGNOSTIC_POP()
+
+#endif
+
 #endif // C10_MACROS_MACROS_H_
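
The "IF_DEFINED" part works by pre-ignoring "-Wpragmas" (GCC) or "-Wunknown-warning-option" (clang), so naming a warning the compiler doesn't know is itself harmless. An approximate expansion sketch for the GCC branch:

```cpp
// Approximate expansion of
//   C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
// under GCC:
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"          // unknown warning names below won't error
#pragma GCC diagnostic ignored "-Wsuggest-override" // the warning actually being silenced
// ... wrapped third-party #include goes here ...
#pragma GCC diagnostic pop                          // C10_DIAGNOSTIC_POP()
```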

test/cpp/api/dataloader.cpp (+2 −4)

@@ -1050,12 +1050,10 @@ TEST(DataLoaderTest, MakeDataLoaderDefaultsAsExpected) {
 }
 
 struct UnsizedDataset : public datasets::Dataset<UnsizedDataset> {
-  // NOLINTNEXTLINE(cppcoreguidelines-explicit-virtual-functions,modernize-use-override)
-  torch::data::Example<> get(size_t i) {
+  torch::data::Example<> get(size_t i) override {
     return {torch::ones(i), torch::ones(i)};
   }
-  // NOLINTNEXTLINE(cppcoreguidelines-explicit-virtual-functions,modernize-use-override)
-  torch::optional<size_t> size() const noexcept {
+  torch::optional<size_t> size() const noexcept override {
     return torch::nullopt;
   }
 };

test/cpp/dist_autograd/test_dist_autograd.cpp (+1 −1)

@@ -20,7 +20,7 @@ class DistAutogradTest : public ::testing::Test {
     autogradContainer_ = &DistAutogradContainer::init(0);
   }
 
-  virtual void TearDown() {
+  void TearDown() override {
     autogradContainer_->releaseContext(
         autogradContainer_->currentContext()->contextId());
   }

test/cpp/jit/test_misc.cpp (+1 −2)

@@ -3115,8 +3115,7 @@ TEST(TestShapeGraphLinting, Basic) {
 // fusion parameters
 class Composed : public ::testing::Test {
  public:
-  // NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
-  void SetUp() {
+  void SetUp() override {
    torch::jit::tensorexpr::getTEMustUseLLVMOnCPU() = false;
  }
 };

test/cpp/tensorexpr/test_graph_opt.cpp (+2 −3)

@@ -18,13 +18,12 @@ using namespace torch::jit::tensorexpr;
 
 class GraphOpt : public ::testing::Test {
  public:
-  // NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
-  void SetUp() {
+  void SetUp() override {
     old_cat_wo_conditionals_ = getCatWoConditionals();
     getCatWoConditionals() = true;
   }
 
-  void TearDown() {
+  void TearDown() override {
     getCatWoConditionals() = old_cat_wo_conditionals_;
   }
 
test/cpp/tensorexpr/test_kernel.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,7 @@ using namespace torch::jit::tensorexpr;
2424

2525
class Kernel : public ::testing::Test {
2626
public:
27-
// NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
28-
void SetUp() {
27+
void SetUp() override {
2928
getTEMustUseLLVMOnCPU() = false;
3029
}
3130
};

test/cpp/tensorexpr/test_loopnest.cpp (+1 −2)

@@ -2292,8 +2292,7 @@ class LoopOrderHelper : public IRVisitor {
     return ordering.str();
   }
 
-  // NOLINTNEXTLINE(cppcoreguidelines-explicit-virtual-functions,modernize-use-override)
-  void visit(ForPtr v) {
+  void visit(ForPtr v) final {
     ordering << v->var()->name_hint() << ",";
     IRVisitor::visit(v);
   }

test/cpp/tensorexpr/test_quantization.cpp (+1 −2)

@@ -24,8 +24,7 @@ using namespace torch::jit::tensorexpr;
 
 class Quantization : public ::testing::Test {
  public:
-  // NOLINTNEXTLINE(modernize-use-override,cppcoreguidelines-explicit-virtual-functions)
-  void SetUp() {
+  void SetUp() override {
     getTEMustUseLLVMOnCPU() = false;
   }
 };

torch/csrc/cuda/CUDAPluggableAllocator.h (+2 −2)

@@ -66,8 +66,8 @@ struct CUDAPluggableAllocator
 
   void* malloc(size_t size, int device, cudaStream_t stream);
 
-  c10::DataPtr allocate(size_t size) const;
-  c10::DeleterFnPtr raw_deleter() const;
+  c10::DataPtr allocate(size_t size) const override;
+  c10::DeleterFnPtr raw_deleter() const override;
 
   virtual void* raw_alloc(size_t nbytes) override;
   virtual void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream)

torch/csrc/distributed/c10d/PyProcessGroup.hpp (+1 −1)

@@ -76,7 +76,7 @@ class PyProcessGroup : public ProcessGroup {
   }
 
   c10::intrusive_ptr<Work> barrier(
-      const BarrierOptions& opts = BarrierOptions()) {
+      const BarrierOptions& opts = BarrierOptions()) override {
     PYBIND11_OVERRIDE(
         c10::intrusive_ptr<Work>, /* Return type */
         ProcessGroup, /* Parent class */
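
PyProcessGroup is a pybind11 trampoline class, so each of its virtual hooks is an override of the C++ base and gets flagged like any other. A minimal sketch of the pattern with simplified, hypothetical names (`Base`, `greet`):

```cpp
#include <pybind11/pybind11.h>
#include <string>

struct Base {
  virtual ~Base() = default;
  virtual std::string greet(int n) const { return "base " + std::to_string(n); }
};

// Trampoline: forwards virtual calls to a Python subclass when one overrides them.
struct PyBase : Base {
  std::string greet(int n) const override {  // `override` satisfies -Wsuggest-override
    PYBIND11_OVERRIDE(std::string, Base, greet, n);
  }
};
```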

torch/csrc/jit/codegen/cuda/codegen.cpp (+4 −4)

@@ -1281,7 +1281,7 @@ class CudaKernelGenerator : private OptOutConstDispatch {
     }
   }
 
-  void handle(const LoadStoreOp* ldst) {
+  void handle(const LoadStoreOp* ldst) final {
     // TODO:
     // Need to gradually merge the code path of this
     // with UnaryOp::Set for vectorization.
@@ -2629,7 +2629,7 @@ class CudaKernelGenerator : private OptOutConstDispatch {
     indent() << "NVFUSER_UPDATE_MAGIC_ZERO\n";
   }
 
-  void handle(const kir::Swizzle2DInt* swizzle_2d) {
+  void handle(const kir::Swizzle2DInt* swizzle_2d) final {
     TORCH_INTERNAL_ASSERT(print_inline_);
     TORCH_INTERNAL_ASSERT(
         swizzle_2d->swizzleType() != Swizzle2DType::NoSwizzle,
@@ -2642,14 +2642,14 @@ class CudaKernelGenerator : private OptOutConstDispatch {
     }
   }
 
-  void handle(const kir::IntPair* int_pair) {
+  void handle(const kir::IntPair* int_pair) final {
     const auto def = int_pair->definition();
     TORCH_INTERNAL_ASSERT(
         def != nullptr, "no support for un-inlined int pair yet.");
     code_ << gen(def);
   }
 
-  void handle(const kir::PairSelect* pair_select) {
+  void handle(const kir::PairSelect* pair_select) final {
     if (print_inline_) {
       code_ << gen(pair_select->in());
     } else {

torch/csrc/jit/codegen/cuda/python_frontend/fusion_record.h (+13 −13)

@@ -339,7 +339,7 @@ struct ViewOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", original_shape=[";
     bool first_arg = true;
@@ -426,7 +426,7 @@ struct PermuteOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", dims=[";
     bool first_arg = true;
@@ -510,7 +510,7 @@ struct SqueezeOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", original_shape=[";
     bool first_arg = true;
@@ -662,7 +662,7 @@ struct BroadcastInDimOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", output_shape=[";
     bool first_arg = true;
@@ -748,7 +748,7 @@ struct BroadcastOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", is_broadcast_dim=[";
     bool first_arg = true;
@@ -844,7 +844,7 @@ struct CastOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", dtype=" << dtypeToPyString(dtype_);
     if (close_function) {
@@ -897,7 +897,7 @@ struct ConstantRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     if (std::is_same<ValueType, bool>::value) {
       os << (value_ ? "True" : "False");
@@ -1038,7 +1038,7 @@ struct TensorRecord : RecordFunctor {
     fd.addInput(tv);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << "symbolic_sizes=[";
     bool first_arg = true;
@@ -1231,7 +1231,7 @@ struct ReductionOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", axes=[";
     bool first_arg = true;
@@ -1316,7 +1316,7 @@ struct ScalarRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << "dtype=" << dtypeToPyString(dtype_);
     if (close_function) {
@@ -1374,7 +1374,7 @@ struct NormOpRecord : RecordFunctor {
         correction_(correction),
         keep_dim_(keep_dim) {}
   virtual ~NormOpRecord() = default;
-  virtual RecordFunctor* clone() = 0;
+  RecordFunctor* clone() override = 0;
 
   // I am skipping the bassel's correction value in the hash because
   // I suspect we might change it to a bool from a 64-bit value
@@ -1415,7 +1415,7 @@ struct NormOpRecord : RecordFunctor {
   }
 
   //! Each NormOp Child should define the operator() to build the IR
-  virtual void operator()(FusionDefinition& fd) = 0;
+  void operator()(FusionDefinition& fd) override = 0;
 
   virtual void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
@@ -1645,7 +1645,7 @@ struct FullOpRecord : RecordFunctor {
     fd.setFusionState(outputs_.at(0).index, output);
   }
 
-  virtual void print(std::ostream& os, bool close_function = true) const {
+  virtual void print(std::ostream& os, bool close_function = true) const final {
     RecordFunctor::print(os, false);
     os << ", shape=[";
     bool first_arg = true;
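
Note the `override = 0` form in NormOpRecord: a derived class can re-declare an inherited pure virtual as an override while keeping it pure, so the class stays abstract and `-Wsuggest-override` is satisfied. A minimal sketch with hypothetical names:

```cpp
struct Base {
  virtual ~Base() = default;
  virtual Base* clone() = 0;
};

struct Middle : Base {
  Base* clone() override = 0;  // still pure: Middle remains abstract
};

struct Leaf : Middle {
  Base* clone() override { return new Leaf(*this); }  // concrete leaf implements it
};
```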

torch/csrc/jit/codegen/cuda/transform_view.cpp (+1 −1)

@@ -145,7 +145,7 @@ class ViewTransform : public Transform {
   }
 
   // Debugging utility to convert the transformation into a string.
-  virtual std::string toString() const = 0;
+  virtual std::string toString() const override = 0;
 
  protected:
   ViewTransform(const int64_t& index) : Transform(index) {}

torch/csrc/jit/passes/onnx/constant_map.h (+5)

@@ -1,6 +1,11 @@
 #pragma once
 
+#include <c10/macros/Macros.h>
+
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
 #include <onnx/shape_inference/implementation.h>
+C10_DIAGNOSTIC_POP()
+
 #include <torch/csrc/jit/ir/ir.h>
 #include <torch/csrc/jit/serialization/export.h>
 #include <mutex>

torch/csrc/jit/serialization/export.cpp (+3)

@@ -3,6 +3,7 @@
 #include <ATen/ATen.h>
 #include <ATen/Utils.h>
 #include <ATen/core/functional.h>
+#include <c10/macros/Macros.h>
 #include <c10/util/Exception.h>
 #include <c10/util/Optional.h>
 #include <c10/util/accumulate.h>
@@ -23,7 +24,9 @@
 #include <onnx/checker.h>
 #include <onnx/onnx_pb.h>
 #include <onnx/proto_utils.h>
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
 #include <onnx/shape_inference/implementation.h>
+C10_DIAGNOSTIC_POP()
 
 #include <fstream>
 #include <memory>

torch/csrc/jit/tensorexpr/analysis.h (+1 −1)

@@ -360,7 +360,7 @@ class BufLiveRange : public IRVisitor {
     }
   }
 
-  void visit(BlockPtr v) {
+  void visit(BlockPtr v) override {
    for (StmtPtr s : *v) {
      curr_index_ += 1;
      findAccAndUpdateLiveRange(s);
