Skip to content

Commit fddabc6

Browse files
r-barnes authored and pytorchmergebot committed
C10_UNUSED to [[maybe_unused]] (pytorch#6357) (pytorch#138364)
Summary: Pull Request resolved: pytorch/executorch#6357 Pull Request resolved: pytorch#138364 Approved by: https://github.com/Skylion007, https://github.com/eqy
1 parent 2f6a70b commit fddabc6

File tree

139 files changed

+841
-697
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

139 files changed

+841
-697
lines changed

aten/src/ATen/CPUApplyUtils.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ struct strided_tensor_iter_fixed {
6868
strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
6969
strided_tensor_iter_fixed(
7070
Tensor& tensor,
71-
C10_UNUSED bool sort_strides = false)
71+
[[maybe_unused]] bool sort_strides = false)
7272
: data_(tensor.data_ptr<T>()) {
7373
std::memset(counter_, 0, sizeof(int64_t) * N);
7474
if (tensor.dim() > 0) {

aten/src/ATen/Dispatch.h

+25-25
Original file line numberDiff line numberDiff line change
@@ -63,38 +63,38 @@ TORCH_API void record_kernel_function_dtype(std::string name);
6363
} \
6464
} while (0)
6565

66-
#define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \
67-
case enum_type: { \
68-
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
69-
using HINT C10_UNUSED = c10::impl::ScalarTypeToCPPTypeT<enum_type>; \
70-
return __VA_ARGS__(); \
66+
#define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \
67+
case enum_type: { \
68+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
69+
using HINT [[maybe_unused]] = c10::impl::ScalarTypeToCPPTypeT<enum_type>; \
70+
return __VA_ARGS__(); \
7171
}
7272

7373
#define AT_DISPATCH_CASE(enum_type, ...) \
7474
AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
7575

76-
#define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) \
77-
case enum_type: { \
78-
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
79-
using scalar_t = scalar_type; \
80-
using underlying_t C10_UNUSED = typename scalar_t::underlying; \
81-
C10_UNUSED const auto& SCALAR_TYPE = enum_type; \
82-
C10_UNUSED const auto& UNDERLYING_TYPE = toUnderlying(enum_type); \
83-
return __VA_ARGS__(); \
76+
#define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) \
77+
case enum_type: { \
78+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
79+
using scalar_t = scalar_type; \
80+
using underlying_t [[maybe_unused]] = typename scalar_t::underlying; \
81+
[[maybe_unused]] const auto& SCALAR_TYPE = enum_type; \
82+
[[maybe_unused]] const auto& UNDERLYING_TYPE = toUnderlying(enum_type); \
83+
return __VA_ARGS__(); \
8484
}
8585

86-
#define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
87-
enum_type, scalar_type, bitwidth, qmin, qmax, ...) \
88-
case enum_type: { \
89-
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
90-
using scalar_t = scalar_type; \
91-
using underlying_t C10_UNUSED = typename scalar_t::underlying; \
92-
C10_UNUSED const auto& SCALAR_TYPE = enum_type; \
93-
C10_UNUSED const auto& UNDERLYING_TYPE = toUnderlying(enum_type); \
94-
C10_UNUSED int bit_width = bitwidth; \
95-
C10_UNUSED int64_t quant_min = qmin; \
96-
C10_UNUSED int64_t quant_max = qmax; \
97-
return __VA_ARGS__(); \
86+
#define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
87+
enum_type, scalar_type, bitwidth, qmin, qmax, ...) \
88+
case enum_type: { \
89+
AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
90+
using scalar_t = scalar_type; \
91+
using underlying_t [[maybe_unused]] = typename scalar_t::underlying; \
92+
[[maybe_unused]] const auto& SCALAR_TYPE = enum_type; \
93+
[[maybe_unused]] const auto& UNDERLYING_TYPE = toUnderlying(enum_type); \
94+
[[maybe_unused]] int bit_width = bitwidth; \
95+
[[maybe_unused]] int64_t quant_min = qmin; \
96+
[[maybe_unused]] int64_t quant_max = qmax; \
97+
return __VA_ARGS__(); \
9898
}
9999

100100
namespace detail {

aten/src/ATen/FunctionalTensorWrapper.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -638,7 +638,7 @@ void replace_(const ITensorListRef functional_tensor, ITensorListRef other) {
638638
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_tensor.size() == other.size());
639639
auto functional_tensor_it = functional_tensor.begin();
640640
auto other_it = other.begin();
641-
for (C10_UNUSED const auto i : c10::irange(functional_tensor.size())) {
641+
for ([[maybe_unused]] const auto i : c10::irange(functional_tensor.size())) {
642642
replace_(*functional_tensor_it++, *other_it++);
643643
}
644644
}
@@ -655,7 +655,7 @@ void propagate_xla_data(const ITensorListRef functional_tensor, ITensorListRef o
655655
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_tensor.size() == other.size());
656656
auto functional_tensor_it = functional_tensor.begin();
657657
auto other_it = other.begin();
658-
for (C10_UNUSED const auto i : c10::irange(functional_tensor.size())) {
658+
for ([[maybe_unused]] const auto i : c10::irange(functional_tensor.size())) {
659659
propagate_xla_data(*functional_tensor_it++, *other_it++);
660660
}
661661
}
@@ -670,7 +670,7 @@ void propagate_xla_data_direct(const ITensorListRef tensor,
670670
ITensorListRef other) {
671671
auto tensor_it = tensor.begin();
672672
auto other_it = other.begin();
673-
for (C10_UNUSED const auto i : c10::irange(tensor.size())) {
673+
for ([[maybe_unused]] const auto i : c10::irange(tensor.size())) {
674674
propagate_xla_data_direct(*tensor_it++, *other_it++);
675675
}
676676
}

aten/src/ATen/code_template.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -205,7 +205,7 @@ struct CodeTemplate {
205205
// or trailing newlines. It's the responsibility of the calling function
206206
// to indent correctly in the context.
207207
void emitIndent(std::ostream& out, size_t indent) const {
208-
for (C10_UNUSED const auto i : c10::irange(indent)) {
208+
for ([[maybe_unused]] const auto i : c10::irange(indent)) {
209209
out << " ";
210210
}
211211
}

aten/src/ATen/core/Formatting.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ static std::tuple<double, int> __printFormat(std::ostream& stream, const Tensor&
153153

154154
static void __printIndent(std::ostream &stream, int64_t indent)
155155
{
156-
for (C10_UNUSED const auto i : c10::irange(indent)) {
156+
for ([[maybe_unused]] const auto i : c10::irange(indent)) {
157157
stream << " ";
158158
}
159159
}

aten/src/ATen/core/class_type.h

+2-1
Original file line numberDiff line numberDiff line change
@@ -390,7 +390,8 @@ struct TORCH_API ClassType : public NamedType {
390390
std::string doc_string = "",
391391
std::vector<std::string> unresolved_class_attributes = {});
392392

393-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
393+
std::string annotation_str_impl(
394+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
394395
const auto& n = name().value();
395396
return n.qualifiedName();
396397
}

aten/src/ATen/core/dynamic_type.cpp

+2-3
Original file line numberDiff line numberDiff line change
@@ -376,8 +376,8 @@ DynamicTypePtr ivalue::TupleTypeFactory<c10::DynamicType>::fallback(
376376
return nullptr;
377377
}
378378

379-
TORCH_API TupleTypePtr
380-
ivalue::TupleTypeFactory<TupleType>::fallback(C10_UNUSED const Type& type) {
379+
TORCH_API TupleTypePtr ivalue::TupleTypeFactory<TupleType>::fallback(
380+
[[maybe_unused]] const Type& type) {
381381
#ifdef C10_MOBILE
382382
return nullptr;
383383
#else
@@ -398,5 +398,4 @@ ivalue::TupleTypeFactory<TupleType>::fallback(C10_UNUSED const Type& type) {
398398
#endif
399399
}
400400

401-
402401
} // namespace c10

aten/src/ATen/core/enum_type.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ struct TORCH_API EnumType : public NamedType {
8888
cu_(std::move(cu)) {}
8989

9090
std::string annotation_str_impl(
91-
C10_UNUSED const TypePrinter& printer = nullptr) const override {
91+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
9292
const auto& n = name().value();
9393
return n.qualifiedName();
9494
}

aten/src/ATen/core/function.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ struct TORCH_API Function {
5656
virtual c10::intrusive_ptr<c10::ivalue::Future> runAsync(
5757
Stack& /*stack*/,
5858
// NOLINTNEXTLINE(performance-unnecessary-value-param)
59-
C10_UNUSED TaskLauncher taskLauncher = at::launch) {
59+
[[maybe_unused]] TaskLauncher taskLauncher = at::launch) {
6060
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
6161
return {};
6262
}

aten/src/ATen/core/jit_type.h

+16-8
Original file line numberDiff line numberDiff line change
@@ -1278,7 +1278,8 @@ struct TORCH_API NumberType : public Type {
12781278
protected:
12791279
NumberType(TypeKind kind = TypeKind::NumberType) : Type(kind) {}
12801280

1281-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
1281+
std::string annotation_str_impl(
1282+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
12821283
return "number"; // technically not a valid python type, but
12831284
// we need to use it when parsing back in annotations
12841285
// for implicit conversions
@@ -1305,7 +1306,8 @@ struct TORCH_API FloatType : public NumberType {
13051306

13061307
private:
13071308
FloatType() : NumberType(TypeKind::FloatType) {}
1308-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
1309+
std::string annotation_str_impl(
1310+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
13091311
return "float";
13101312
}
13111313
};
@@ -1330,7 +1332,8 @@ struct TORCH_API ComplexType : public NumberType {
13301332

13311333
private:
13321334
ComplexType() : NumberType(TypeKind::ComplexType) {}
1333-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
1335+
std::string annotation_str_impl(
1336+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
13341337
return "complex";
13351338
}
13361339
};
@@ -1419,7 +1422,8 @@ struct TORCH_API IntType : public NumberType {
14191422

14201423
private:
14211424
IntType() : NumberType(TypeKind::IntType) {}
1422-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
1425+
std::string annotation_str_impl(
1426+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
14231427
return "int";
14241428
}
14251429
};
@@ -1453,7 +1457,8 @@ struct TORCH_API StringType : public Type {
14531457
// we only use "str" (not "string") in both FunctionSchema and script
14541458
return annotation_str();
14551459
}
1456-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
1460+
std::string annotation_str_impl(
1461+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
14571462
return "str";
14581463
}
14591464
static const TypeKind Kind = TypeKind::StringType;
@@ -1473,7 +1478,8 @@ struct TORCH_API StorageType : public Type {
14731478
std::string str() const override {
14741479
return annotation_str();
14751480
}
1476-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
1481+
std::string annotation_str_impl(
1482+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
14771483
return "Storage";
14781484
}
14791485
static const TypeKind Kind = TypeKind::StorageType;
@@ -1508,7 +1514,8 @@ struct TORCH_API FunctionType : public NamedType {
15081514

15091515
private:
15101516
FunctionType(torch::jit::Function* function);
1511-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
1517+
std::string annotation_str_impl(
1518+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
15121519
const auto& n = name().value();
15131520
return n.qualifiedName();
15141521
}
@@ -2199,7 +2206,8 @@ struct TORCH_API InterfaceType : public NamedType {
21992206
const InterfaceType& rhs,
22002207
std::ostream* why_not);
22012208

2202-
std::string annotation_str_impl(C10_UNUSED const TypePrinter& printer = nullptr) const override {
2209+
std::string annotation_str_impl(
2210+
[[maybe_unused]] const TypePrinter& printer = nullptr) const override {
22032211
return name()->qualifiedName();
22042212
}
22052213

aten/src/ATen/cpu/vec/vec_base.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1121,7 +1121,7 @@ inline void convert(const src_T *src, dst_T *dst, int64_t n) {
11211121
#ifndef _MSC_VER
11221122
# pragma unroll
11231123
#endif
1124-
for (C10_UNUSED const auto i : c10::irange(n)) {
1124+
for ([[maybe_unused]] const auto i : c10::irange(n)) {
11251125
*dst = c10::convert<dst_T>(c10::load(src));
11261126
src++;
11271127
dst++;

aten/src/ATen/cuda/Exceptions.h

+13-12
Original file line numberDiff line numberDiff line change
@@ -157,18 +157,19 @@ constexpr const char* _cusolver_backend_suggestion = \
157157
// See NOTE [ USE OF NVRTC AND DRIVER API ].
158158
#if !defined(USE_ROCM)
159159

160-
#define AT_CUDA_DRIVER_CHECK(EXPR) \
161-
do { \
162-
CUresult __err = EXPR; \
163-
if (__err != CUDA_SUCCESS) { \
164-
const char* err_str; \
165-
C10_UNUSED CUresult get_error_str_err = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \
166-
if (get_error_str_err != CUDA_SUCCESS) { \
167-
AT_ERROR("CUDA driver error: unknown error"); \
168-
} else { \
169-
AT_ERROR("CUDA driver error: ", err_str); \
170-
} \
171-
} \
160+
#define AT_CUDA_DRIVER_CHECK(EXPR) \
161+
do { \
162+
CUresult __err = EXPR; \
163+
if (__err != CUDA_SUCCESS) { \
164+
const char* err_str; \
165+
[[maybe_unused]] CUresult get_error_str_err = \
166+
at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \
167+
if (get_error_str_err != CUDA_SUCCESS) { \
168+
AT_ERROR("CUDA driver error: unknown error"); \
169+
} else { \
170+
AT_ERROR("CUDA driver error: ", err_str); \
171+
} \
172+
} \
172173
} while (0)
173174

174175
#else

aten/src/ATen/detail/CUDAHooksInterface.h

+6-2
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,12 @@ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
6969
TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP);
7070
}
7171

72-
virtual const Generator& getDefaultCUDAGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
73-
TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP);
72+
virtual const Generator& getDefaultCUDAGenerator(
73+
[[maybe_unused]] DeviceIndex device_index = -1) const {
74+
TORCH_CHECK(
75+
false,
76+
"Cannot get default CUDA generator without ATen_cuda library. ",
77+
CUDA_HELP);
7478
}
7579

7680
Device getDeviceFromPtr(void* /*data*/) const override {

aten/src/ATen/detail/XPUHooksInterface.h

+6-3
Original file line numberDiff line numberDiff line change
@@ -32,12 +32,15 @@ struct TORCH_API XPUHooksInterface : AcceleratorHooksInterface{
3232
TORCH_CHECK(false, "Cannot get XPU global device index without ATen_xpu library.");
3333
}
3434

35-
virtual Generator getXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
35+
virtual Generator getXPUGenerator(
36+
[[maybe_unused]] DeviceIndex device_index = -1) const {
3637
TORCH_CHECK(false, "Cannot get XPU generator without ATen_xpu library.");
3738
}
3839

39-
virtual const Generator& getDefaultXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
40-
TORCH_CHECK(false, "Cannot get default XPU generator without ATen_xpu library.");
40+
virtual const Generator& getDefaultXPUGenerator(
41+
[[maybe_unused]] DeviceIndex device_index = -1) const {
42+
TORCH_CHECK(
43+
false, "Cannot get default XPU generator without ATen_xpu library.");
4144
}
4245

4346
virtual DeviceIndex getNumGPUs() const {

aten/src/ATen/functorch/PyTorchOperatorHacks.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ static Tensor make_feature_noise(const Tensor& input) {
135135
sizes.reserve(input.dim());
136136
sizes.push_back(input_sizes[0]);
137137
sizes.push_back(input_sizes[1]);
138-
for (C10_UNUSED const auto i : c10::irange(2, input.dim())) {
138+
for ([[maybe_unused]] const auto i : c10::irange(2, input.dim())) {
139139
sizes.push_back(1);
140140
}
141141
// NB: THIS WAS CHANGED FROM THE ORIGINAL

aten/src/ATen/native/BatchLinearAlgebraKernel.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1109,7 +1109,7 @@ void unpack_pivots_cpu_kernel(TensorIterator& iter, const int64_t dim_size, cons
11091109
auto* perm_ptr = data[0];
11101110
const auto* pivots_ptr = data[1];
11111111

1112-
for (C10_UNUSED const auto elem : c10::irange(nelems)) {
1112+
for ([[maybe_unused]] const auto elem : c10::irange(nelems)) {
11131113
// WARNING: linalg.lu_factor returns int32 pivots,
11141114
// this behavior could change in the future.
11151115
const auto perm_data = reinterpret_cast<int64_t*>(perm_ptr);

0 commit comments

Comments (0)