Skip to content

Commit ed32787

Browse files
r-barnes authored and pytorchmergebot committed
[codemod] c10::optional -> std::optional (pytorch#126135)
Generated by running the following from PyTorch root: ``` find . -regex ".*\.\(cpp\|h\|cu\|hpp\|cc\|cxx\)$" | grep -v "build/" | xargs -n 50 -P 4 perl -pi -e 's/c10::optional/std::optional/' ``` `c10::optional` is just an alias for `std::optional`. This removes usages of that alias in preparation for eliminating it entirely. Pull Request resolved: pytorch#126135 Approved by: https://github.com/Skylion007, https://github.com/malfet, https://github.com/albanD, https://github.com/aaronenyeshi
1 parent b55f57b commit ed32787

File tree

907 files changed

+5659
-5659
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

907 files changed

+5659
-5659
lines changed

aten/src/ATen/CPUGeneratorImpl.cpp

+12-12
Original file line numberDiff line numberDiff line change
@@ -81,8 +81,8 @@ inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
8181
CPUGeneratorImpl::CPUGeneratorImpl(uint64_t seed_in)
8282
: c10::GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(c10::DispatchKey::CPU)},
8383
engine_{seed_in},
84-
next_float_normal_sample_{c10::optional<float>()},
85-
next_double_normal_sample_{c10::optional<double>()} { }
84+
next_float_normal_sample_{std::optional<float>()},
85+
next_double_normal_sample_{std::optional<double>()} { }
8686

8787
/**
8888
* Manually seeds the engine with the seed input
@@ -151,16 +151,16 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
151151
detail::check_rng_state(new_state);
152152

153153
at::mt19937 engine;
154-
auto float_normal_sample = c10::optional<float>();
155-
auto double_normal_sample = c10::optional<double>();
154+
auto float_normal_sample = std::optional<float>();
155+
auto double_normal_sample = std::optional<double>();
156156

157157
// Construct the state of at::CPUGeneratorImpl based on input byte tensor size.
158158
CPUGeneratorImplStateLegacy* legacy_pod{nullptr};
159159
auto new_state_size = new_state.numel();
160160
if (new_state_size == size_legacy) {
161161
legacy_pod = (CPUGeneratorImplStateLegacy*)new_state.data();
162162
// Note that in CPUGeneratorImplStateLegacy, we didn't have float version
163-
// of normal sample and hence we leave the c10::optional<float> as is
163+
// of normal sample and hence we leave the std::optional<float> as is
164164

165165
// Update next_double_normal_sample.
166166
// Note that CPUGeneratorImplStateLegacy stores two uniform values (normal_x, normal_y)
@@ -171,22 +171,22 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
171171
auto r = legacy_pod->normal_rho;
172172
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
173173
// we return the sin version of the normal sample when in caching mode
174-
double_normal_sample = c10::optional<double>(r * ::sin(theta));
174+
double_normal_sample = std::optional<double>(r * ::sin(theta));
175175
}
176176
} else if (new_state_size == size_current) {
177177
auto rng_state = (CPUGeneratorImplState*)new_state.data();
178178
legacy_pod = &rng_state->legacy_pod;
179179
// update next_float_normal_sample
180180
if (rng_state->is_next_float_normal_sample_valid) {
181-
float_normal_sample = c10::optional<float>(rng_state->next_float_normal_sample);
181+
float_normal_sample = std::optional<float>(rng_state->next_float_normal_sample);
182182
}
183183

184184
// Update next_double_normal_sample.
185185
// Note that in getRNGState, we now return the actual normal sample in normal_y
186186
// and if it's valid in normal_is_valid. The redundant normal_x and normal_rho
187187
// are squashed to 0.0.
188188
if (legacy_pod->normal_is_valid) {
189-
double_normal_sample = c10::optional<double>(legacy_pod->normal_y);
189+
double_normal_sample = std::optional<double>(legacy_pod->normal_y);
190190
}
191191
} else {
192192
AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
@@ -283,14 +283,14 @@ uint64_t CPUGeneratorImpl::random64() {
283283
/**
284284
* Get the cached normal random in float
285285
*/
286-
c10::optional<float> CPUGeneratorImpl::next_float_normal_sample() {
286+
std::optional<float> CPUGeneratorImpl::next_float_normal_sample() {
287287
return next_float_normal_sample_;
288288
}
289289

290290
/**
291291
* Get the cached normal random in double
292292
*/
293-
c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
293+
std::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
294294
return next_double_normal_sample_;
295295
}
296296

@@ -299,7 +299,7 @@ c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
299299
*
300300
* See Note [Acquire lock when using random generators]
301301
*/
302-
void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn) {
302+
void CPUGeneratorImpl::set_next_float_normal_sample(std::optional<float> randn) {
303303
next_float_normal_sample_ = randn;
304304
}
305305

@@ -308,7 +308,7 @@ void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn)
308308
*
309309
* See Note [Acquire lock when using random generators]
310310
*/
311-
void CPUGeneratorImpl::set_next_double_normal_sample(c10::optional<double> randn) {
311+
void CPUGeneratorImpl::set_next_double_normal_sample(std::optional<double> randn) {
312312
next_double_normal_sample_ = randn;
313313
}
314314

aten/src/ATen/CPUGeneratorImpl.h

+6-6
Original file line numberDiff line numberDiff line change
@@ -24,18 +24,18 @@ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
2424
static c10::DeviceType device_type();
2525
uint32_t random();
2626
uint64_t random64();
27-
c10::optional<float> next_float_normal_sample();
28-
c10::optional<double> next_double_normal_sample();
29-
void set_next_float_normal_sample(c10::optional<float> randn);
30-
void set_next_double_normal_sample(c10::optional<double> randn);
27+
std::optional<float> next_float_normal_sample();
28+
std::optional<double> next_double_normal_sample();
29+
void set_next_float_normal_sample(std::optional<float> randn);
30+
void set_next_double_normal_sample(std::optional<double> randn);
3131
at::mt19937 engine();
3232
void set_engine(at::mt19937 engine);
3333

3434
private:
3535
CPUGeneratorImpl* clone_impl() const override;
3636
at::mt19937 engine_;
37-
c10::optional<float> next_float_normal_sample_;
38-
c10::optional<double> next_double_normal_sample_;
37+
std::optional<float> next_float_normal_sample_;
38+
std::optional<double> next_double_normal_sample_;
3939
};
4040

4141
namespace detail {

aten/src/ATen/Context.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ class TORCH_API Context {
5959
}
6060
}
6161
const AcceleratorHooksInterface& getAcceleratorHooksInterface(
62-
c10::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
62+
std::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
6363
c10::DeviceType device_type = opt_device_type.has_value()
6464
? opt_device_type.value()
6565
: at::getAccelerator(true).value();
@@ -395,7 +395,7 @@ class TORCH_API Context {
395395
bool release_original_weights = false;
396396
#endif
397397
bool display_vmap_fallback_warnings_ = false;
398-
c10::optional<at::QEngine> quantized_engine = c10::nullopt;
398+
std::optional<at::QEngine> quantized_engine = c10::nullopt;
399399
bool enable_sparse_tensor_invariant_checks = false;
400400
bool allow_fp16_reduction_cpu = false;
401401

aten/src/ATen/DeviceGuard.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -15,22 +15,22 @@ namespace at {
1515
// OptionalDeviceGuard guard(device_of(tensor));
1616

1717
/// Return the Device of a Tensor, if the Tensor is defined.
18-
inline c10::optional<Device> device_of(const Tensor& t) {
18+
inline std::optional<Device> device_of(const Tensor& t) {
1919
if (t.defined()) {
2020
return c10::make_optional(t.device());
2121
} else {
2222
return c10::nullopt;
2323
}
2424
}
2525

26-
inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
26+
inline std::optional<Device> device_of(const c10::optional<Tensor>& t) {
2727
return t.has_value() ? device_of(t.value()) : c10::nullopt;
2828
}
2929

3030
/// Return the Device of a TensorList, if the list is non-empty and
3131
/// the first Tensor is defined. (This function implicitly assumes
3232
/// that all tensors in the list have the same device.)
33-
inline c10::optional<Device> device_of(ITensorListRef t) {
33+
inline std::optional<Device> device_of(ITensorListRef t) {
3434
if (!t.empty()) {
3535
return device_of(t.front());
3636
} else {

aten/src/ATen/EmptyTensor.cpp

+32-32
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ TensorBase _empty_generic(
163163
c10::Allocator* allocator,
164164
c10::DispatchKeySet ks,
165165
ScalarType scalar_type,
166-
c10::optional<c10::MemoryFormat> memory_format_opt) {
166+
std::optional<c10::MemoryFormat> memory_format_opt) {
167167
at::detail::check_size_nonnegative(size);
168168
at::detail::raise_warning_for_complex_half(scalar_type);
169169
caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type);
@@ -197,7 +197,7 @@ TensorBase empty_generic(
197197
c10::Allocator* allocator,
198198
c10::DispatchKeySet ks,
199199
ScalarType scalar_type,
200-
c10::optional<c10::MemoryFormat> memory_format_opt) {
200+
std::optional<c10::MemoryFormat> memory_format_opt) {
201201
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
202202
}
203203

@@ -206,7 +206,7 @@ TensorBase empty_generic_symint(
206206
c10::Allocator* allocator,
207207
c10::DispatchKeySet ks,
208208
ScalarType scalar_type,
209-
c10::optional<c10::MemoryFormat> memory_format_opt) {
209+
std::optional<c10::MemoryFormat> memory_format_opt) {
210210
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
211211
}
212212

@@ -252,19 +252,19 @@ TensorBase empty_strided_symint_generic(
252252
}
253253

254254
TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory,
255-
c10::optional<c10::MemoryFormat> memory_format_opt) {
255+
std::optional<c10::MemoryFormat> memory_format_opt) {
256256
auto allocator = GetCPUAllocatorMaybePinned(pin_memory);
257257
constexpr c10::DispatchKeySet cpu_ks(c10::DispatchKey::CPU);
258258
return empty_generic(size, allocator, cpu_ks, dtype, memory_format_opt);
259259
}
260260

261261
TensorBase empty_cpu(
262262
IntArrayRef size,
263-
c10::optional<ScalarType> dtype_opt,
264-
c10::optional<Layout> layout_opt,
265-
c10::optional<Device> device_opt,
266-
c10::optional<bool> pin_memory_opt,
267-
c10::optional<c10::MemoryFormat> memory_format_opt) {
263+
std::optional<ScalarType> dtype_opt,
264+
std::optional<Layout> layout_opt,
265+
std::optional<Device> device_opt,
266+
std::optional<bool> pin_memory_opt,
267+
std::optional<c10::MemoryFormat> memory_format_opt) {
268268
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
269269
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
270270

@@ -295,10 +295,10 @@ TensorBase empty_strided_cpu(IntArrayRef size, IntArrayRef stride,
295295
TensorBase empty_strided_cpu(
296296
IntArrayRef size,
297297
IntArrayRef stride,
298-
c10::optional<ScalarType> dtype_opt,
299-
c10::optional<Layout> layout_opt,
300-
c10::optional<Device> device_opt,
301-
c10::optional<bool> pin_memory_opt) {
298+
std::optional<ScalarType> dtype_opt,
299+
std::optional<Layout> layout_opt,
300+
std::optional<Device> device_opt,
301+
std::optional<bool> pin_memory_opt) {
302302
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
303303
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
304304

@@ -342,7 +342,7 @@ static MetaAllocator g_meta_alloc;
342342
REGISTER_ALLOCATOR(kMeta, &g_meta_alloc);
343343

344344
TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
345-
c10::optional<c10::MemoryFormat> memory_format_opt) {
345+
std::optional<c10::MemoryFormat> memory_format_opt) {
346346
auto *allocator = GetAllocator(kMeta);
347347
constexpr c10::DispatchKeySet meta_dks(c10::DispatchKey::Meta);
348348
return at::detail::empty_generic(
@@ -351,11 +351,11 @@ TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
351351

352352
TensorBase empty_meta(
353353
IntArrayRef size,
354-
c10::optional<ScalarType> dtype_opt,
355-
c10::optional<Layout> layout_opt,
356-
c10::optional<Device> device_opt,
357-
c10::optional<bool> pin_memory_opt,
358-
c10::optional<c10::MemoryFormat> memory_format_opt
354+
std::optional<ScalarType> dtype_opt,
355+
std::optional<Layout> layout_opt,
356+
std::optional<Device> device_opt,
357+
std::optional<bool> pin_memory_opt,
358+
std::optional<c10::MemoryFormat> memory_format_opt
359359
) {
360360
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
361361
// NB: because there is no SparseMeta (yet), non-strided layout is
@@ -371,11 +371,11 @@ TensorBase empty_meta(
371371

372372
TensorBase empty_symint_meta(
373373
SymIntArrayRef size,
374-
c10::optional<ScalarType> dtype_opt,
375-
c10::optional<Layout> layout_opt,
376-
c10::optional<Device> device_opt,
377-
c10::optional<bool> pin_memory_opt,
378-
c10::optional<c10::MemoryFormat> memory_format_opt
374+
std::optional<ScalarType> dtype_opt,
375+
std::optional<Layout> layout_opt,
376+
std::optional<Device> device_opt,
377+
std::optional<bool> pin_memory_opt,
378+
std::optional<c10::MemoryFormat> memory_format_opt
379379
) {
380380
auto *allocator = GetAllocator(kMeta);
381381
constexpr c10::DispatchKeySet ks(c10::DispatchKey::Meta);
@@ -405,10 +405,10 @@ TensorBase empty_strided_meta(IntArrayRef size, IntArrayRef stride,
405405
TensorBase empty_strided_meta(
406406
IntArrayRef size,
407407
IntArrayRef stride,
408-
c10::optional<ScalarType> dtype_opt,
409-
c10::optional<Layout> layout_opt,
410-
c10::optional<Device> device_opt,
411-
c10::optional<bool> pin_memory_opt) {
408+
std::optional<ScalarType> dtype_opt,
409+
std::optional<Layout> layout_opt,
410+
std::optional<Device> device_opt,
411+
std::optional<bool> pin_memory_opt) {
412412
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
413413
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
414414

@@ -440,10 +440,10 @@ TensorBase empty_strided_symint_meta(SymIntArrayRef size, SymIntArrayRef stride,
440440
TensorBase empty_strided_symint_meta(
441441
SymIntArrayRef size,
442442
SymIntArrayRef stride,
443-
c10::optional<ScalarType> dtype_opt,
444-
c10::optional<Layout> layout_opt,
445-
c10::optional<Device> device_opt,
446-
c10::optional<bool> pin_memory_opt) {
443+
std::optional<ScalarType> dtype_opt,
444+
std::optional<Layout> layout_opt,
445+
std::optional<Device> device_opt,
446+
std::optional<bool> pin_memory_opt) {
447447
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
448448
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
449449

0 commit comments

Comments (0)