
Commit ac48c11

kiszk authored and pytorchmergebot committed
Fix typo under torchgen directory (pytorch#111154)
This PR fixes typos in comments and messages in files under the `torchgen` directory.
Pull Request resolved: pytorch#111154
Approved by: https://github.com/rajveer43, https://github.com/Skylion007

1 parent b460c30 · commit ac48c11

9 files changed: +12 -12 lines

Diff for: torchgen/api/cpp.py

+1 -1

@@ -124,7 +124,7 @@ def valuetype_type(
 raise AssertionError(f"unrecognized type {repr(t)}")


-# Translation of types occuring in JIT arguments to a C++ argument type.
+# Translation of types occurring in JIT arguments to a C++ argument type.
 # If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
 # For example, we'll return std::vector<int> instead of IntArrayRef.
 # See Note [translation from C++ reference to value types]
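
For context on the comment being corrected: this function maps JIT schema types to C++ argument types, and remove_non_owning_ref_types swaps non-owning reference types for owning ones. A toy sketch of that idea (hypothetical helper name, not torchgen's implementation):

# Hypothetical sketch only: an int[] argument normally lowers to the
# non-owning at::IntArrayRef, but with remove_non_owning_ref_types=True it
# lowers to the owning std::vector<int>, as the comment above describes.
def lower_int_list_type(remove_non_owning_ref_types: bool = False) -> str:
    if remove_non_owning_ref_types:
        return "std::vector<int>"  # owning: safe to keep past the call
    return "at::IntArrayRef"       # non-owning: borrows the caller's buffer

assert lower_int_list_type() == "at::IntArrayRef"
assert lower_int_list_type(remove_non_owning_ref_types=True) == "std::vector<int>"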

Diff for: torchgen/api/structured.py

+1 -1

@@ -38,7 +38,7 @@
 # API have been fixed.


-# Translation of types occuring in JIT arguments to a C++ argument type.
+# Translation of types occurring in JIT arguments to a C++ argument type.
 # NB: For now, mutable doesn't do anything; but it could if we make
 # some more nominal types
 def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:

Diff for: torchgen/dest/lazy_ir.py

+1 -1

@@ -216,7 +216,7 @@ def node_base_ctor_call(self, schema: LazyIrSchema) -> str:

 scalar_args = schema.filtered_args(values=False, scalars=True)

-# Shape constuction.
+# Shape construction.
 # Conditionally build shape depending on specified shape property
 if schema.properties.ShapePrecompute:
     shape_ctor_arg = "std::move(shapes),"

Diff for: torchgen/executorch/api/et_cpp.py

+1 -1

@@ -93,7 +93,7 @@ def valuetype_type(
 raise AssertionError(f"unrecognized type {repr(t)}")


-# Translation of types occuring in JIT arguments to a C++ argument type.
+# Translation of types occurring in JIT arguments to a C++ argument type.
 # If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
 # For example, we'll return std::vector<int> instead of IntArrayRef.
 # See Note [translation from C++ reference to value types]

Diff for: torchgen/gen_executorch.py

+1 -1

@@ -879,7 +879,7 @@ def main() -> None:
 "--manual_registration",
 "--manual-registration",
 action="store_true",
-help="a boolean flag to indicate whether we want to maually call"
+help="a boolean flag to indicate whether we want to manually call"
 "register_kernels() or rely on static init. ",
 )
 parser.add_argument(

Diff for: torchgen/gen_functionalization_type.py

+2 -2

@@ -629,7 +629,7 @@ def emit_inplace_functionalization_body(
 if ({str(not any_storage_args and f.func.kind() == SchemaKind.inplace).lower()}) {{
 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
-// (We can only do this for inplace ops today though, because they technicaly all support meta tensors).
+// (We can only do this for inplace ops today though, because they technically all support meta tensors).
 {meta_conversion_str}
 at::AutoDispatchSkipFunctionalize func_guard;
 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
@@ -731,7 +731,7 @@ def emit_registration_helper(f: NativeFunction) -> str:
 # See Note [resize_ in Functionalization]
 return []
 assert not f.is_view_op
-# functionalization needs to generate and register kernals for inplace ops.
+# functionalization needs to generate and register kernels for inplace ops.
 # We *also* need to directly register CompositeImplicitAUtograd kernels
 # so that they decompose properly before functioanlization.
 if modifies_arguments(f):
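
The comment corrected above explains why the generated functionalization kernel first runs the op on meta tensors: meta tensors carry shapes and dtypes but no data, so shape errors surface cheaply before the functional variant is dispatched. A minimal illustration in plain PyTorch (not the generated code):

import torch

# Meta tensors have shapes/dtypes but no storage, so running the original
# in-place op on them is a cheap way to surface shape errors up front.
a = torch.empty(2, 3, device="meta")
b = torch.empty(4, 5, device="meta")
try:
    a.add_(b)  # incompatible shapes: the error is raised without touching real data
except RuntimeError as e:
    print(f"shape error caught on meta tensors: {e}")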

Diff for: torchgen/gen_lazy_tensor.py

+1 -1

@@ -426,7 +426,7 @@ def concat_map_codegen(

 Generated lazy native functions all perform shape inference, by first using a meta:: kernel
 if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
-knows the call signature for compute_shape_{op} becuase it matches the nativefunction (and meta::) signature,
+knows the call signature for compute_shape_{op} because it matches the nativefunction (and meta::) signature,
 so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
 to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
 the expected signature which can be copy-pasted into shape_inference.h.
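
As background for the docstring above: structured ops can reuse their meta:: kernel for shape inference, while the rest fall back to a hand-written compute_shape_{op} helper. A rough sketch of that decision (illustrative only; the real generator emits C++ and the namespace shown is indicative):

def shape_inference_call(op: str, is_structured: bool) -> str:
    # Illustrative sketch, not torchgen code.
    if is_structured:
        # Structured ops already have a meta:: kernel that computes output shapes.
        return f"at::meta::{op}(...)"
    # Otherwise the developer supplies compute_shape_{op} in shape_inference.h.
    return f"compute_shape_{op}(...)"

print(shape_inference_call("add", is_structured=True))     # at::meta::add(...)
print(shape_inference_call("my_op", is_structured=False))  # compute_shape_my_op(...)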

Diff for: torchgen/model.py

+2 -2

@@ -1413,7 +1413,7 @@ def __post_init__(self) -> None:
 ), "out= ops that accept tensor lists as out arguments "
 "are expected to have no return type (since you can't do method chaining on them)"
 else:
-# mutable keyward arguments whose name has _scratch_ prefix are
+# mutable keyword arguments whose name has _scratch_ prefix are
 # scratch tensors for memory planning and should not be returned
 assert len(
 [
@@ -2208,7 +2208,7 @@ def strip_arg_annotation(a: Argument) -> Argument:
 post_self_positional=tuple(
 map(strip_arg_annotation, self.post_self_positional)
 ),
-# Since TensorOptions are droped, the post_tensor_options_kwargs are
+# Since TensorOptions are dropped, the post_tensor_options_kwargs are
 # converted to pre_tensor_options_kwargs
 pre_tensor_options_kwarg_only=tuple(
 map(strip_arg_annotation, self.pre_tensor_options_kwarg_only)

Diff for: torchgen/native_function_generation.py

+2 -2

@@ -371,8 +371,8 @@ def add_generated_native_functions(
 rs: List[NativeFunction],
 indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]],
 ) -> None:
-# The main code for gnerating new NativeFunctions
-# First we group of NaitveFunctions by schema kind,
+# The main code for generating new NativeFunctions
+# First we group of NativeFunctions by schema kind,
 # then we detect which ones are missing and generate them.
 pre_grouped_native_functions = pre_group_native_functions(rs)
 for d in pre_grouped_native_functions.values():
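
The corrected comment describes the first step of add_generated_native_functions: group NativeFunctions by schema kind, then detect and generate the missing variants. A toy sketch of that grouping-and-gap-detection idea (made-up names and data; the real code groups NativeFunction objects by their FunctionSchema):

from collections import defaultdict
from typing import Dict, List, Set, Tuple

# Toy model: each op is (base name, schema kind).
ops: List[Tuple[str, str]] = [
    ("add", "functional"),
    ("add", "out"),
    ("mul", "functional"),
]

grouped: Dict[str, Set[str]] = defaultdict(set)
for name, kind in ops:
    grouped[name].add(kind)

# Detect which variants are missing and would need to be generated.
all_kinds = {"functional", "inplace", "out"}
missing = {name: all_kinds - kinds for name, kinds in grouped.items()}
print(missing)  # e.g. {'add': {'inplace'}, 'mul': {'inplace', 'out'}}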
