# Owner(s): ["module: cpp-extensions"]

import os
import re
import subprocess
import unittest
from itertools import repeat
from pathlib import Path
from typing import get_args, get_origin, Union

import torch
import torch.backends.cudnn
import torch.testing._internal.common_utils as common
import torch.utils.cpp_extension
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
IS_WINDOWS,
skipIfTorchDynamo,
TEST_XPU,
xfailIfTorchDynamo,
)

try:
import pytest
HAS_PYTEST = True
except ImportError:
HAS_PYTEST = False

# TODO: Rewrite these tests so that they can be collected via pytest without
# using run_test.py
try:
if HAS_PYTEST:
cpp_extension = pytest.importorskip("torch_test_cpp_extension.cpp")
maia_extension = pytest.importorskip("torch_test_cpp_extension.maia")
rng_extension = pytest.importorskip("torch_test_cpp_extension.rng")
else:
import torch_test_cpp_extension.cpp as cpp_extension
import torch_test_cpp_extension.maia as maia_extension
import torch_test_cpp_extension.rng as rng_extension
except ImportError as e:
raise RuntimeError(
"test_cpp_extensions_aot.py cannot be invoked directly. Run "
"`python run_test.py -i test_cpp_extensions_aot_ninja` instead."
) from e
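
# As the RuntimeError above indicates, these tests are meant to be launched
# through run_test.py, e.g.:
#
#   python run_test.py -i test_cpp_extensions_aot_ninja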


@torch.testing._internal.common_utils.markDynamoStrictTest
class TestCppExtensionAOT(common.TestCase):
"""Tests ahead-of-time cpp extensions
NOTE: run_test.py's test_cpp_extensions_aot_ninja target
also runs this test case, but with ninja enabled. If you are debugging
a test failure here from the CI, check the logs for which target
(test_cpp_extensions_aot_no_ninja vs test_cpp_extensions_aot_ninja)
failed.
"""

    def test_extension_function(self):
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = cpp_extension.sigmoid_add(x, y)
self.assertEqual(z, x.sigmoid() + y.sigmoid())
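        # The assertion above implies sigmoid_add is equivalent to this
        # pure-PyTorch sketch (inferred from the test; the extension's actual
        # kernel may be implemented differently):
        #
        #   def sigmoid_add(x, y):
        #       return torch.sigmoid(x) + torch.sigmoid(y)
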
        # Test that pybind supports casting torch.dtype.
self.assertEqual(
str(torch.float32), str(cpp_extension.get_math_type(torch.half))
)

    def test_extension_module(self):
mm = cpp_extension.MatrixMultiplier(4, 8)
weights = torch.rand(8, 4, dtype=torch.double)
expected = mm.get().mm(weights)
result = mm.forward(weights)
self.assertEqual(expected, result)

    def test_backward(self):
mm = cpp_extension.MatrixMultiplier(4, 8)
weights = torch.rand(8, 4, dtype=torch.double, requires_grad=True)
result = mm.forward(weights)
result.sum().backward()
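        # With result = tensor.mm(weights) and loss = result.sum():
        #   d(loss)/d(weights) = tensor.t() @ ones(4, 4)
        #   d(loss)/d(tensor)  = ones(4, 4) @ weights.t()
        # which is exactly what the expected gradients below compute.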
tensor = mm.get()
expected_weights_grad = tensor.t().mm(torch.ones([4, 4], dtype=torch.double))
self.assertEqual(weights.grad, expected_weights_grad)
expected_tensor_grad = torch.ones([4, 4], dtype=torch.double).mm(weights.t())
self.assertEqual(tensor.grad, expected_tensor_grad)

    @unittest.skipIf(not TEST_CUDA, "CUDA not found")
def test_cuda_extension(self):
import torch_test_cpp_extension.cuda as cuda_extension
x = torch.zeros(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
z = cuda_extension.sigmoid_add(x, y).cpu()
# 2 * sigmoid(0) = 2 * 0.5 = 1
self.assertEqual(z, torch.ones_like(z))

    @unittest.skipIf(not torch.backends.mps.is_available(), "MPS not found")
def test_mps_extension(self):
import torch_test_cpp_extension.mps as mps_extension
tensor_length = 100000
x = torch.randn(tensor_length, device="cpu", dtype=torch.float32)
y = torch.randn(tensor_length, device="cpu", dtype=torch.float32)
cpu_output = mps_extension.get_cpu_add_output(x, y)
mps_output = mps_extension.get_mps_add_output(x.to("mps"), y.to("mps"))
self.assertEqual(cpu_output, mps_output.to("cpu"))

    @unittest.skipIf(not TEST_XPU, "XPU not found")
@unittest.skipIf(
os.getenv("USE_NINJA", "0") == "0",
"sycl extension requires ninja to build",
)
def test_sycl_extension(self):
import torch_test_cpp_extension.sycl as sycl_extension
x = torch.zeros(100, device="xpu", dtype=torch.float32)
y = torch.zeros(100, device="xpu", dtype=torch.float32)
z = sycl_extension.sigmoid_add(x, y).cpu()
# 2 * sigmoid(0) = 2 * 0.5 = 1
self.assertEqual(z, torch.ones_like(z))

    @common.skipIfRocm
@unittest.skipIf(common.IS_WINDOWS, "Windows not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
def test_cublas_extension(self):
from torch_test_cpp_extension import cublas_extension
x = torch.zeros(100, device="cuda", dtype=torch.float32)
z = cublas_extension.noop_cublas_function(x)
self.assertEqual(z, x)

    @common.skipIfRocm
@unittest.skipIf(common.IS_WINDOWS, "Windows not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
def test_cusolver_extension(self):
from torch_test_cpp_extension import cusolver_extension
x = torch.zeros(100, device="cuda", dtype=torch.float32)
z = cusolver_extension.noop_cusolver_function(x)
self.assertEqual(z, x)

    @unittest.skipIf(IS_WINDOWS, "Not available on Windows")
def test_no_python_abi_suffix_sets_the_correct_library_name(self):
# For this test, run_test.py will call `python setup.py install` in the
# cpp_extensions/no_python_abi_suffix_test folder, where the
# `BuildExtension` class has a `no_python_abi_suffix` option set to
# `True`. This *should* mean that on Python 3, the produced shared
# library does not have an ABI suffix like
# "cpython-37m-x86_64-linux-gnu" before the library suffix, e.g. "so".
root = os.path.join("cpp_extensions", "no_python_abi_suffix_test", "build")
matches = [f for _, _, fs in os.walk(root) for f in fs if f.endswith("so")]
self.assertEqual(len(matches), 1, msg=str(matches))
self.assertEqual(matches[0], "no_python_abi_suffix_test.so", msg=str(matches))

    def test_optional(self):
has_value = cpp_extension.function_taking_optional(torch.ones(5))
self.assertTrue(has_value)
has_value = cpp_extension.function_taking_optional(None)
self.assertFalse(has_value)

    @common.skipIfRocm
@unittest.skipIf(common.IS_WINDOWS, "Windows not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(
os.getenv("USE_NINJA", "0") == "0",
"cuda extension with dlink requires ninja to build",
)
def test_cuda_dlink_libs(self):
from torch_test_cpp_extension import cuda_dlink
a = torch.randn(8, dtype=torch.float, device="cuda")
b = torch.randn(8, dtype=torch.float, device="cuda")
ref = a + b
test = cuda_dlink.add(a, b)
self.assertEqual(test, ref)
@unittest.skipIf(not TEST_CUDA, "python_agnostic is a CUDA extension + needs CUDA")
@unittest.skipIf(not common.IS_LINUX, "test requires linux tools ldd and nm")
def test_python_agnostic(self):
# For this test, run_test.py will call `python setup.py bdist_wheel` in the
# cpp_extensions/python_agnostic_extension folder, where the extension and
        # setup calls set `py_limited_api` to `True`. To check that the
        # extension is indeed python agnostic, we test:
# a. The extension wheel name contains "cp39-abi3", meaning the wheel
# should be runnable for any Python 3 version after and including 3.9
# b. The produced shared library does not have libtorch_python.so as a
# dependency from the output of "ldd _C.so"
# c. The .so does not need any python related symbols. We approximate
# this by running "nm -u _C.so" and grepping that nothing starts with "Py"
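        # For reference, a wheel name matching (a) looks like
        # "python_agnostic-0.0-cp39-abi3-linux_x86_64.whl" (the platform tag
        # varies by machine).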
dist_root = os.path.join("cpp_extensions", "python_agnostic_extension", "dist")
matches = list(Path(dist_root).glob("*.whl"))
self.assertEqual(len(matches), 1, msg=str(matches))
whl_file = matches[0]
self.assertRegex(str(whl_file), r".*python_agnostic-0\.0-cp39-abi3-.*\.whl")
build_root = os.path.join(
"cpp_extensions", "python_agnostic_extension", "build"
)
matches = list(Path(build_root).glob("**/*.so"))
self.assertEqual(len(matches), 1, msg=str(matches))
so_file = matches[0]
lddtree = subprocess.check_output(["ldd", so_file]).decode("utf-8")
self.assertFalse("torch_python" in lddtree)
missing_symbols = subprocess.check_output(["nm", "-u", so_file]).decode("utf-8")
self.assertFalse("Py" in missing_symbols)
@unittest.skipIf(not TEST_CUDA, "some aspects of this test require CUDA")
def test_libtorch_agnostic(self):
import libtorch_agnostic
# (1) first test that SGD CPU kernel works
param = torch.rand(5, device="cpu")
grad = torch.rand_like(param)
weight_decay = 0.01
lr = 0.001
maximize = False
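        # With momentum=0 and maximize=False, plain SGD with weight decay
        # computes: new_param = param - lr * (grad + weight_decay * param).
        # The fused op below applies the same update in-place, so the two
        # results should match.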
new_param = libtorch_agnostic.ops.sgd_out_of_place(
param, grad, weight_decay, lr, maximize
)
torch._fused_sgd_(
(param,),
(grad,),
(),
weight_decay=weight_decay,
momentum=0.0,
lr=lr,
dampening=0.0,
nesterov=False,
maximize=maximize,
is_first_step=False,
)
self.assertEqual(new_param, param)

        # (2) then test that we don't hog unnecessary memory
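        # identity should return its input tensor rather than a fresh copy, so
        # the temporary allocated in each call is freed on return and CUDA
        # memory usage should settle back to its initial level.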
def _run_identity(prior_mem, device):
t = torch.rand(32, 32, device=device)
self.assertGreater(torch.cuda.memory_allocated(device), prior_mem)
identi_t = libtorch_agnostic.ops.identity(t)
assert identi_t is t

        device = torch.cuda.current_device()
init_mem = torch.cuda.memory_allocated(device)
for _ in range(3):
_run_identity(init_mem, device)
curr_mem = torch.cuda.memory_allocated(device)
self.assertEqual(curr_mem, init_mem)

        # (3a) test calling our dispatcher on easy API like abs
t = torch.rand(32, 16, device=device) - 0.5

        def _make_cuda_tensors(prior_mem):
cuda_t = libtorch_agnostic.ops.my_abs(t)
self.assertGreater(torch.cuda.memory_allocated(device), prior_mem)
self.assertEqual(cuda_t, torch.abs(t))
init_mem = torch.cuda.memory_allocated(device)
for _ in range(3):
_make_cuda_tensors(init_mem)
curr_mem = torch.cuda.memory_allocated(device)
self.assertEqual(curr_mem, init_mem)

        # (3b) and on factory API like ones_like
cpu_t = libtorch_agnostic.ops.my_ones_like(t, "cpu")
self.assertEqual(cpu_t, torch.ones_like(t, device="cpu"))

        def _make_cuda_tensors(prior_mem):
cuda_t = libtorch_agnostic.ops.my_ones_like(t, t.device)
self.assertGreater(torch.cuda.memory_allocated(device), prior_mem)
self.assertEqual(cuda_t, torch.ones_like(t, device=t.device))
init_mem = torch.cuda.memory_allocated(device)
for _ in range(3):
_make_cuda_tensors(init_mem)
curr_mem = torch.cuda.memory_allocated(device)
self.assertEqual(curr_mem, init_mem)

        # (4) test multiple returns
t1 = torch.rand(2, 3, device="cuda")
t2 = torch.rand(3, 2, device="cpu")
t3 = torch.rand(2, device="cpu")
exp, neg, is_leaf = libtorch_agnostic.ops.exp_neg_is_leaf(t1, t2, t3)
self.assertEqual(exp, torch.exp(t1))
self.assertEqual(neg, torch.neg(t2))
self.assertEqual(is_leaf, t3.is_leaf)


@torch.testing._internal.common_utils.markDynamoStrictTest
class TestPybindTypeCasters(common.TestCase):
"""Pybind tests for ahead-of-time cpp extensions
These tests verify the types returned from cpp code using custom type
casters. By exercising pybind, we also verify that the type casters work
properly.
For each type caster in `torch/csrc/utils/pybind.h` we create a pybind
function that takes no arguments and returns the type_caster type. The
second argument to `PYBIND11_TYPE_CASTER` should be the type we expect to
receive in python, in these tests we verify this at run-time.
"""

    @staticmethod
def expected_return_type(func):
"""
Our Pybind functions have a signature of the form `() -> return_type`.
"""
# Imports needed for the `eval` below.
from typing import List, Tuple # noqa: F401, UP035
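        # pybind11 embeds the signature in the generated docstring, e.g.
        # "get_tensor() -> torch.Tensor"; the regex pulls out the annotation
        # after "->" and eval turns it into the actual Python type object.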
return eval(re.search("-> (.*)\n", func.__doc__).group(1))

    def check(self, func):
val = func()
expected = self.expected_return_type(func)
origin = get_origin(expected)
if origin is list:
self.check_list(val, expected)
elif origin is tuple:
self.check_tuple(val, expected)
else:
self.assertIsInstance(val, expected)

    def check_list(self, vals, expected):
self.assertIsInstance(vals, list)
list_type = get_args(expected)[0]
for val in vals:
self.assertIsInstance(val, list_type)

    def check_tuple(self, vals, expected):
self.assertIsInstance(vals, tuple)
tuple_types = get_args(expected)
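        # A homogeneous variadic annotation like Tuple[int, ...] has `...` as
        # its second argument, so repeat the element type for every value.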
if tuple_types[1] is ...:
tuple_types = repeat(tuple_types[0])
for val, tuple_type in zip(vals, tuple_types):
self.assertIsInstance(val, tuple_type)

    def check_union(self, funcs):
"""Special handling for Union type casters.
A single cpp type can sometimes be cast to different types in python.
In these cases we expect to get exactly one function per python type.
"""
# Verify that all functions have the same return type.
union_type = {self.expected_return_type(f) for f in funcs}
assert len(union_type) == 1
union_type = union_type.pop()
self.assertIs(Union, get_origin(union_type))
# SymInt is inconvenient to test, so don't require it
expected_types = set(get_args(union_type)) - {torch.SymInt}
for func in funcs:
val = func()
for tp in expected_types:
if isinstance(val, tp):
expected_types.remove(tp)
break
else:
raise AssertionError(f"{val} is not an instance of {expected_types}")
self.assertFalse(
expected_types, f"Missing functions for types {expected_types}"
)

    def test_pybind_return_types(self):
functions = [
cpp_extension.get_complex,
cpp_extension.get_device,
cpp_extension.get_generator,
cpp_extension.get_intarrayref,
cpp_extension.get_memory_format,
cpp_extension.get_storage,
cpp_extension.get_symfloat,
cpp_extension.get_symintarrayref,
cpp_extension.get_tensor,
]
union_functions = [
[cpp_extension.get_symint],
]
for func in functions:
with self.subTest(msg=f"check {func.__name__}"):
self.check(func)
for funcs in union_functions:
with self.subTest(msg=f"check {[f.__name__ for f in funcs]}"):
self.check_union(funcs)


@torch.testing._internal.common_utils.markDynamoStrictTest
class TestMAIATensor(common.TestCase):
def test_unregistered(self):
torch.arange(0, 10, device="cpu")
with self.assertRaisesRegex(RuntimeError, "Could not run"):
torch.arange(0, 10, device="maia")
@skipIfTorchDynamo("dynamo cannot model maia device")
def test_zeros(self):
a = torch.empty(5, 5, device="cpu")
self.assertEqual(a.device, torch.device("cpu"))
b = torch.empty(5, 5, device="maia")
self.assertEqual(b.device, torch.device("maia", 0))
self.assertEqual(maia_extension.get_test_int(), 0)
self.assertEqual(torch.get_default_dtype(), b.dtype)
c = torch.empty((5, 5), dtype=torch.int64, device="maia")
self.assertEqual(maia_extension.get_test_int(), 0)
self.assertEqual(torch.int64, c.dtype)

    def test_add(self):
a = torch.empty(5, 5, device="maia", requires_grad=True)
self.assertEqual(maia_extension.get_test_int(), 0)
b = torch.empty(5, 5, device="maia")
self.assertEqual(maia_extension.get_test_int(), 0)
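        # Adding two maia tensors should dispatch to the extension's
        # overridden add kernel, which bumps the extension's test counter.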
a + b
self.assertEqual(maia_extension.get_test_int(), 1)

    def test_conv_backend_override(self):
        # To simplify tests, we use a 4d input here to avoid doing view4d
        # (which needs more overrides) in _convolution.
input = torch.empty(2, 4, 10, 2, device="maia", requires_grad=True)
weight = torch.empty(6, 4, 2, 2, device="maia", requires_grad=True)
bias = torch.empty(6, device="maia")
        # Make sure forward is overridden
out = torch.nn.functional.conv2d(input, weight, bias, 2, 0, 1, 1)
self.assertEqual(maia_extension.get_test_int(), 2)
self.assertEqual(out.shape[0], input.shape[0])
self.assertEqual(out.shape[1], weight.shape[0])
        # Make sure backward is overridden
# Double backward is dispatched to _convolution_double_backward.
# It is not tested here as it involves more computation/overrides.
grad = torch.autograd.grad(out, input, out, create_graph=True)
self.assertEqual(maia_extension.get_test_int(), 3)
self.assertEqual(grad[0].shape, input.shape)

    def test_autocast_apis_for_maia_device(self):
# Default low-precision type in MAIA's autocast.
fast_dtype = torch.get_autocast_dtype("maia")
self.assertEqual(fast_dtype, torch.bfloat16)
self.assertTrue(torch._C._is_autocast_available("maia"))

    @skipIfTorchDynamo(
"dynamo cannot handle maia device. Output tensor may have wrong dtype."
)
def test_matmul_autocast_float16_precision(self):
        # Ensure we can change the low-precision dtype.
x = torch.empty((2, 4), dtype=torch.float, device="maia")
w = torch.empty((4, 2), dtype=torch.float, device="maia")
with torch.autocast(device_type="maia", dtype=torch.float16):
self.assertTrue(torch.is_autocast_enabled("maia"))
y = torch.ops.aten.matmul(x, w)
self.assertEqual(y.dtype, torch.float16)
self.assertEqual(y.shape, (2, 2))

    @skipIfTorchDynamo(
"dynamo cannot handle maia device. Output tensor may have wrong dtype."
)
def test_matmul_autocast_default_precision(self):
        # Use the default low-precision dtype, bfloat16.
x = torch.empty((2, 4), dtype=torch.float, device="maia")
w = torch.empty((4, 2), dtype=torch.float, device="maia")
with torch.autocast(device_type="maia"):
self.assertTrue(torch.is_autocast_enabled("maia"))
y = torch.ops.aten.matmul(x, w)
self.assertEqual(y.dtype, torch.bfloat16)
self.assertEqual(y.shape, (2, 2))


@torch.testing._internal.common_utils.markDynamoStrictTest
class TestRNGExtension(common.TestCase):
def setUp(self):
super().setUp()

    @xfailIfTorchDynamo
def test_rng(self):
        forty_two = torch.full((10,), 42, dtype=torch.int64)
t = torch.empty(10, dtype=torch.int64).random_()
        self.assertNotEqual(t, forty_two)
gen = torch.Generator(device="cpu")
t = torch.empty(10, dtype=torch.int64).random_(generator=gen)
        self.assertNotEqual(t, forty_two)
self.assertEqual(rng_extension.getInstanceCount(), 0)
gen = rng_extension.createTestCPUGenerator(42)
self.assertEqual(rng_extension.getInstanceCount(), 1)
copy = gen
self.assertEqual(rng_extension.getInstanceCount(), 1)
self.assertEqual(gen, copy)
copy2 = rng_extension.identity(copy)
self.assertEqual(rng_extension.getInstanceCount(), 1)
self.assertEqual(gen, copy2)
t = torch.empty(10, dtype=torch.int64).random_(generator=gen)
self.assertEqual(rng_extension.getInstanceCount(), 1)
        self.assertEqual(t, forty_two)
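        # gen, copy, and copy2 all alias the same underlying generator, so the
        # instance count only drops to zero once the last reference is deleted.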
del gen
self.assertEqual(rng_extension.getInstanceCount(), 1)
del copy
self.assertEqual(rng_extension.getInstanceCount(), 1)
del copy2
self.assertEqual(rng_extension.getInstanceCount(), 0)


@torch.testing._internal.common_utils.markDynamoStrictTest
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
class TestTorchLibrary(common.TestCase):
def test_torch_library(self):
import torch_test_cpp_extension.torch_library # noqa: F401
def f(a: bool, b: bool):
return torch.ops.torch_library.logical_and(a, b)

        self.assertTrue(f(True, True))
self.assertFalse(f(True, False))
self.assertFalse(f(False, True))
self.assertFalse(f(False, False))

        s = torch.jit.script(f)
self.assertTrue(s(True, True))
self.assertFalse(s(True, False))
self.assertFalse(s(False, True))
self.assertFalse(s(False, False))
self.assertIn("torch_library::logical_and", str(s.graph))


if __name__ == "__main__":
common.run_tests()