Skip to content

Commit

Permalink
Remove dynamo+nvfuser (pytorch#105789)
Browse files Browse the repository at this point in the history
  • Loading branch information
IvanYashchuk authored and pytorchmergebot committed Aug 8, 2023
1 parent ad22f0f commit 6030151
Show file tree
Hide file tree
Showing 20 changed files with 12 additions and 3,136 deletions.
4 changes: 0 additions & 4 deletions docs/source/torch.compiler.rst
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,6 @@ Some of the most commonly used backends include:
- Description
* - ``torch.compile(m, backend="inductor")``
- Uses the TorchInductor backend. `Read more <https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747>`__
* - ``torch.compile(m, backend="aot_ts_nvfuser")``
- nvFuser with AOT Autograd/TorchScript. `Read more <https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593>`__
* - ``torch.compile(m, backend="nvprims_nvfuser")``
- Tracing with nvFuser and its primitives. `Read more <https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593>`__
* - ``torch.compile(m, backend="cudagraphs")``
- CUDA graphs with AOT Autograd. `Read more <https://github.com/pytorch/torchdynamo/pull/757>`__

Expand Down
2 changes: 1 addition & 1 deletion docs/source/torch.compiler_get_started.rst
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ hub.
And that is not the only available backend, you can run in a REPL
``torch.compile.list_backends()`` to see all the available backends. Try out the
``cudagraphs`` or ``nvfuser`` next as inspiration.
``cudagraphs`` next as inspiration.

Using a pretrained model
~~~~~~~~~~~~~~~~~~~~~~~~
Expand Down
2 changes: 1 addition & 1 deletion docs/source/torch.compiler_troubleshooting.rst
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ If the error does not occur with the ``"eager"`` backend, then the
backend compiler is the source of the error (`example
error <https://gist.github.com/mlazos/2f13681e3cc6c43b3911f336327032de>`__).
There are `different choices <./torch.compiler.rst>`__
for backend compilers for TorchDynamo, with TorchInductor or nvfuser
for backend compilers for TorchDynamo, with TorchInductor
fitting the needs of most users. This section focuses on TorchInductor
as the motivating example, but some tools can also be used with other
backend compilers.
Expand Down
16 changes: 0 additions & 16 deletions test/dynamo/test_backends.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
from torch._dynamo.backends.onnxrt import has_onnxruntime
from torch._dynamo.backends.tvm import has_tvm
from torch._dynamo.testing import same
from torch.testing._internal.common_utils import IS_FBCODE, skipIfRocm
from torch.testing._internal.inductor_utils import HAS_CUDA

requires_cuda = functools.partial(unittest.skipIf, not HAS_CUDA, "requires cuda")
Expand Down Expand Up @@ -123,21 +122,6 @@ def test_aot_ts(self):
def test_aot_cudagraphs(self):
self._check_backend_works("cudagraphs")

@skipIfRocm
@requires_cuda()
def test_aot_ts_nvfuser(self):
self._check_backend_works("aot_ts_nvfuser")

@requires_cuda()
@unittest.skipIf(IS_FBCODE, "BackendCompilerError")
def test_nvprims_nvfuser(self):
self._check_backend_works("nvprims_nvfuser")

@requires_cuda()
@unittest.skipIf(IS_FBCODE, "BackendCompilerError")
def test_nvprims_aten(self):
self._check_backend_works("nvprims_aten")

@unittest.skipIf(not has_onnxruntime(), "requires onnxruntime")
def test_onnxrt(self):
self._check_backend_works("onnxrt")
Expand Down
11 changes: 0 additions & 11 deletions test/test_nvfuser_dynamo.py

This file was deleted.

28 changes: 1 addition & 27 deletions test/test_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -484,26 +484,9 @@ def test_python_ref_torch_fallback(self, device, dtype, op):
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyCUDA
@ops(python_ref_db)
@parametrize('executor', ['aten', 'nvfuser'])
@parametrize('executor', ['aten',])
@skipIfTorchInductor("Takes too long for inductor")
def test_python_ref_executor(self, device, dtype, op, executor):
# TODO: Not all dtypes are supported with nvfuser
from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map
if executor == "nvfuser" and dtype not in _torch_dtype_to_nvfuser_dtype_map:
raise unittest.SkipTest(f"nvfuser doesn't support dtype {dtype}")

# nvFuser tests are rather slow so we only run int32 and float32 types
if executor == "nvfuser" and dtype not in [torch.int32, torch.float32]:
raise unittest.SkipTest("skipped for speed")

if executor == "nvfuser" and not op.supports_nvfuser:
raise unittest.SkipTest(f"{op.name} doesn't support nvfuser")

# nvFuser doesn't support reduction operations on 0-dim tensors yet
skip_zero_dim = False
if executor == "nvfuser" and isinstance(op, ReductionPythonRefInfo):
skip_zero_dim = True

# skip zero-dim tensors for some composites of reduction operations and view
skip_zero_dim_ops = [
"_refs.logsumexp",
Expand All @@ -513,25 +496,16 @@ def test_python_ref_executor(self, device, dtype, op, executor):
"_refs.sum_to_size",
"ops.nvprims.view",
]
if executor == "nvfuser" and op.name in skip_zero_dim_ops:
skip_zero_dim = True

from torch._prims.executor import make_traced
from copy import copy
op = copy(op)
executor = "strictly_nvfuser" if executor == "nvfuser" else executor
op.op = partial(make_traced(op.op), executor=executor)
self._ref_test_helper(
contextlib.nullcontext,
device,
dtype,
op,
skip_zero_numel=("nvfuser" in executor), # nvfuser doesn't support zero-sized tensors
skip_zero_dim=skip_zero_dim,
skip_bfloat=("nvfuser" in executor), # nvfuser doesn't support bfloat tensors for pre-11 cuda TK
# # nvfuser doesn't support view consistency
# https://github.com/pytorch/pytorch/issues/84863
skip_view_consistency=("nvfuser" in executor),
)

@skipMeta
Expand Down
Loading

0 comments on commit 6030151

Please sign in to comment.