Delete Python <= 3.5 specific checks from the code (pytorch#39879)
Summary:
Remove PY3 and PY34 checks from `torch/testing/_internal/common_utils.py`
Remove PY35 global var from `torch.jit.annotations`
Always call `try_real_annotations` in `torch/jit/annotations.py`
Use `map` instead of `imap`; since Python 2 is no longer supported, `map` is always lazy (see the sketch below).
Remove all pre-Python-3.6 checks from `torch/_six.py` and `torch/_appdirs.py`
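
A minimal sketch (not part of the commit) of the Python 3 behavior this cleanup relies on: `map` already returns a lazy iterator (what `itertools.imap` provided on Python 2), and `exec` is an ordinary builtin, so the `imap` and `exec_` shims in `torch/_six.py` are no longer needed.

```python
# Not part of this commit; illustrates the Python 3 semantics assumed above.
import builtins

# map() is lazy: nothing is computed until the iterator is consumed.
squares = map(lambda i: i * i, range(3))
print(next(squares))   # 0 -- only the first element has been evaluated
print(list(squares))   # [1, 4]

# exec is a plain builtin, so torch._six.exec_(code, g, None) becomes:
g = {}
builtins.exec("x = 1 + 1", g, None)
print(g["x"])          # 2
```
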
Pull Request resolved: pytorch#39879

Differential Revision: D22037811

Pulled By: malfet

fbshipit-source-id: af0c79f976569c2059d39ecb49c6b8285161734f
malfet authored and facebook-github-bot committed Jun 15, 2020
1 parent c8c53c8 commit c6b69a4
Showing 6 changed files with 36 additions and 160 deletions.
3 changes: 2 additions & 1 deletion test/test_jit.py
@@ -4378,6 +4378,7 @@ def test_is_optional(self):
torch._jit_internal.is_optional(ann)

def test_interpreter_fuzz(self):
import builtins
# This test generates random tree-like programs to fuzz test
# that the interpreter does not have a bug in its stack manipulation
# code. An assert in that code ensures individual operators are
@@ -4427,7 +4428,7 @@ def select_expr_or_var():
for i in range(100):
g = {'torch': torch}
code = gen_code()
torch._six.exec_(code, g, None)
builtins.exec(code, g, None)
cu = torch.jit.CompilationUnit(code)
with freeze_rng_state():
o1 = g['f']()
10 changes: 2 additions & 8 deletions torch/_appdirs.py
@@ -53,10 +53,7 @@
import sys
import os

PY3 = sys.version_info[0] == 3

if PY3:
unicode = str
unicode = str

if sys.platform.startswith('java'):
import platform
@@ -498,10 +495,7 @@ def _get_win_folder_from_registry(csidl_name):
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
import winreg as _winreg

shell_folder_name = {
"CSIDL_APPDATA": "AppData",
128 changes: 17 additions & 111 deletions torch/_six.py
@@ -18,55 +18,24 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import itertools
import builtins
import collections.abc
import io
import math
import sys
import types
import inspect


PY2 = sys.version_info[0] == 2
import queue # noqa: F401

inf = math.inf
nan = math.nan
string_classes = (str, bytes)
int_classes = int
FileNotFoundError = builtins.FileNotFoundError
StringIO = io.StringIO
container_abcs = collections.abc
PY3 = sys.version_info[0] == 3
PY37 = sys.version_info[0] == 3 and sys.version_info[1] == 7


if PY2:
import __builtin__ as builtins
elif PY3:
import builtins


if PY2:
inf = float('inf')
nan = float('nan')
else:
import math
inf = math.inf
nan = math.nan

if PY2:
string_classes = basestring
else:
string_classes = (str, bytes)


if PY2:
int_classes = (int, long)
else:
int_classes = int


if PY2:
FileNotFoundError = IOError
else:
FileNotFoundError = builtins.FileNotFoundError


if PY2:
import Queue as queue # noqa: F401
else:
import queue # noqa: F401


def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
@@ -79,74 +48,16 @@ def __new__(cls, name, this_bases, d):
return type.__new__(metaclass, 'temporary_class', (), {})


# A portable way of referring to the generator version of map
# in both Python 2 and Python 3.
if hasattr(itertools, 'imap'):
imap = itertools.imap # type: ignore
else:
imap = map # type: ignore


if PY3:
import builtins
# See https://github.com/PyCQA/flake8-bugbear/issues/64
exec_ = getattr(builtins, "exec") # noqa: B009
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")


if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value

if PY2:
import collections
container_abcs = collections
elif PY3:
import collections.abc
container_abcs = collections.abc

# Gets a function from the name of a method on a type
if PY2:
def get_function_from_type(cls, name):
method = getattr(cls, name, None)
return getattr(method, "__func__", None)
elif PY3:
def get_function_from_type(cls, name):
return getattr(cls, name, None)

if PY2:
import StringIO
StringIO = StringIO.StringIO
elif PY3:
import io
StringIO = io.StringIO
def get_function_from_type(cls, name):
return getattr(cls, name, None)


# The codes below is not copied from the six package, so the copyright
@@ -164,9 +75,4 @@ def istuple(obj):
return isinstance(obj, tuple) or t.__module__ == 'torch.return_types'

def bind_method(fn, obj, obj_type):
if PY2:
if inspect.ismethod(fn):
fn = fn.__func__
return types.MethodType(fn, obj, obj_type)
else:
return types.MethodType(fn, obj)
return types.MethodType(fn, obj)
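
The `with_metaclass` helper shown above survives this cleanup unchanged; a small usage sketch (assuming this revision of torch is importable, with made-up `Meta`/`Widget` names) of the pattern it supports:

```python
# Usage sketch only; Meta and Widget are illustrative names, not part of the diff.
from torch._six import with_metaclass

class Meta(type):
    def __new__(mcls, name, bases, namespace):
        # Stamp every class created through this metaclass with a default tag.
        namespace.setdefault("tag", name.lower())
        return super().__new__(mcls, name, bases, namespace)

# Equivalent to `class Widget(metaclass=Meta)` in plain Python 3 syntax.
class Widget(with_metaclass(Meta)):
    pass

print(Widget.tag)    # 'widget'
print(type(Widget))  # <class '__main__.Meta'>
```
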
25 changes: 9 additions & 16 deletions torch/jit/annotations.py
@@ -1,4 +1,3 @@
import sys
import ast
import inspect
import re
@@ -17,9 +16,6 @@
from typing import Callable


PY35 = sys.version_info >= (3, 5)


class Module(object):
def __init__(self, name, members):
self.name = name
@@ -56,18 +52,15 @@ def __getitem__(self, name):
return getattr(builtins, name, None)

def get_signature(fn, rcb, loc, is_method):
# Python 3.5 adds support for the nice annotation syntax, so try that first.
signature = None
if PY35:
signature = try_real_annotations(fn, loc)
if signature is not None and is_method:
# If this is a method, then the signaure will include a type for
# `self`, but type comments do not contain a `self`. So strip it
# away here so everything is consistent (`inspect.ismethod` does
# not work here since `fn` is unbound at this point)
param_types, return_type = signature
param_types = param_types[1:]
signature = (param_types, return_type)
signature = try_real_annotations(fn, loc)
if signature is not None and is_method:
# If this is a method, then the signature will include a type for
# `self`, but type comments do not contain a `self`. So strip it
# away here so everything is consistent (`inspect.ismethod` does
# not work here since `fn` is unbound at this point)
param_types, return_type = signature
param_types = param_types[1:]
signature = (param_types, return_type)

if signature is None:
type_line, source = None, None
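
A small illustration (not part of the diff) of why `get_signature` drops the first parameter type when `is_method` is true: `fn` is still an unbound function at this point, so its real signature includes `self`.

```python
# Illustration only; MyModule is a made-up class name.
import inspect

class MyModule:
    def forward(self, x: int) -> int:
        return x + 1

fn = MyModule.forward                           # unbound, like `fn` in get_signature
params = list(inspect.signature(fn).parameters)
print(params)      # ['self', 'x'] -- `self` still appears in the real signature
print(params[1:])  # ['x']         -- the stripped view, consistent with type comments
```
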
9 changes: 1 addition & 8 deletions torch/tensor.py
@@ -6,7 +6,6 @@
import torch.utils.hooks as hooks
import warnings
import weakref
from torch._six import imap
from torch._C import _add_docstr
from numbers import Number
import functools
@@ -445,20 +444,14 @@ def __len__(self):
return self.shape[0]

def __iter__(self):
# NB: we use 'imap' and not 'map' here, so that in Python 2 we get a
# generator and don't eagerly perform all the indexes. This could
# save us work, and also helps keep trace ordering deterministic
# (e.g., if you zip(*hiddens), the eager map will force all the
# indexes of hiddens[0] before hiddens[1], while the generator
# map will interleave them.)
if self.dim() == 0:
raise TypeError('iteration over a 0-d tensor')
if torch._C._get_tracing_state():
warnings.warn('Iterating over a tensor might cause the trace to be incorrect. '
'Passing a tensor of different shape won\'t change the number of '
'iterations executed (and might lead to errors or silently give '
'incorrect results).', category=RuntimeWarning)
return iter(imap(lambda i: self[i], range(self.size(0))))
return iter(map(lambda i: self[i], range(self.size(0))))

def __hash__(self):
return id(self)
21 changes: 5 additions & 16 deletions torch/testing/_internal/common_utils.py
@@ -212,10 +212,7 @@ def repeat_helper(f):
@wraps(f)
def call_helper(self, *args):
for dtype in dtypes:
if PY34:
with TestCase.subTest(self, dtype=dtype):
f(self, *args, dtype=dtype)
else:
with TestCase.subTest(self, dtype=dtype):
f(self, *args, dtype=dtype)

return call_helper
@@ -224,8 +221,6 @@ def call_helper(self, *args):
# Environment variable `IS_PYTORCH_CI` is set in `.jenkins/common.sh`.
IS_PYTORCH_CI = bool(os.environ.get('IS_PYTORCH_CI'))

PY3 = sys.version_info > (3, 0)
PY34 = sys.version_info >= (3, 4)

def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
@@ -242,7 +237,6 @@ def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]



def run_tests(argv=UNITTEST_ARGS):
if TEST_DISCOVER:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
@@ -319,15 +313,10 @@ def _check_module_exists(name):
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
if not PY34: # Python [3, 3.4)
import importlib
loader = importlib.find_loader(name)
return loader is not None
else: # Python >= 3.4
import importlib
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
import importlib
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None

TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
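
A minimal sketch (not part of the diff) of the lookup `_check_module_exists` now always performs; `module_exists` is a hypothetical stand-in name. `find_spec` locates a top-level module without executing it, which keeps import-time side effects (librosa/#747, torchvision/#544) out of the test process.

```python
# Sketch only; mirrors the unconditional find_spec path kept by this commit.
import importlib.util

def module_exists(name: str) -> bool:
    # find_spec returns a ModuleSpec (or None) without importing the module.
    return importlib.util.find_spec(name) is not None

print(module_exists("numpy"))           # True if numpy is installed
print(module_exists("no_such_module"))  # False
```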
