[BE] Use f-string in various Python functions (pytorch#44161)
Summary: Pull Request resolved: pytorch#44161

Reviewed By: seemethere

Differential Revision: D23515874

Pulled By: malfet

fbshipit-source-id: 868cf65aedd58fce943c08f8e079e84e0a36df1f
malfet authored and facebook-github-bot committed Sep 4, 2020
1 parent 28b1360 commit 0c01f13
Showing 9 changed files with 44 additions and 48 deletions.
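The change is mechanical throughout: calls of the form '...{}...'.format(args) become f-strings, which interpolate the same expressions inline and require Python 3.6 or newer. A minimal standalone sketch of the pattern, reusing the message from the torch/_classes.py hunk below (the values assigned here are made up for illustration):

name, attr = "MyModule", "forward"

# before: str.format with positional placeholders
old_msg = 'Class {}.{} not registered!'.format(name, attr)

# after: an f-string interpolates the expressions directly
new_msg = f'Class {name}.{attr} not registered!'

assert old_msg == new_msg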
6 changes: 3 additions & 3 deletions torch/__init__.py
@@ -91,7 +91,7 @@
res = kernel32.AddDllDirectory(dll_path)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
- err.strerror += ' Error adding "{}" to the DLL directories.'.format(dll_path)
+ err.strerror += f' Error adding "{dll_path}" to the DLL directories.'
raise err

try:
@@ -112,7 +112,7 @@
last_error = ctypes.get_last_error()
if res is None and last_error != 126:
err = ctypes.WinError(last_error)
- err.strerror += ' Error loading "{}" or one of its dependencies.'.format(dll)
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
raise err
elif res is not None:
is_loaded = True
@@ -123,7 +123,7 @@
res = kernel32.LoadLibraryW(dll)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
- err.strerror += ' Error loading "{}" or one of its dependencies.'.format(dll)
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
raise err

kernel32.SetErrorMode(prev_error_mode)
2 changes: 1 addition & 1 deletion torch/_classes.py
@@ -9,7 +9,7 @@ def __init__(self, name):
def __getattr__(self, attr):
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if proxy is None:
- raise RuntimeError('Class {}.{} not registered!'.format(self.name, attr))
+ raise RuntimeError(f'Class {self.name}.{attr} not registered!')
return proxy

class _Classes(types.ModuleType):
18 changes: 9 additions & 9 deletions torch/_jit_internal.py
@@ -56,7 +56,7 @@ def parseNestedExpr(expr, module) -> Tuple[Any, int]:
i += 1

base = lookupInModule(expr[:i].strip(), module)
- assert base is not None, "Unresolvable type {}".format(expr[:i])
+ assert base is not None, f"Unresolvable type {expr[:i]}"
if i == len(expr) or expr[i] != '[':
return base, i

@@ -465,7 +465,7 @@ def forward(self, x):

if not isinstance(drop, bool):
raise RuntimeError("Argument to @torch.jit.ignore must be a bool or "
"a function but got {}".format(drop))
f"a function but got {drop}")

# for backwards compat
drop_on_export = kwargs.pop("drop_on_export", None)
@@ -707,7 +707,7 @@ def __getitem__(self, types):
# list size
BroadcastingList1 = BroadcastingListCls()
for i in range(2, 7):
globals()["BroadcastingList{}".format(i)] = BroadcastingList1
globals()[f"BroadcastingList{i}"] = BroadcastingList1


def is_scripting():
@@ -768,12 +768,12 @@ def _qualified_name(obj):
# The Python docs are very clear that `__module__` can be None, but I can't
# figure out when it actually would be.
if module_name is None:
raise RuntimeError("Could not get qualified name for class '{}': "
"__module__ can't be None.".format(name))
raise RuntimeError(f"Could not get qualified name for class '{name}': "
"__module__ can't be None.")

# if getattr(sys.modules[module_name], name) is not obj:
# raise RuntimeError("Could not get qualified name for class '{}': "
# "the attr {} on module {} is not the the class".format(name, name, module_name))
# raise RuntimeError(f"Could not get qualified name for class '{name}': "
# f"the attr {name} on module {module_name} is not the the class")

# __main__ is a builtin module, so rewrite it to "__torch__".
if module_name == "__main__":
@@ -784,8 +784,8 @@ def _qualified_name(obj):
module_name = "__torch__." + module_name

if "." in name:
raise RuntimeError("Could not get qualified name for class '{}': "
"'{}' is not a valid identifier".format(name, name))
raise RuntimeError(f"Could not get qualified name for class '{name}': "
f"'{name}' is not a valid identifier")

return module_name + "." + name

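The BroadcastingList hunk above also uses an f-string to build names dynamically: globals()[f"BroadcastingList{i}"] registers BroadcastingList2 through BroadcastingList6 as aliases of BroadcastingList1. A standalone sketch of that pattern, with a stand-in class rather than the real BroadcastingListCls:

class _ListCls:
    def __getitem__(self, types):
        return None

BroadcastingList1 = _ListCls()
for i in range(2, 7):
    # creates module-level names BroadcastingList2 .. BroadcastingList6
    globals()[f"BroadcastingList{i}"] = BroadcastingList1

assert BroadcastingList4 is BroadcastingList1  # noqa: F821 (name is created above)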
12 changes: 5 additions & 7 deletions torch/functional.py
@@ -894,7 +894,7 @@ def tensordot(a, b, dims=2):
if isinstance(dims, torch.Tensor):
dims = dims.item()
if dims < 0:
raise RuntimeError("tensordot expects dims >= 0, but got dims={}".format(dims))
raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
dims_a = list(range(-dims, 0))
dims_b = list(range(dims))
return _VF.tensordot(a, b, dims_a, dims_b) # type: ignore
@@ -1020,7 +1020,7 @@ def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'):
elif compute_mode == 'donot_use_mm_for_euclid_dist':
return _VF.cdist(x1, x2, p, 2) # type: ignore
else:
raise ValueError("{} is not a valid value for compute_mode".format(compute_mode))
raise ValueError(f"{compute_mode} is not a valid value for compute_mode")

def atleast_1d(*tensors):
r"""
@@ -1283,7 +1283,7 @@ def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa
return _VF.nuclear_norm(input, _dim, keepdim=keepdim) # type: ignore
else:
return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out) # type: ignore
raise RuntimeError("only valid string values are 'fro' and 'nuc', found {}".format(p))
raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}")
else:
if _dim is None:
_dim = [i for i in range(ndim)] # noqa: C416 TODO: rewrite as list(range(m))
@@ -1417,11 +1417,9 @@ def _lu_impl(A, pivot=True, get_infos=False, out=None):
def _check_list_size(out_len: int, get_infos: bool, out: _ListOrSeq) -> None:
get_infos_int = 1 if get_infos else 0
if out_len - get_infos_int != 2:
raise TypeError("expected tuple of {} elements but got {}"
.format(2 + int(get_infos), out_len))
raise TypeError(f"expected tuple of {2 + int(get_infos)} elements but got {out_len}")
if not isinstance(out, (tuple, list)):
raise TypeError("argument 'out' must be tuple of Tensors, not {}"
.format(type(out).__name__))
raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}")

def _lu_with_infos(A, pivot=True, get_infos=False, out=None):
# type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor]
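The _check_list_size changes above also show that f-string replacement fields may hold arbitrary expressions, not just names: {2 + int(get_infos)} and {type(out).__name__} are evaluated when the message is built. A standalone sketch with a hypothetical helper, not the PyTorch function itself:

def check_out(out, out_len, get_infos):
    if out_len - int(get_infos) != 2:
        raise TypeError(f"expected tuple of {2 + int(get_infos)} elements but got {out_len}")
    if not isinstance(out, (tuple, list)):
        raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}")

try:
    check_out((), 3, get_infos=False)
except TypeError as err:
    print(err)  # expected tuple of 2 elements but got 3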
12 changes: 6 additions & 6 deletions torch/jit/annotations.py
@@ -33,7 +33,7 @@ def __getattr__(self, name):
try:
return self.members[name]
except KeyError:
raise RuntimeError("Module {} has no member called {}".format(self.name, name)) from None
raise RuntimeError(f"Module {self.name} has no member called {name}") from None


class EvalEnv(object):
@@ -131,7 +131,7 @@ def check_fn(fn, loc):
py_ast = ast.parse(source)
if len(py_ast.body) == 1 and isinstance(py_ast.body[0], ast.ClassDef):
raise torch.jit.frontend.FrontendError(
loc, "Cannot instantiate class '{}' in a script function".format(py_ast.body[0].name))
loc, f"Cannot instantiate class '{py_ast.body[0].name}' in a script function")
if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
raise torch.jit.frontend.FrontendError(loc, "Expected a single top-level function")

@@ -259,7 +259,7 @@ def as_ann(ann):
def get_enum_value_type(e: Type[enum.Enum], loc):
enum_values: List[enum.Enum] = list(e)
if not enum_values:
raise ValueError("No enum values defined for: '{}'".format(e.__class__))
raise ValueError(f"No enum values defined for: '{e.__class__}'")

types = {type(v.value) for v in enum_values}
ir_types = [try_ann_to_type(t, loc) for t in types]
@@ -325,8 +325,8 @@ def try_ann_to_type(ann, loc):
return IntType.get() # dtype not yet bound in as its own type
if inspect.isclass(ann) and issubclass(ann, enum.Enum):
if not is_enum_support_enabled():
warnings.warn("Enum support is work in progress, enum class {}"
" is not compiled".format(ann))
warnings.warn(f"Enum support is work in progress, enum class {ann}"
" is not compiled")
return None
if not hasattr(ann, "__torch_script_class__"):
torch.jit._script._recursive_compile_class(ann, loc)
@@ -349,7 +349,7 @@ def ann_to_type(ann, loc):
the_type = try_ann_to_type(ann, loc)
if the_type is not None:
return the_type
raise ValueError("Unknown type annotation: '{}'".format(ann))
raise ValueError(f"Unknown type annotation: '{ann}'")


__all__ = [
6 changes: 3 additions & 3 deletions torch/quasirandom.py
@@ -45,7 +45,7 @@ class SobolEngine(object):
def __init__(self, dimension, scramble=False, seed=None):
if dimension > self.MAXDIM or dimension < 1:
raise ValueError("Supported range of dimensionality "
"for SobolEngine is [1, {}]".format(self.MAXDIM))
f"for SobolEngine is [1, {self.MAXDIM}]")

self.seed = seed
self.scramble = scramble
@@ -120,9 +120,9 @@ def fast_forward(self, n):
return self

def __repr__(self):
- fmt_string = ['dimension={}'.format(self.dimension)]
+ fmt_string = [f'dimension={self.dimension}']
if self.scramble:
fmt_string += ['scramble=True']
if self.seed is not None:
- fmt_string += ['seed={}'.format(self.seed)]
+ fmt_string += [f'seed={self.seed}']
return self.__class__.__name__ + '(' + ', '.join(fmt_string) + ')'
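The __repr__ change above keeps the existing shape: optional fields are appended as pre-formatted fragments and joined at the end, with only the formatting calls switched to f-strings. A standalone sketch using a hypothetical class rather than SobolEngine:

class Engine:
    def __init__(self, dimension, scramble=False, seed=None):
        self.dimension, self.scramble, self.seed = dimension, scramble, seed

    def __repr__(self):
        parts = [f'dimension={self.dimension}']
        if self.scramble:
            parts += ['scramble=True']
        if self.seed is not None:
            parts += [f'seed={self.seed}']
        return self.__class__.__name__ + '(' + ', '.join(parts) + ')'

print(Engine(3, seed=42))  # prints: Engine(dimension=3, seed=42)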
27 changes: 13 additions & 14 deletions torch/serialization.py
@@ -137,12 +137,12 @@ def validate_cuda_device(location):
'If you are running on a CPU-only machine, '
'please use torch.load with map_location=torch.device(\'cpu\') '
'to map your storages to the CPU.')
- if device >= torch.cuda.device_count():
+ device_count = torch.cuda.device_count()
+ if device >= device_count:
raise RuntimeError('Attempting to deserialize object on CUDA device '
- '{device} but torch.cuda.device_count() is {device_count}. Please use '
+ f'{device} but torch.cuda.device_count() is {device_count}. Please use '
'torch.load with map_location to map your storages '
- 'to an existing device.'.format(
- device=device, device_count=torch.cuda.device_count()))
+ 'to an existing device.')
return device


@@ -234,7 +234,7 @@ def _open_file_like(name_or_buffer, mode):
elif 'r' in mode:
return _open_buffer_reader(name_or_buffer)
else:
raise RuntimeError("Expected 'r' or 'w' in mode but got {}".format(mode))
raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")


class _open_zipfile_reader(_opener):
@@ -479,7 +479,7 @@ def persistent_id(obj):

# Write each tensor to a file named tensor/the_tensor_key in the zip archive
for key in sorted(serialized_storages.keys()):
- name = 'data/{}'.format(key)
+ name = f'data/{key}'
storage = serialized_storages[key]
if storage.device.type == 'cpu':
# If it's on the CPU we can directly copy it into the zip file
@@ -654,8 +654,7 @@ def _check_container_source(container_type, source_file, original_source):
"accessing the object's source attribute or set "
"`torch.nn.Module.dump_patches = True` and use the "
"patch tool to revert the changes.")
msg = ("source code of class '{container_type}' has changed. {msg}"
.format(container_type=torch.typename(container_type), msg=msg))
msg = f"source code of class '{torch.typename(container_type)}' has changed. {msg}"
warnings.warn(msg, SourceChangeWarning)

def legacy_load(f):
@@ -698,8 +697,8 @@ def persistent_load(saved_id):
ndim, = struct.unpack('<i', f.read(4))
# skip next 4 bytes; legacy encoding treated ndim as 8 bytes
f.read(4)
- size = struct.unpack('<{}q'.format(ndim), f.read(8 * ndim))
- stride = struct.unpack('<{}q'.format(ndim), f.read(8 * ndim))
+ size = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
+ stride = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
storage_offset, = struct.unpack('<q', f.read(8))
tensor = tensor_type().set_(storage, storage_offset, size, stride)
deserialized_objects[key] = tensor
@@ -759,8 +758,8 @@ def persistent_load(saved_id):
if not hasattr(f, 'readinto') and (3, 8, 0) <= sys.version_info < (3, 8, 2):
raise RuntimeError(
"torch.load does not work with file-like objects that do not implement readinto on Python 3.8.0 and 3.8.1. "
"Received object of type \"{}\". Please update to Python 3.8.2 or newer to restore this "
"functionality.".format(type(f)))
f"Received object of type \"{type(f)}\". Please update to Python 3.8.2 or newer to restore this "
"functionality.")

magic_number = pickle_module.load(f, **pickle_load_args)
if magic_number != MAGIC_NUMBER:
@@ -828,7 +827,7 @@ def _load(zip_file, map_location, pickle_module, **pickle_load_args):
loaded_storages = {}

def load_tensor(data_type, size, key, location):
- name = 'data/{}'.format(key)
+ name = f'data/{key}'
dtype = data_type(0).dtype

storage = zip_file.get_storage_from_record(name, size, dtype).storage()
@@ -840,7 +839,7 @@ def persistent_load(saved_id):
data = saved_id[1:]

assert typename == 'storage', \
"Unknown typename for persistent_load, expected 'storage' but got '{}'".format(typename)
f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'"
data_type, key, location, size = data
if key not in loaded_storages:
load_tensor(data_type, size, key, _maybe_decode_ascii(location))
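Two details in the serialization hunks are worth noting. validate_cuda_device now calls torch.cuda.device_count() once, stores it, and interpolates the stored value, instead of passing the call to .format(). And legacy_load builds its struct format string with an f-string: f'<{ndim}q' describes ndim little-endian signed 64-bit integers, matching the 8 * ndim bytes read from the file. A standalone sketch of the struct part, with made-up sample values:

import struct

ndim = 3
payload = struct.pack(f'<{ndim}q', 10, 20, 30)   # 8 * ndim bytes
size = struct.unpack(f'<{ndim}q', payload)

assert size == (10, 20, 30)
assert f'<{ndim}q' == '<{}q'.format(ndim)        # old and new spellings agree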
5 changes: 2 additions & 3 deletions torch/storage.py
@@ -10,7 +10,7 @@ class _StorageBase(object):

def __str__(self):
content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
- return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
+ return content + f'\n[{torch.typename(self)} of size {len(self)}]'

def __repr__(self):
return str(self)
@@ -102,8 +102,7 @@ def complex_float(self):
def pin_memory(self):
"""Copies the storage to pinned memory, if it's not already pinned."""
if self.is_cuda:
raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
.format(self.type()))
raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
import torch.cuda
allocator = torch.cuda._host_allocator()
return type(self)(self.size(), allocator=allocator).copy_(self)
4 changes: 2 additions & 2 deletions torch/tensor.py
@@ -60,7 +60,7 @@ def __deepcopy__(self, memo):
self.q_per_channel_zero_points(), \
self.q_per_channel_axis()
else:
raise RuntimeError("Unsupported qscheme {} in deepcopy".format(self.qscheme()))
raise RuntimeError(f"Unsupported qscheme {self.qscheme()} in deepcopy")
new_tensor = torch._utils._rebuild_qtensor(
new_storage,
self.storage_offset(),
@@ -114,7 +114,7 @@ def __reduce_ex__(self, proto):
self.q_per_channel_zero_points(),
self.q_per_channel_axis())
else:
raise RuntimeError("Serialization is not supported for tensors of type {}".format(self.qscheme()))
raise RuntimeError(f"Serialization is not supported for tensors of type {self.qscheme()}")
args = (self.storage(),
self.storage_offset(),
tuple(self.size()),