Revert "[BE]: Update ruff to 0.285 (pytorch#107519)"
This reverts commit 88ab3e4.

Reverted pytorch#107519 on behalf of https://github.com/ZainRizvi due to Sorry, but this PR breaks internal tests. @ezyang, can you please help them get unblocked? It seems like one of the strings was probably accidentally modified ([comment](pytorch#107519 (comment)))
pytorchmergebot committed Aug 22, 2023
1 parent 1e9b590 commit d59a686
Showing 86 changed files with 403 additions and 319 deletions.
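
The change being undone is mechanical: pytorch#107519 let the newer ruff rewrite `str.format()` calls into f-strings across the tree, and this revert restores the `.format()` originals. As a minimal sketch of the equivalence being undone (variable names here are illustrative, not from the diff):

```python
# Both spellings render identical text; the revert swaps the second
# form back to the first throughout the tree.
name, count = "ruff", 3
via_format = "found {} issues with {}".format(count, name)
via_fstring = f"found {count} issues with {name}"
assert via_format == via_fstring
```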
2 changes: 1 addition & 1 deletion .lintrunner.toml
@@ -2666,6 +2666,6 @@ init_command = [
     'python3',
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
-    'ruff==0.0.285',
+    'ruff==0.0.280',
 ]
 is_formatter = true
@@ -168,7 +168,7 @@ def __init__(
         if len(replace_stride_with_dilation) != 3:
             raise ValueError(
                 "replace_stride_with_dilation should be None "
-                f"or a 3-element tuple, got {replace_stride_with_dilation}"
+                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
             )
         self.groups = groups
         self.base_width = width_per_group
6 changes: 3 additions & 3 deletions benchmarks/operator_benchmark/benchmark_core.py
@@ -200,10 +200,10 @@ def __init__(self, args):
     def _print_header(self):
         DASH_LINE = "-" * 40
         print(
-            f"# {DASH_LINE}\n"
+            "# {}\n"
             "# PyTorch/Caffe2 Operator Micro-benchmarks\n"
-            f"# {DASH_LINE}\n"
-            f"# Tag : {self.args.tag_filter}\n"
+            "# {}\n"
+            "# Tag : {}\n".format(DASH_LINE, DASH_LINE, self.args.tag_filter)
         )
         if self.args.list_tests:
             print("# List of tests:")
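
A detail that makes hunks like this one work: Python concatenates adjacent string literals at compile time, so the single trailing `.format()` call fills placeholders across all of the pieces, and a repeated value such as `DASH_LINE` must be passed once per placeholder. A standalone sketch with stand-in values:

```python
DASH_LINE = "-" * 40
tag = "all"  # stand-in for self.args.tag_filter
# Adjacent literals are concatenated first, so one .format() call
# fills the placeholders in all three pieces.
header = (
    "# {}\n"
    "# PyTorch/Caffe2 Operator Micro-benchmarks\n"
    "# {}\n"
    "# Tag : {}\n".format(DASH_LINE, DASH_LINE, tag)
)
assert header.count(DASH_LINE) == 2
```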
6 changes: 4 additions & 2 deletions benchmarks/overrides_benchmark/bench.py
@@ -57,8 +57,10 @@ def main():
 
         bench_min, bench_std = bench(tensor_1, tensor_2)
         print(
-            f"Type {t.__name__} had a minimum time of {10**6 * bench_min} us"
-            f" and a standard deviation of {(10**6) * bench_std} us."
+            "Type {} had a minimum time of {} us"
+            " and a standard deviation of {} us.".format(
+                t.__name__, (10**6 * bench_min), (10**6) * bench_std
+            )
         )
 
 
1 change: 0 additions & 1 deletion pyproject.toml
@@ -74,7 +74,6 @@ select = [
     "PIE807",
     "PIE810",
     "PLE",
-    "RUF017",
     "TRY302",
 ]
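
This hunk drops `RUF017` from ruff's `select` list, presumably because the rule is not available in ruff 0.0.280. As far as I recall, RUF017 flags quadratic list summation; a hedged sketch of the pattern and its usual fix:

```python
import functools
import operator

lists = [[1, 2], [3], [4, 5]]
# The pattern the rule flags: sum() copies the growing accumulator
# on every step, which is quadratic in the total element count.
flat_slow = sum(lists, [])
# The usual fix extends one accumulator in place, in a single pass.
flat_fast = functools.reduce(operator.iadd, lists, [])
assert flat_slow == flat_fast == [1, 2, 3, 4, 5]
```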
2 changes: 1 addition & 1 deletion test/cpp_api_parity/functional_impl_check.py
@@ -173,7 +173,7 @@ def write_test_to_test_class(
 
     assert not ('cpp_options_args' in test_params_dict and 'cpp_function_call' in test_params_dict), (
         "Only one of `cpp_options_args` and `cpp_function_call` entries "
-        f"should be present in test params dict:\n{pprint.pformat(test_params_dict)}")
+        "should be present in test params dict:\n{}").format(pprint.pformat(test_params_dict))
 
     functional_name = compute_functional_name(test_params_dict)
 
14 changes: 7 additions & 7 deletions test/cpp_api_parity/module_impl_check.py
@@ -209,11 +209,11 @@ def process_test_params_for_module(test_params_dict, device, test_instance_class
     if 'constructor_args' in test_params_dict:
         assert 'cpp_constructor_args' in test_params_dict, (
             "If `constructor_args` is present in test params dict, to enable C++ API parity test, "
-            f"`cpp_constructor_args` must be present in:\n{pprint.pformat(test_params_dict)}"
+            "`cpp_constructor_args` must be present in:\n{}"
             "If you are interested in adding the C++ API parity test, please see:\n"
             "NOTE [How to check NN module / functional API parity between Python and C++ frontends]. \n"
             "If not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this."
-        )
+        ).format(pprint.pformat(test_params_dict))
 
     return TorchNNModuleTestParams(
         module_name=module_name,
@@ -233,16 +233,16 @@
     module_name = compute_module_name(test_params_dict)
 
     assert hasattr(torch.nn, module_name), (
-        f"`torch.nn` doesn't have module `{module_name}`. "
+        "`torch.nn` doesn't have module `{}`. "
         "If you are adding a new test, please set `fullname` using format `ModuleName_desc` "
-        f"or set `module_name` using format `ModuleName` in the module test dict:\n{pprint.pformat(test_params_dict)}"
-    )
+        "or set `module_name` using format `ModuleName` in the module test dict:\n{}"
+    ).format(module_name, pprint.pformat(test_params_dict))
 
     module_full_name = 'torch::nn::' + module_name
 
     assert module_full_name in parity_table['torch::nn'], (
-        f"Please add `{module_full_name}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. "
-        f"(Discovered while processing\n{pprint.pformat(test_params_dict)}.)")
+        "Please add `{}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. "
+        "(Discovered while processing\n{}.)").format(module_full_name, pprint.pformat(test_params_dict))
 
     for device in devices:
         test_params = process_test_params_for_module(
2 changes: 1 addition & 1 deletion test/nn/test_multihead_attention.py
@@ -329,7 +329,7 @@ def test_multihead_attn_3d_attn_mask(self):
         key = torch.rand(batch_size, src_len, embed_dim)  # [N, S, D]
         value = key  # [N, S, D]
         attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float()  # [N, T, S]
-        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0)
+        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
 
         mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)
 
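
The idiom in this test builds an additive attention mask: blocked positions become `-inf` and allowed positions `0.0`, so the mask can simply be added to raw attention scores before softmax. A self-contained causal variant of the same idiom (the causal shape avoids the all-blocked rows a random mask can produce):

```python
import torch

src_len = 4
# Lower-triangular True = allowed; the == comparisons run against the
# original bool tensor before `mask` is rebound to the float result.
mask = (torch.triu(torch.ones(src_len, src_len)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0)
scores = torch.randn(src_len, src_len)
weights = torch.softmax(scores + mask, dim=-1)  # each row sums to 1
```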
3 changes: 2 additions & 1 deletion test/onnx/model_defs/squeezenet.py
@@ -32,7 +32,8 @@ def __init__(self, version=1.0, num_classes=1000, ceil_mode=False):
         super().__init__()
         if version not in [1.0, 1.1]:
             raise ValueError(
-                f"Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected"
+                "Unsupported SqueezeNet version {version}:"
+                "1.0 or 1.1 expected".format(version=version)
             )
         self.num_classes = num_classes
         if version == 1.0:
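
The restored call here fills a named placeholder, `{version}`, by keyword. Named placeholders are matched by name rather than position, which makes multi-argument templates harder to scramble than positional `{}` slots. A short sketch (the `allowed` name is mine, not from the diff):

```python
version = 0.9
# Keyword placeholders are matched by name, so argument order in
# .format() is irrelevant; "allowed" is an illustrative name.
msg = "Unsupported SqueezeNet version {version}: {allowed} expected".format(
    allowed="1.0 or 1.1", version=version
)
assert msg == "Unsupported SqueezeNet version 0.9: 1.0 or 1.1 expected"
```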
6 changes: 4 additions & 2 deletions test/quantization/core/test_docs.py
@@ -82,8 +82,10 @@ def get_correct_path(path_from_pytorch):
 
     # want to make sure we are actually getting some code,
     assert last_line_num - line_num_start > 3 or short_snippet, (
-        f"The code in {path_to_file} identified by {unique_identifier} seems suspiciously short:"
-        f"\n\n###code-start####\n{code}###code-end####"
+        "The code in {} identified by {} seems suspiciously short:"
+        "\n\n###code-start####\n{}###code-end####".format(
+            path_to_file, unique_identifier, code
+        )
     )
     return code
 
8 changes: 4 additions & 4 deletions test/quantization/core/test_quantized_op.py
@@ -806,11 +806,11 @@ def _test_binary_op_scalar_relu(self, A, b, binary_op_name, binary_op, quantized
                 C_relu, C_relu_hat.q_scale(), C_relu_hat.q_zero_point(), dtype)
 
             self.assertEqual(C_ref.dequantize(), C_hat.dequantize(),
-                             msg=f"{binary_op_name}_scalar results don't match: "
-                             f"{C_ref.dequantize()} vs {C_hat.dequantize()}")
+                             msg="{}_scalar results don't match: "
+                             "{} vs {}".format(binary_op_name, C_ref.dequantize(), C_hat.dequantize()))
             self.assertEqual(C_relu_ref.dequantize(), C_relu_hat.dequantize(),
-                             msg=f"{binary_op_name}_scalar_relu results don't match: "
-                             f"{C_relu_ref.dequantize()} vs {C_relu_hat.dequantize()}")
+                             msg="{}_scalar_relu results don't match: "
+                             "{} vs {}".format(binary_op_name, C_relu_ref.dequantize(), C_relu_hat.dequantize()))
 
     @unittest.skipIf(IS_MACOS, "skipping macos test")
     @given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
2 changes: 1 addition & 1 deletion test/test_autocast.py
@@ -67,7 +67,7 @@ def compare(first, second):
         if (output is not None) and (output_method is not None):
             self.assertTrue(type(output) == type(output_method))
             comparison = compare(output, output_method)
-            self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result")
+            self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
 
         # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
         # as the C++-side autocasting, and should be bitwise accurate.
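
The restored message uses the indexed placeholder `{0}` twice, so one argument covers both mentions; the f-string it replaces interpolated `op` at each site instead. A sketch:

```python
op = "addmm"  # stand-in for the op under test
# "{0}" can be reused, so a single argument fills both slots.
msg = "torch.{0} result did not match Tensor.{0} result".format(op)
assert msg == "torch.addmm result did not match Tensor.addmm result"
```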
6 changes: 3 additions & 3 deletions test/test_binary_ufuncs.py
@@ -168,9 +168,9 @@ def _numel(x):
             if _numel(l) <= 100 and _numel(r) <= 100:
                 msg = (
                     "Failed to produce expected results! Input lhs tensor was"
-                    f" {l}, rhs tensor was {r}, torch result is {actual}, and reference result is"
-                    f" {expected}."
-                )
+                    " {}, rhs tensor was {}, torch result is {}, and reference result is"
+                    " {}."
+                ).format(l, r, actual, expected)
             else:
                 msg = None
 
11 changes: 7 additions & 4 deletions test/test_cpp_extensions_jit.py
@@ -150,14 +150,17 @@ def _check_cuobjdump_output(expected_values, is_ptx=False):
             err = err.decode("ascii")
 
             if not p.returncode == 0 or not err == '':
-                raise AssertionError(f"Flags: {flags}\nReturncode: {p.returncode}\nStderr: {err}\n"
-                                     f"Output: {output} ")
+                raise AssertionError("Flags: {}\nReturncode: {}\nStderr: {}\n"
+                                     "Output: {} ".format(flags, p.returncode,
+                                                          err, output))
 
             actual_arches = sorted(re.findall(r'sm_\d\d', output))
             expected_arches = sorted(['sm_' + xx for xx in expected_values])
             self.assertEqual(actual_arches, expected_arches,
-                             msg=f"Flags: {flags}, Actual: {actual_arches}, Expected: {expected_arches}\n"
-                             f"Stderr: {err}\nOutput: {output}")
+                             msg="Flags: {}, Actual: {}, Expected: {}\n"
+                             "Stderr: {}\nOutput: {}".format(
+                                 flags, actual_arches, expected_arches,
+                                 err, output))
 
         temp_dir = tempfile.mkdtemp()
         old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
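
For context, the surrounding test shells out to cuobjdump and scrapes the `sm_XX` architecture tags from its output before comparing against the expected list. A standalone sketch of that parsing step, with stand-in output:

```python
import re

# Stand-in for cuobjdump output listing two embedded architectures.
output = "arch = sm_80\narch = sm_75\n"
actual_arches = sorted(re.findall(r'sm_\d\d', output))
expected_arches = sorted('sm_' + xx for xx in ['75', '80'])
assert actual_arches == expected_arches == ['sm_75', 'sm_80']
```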
4 changes: 2 additions & 2 deletions test/test_cuda.py
@@ -168,7 +168,7 @@ def test_out_of_memory_retry(self):
     def test_set_per_process_memory_fraction(self):
         # test invalid fraction value.
         with self.assertRaisesRegex(TypeError, "Invalid type"):
-            torch.cuda.set_per_process_memory_fraction(1)
+            torch.cuda.set_per_process_memory_fraction(int(1))
         with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
             torch.cuda.set_per_process_memory_fraction(-0.1)
         with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
@@ -1765,7 +1765,7 @@ def compare(first, second):
         if (output is not None) and (output_method is not None):
             self.assertTrue(type(output) == type(output_method))
             comparison = compare(output, output_method)
-            self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result")
+            self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
 
         # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
        # as the C++-side autocasting, and should be bitwise accurate.
6 changes: 3 additions & 3 deletions test/test_dispatch.py
@@ -152,10 +152,10 @@ def check_invariants(actual_provenance):
                 # NB: this finally test asserts that if a registrations fails,
                 # the dispatcher is left in the same state *that it was before*!
                 check_invariants(
-                    f"running ctors {ctor_order[:i]} and then failing to run ctor {op_ix} "
+                    "running ctors {} and then failing to run ctor {} "
                     "(did this failure leave the dispatcher in a wedged state? "
                     "it shouldn't!)"
-                )
+                    .format(ctor_order[:i], op_ix))
                 break
             last_ctor = i
         if expect_raises and len(active_ops) == len(ops):
@@ -165,7 +165,7 @@ def check_invariants(actual_provenance):
             self.assertTrue(
                 False,
                 "expected exception to be raised, but nothing was raised "
-                f"(after running ctors {ctor_order})")
+                "(after running ctors {})".format(ctor_order))
         # In the order specified by dtor_order, run deregistrations
         for i, op_ix in enumerate(dtor_order):
             # Trigger a destruction
18 changes: 9 additions & 9 deletions test/test_jit.py
@@ -5810,19 +5810,19 @@ def test_dispatch(op, expects, dtype, binary=False):
                 raise RuntimeError('Unknown dtype')
 
             if binary:
-                code = f'''
+                code = '''
                     graph(%3 : Tensor, %4 : Tensor):
-                        %2 : {dtype_str}(*, *) = aten::{op}(%3, %4)
-                        %1 : {dtype_str}(*, *) = aten::relu(%2)
+                        %2 : {dtype}(*, *) = aten::{op}(%3, %4)
+                        %1 : {dtype}(*, *) = aten::relu(%2)
                         return (%1)
-                '''
+                '''.format(op=op, dtype=dtype_str)
             else:
-                code = f'''
+                code = '''
                     graph(%3 : Tensor):
-                        %2 : {dtype_str}(*, *) = aten::{op}(%3)
-                        %1 : {dtype_str}(*, *) = aten::relu(%2)
+                        %2 : {dtype}(*, *) = aten::{op}(%3)
+                        %1 : {dtype}(*, *) = aten::relu(%2)
                         return (%1)
-                '''
+                '''.format(op=op, dtype=dtype_str)
 
             graph = parse_ir(code)
             inputs = (2 if binary else 1) * [torch.rand(26, 2048, dtype=dtype)]
@@ -14936,7 +14936,7 @@ def jit_multihead_attn_forward(query, # type: Tensor
         value = torch.rand((src_l, bsz, embed_size))
 
         mask = (torch.triu(torch.ones(src_l, src_l)) == 1).transpose(0, 1)
-        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0).to(torch.get_default_dtype())
+        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to(torch.get_default_dtype())
 
         jit_out = jit_multihead_attn_forward(query, key, value,
                                              embed_size, nhead,
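
Hunks like the first one above are the riskiest part of this revert: moving from `f'''...{dtype_str}...'''` back to `.format()` meant renaming the placeholder to `{dtype}` and binding it via `dtype=dtype_str`. If a template name and its keyword drift apart, the call raises `KeyError` at runtime, which is plausibly the kind of accidental string modification the revert message cites. A sketch of the restored pattern with stand-in values:

```python
op, dtype_str = "relu", "Float"
# Placeholder names in the template must match the .format() keywords;
# the local variable dtype_str is bound to the template name `dtype`.
code = '''
graph(%3 : Tensor):
  %2 : {dtype}(*, *) = aten::{op}(%3)
  %1 : {dtype}(*, *) = aten::relu(%2)
  return (%1)
'''.format(op=op, dtype=dtype_str)
assert "Float(*, *) = aten::relu" in code
```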
2 changes: 1 addition & 1 deletion test/test_mobile_optimizer.py
@@ -582,7 +582,7 @@ def dummy_method_ref_attr_pqr(self):
         self.assertTrue(
             cloned.qualified_name.startswith('__torch__.'),
             ("Expected the cloned module's name to start with the string "
-             f"'__torch__.', but got: {cloned.qualified_name}"),
+             "'__torch__.', but got: {}").format(cloned.qualified_name),
         )
 
 
4 changes: 2 additions & 2 deletions test/test_mps.py
@@ -9436,8 +9436,8 @@ def get_grid(device='cpu', data=None):
                     output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                            align_corners=align_corners)
                     self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
-                                     msg=f"groundtruth comparison failed for mode={mode}, "
-                                     f"padding_mode={padding_mode}")
+                                     msg="groundtruth comparison failed for mode={}, "
+                                     "padding_mode={}".format(mode, padding_mode))
 
 class TestAdvancedIndexing(TestCaseMPS):
     supported_dtypes = [torch.float32, torch.float16, torch.int64, torch.int32, torch.int16, torch.uint8]
8 changes: 4 additions & 4 deletions test/test_nn.py
@@ -5959,8 +5959,8 @@ def get_grid(device='cpu', data=None):
                 output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                        align_corners=align_corners)
                 self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
-                                 msg=f"groundtruth comparison failed for mode={mode}, "
-                                 f"padding_mode={padding_mode}")
+                                 msg="groundtruth comparison failed for mode={}, "
+                                 "padding_mode={}".format(mode, padding_mode))
 
                 # See NOTE [ grid_sample CPU fallback ]
                 output = torch._grid_sampler_2d_cpu_fallback(
@@ -6047,8 +6047,8 @@ def get_grid(device='cpu', data=None):
                     F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                   align_corners=align_corners).sum().backward()
                 self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,
-                                 msg=f"gradient groundtruth comparison failed for mode={mode}, "
-                                 f"padding_mode={padding_mode}, input_requires_grad={input_requires_grad}")
+                                 msg="gradient groundtruth comparison failed for mode={}, "
+                                 "padding_mode={}, input_requires_grad={}".format(mode, padding_mode, input_requires_grad))
                 grid.grad.zero_()
 
                 # See NOTE [ grid_sample CPU fallback ]
6 changes: 4 additions & 2 deletions test/test_ops.py
@@ -1098,8 +1098,10 @@ def _test_consistency_helper(samples, variants):
                         RuntimeError,
                         msg=(
                             "inplace variant either incorrectly allowed "
-                            f"resizing or you have marked the sample {sample.summary()}"
-                            " incorrectly with `broadcasts_self=True"
+                            "resizing or you have marked the sample {}"
+                            " incorrectly with `broadcasts_self=True".format(
+                                sample.summary()
+                            )
                         ),
                     ):
                         variant_forward = variant(
4 changes: 2 additions & 2 deletions test/test_reductions.py
@@ -3503,8 +3503,8 @@ def to_numpy(input):
             expected = np.asarray(expected)  # transform numpy scalars to numpy.ndarray instances
 
             msg = ("Failed to produce expected results! Input tensor was"
-                   f" {t}, torch result is {actual}, and reference result is"
-                   f" {expected}.") if t.numel() < 10 else None
+                   " {}, torch result is {}, and reference result is"
+                   " {}.").format(t, actual, expected) if t.numel() < 10 else None
 
             self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)
 
10 changes: 6 additions & 4 deletions test/test_tensor_creation_ops.py
@@ -451,8 +451,9 @@ def dtype_name(dtype):
             other_dtype = torch.float64 if dtype == torch.float32 else torch.float32
             a = torch.tensor([1, 2], device=device, dtype=dtype)
             b = torch.tensor([3, 4], device=device, dtype=other_dtype)
-            error = f"Expected object of scalar type {dtype_name(dtype)} but got scalar type " \
-                    f"{dtype_name(other_dtype)} for second argument"
+            error = "Expected object of scalar type {} but got scalar type " \
+                    "{} for second argument".format(dtype_name(dtype),
+                                                    dtype_name(other_dtype))
             with self.assertRaisesRegex(RuntimeError, error):
                 op(a, b)
 
@@ -471,8 +472,9 @@ def complex_dtype_name(dtype):
             b = torch.tensor([3, 4], device=device, dtype=dtype)
             out = torch.zeros(2, device=device, dtype=dtype)
             expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
-            error = f"Expected object of scalar type {complex_dtype_name(expected_dtype)} but got scalar type " \
-                    f"{dtype_name(dtype)} for argument 'out'"
+            error = "Expected object of scalar type {} but got scalar type " \
+                    "{} for argument 'out'".format(
+                        complex_dtype_name(expected_dtype), dtype_name(dtype))
             with self.assertRaisesRegex(RuntimeError, error):
                 op(a, b, out=out)
 
