[BE] Enable flake8-comprehension rule C417 (pytorch#97880)
Skylion007 authored and pytorchmergebot committed Mar 30, 2023
1 parent 1d08b5b commit 47dca20
Showing 36 changed files with 108 additions and 124 deletions.
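For context: C417 is a flake8-comprehensions check that flags map() calls whose first argument is a lambda; these read better (and skip an extra function call per element) when written as a comprehension or generator expression. A minimal sketch of the rewrite this commit applies throughout; the names below are illustrative, not taken from the diff:

    # Flagged by C417: map() over a lambda.
    words = ["  alpha", "beta  ", " gamma "]
    stripped = list(map(lambda s: s.strip(), words))

    # Preferred: a list comprehension when a list is needed...
    stripped = [s.strip() for s in words]

    # ...or a generator expression when the result is consumed lazily.
    total = sum(len(s) for s in stripped)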
2 changes: 1 addition & 1 deletion .flake8
@@ -14,7 +14,7 @@ ignore =
# these ignores are from flake8-bugbear; please fix!
B007,B008,B017,B019,B020,B023,B024,B026,B027,B028,B903,B904,B905,B906,B907
# these ignores are from flake8-comprehensions; please fix!
-C407,C417
+C407
# these ignores are from flake8-logging-format; please fix!
G001,G002,G003,G004,G100,G101,G200,G201,G202
per-file-ignores =
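With C417 dropped from the ignore list above, the rule is enforced by the repository's flake8 run. To reproduce a single check locally, something like flake8 --select=C417 <path> should work, assuming the flake8-comprehensions plugin that provides the C4xx codes is installed.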
12 changes: 6 additions & 6 deletions .github/scripts/run_torchbench.py
@@ -119,7 +119,7 @@ def is_valid_ub_dir(ub_path: str) -> bool:
[os.path.join(ub_path, ubdir) for ubdir in os.listdir(ub_path)],
)
)
-valid_ubs = list(map(lambda x: os.path.basename(x), ubs))
+valid_ubs = [os.path.basename(x) for x in ubs]
return valid_ubs


@@ -130,13 +130,13 @@ def extract_models_from_pr(
userbenchmark_list = []
pr_list = []
with open(prbody_file, "r") as pf:
-lines = map(lambda x: x.strip(), pf.read().splitlines())
+lines = (x.strip() for x in pf.read().splitlines())
magic_lines = list(filter(lambda x: x.startswith(MAGIC_PREFIX), lines))
if magic_lines:
# Only the first magic line will be recognized.
-pr_list = list(
-    map(lambda x: x.strip(), magic_lines[0][len(MAGIC_PREFIX) :].split(","))
-)
+pr_list = [
+    x.strip() for x in magic_lines[0][len(MAGIC_PREFIX) :].split(",")
+]
valid_models = get_valid_models(torchbench_path)
valid_ubs = get_valid_userbenchmarks(torchbench_path)
for pr_bm in pr_list:
@@ -158,7 +158,7 @@ def extract_models_from_pr(
def find_torchbench_branch(prbody_file: str) -> str:
branch_name: str = ""
with open(prbody_file, "r") as pf:
-lines = map(lambda x: x.strip(), pf.read().splitlines())
+lines = (x.strip() for x in pf.read().splitlines())
magic_lines = list(
filter(lambda x: x.startswith(MAGIC_TORCHBENCH_PREFIX), lines)
)
11 changes: 4 additions & 7 deletions benchmarks/dynamo/common.py
@@ -423,7 +423,7 @@ def tensor_is_on_xla(tensors):
if not isinstance(tensors, (tuple, list)):
tensors = [tensors]
tensors = [x for x in tensors if isinstance(x, torch.Tensor)]
-return any(map(lambda x: x.device.type == "xla", tensors))
+return any((x.device.type == "xla" for x in tensors))


def timed(
@@ -757,12 +757,9 @@ def speedup_experiment_ds(args, model_iter_fn, model, example_inputs):
shapes = [x[0].shape for x in example_inputs]
shape_keys = sorted(set(shapes))
shape_speedups = {
-shape: list(
-    map(
-        lambda it: it[1],
-        filter(lambda it: it[0] == shape, zip(shapes, speedups)),
-    )
-)
+shape: [
+    it[1] for it in filter(lambda it: it[0] == shape, zip(shapes, speedups))
+]
for shape in shape_keys
}
output_str = (
2 changes: 1 addition & 1 deletion benchmarks/dynamo/runner.py
@@ -351,7 +351,7 @@ def get_skip_tests(suite):
if hasattr(module, "SKIP_TRAIN"):
skip_tests.update(module.SKIP_TRAIN)

-skip_tests = map(lambda name: f"-x {name}", skip_tests)
+skip_tests = (f"-x {name}" for name in skip_tests)
skip_str = " ".join(skip_tests)
return skip_str

20 changes: 10 additions & 10 deletions benchmarks/sparse/dlmc/utils.py
@@ -23,7 +23,7 @@ def sparse_grad_output(a, b):
def read_matrix_params(path):
with open(path, 'r') as file:
line = file.readline()
-nrows, ncols, nnz = map(lambda el: int(el), line.split(', '))
+nrows, ncols, nnz = (int(el) for el in line.split(', '))
return (nrows, ncols), nnz


@@ -39,9 +39,9 @@ def csr_to_coo(indices, indptr, shape):

def load_sparse_matrix(path, device):
with open(path, 'r') as file:
-nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
-index_pointers = map(lambda el: int(el), file.readline().split())
-indices = map(lambda el: int(el), file.readline().split())
+nrows, ncols, nnz = (int(el) for el in file.readline().split(', '))
+index_pointers = (int(el) for el in file.readline().split())
+indices = (int(el) for el in file.readline().split())

index_pointers = list(index_pointers)
indices = list(indices)
@@ -52,17 +52,17 @@ def load_sparse_matrix(path, device):

def gen_vector(path, device):
with open(path, 'r') as file:
-nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
-index_pointers = map(lambda el: int(el), file.readline().split())
-indices = map(lambda el: int(el), file.readline().split())
+nrows, ncols, nnz = (int(el) for el in file.readline().split(', '))
+index_pointers = (int(el) for el in file.readline().split())
+indices = (int(el) for el in file.readline().split())
return torch.randn(nrows, dtype=torch.double, device=device)


def gen_matrix(path, device):
with open(path, 'r') as file:
-nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
-index_pointers = map(lambda el: int(el), file.readline().split())
-indices = map(lambda el: int(el), file.readline().split())
+nrows, ncols, nnz = (int(el) for el in file.readline().split(', '))
+index_pointers = (int(el) for el in file.readline().split())
+indices = (int(el) for el in file.readline().split())
return torch.randn(nrows, ncols, dtype=torch.double, device=device)


6 changes: 3 additions & 3 deletions test/dynamo/test_repros.py
@@ -499,9 +499,9 @@ def forward(self, x_spt, y_spt, x_qry, y_qry):
logits = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
-fast_weights = list(
-    map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters()))
-)
+fast_weights = [
+    p[1] - self.update_lr * p[0] for p in zip(grad, net.parameters())
+]

# this is the loss and accuracy before first update
with torch.no_grad():
2 changes: 1 addition & 1 deletion test/functorch/test_vmap.py
@@ -3427,7 +3427,7 @@ def test():
check_shape_only = op.name in ('empty_like', 'new_empty')
for sample_input in sample_inputs_itr:
args = (sample_input.input,) + sample_input.args
-if not any(map(lambda arg: isinstance(arg, torch.Tensor), args)):
+if not any((isinstance(arg, torch.Tensor) for arg in args)):
# Atleast one tensor required for vmap.
continue
kwargs = sample_input.kwargs
2 changes: 1 addition & 1 deletion test/quantization/fx/test_model_report_fx.py
@@ -1346,7 +1346,7 @@ def test_input_weight_equalization_determine_points(self):
# assert that each of the desired modules have the observers inserted
for fqn, module in prepared_for_callibrate_model.named_modules():
# check if module is a supported module
-is_in_include_list = sum(list(map(lambda x: isinstance(module, x), mods_to_check))) > 0
+is_in_include_list = sum([isinstance(module, x) for x in mods_to_check]) > 0

if is_in_include_list:
# make sure it has the observer attribute
6 changes: 2 additions & 4 deletions test/test_autograd.py
@@ -2398,10 +2398,8 @@ def backward(ctx, grad_output):
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()

-has_deprecated = map(lambda warn:
-                     'deprecated' in str(warn) and
-                     'saved_variables' in str(warn),
-                     warns)
+has_deprecated = ('deprecated' in str(warn) and
+                  'saved_variables' in str(warn) for warn in warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)

2 changes: 1 addition & 1 deletion test/test_binary_ufuncs.py
@@ -498,7 +498,7 @@ def test_type_promotion(self, device, op):
)

def _supported(dtypes):
-return all(map(lambda x: x in supported_dtypes, dtypes))
+return all((x in supported_dtypes for x in dtypes))

# int x int type promotion
if _supported((torch.int16, torch.int32, torch.int64)):
2 changes: 1 addition & 1 deletion test/test_linalg.py
@@ -6793,7 +6793,7 @@ def run_test(matsize, batchdims, mat_chars):
for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
expected_value = []
actual_value = fn(full_tensor)
-for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):
+for full_idx in itertools.product(*(list(range(x)) for x in batchdims)):
expected_value.append(fn(full_tensor[full_idx]))

if fn == torch.slogdet or fn == torch.linalg.slogdet:
10 changes: 5 additions & 5 deletions test/test_ops.py
@@ -148,7 +148,7 @@ def test_multiple_devices(self, devices, dtype, op):
if isinstance(result, torch.Tensor):
self.assertTrue(result.device == cuda_device)
elif is_iterable_of_tensors(result):
-self.assertTrue(all(map(lambda t: t.device == cuda_device, result)))
+self.assertTrue(all((t.device == cuda_device for t in result)))
else:
self.skipTest(
"Skipped! Only supports single tensor or iterable of tensor outputs."
@@ -711,7 +711,7 @@ def _extract_strides(out):
return (out.stride(),)

# assumes (see above) that out is an iterable of tensors
-return tuple(map(lambda t: t.stride(), out))
+return tuple((t.stride() for t in out))

# Extracts data pointers from a tensor or iterable of tensors into a tuple
# NOTE: only extracts on the CPU and CUDA device types since some
@@ -724,7 +724,7 @@ def _extract_data_ptrs(out):
return (out.data_ptr(),)

# assumes (see above) that out is an iterable of tensors
-return tuple(map(lambda t: t.data_ptr(), out))
+return tuple((t.data_ptr() for t in out))

@suppress_warnings
def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
@@ -831,7 +831,7 @@ def _extract_strides(out):
return (out.stride(),)

# assumes (see above) that out is an iterable of tensors
-return tuple(map(lambda t: t.stride(), out))
+return tuple((t.stride() for t in out))

# Extracts data pointers from a tensor or iterable of tensors into a tuple
# NOTE: only extracts on the CPU and CUDA device types since some
@@ -844,7 +844,7 @@ def _extract_data_ptrs(out):
return (out.data_ptr(),)

# assumes (see above) that out is an iterable of tensors
-return tuple(map(lambda t: t.data_ptr(), out))
+return tuple((t.data_ptr() for t in out))

def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
out = _apply_out_transform(transform, expected)
4 changes: 2 additions & 2 deletions test/test_sparse_csr.py
@@ -1584,7 +1584,7 @@ def to_sp_block_compressed(t):
return t.cpu().resolve_conj().numpy()

res = _npref_block_addmm_addmv(
-*map(lambda t: prep_input(t), (c, a, b)),
+*(prep_input(t) for t in (c, a, b)),
alpha,
beta
)
@@ -2406,7 +2406,7 @@ def test_sampled_addmm_autograd(self, device, dtype):
output.backward(covector)

# Compute dense result and compare with sparse result
-c1, a1, b1 = map(lambda x: x.detach().to_dense().requires_grad_(True), [c, a, b])
+c1, a1, b1 = (x.detach().to_dense().requires_grad_(True) for x in [c, a, b])
dense_output = sample.kwargs['alpha'] * (a1 @ b1) * torch.ones_like(c).to_dense() + sample.kwargs['beta'] * c1
self.assertEqual(output, dense_output)
dense_covector = covector.to_dense()
6 changes: 2 additions & 4 deletions test/test_type_promotion.py
@@ -1138,8 +1138,7 @@ def expected_type(inp, max, min):
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
-inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
-                (inp, min_v, max_v)))
+inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, min_v, max_v)]
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
@@ -1151,8 +1150,7 @@ def expected_type(inp, val):
exp_type = expected_type(inp, val)
if exp_type != torch.bool:
actual = torch.clamp_min(inp, val)
-inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
-                (inp, val)))
+inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, val)]
expected = torch.clamp_min(inps[0], inps[1])
self.assertEqual(actual.dtype, exp_type)
self.assertEqual(actual, expected)
2 changes: 1 addition & 1 deletion test/test_typing.py
@@ -184,7 +184,7 @@ def _parse_reveals(file: IO[str]) -> List[str]:
string = file.read().replace("*", "")

# Grab all `# E:`-based comments
-comments_array = list(map(lambda str: str.partition(" # E: ")[2], string.split("\n")))
+comments_array = [str.partition(" # E: ")[2] for str in string.split("\n")]
comments = "/n".join(comments_array)

# Only search for the `{*}` pattern within comments,
4 changes: 2 additions & 2 deletions tools/autograd/gen_autograd_functions.py
@@ -443,8 +443,8 @@ def gen_autograd_functions_lib(
# get a 1D list of diffinfos, we do not need them to be per FunctionSchema/DispatchKey here
# infos with the diff dispatchkeys but the same name will still be in the same shard.
infos = get_infos_with_derivatives_list(differentiability_infos)
-declarations = list(map(lambda f: process_function(f, FUNCTION_DECLARATION), infos))
-definitions = list(map(lambda f: process_function(f, FUNCTION_DEFINITION), infos))
+declarations = [process_function(f, FUNCTION_DECLARATION) for f in infos]
+definitions = [process_function(f, FUNCTION_DEFINITION) for f in infos]

file_basename = "Functions"
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
16 changes: 8 additions & 8 deletions tools/autograd/gen_python_functions.py
@@ -159,9 +159,9 @@
"_nested_view_from_buffer_copy_out",
]

-SKIP_PYTHON_BINDINGS = list(
-    map(lambda pattern: re.compile(rf"^{pattern}$"), _SKIP_PYTHON_BINDINGS)
-)
+SKIP_PYTHON_BINDINGS = [
+    re.compile(rf"^{pattern}$") for pattern in _SKIP_PYTHON_BINDINGS
+]

# These function signatures are not exposed to Python. Note that this signature
# list does not support regex.
@@ -864,7 +864,7 @@ def method_impl(
name=name,
pycname=pycname,
method_header=method_header,
-max_args=max(map(lambda o: o.signature.arguments_count(), overloads)),
+max_args=max((o.signature.arguments_count() for o in overloads)),
signatures=signatures,
traceable=traceable,
check_has_torch_function=gen_has_torch_function_check(
@@ -1216,7 +1216,7 @@ def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool:
del larger_than[j]
sorted_ids.append(j)

-return list(map(lambda x: grouped_overloads[x], sorted_ids))
+return [grouped_overloads[x] for x in sorted_ids]


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
@@ -1250,9 +1250,9 @@ def go(f: NativeFunction) -> str:
# dispatch lambda signature
name = cpp.name(f.func)
lambda_formals = ", ".join(
-    map(
-        lambda a: f"{a.type_str} {a.name}",
-        dispatch_lambda_args(ps, f, symint=symint),
+    (
+        f"{a.type_str} {a.name}"
+        for a in dispatch_lambda_args(ps, f, symint=symint)
)
)
lambda_return = dispatch_lambda_return_str(f)
7 changes: 1 addition & 6 deletions tools/code_analyzer/gen_oplist.py
@@ -149,12 +149,7 @@ def main(argv: List[Any]) -> None:
model_dict = yaml.safe_load(model_file)
model_dicts.append(model_dict)

-selective_builders = list(
-    map(
-        lambda m: SelectiveBuilder.from_yaml_dict(m),
-        model_dicts,
-    )
-)
+selective_builders = [SelectiveBuilder.from_yaml_dict(m) for m in model_dicts]

# While we have the model_dicts generate the supported mobile models api
gen_supported_mobile_models(model_dicts, options.output_dir)
4 changes: 1 addition & 3 deletions tools/lite_interpreter/gen_selected_mobile_ops_header.py
@@ -67,9 +67,7 @@ def get_selected_kernel_dtypes_code(
):
body_parts = []
for kernel_tag, dtypes in selective_builder.kernel_metadata.items():
-conditions = list(
-    map(lambda x: "scalar_type == at::ScalarType::" + x, dtypes)
-)
+conditions = ["scalar_type == at::ScalarType::" + x for x in dtypes]
body_parts.append(
if_condition_template.substitute(
kernel_tag_name=kernel_tag,
8 changes: 4 additions & 4 deletions torch/_dynamo/variables/builder.py
@@ -316,11 +316,11 @@ def _wrap(self, value):
elif istype(
value, (dict, collections.defaultdict, collections.OrderedDict)
) and all(
-    map(
-        lambda k: ConstantVariable.is_literal(k)
+    (
+        ConstantVariable.is_literal(k)
        or self.tensor_can_be_dict_key(k)
-        or isinstance(k, enum.Enum),
-        value.keys(),
+        or isinstance(k, enum.Enum)
+        for k in value.keys()
)
):
if not value and self.get_source().is_nn_module():
4 changes: 2 additions & 2 deletions torch/_functorch/aot_autograd.py
@@ -2348,7 +2348,7 @@ def forward(ctx, *deduped_flat_tensor_args):
[isinstance(x, torch.Tensor) for x in tensors_saved_for_backwards]
)
# See Note [Detaching saved tensors in AOTAutograd]
-ctx.save_for_backward(*map(lambda x: x.detach() if x._is_view() else x, tensors_saved_for_backwards))
+ctx.save_for_backward(*(x.detach() if x._is_view() else x for x in tensors_saved_for_backwards))
symint_outs = fw_outs[-num_symints_saved_for_bw:]
assert all(
[
@@ -2360,7 +2360,7 @@ def forward(ctx, *deduped_flat_tensor_args):
else:
tensors_saved_for_backwards = fw_outs[num_forward_returns:]
# See Note [Detaching saved tensors in AOTAutograd]
-ctx.save_for_backward(*map(lambda x: x.detach() if x._is_view() else x, tensors_saved_for_backwards))
+ctx.save_for_backward(*(x.detach() if x._is_view() else x for x in tensors_saved_for_backwards))
ctx.symints = []

raw_returns = fw_outs[0:num_forward_returns]
(Diff truncated; the remaining 15 of 36 changed files are not shown.)
