[BE] f-stringify torch/ and scripts (pytorch#105538)
This PR is a follow-up to the pyupgrade series: it converts more strings to f-strings using `flynt`.

- https://docs.python.org/3/reference/lexical_analysis.html#f-strings
- https://pypi.org/project/flynt/

Command used:

```bash
flynt torch/ -ll 120
flynt scripts/ -ll 120
flynt tools/ -ll 120
```

`collect_env.py` was excluded (likely because it must keep running on very old Python versions that predate f-string support).
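For reference, here is a minimal sketch of the transformation flynt applies (illustrative only; the names and values below are hypothetical, not taken from this diff). Calls to `str.format()` are rewritten as equivalent f-strings whenever the result fits within the line-length limit set by `-ll 120`:

```python
name, count = "squeezenet", 3  # hypothetical values

# Before: explicit str.format() call
print("Exported {} with {} test data sets".format(name, count))

# After: the equivalent f-string flynt emits
print(f"Exported {name} with {count} test data sets")
```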

Pull Request resolved: pytorch#105538
Approved by: https://github.com/ezyang, https://github.com/malfet
justinchuby authored and pytorchmergebot committed Jul 21, 2023
1 parent 4c73016 commit 4cc1745
Showing 139 changed files with 350 additions and 670 deletions.
3 changes: 1 addition & 2 deletions benchmarks/profiler_benchmark/profiler_bench.py
```diff
@@ -46,8 +46,7 @@ def parallel_task(x):
 print("No CUDA available")
 sys.exit()

-print("Payload: {}, {} iterations; timer min. runtime = {}\n".format(
-    args.workload, args.internal_iter, args.timer_min_run_time))
+print(f"Payload: {args.workload}, {args.internal_iter} iterations; timer min. runtime = {args.timer_min_run_time}\n")
 INTERNAL_ITER = args.internal_iter

 for profiling_enabled in [False, True]:
```
2 changes: 1 addition & 1 deletion functorch/examples/dp_cifar10/cifar10_transforms.py
```diff
@@ -115,7 +115,7 @@ def compute_loss_and_output(weights, image, target):
 # is not to be differentiated. `f'` returns the gradient w.r.t. the loss,
 # the loss, and the auxiliary value.
 grads_loss_output = grad_and_value(compute_loss_and_output, has_aux=True)
-weights = {k: v for k, v in model.named_parameters()}
+weights = dict(model.named_parameters())

 # detaching weights since we don't need to track gradients outside of transforms
 # and this is more performant
```
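As an aside on the hunk above: `named_parameters()` yields `(name, tensor)` pairs, and `dict()` accepts any iterable of pairs, so the comprehension was redundant. A standalone sketch (the `pairs` data is made up for illustration):

```python
pairs = [("weight", 1.0), ("bias", 0.5)]  # stand-in for model.named_parameters()

# dict() consumes an iterable of (key, value) pairs directly, so the
# comprehension form builds exactly the same dictionary.
assert {k: v for k, v in pairs} == dict(pairs)
```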
4 changes: 2 additions & 2 deletions scripts/get_python_cmake_flags.py
```diff
@@ -19,8 +19,8 @@
 import sys

 flags = [
-    '-DPYTHON_EXECUTABLE:FILEPATH={}'.format(sys.executable),
-    '-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_path('include')),
+    f'-DPYTHON_EXECUTABLE:FILEPATH={sys.executable}',
+    f"-DPYTHON_INCLUDE_DIR={sysconfig.get_path('include')}",
 ]

 print(' '.join(flags), end='')
```
10 changes: 5 additions & 5 deletions scripts/model_zoo/update-caffe2-models.py
```diff
@@ -30,7 +30,7 @@ def _download(self, model):
 # (Sep 17, 2017)
 downloadFromURLToFile(url, dest)
 except Exception as e:
-print("Abort: {reason}".format(reason=e))
+print(f"Abort: {e}")
 print("Cleaning up...")
 deleteDirectory(model_dir)
 exit(1)
@@ -53,20 +53,20 @@ def _prepare_model_data(self, model):
 if os.path.exists(model_dir):
 return
 os.makedirs(model_dir)
-url = 'https://s3.amazonaws.com/download.onnx/models/{}.tar.gz'.format(model)
+url = f'https://s3.amazonaws.com/download.onnx/models/{model}.tar.gz'

 # On Windows, NamedTemporaryFile cannot be opened for a
 # second time
 download_file = tempfile.NamedTemporaryFile(delete=False)
 try:
 download_file.close()
-print('Start downloading model {} from {}'.format(model, url))
+print(f'Start downloading model {model} from {url}')
 urlretrieve(url, download_file.name)
 print('Done')
 with tarfile.open(download_file.name) as t:
 t.extractall(models_dir)
 except Exception as e:
-print('Failed to prepare data for model {}: {}'.format(model, e))
+print(f'Failed to prepare data for model {model}: {e}')
 raise
 finally:
 os.remove(download_file.name)
@@ -133,7 +133,7 @@ def upload_models():
 's3',
 'cp',
 model + '.tar.gz',
-"s3://download.onnx/models/{}.tar.gz".format(model),
+f"s3://download.onnx/models/{model}.tar.gz",
 '--acl', 'public-read'
 ], cwd=onnx_models_dir)
```
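The `NamedTemporaryFile` comment in the hunk above reflects a portability constraint worth spelling out: on Windows a `NamedTemporaryFile` cannot be opened a second time while it is still open, so the script creates it with `delete=False`, closes it, downloads into it by name, and removes it manually. A minimal sketch of that pattern (the URL is hypothetical):

```python
import os
import tempfile
from urllib.request import urlretrieve

tmp = tempfile.NamedTemporaryFile(delete=False)
try:
    tmp.close()  # close first so Windows allows reopening by name
    urlretrieve("https://example.com/model.tar.gz", tmp.name)
    # ... read or extract tmp.name here ...
finally:
    os.remove(tmp.name)  # delete=False means we clean up ourselves
```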
48 changes: 24 additions & 24 deletions scripts/model_zoo/update-models-from-caffe2.py
```diff
@@ -51,20 +51,20 @@ def upload_onnx_model(model_name, zoo_dir, backup=False, only_local=False):
 model_dir = os.path.join(zoo_dir, model_name)
 suffix = '-backup' if backup else ''
 if backup:
-print('Backing up the previous version of ONNX model {}...'.format(model_name))
-rel_file_name = '{}{}.tar.gz'.format(model_name, suffix)
+print(f'Backing up the previous version of ONNX model {model_name}...')
+rel_file_name = f'{model_name}{suffix}.tar.gz'
 abs_file_name = os.path.join(zoo_dir, rel_file_name)
-print('Compressing {} model to {}'.format(model_name, abs_file_name))
+print(f'Compressing {model_name} model to {abs_file_name}')
 with tarfile.open(abs_file_name, 'w:gz') as f:
 f.add(model_dir, arcname=model_name)
 file_size = os.stat(abs_file_name).st_size
-print('Uploading {} ({} MB) to s3 cloud...'.format(abs_file_name, float(file_size) / 1024 / 1024))
+print(f'Uploading {abs_file_name} ({float(file_size) / 1024 / 1024} MB) to s3 cloud...')
 client = boto3.client('s3', 'us-east-1')
 transfer = boto3.s3.transfer.S3Transfer(client)
-transfer.upload_file(abs_file_name, 'download.onnx', 'models/latest/{}'.format(rel_file_name),
+transfer.upload_file(abs_file_name, 'download.onnx', f'models/latest/{rel_file_name}',
 extra_args={'ACL': 'public-read'})

-print('Successfully uploaded {} to s3!'.format(rel_file_name))
+print(f'Successfully uploaded {rel_file_name} to s3!')


 def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
@@ -75,7 +75,7 @@ def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
 return
 else:
 shutil.rmtree(model_dir)
-url = 'https://s3.amazonaws.com/download.onnx/models/latest/{}.tar.gz'.format(model_name)
+url = f'https://s3.amazonaws.com/download.onnx/models/latest/{model_name}.tar.gz'

 download_file = tempfile.NamedTemporaryFile(delete=False)
 try:
@@ -84,10 +84,10 @@ def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
 model_name, url, download_file.name))
 urlretrieve(url, download_file.name)
 with tarfile.open(download_file.name) as t:
-print('Extracting ONNX model {} to {} ...\n'.format(model_name, zoo_dir))
+print(f'Extracting ONNX model {model_name} to {zoo_dir} ...\n')
 t.extractall(zoo_dir)
 except Exception as e:
-print('Failed to download/backup data for ONNX model {}: {}'.format(model_name, e))
+print(f'Failed to download/backup data for ONNX model {model_name}: {e}')
 if not os.path.exists(model_dir):
 os.makedirs(model_dir)
 finally:
@@ -119,7 +119,7 @@ def download_caffe2_model(model_name, zoo_dir, use_cache=True):
 # (Sep 17, 2017)
 downloadFromURLToFile(url, dest)
 except Exception as e:
-print("Abort: {reason}".format(reason=e))
+print(f"Abort: {e}")
 print("Cleaning up...")
 deleteDirectory(model_dir)
 raise
@@ -131,14 +131,14 @@ def caffe2_to_onnx(caffe2_model_name, caffe2_model_dir):

 with open(os.path.join(caffe2_model_dir, 'init_net.pb'), 'rb') as f:
 caffe2_init_proto.ParseFromString(f.read())
-caffe2_init_proto.name = '{}_init'.format(caffe2_model_name)
+caffe2_init_proto.name = f'{caffe2_model_name}_init'
 with open(os.path.join(caffe2_model_dir, 'predict_net.pb'), 'rb') as f:
 caffe2_predict_proto.ParseFromString(f.read())
 caffe2_predict_proto.name = caffe2_model_name
 with open(os.path.join(caffe2_model_dir, 'value_info.json'), 'rb') as f:
 value_info = json.loads(f.read())

-print('Converting Caffe2 model {} in {} to ONNX format'.format(caffe2_model_name, caffe2_model_dir))
+print(f'Converting Caffe2 model {caffe2_model_name} in {caffe2_model_dir} to ONNX format')
 onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model(
 init_net=caffe2_init_proto,
 predict_net=caffe2_predict_proto,
@@ -245,7 +245,7 @@ def onnx_verify(onnx_model, inputs, ref_outputs):
 for onnx_model_name in model_mapping:
 c2_model_name = model_mapping[onnx_model_name]

-print('####### Processing ONNX model {} ({} in Caffe2) #######'.format(onnx_model_name, c2_model_name))
+print(f'####### Processing ONNX model {onnx_model_name} ({c2_model_name} in Caffe2) #######')
 download_caffe2_model(c2_model_name, caffe2_zoo_dir, use_cache=use_cache)
 download_onnx_model(onnx_model_name, onnx_zoo_dir, use_cache=use_cache, only_local=only_local)

@@ -261,19 +261,19 @@ def onnx_verify(onnx_model, inputs, ref_outputs):

 onnx_model, c2_init_net, c2_predict_net = caffe2_to_onnx(c2_model_name, os.path.join(caffe2_zoo_dir, c2_model_name))

-print('Deleteing old ONNX {} model...'.format(onnx_model_name))
+print(f'Deleteing old ONNX {onnx_model_name} model...')
 for f in glob.glob(os.path.join(onnx_model_dir, 'model*'.format(onnx_model_name))):
 os.remove(f)

-print('Serializing generated ONNX {} model ...'.format(onnx_model_name))
+print(f'Serializing generated ONNX {onnx_model_name} model ...')
 with open(os.path.join(onnx_model_dir, 'model.onnx'), 'wb') as file:
 file.write(onnx_model.SerializeToString())

-print('Verifying model {} with ONNX model checker...'.format(onnx_model_name))
+print(f'Verifying model {onnx_model_name} with ONNX model checker...')
 onnx.checker.check_model(onnx_model)

 total_existing_data_set = 0
-print('Verifying model {} with existing test data...'.format(onnx_model_name))
+print(f'Verifying model {onnx_model_name} with existing test data...')
 for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')):
 test_data = np.load(f, encoding='bytes')
 inputs = list(test_data['inputs'])
@@ -285,41 +285,41 @@ def onnx_verify(onnx_model, inputs, ref_outputs):
 inputs_num = len(glob.glob(os.path.join(f, 'input_*.pb')))
 for i in range(inputs_num):
 tensor = onnx.TensorProto()
-with open(os.path.join(f, 'input_{}.pb'.format(i)), 'rb') as pf:
+with open(os.path.join(f, f'input_{i}.pb'), 'rb') as pf:
 tensor.ParseFromString(pf.read())
 inputs.append(numpy_helper.to_array(tensor))
 ref_outputs = []
 ref_outputs_num = len(glob.glob(os.path.join(f, 'output_*.pb')))
 for i in range(ref_outputs_num):
 tensor = onnx.TensorProto()
-with open(os.path.join(f, 'output_{}.pb'.format(i)), 'rb') as pf:
+with open(os.path.join(f, f'output_{i}.pb'), 'rb') as pf:
 tensor.ParseFromString(pf.read())
 ref_outputs.append(numpy_helper.to_array(tensor))
 onnx_verify(onnx_model, inputs, ref_outputs)
 total_existing_data_set += 1

 starting_index = 0
-while os.path.exists(os.path.join(onnx_model_dir, 'test_data_set_{}'.format(starting_index))):
+while os.path.exists(os.path.join(onnx_model_dir, f'test_data_set_{starting_index}')):
 starting_index += 1

 if total_existing_data_set == 0 and add_test_data == 0:
 add_test_data = 3
 total_existing_data_set = 3

-print('Generating {} sets of new test data...'.format(add_test_data))
+print(f'Generating {add_test_data} sets of new test data...')
 for i in range(starting_index, add_test_data + starting_index):
-data_dir = os.path.join(onnx_model_dir, 'test_data_set_{}'.format(i))
+data_dir = os.path.join(onnx_model_dir, f'test_data_set_{i}')
 os.makedirs(data_dir)
 inputs = generate_test_input_data(onnx_model, 255)
 ref_outputs = generate_test_output_data(c2_init_net, c2_predict_net, inputs)
 onnx_verify(onnx_model, inputs, ref_outputs)
 for index, input in enumerate(inputs):
 tensor = numpy_helper.from_array(input[1])
-with open(os.path.join(data_dir, 'input_{}.pb'.format(index)), 'wb') as file:
+with open(os.path.join(data_dir, f'input_{index}.pb'), 'wb') as file:
 file.write(tensor.SerializeToString())
 for index, output in enumerate(ref_outputs):
 tensor = numpy_helper.from_array(output)
-with open(os.path.join(data_dir, 'output_{}.pb'.format(index)), 'wb') as file:
+with open(os.path.join(data_dir, f'output_{index}.pb'), 'wb') as file:
 file.write(tensor.SerializeToString())

 del onnx_model
```
2 changes: 1 addition & 1 deletion scripts/release_notes/common.py
```diff
@@ -205,7 +205,7 @@ def run_query(query):
 if request.status_code == 200:
 return request.json()
 else:
-raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, request.json()))
+raise Exception(f"Query failed to run by returning code of {request.status_code}. {request.json()}")


 def github_data(pr_number):
```
2 changes: 1 addition & 1 deletion setup.py
```diff
@@ -224,7 +224,7 @@
 python_min_version = (3, 8, 0)
 python_min_version_str = '.'.join(map(str, python_min_version))
 if sys.version_info < python_min_version:
-print("You are using Python {}. Python >={} is required.".format(platform.python_version(),
+print("You are using Python {}. Python >={} is required.".format(platform.python_version(), # noqa: UP032
 python_min_version_str))
 sys.exit(-1)
```

Here the call keeps `str.format()` and instead gains a `# noqa: UP032` suppression, presumably so setup.py still parses on very old interpreters that predate f-strings and can print this version error rather than a SyntaxError.
8 changes: 2 additions & 6 deletions test/distributed/test_c10d_nccl.py
```diff
@@ -1906,9 +1906,7 @@ def first_bucket_size(ddp_bucket_mb):
 ):
 with first_bucket_size(bucketsize):
 model_msg = (
-"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
-    self.rank, formats, dtypes, bucketsize
-)
+f"rank = {self.rank} formats = {formats} dtypes = {dtypes} bucketsize = {bucketsize} "
 )
 try:
 m = ConvNet(layer_devs, formats, dtypes)
@@ -2387,9 +2385,7 @@ def test_ddp_weight_sharing(self):
 "mismatch at "
 + name
 + ".grad for "
-+ "set_to_none = {}, use_bucket_view = {}".format(
-    try_set_to_none, use_bucket_view
-),
++ f"set_to_none = {try_set_to_none}, use_bucket_view = {use_bucket_view}",
 )

 @requires_nccl()
```
4 changes: 1 addition & 3 deletions test/distributed/test_c10d_spawn.py
```diff
@@ -57,9 +57,7 @@ def _test_multiprocess(self, f, shared_tensors, init_pg, n_output):
 self.assertEqual(
 expected,
 result,
-msg=(
-    "Expect rank {} to receive tensor {} but got {}."
-).format(pid, expected, result)
+msg=f"Expect rank {pid} to receive tensor {expected} but got {result}."
 )

 for _ in range(ws):
```
12 changes: 4 additions & 8 deletions test/distributions/test_distributions.py
```diff
@@ -912,8 +912,7 @@ def test_sample_detached(self):
 dist = Dist(**param)
 sample = dist.sample()
 self.assertFalse(sample.requires_grad,
-msg='{} example {}/{}, .sample() is not detached'.format(
-    Dist.__name__, i + 1, len(params)))
+msg=f'{Dist.__name__} example {i + 1}/{len(params)}, .sample() is not detached')

 @skipIfTorchDynamo("Not a TorchDynamo suitable test")
 def test_rsample_requires_grad(self):
@@ -926,8 +925,7 @@ def test_rsample_requires_grad(self):
 continue
 sample = dist.rsample()
 self.assertTrue(sample.requires_grad,
-msg='{} example {}/{}, .rsample() does not require grad'.format(
-    Dist.__name__, i + 1, len(params)))
+msg=f'{Dist.__name__} example {i + 1}/{len(params)}, .rsample() does not require grad')

 def test_enumerate_support_type(self):
 for Dist, params in EXAMPLES:
@@ -4377,8 +4375,7 @@ def test_params_constraints(self):
 if is_dependent(constraint):
 continue

-message = '{} example {}/{} parameter {} = {}'.format(
-    Dist.__name__, i + 1, len(params), name, value)
+message = f'{Dist.__name__} example {i + 1}/{len(params)} parameter {name} = {value}'
 self.assertTrue(constraint.check(value).all(), msg=message)

 def test_support_constraints(self):
@@ -4388,8 +4385,7 @@ def test_support_constraints(self):
 dist = Dist(**param)
 value = dist.sample()
 constraint = dist.support
-message = '{} example {}/{} sample = {}'.format(
-    Dist.__name__, i + 1, len(params), value)
+message = f'{Dist.__name__} example {i + 1}/{len(params)} sample = {value}'
 self.assertEqual(constraint.event_dim, len(dist.event_shape), msg=message)
 ok = constraint.check(value)
 self.assertEqual(ok.shape, dist.batch_shape, msg=message)
```
4 changes: 1 addition & 3 deletions test/onnx/pytorch_helper.py
```diff
@@ -68,9 +68,7 @@ def PyTorchModule(helper, model, sample_arguments, caffe2_inputs, prefix_name=None):

 if len(uninitialized_inputs) != len(caffe2_inputs):
 raise ValueError(
-"Expected {} inputs but found {}".format(
-    len(uninitialized_inputs), len(caffe2_inputs)
-)
+f"Expected {len(uninitialized_inputs)} inputs but found {len(caffe2_inputs)}"
 )

 def remap_blob_name(name):
```
4 changes: 1 addition & 3 deletions test/onnx/test_operators.py
```diff
@@ -105,9 +105,7 @@ def assertONNX(self, f, args, params=None, **kwargs):
 # Assume:
 # 1) the old test should be delete before the test.
 # 2) only one assertONNX in each test, otherwise will override the data.
-assert not os.path.exists(output_dir), "{} should not exist!".format(
-    output_dir
-)
+assert not os.path.exists(output_dir), f"{output_dir} should not exist!"
 os.makedirs(output_dir)
 with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
 file.write(model_def.SerializeToString())
```
8 changes: 2 additions & 6 deletions test/onnx_caffe2/export_onnx_tests_generator.py
```diff
@@ -145,14 +145,10 @@ def convert_tests(testcases, sets=1):
 failed += 1

 print(
-"Collect {} test cases from PyTorch repo, failed to export {} cases.".format(
-    len(testcases), failed
-)
+f"Collect {len(testcases)} test cases from PyTorch repo, failed to export {failed} cases."
 )
 print(
-"PyTorch converted cases are stored in {}.".format(
-    onnx_test_common.pytorch_converted_dir
-)
+f"PyTorch converted cases are stored in {onnx_test_common.pytorch_converted_dir}."
 )
 print_stats(FunctionalModule_nums, nn_module)
```
4 changes: 1 addition & 3 deletions test/optim/test_lrscheduler.py
```diff
@@ -1989,9 +1989,7 @@ def _test_get_last_lr(self, schedulers, targets, epochs=10):
 self.assertEqual(
 target,
 result,
-msg="LR is wrong in epoch {}: expected {}, got {}".format(
-    epoch, t, r
-),
+msg=f"LR is wrong in epoch {epoch}: expected {t}, got {r}",
 atol=1e-5,
 rtol=0,
 )
```
4 changes: 1 addition & 3 deletions test/quantization/core/test_docs.py
```diff
@@ -62,9 +62,7 @@ def get_correct_path(path_from_pytorch):
 if "\n" not in unique_identifier:
 unique_identifier += "\n"

-assert unique_identifier in content, "could not find {} in {}".format(
-    unique_identifier, path_to_file
-)
+assert unique_identifier in content, f"could not find {unique_identifier} in {path_to_file}"

 # get index of first line of code
 line_num_start = content.index(unique_identifier) + 1
```
(Diff truncated; the remaining changed files are not shown here.)