diff --git a/pavelib/utils/db_utils.py b/pavelib/utils/db_utils.py
index cddca04b0e..4bb4ee585a 100644
--- a/pavelib/utils/db_utils.py
+++ b/pavelib/utils/db_utils.py
@@ -13,7 +13,7 @@ from pavelib.prereqs import compute_fingerprint
from pavelib.utils.envs import Env
CACHE_FOLDER = 'common/test/db_cache'
-FINGERPRINT_FILEPATH = '{}/{}/bok_choy_migrations.sha1'.format(Env.REPO_ROOT, CACHE_FOLDER)
+FINGERPRINT_FILEPATH = f'{Env.REPO_ROOT}/{CACHE_FOLDER}/bok_choy_migrations.sha1'
def remove_files_from_folder(files, folder):
@@ -25,9 +25,9 @@ def remove_files_from_folder(files, folder):
file_with_path = os.path.join(folder, file_name)
try:
os.remove(file_with_path)
- print('\tRemoved {}'.format(file_with_path))
+ print(f'\tRemoved {file_with_path}')
except OSError:
- print('\tCould not remove {}. Continuing.'.format(file_with_path))
+ print(f'\tCould not remove {file_with_path}. Continuing.')
continue
@@ -39,11 +39,11 @@ def reset_test_db(db_cache_files, update_cache_files=True, use_existing_db=False
exist), load in the db cache files files if they exist on disk,
and optionally apply migrations and write up-to-date cache files.
"""
- cmd = '{}/scripts/reset-test-db.sh'.format(Env.REPO_ROOT)
+ cmd = f'{Env.REPO_ROOT}/scripts/reset-test-db.sh'
if update_cache_files:
- cmd = '{} --rebuild_cache'.format(cmd)
+ cmd = f'{cmd} --rebuild_cache'
if use_existing_db:
- cmd = '{} --use-existing-db'.format(cmd)
+ cmd = f'{cmd} --use-existing-db'
sh(cmd)
verify_files_exist(db_cache_files)
@@ -78,7 +78,7 @@ def fingerprint_bokchoy_db_files(migration_output_files, all_db_files):
msg = "Computing the fingerprint."
print(msg)
fingerprint = compute_fingerprint(file_paths)
- print("The fingerprint for bokchoy db files is: {}".format(fingerprint))
+ print(f"The fingerprint for bokchoy db files is: {fingerprint}")
return fingerprint
@@ -101,7 +101,7 @@ def verify_files_exist(files):
for file_name in files:
file_path = os.path.join(CACHE_FOLDER, file_name)
if not os.path.isfile(file_path):
- msg = "Did not find expected file: {}".format(file_path)
+ msg = f"Did not find expected file: {file_path}"
raise BuildFailure(msg)
@@ -113,7 +113,7 @@ def calculate_bokchoy_migrations(migration_output_files):
NOTE: the script first clears out the database, then calculates
what migrations need to be run, which is all of them.
"""
- sh('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT))
+ sh(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --calculate_migrations')
verify_files_exist(migration_output_files)
@@ -132,12 +132,12 @@ def is_fingerprint_in_bucket(fingerprint, bucket_name):
If there is any issue reaching the bucket, show the exception but continue by
returning False
"""
- zipfile_name = '{}.tar.gz'.format(fingerprint)
+ zipfile_name = f'{fingerprint}.tar.gz'
try:
conn = boto.connect_s3(anon=True)
bucket = conn.get_bucket(bucket_name)
except Exception as e: # pylint: disable=broad-except
- print("Exception caught trying to reach S3 bucket {}: {}".format(bucket_name, e))
+ print(f"Exception caught trying to reach S3 bucket {bucket_name}: {e}")
return False
key = boto.s3.key.Key(bucket=bucket, name=zipfile_name)
return key.exists()
@@ -159,7 +159,7 @@ def get_file_from_s3(bucket_name, zipfile_name, path):
"""
Get the file from s3 and save it to disk.
"""
- print("Retrieving {} from bucket {}.".format(zipfile_name, bucket_name))
+ print(f"Retrieving {zipfile_name} from bucket {bucket_name}.")
conn = boto.connect_s3(anon=True)
bucket = conn.get_bucket(bucket_name)
key = boto.s3.key.Key(bucket=bucket, name=zipfile_name)
@@ -191,7 +191,7 @@ def refresh_bokchoy_db_cache_from_s3(fingerprint, bucket_name, bokchoy_db_files)
"""
path = CACHE_FOLDER
if is_fingerprint_in_bucket(fingerprint, bucket_name):
- zipfile_name = '{}.tar.gz'.format(fingerprint)
+ zipfile_name = f'{fingerprint}.tar.gz'
get_file_from_s3(bucket_name, zipfile_name, path)
zipfile_path = os.path.join(path, zipfile_name)
print("Extracting db cache files.")
@@ -203,7 +203,7 @@ def create_tarfile_from_db_cache(fingerprint, files, path):
"""
Create a tar.gz file with the current bokchoy DB cache files.
"""
- zipfile_name = '{}.tar.gz'.format(fingerprint)
+ zipfile_name = f'{fingerprint}.tar.gz'
zipfile_path = os.path.join(path, zipfile_name)
with tarfile.open(name=zipfile_path, mode='w:gz') as tar_file:
for name in files:
@@ -215,7 +215,7 @@ def upload_to_s3(file_name, file_path, bucket_name, replace=False):
"""
Upload the specified files to an s3 bucket.
"""
- print("Uploading {} to s3 bucket {}".format(file_name, bucket_name))
+ print(f"Uploading {file_name} to s3 bucket {bucket_name}")
try:
conn = boto.connect_s3()
except boto.exception.NoAuthHandlerFound:
@@ -231,9 +231,9 @@ def upload_to_s3(file_name, file_path, bucket_name, replace=False):
key = boto.s3.key.Key(bucket=bucket, name=file_name)
bytes_written = key.set_contents_from_filename(file_path, replace=replace, policy='public-read')
if bytes_written:
- msg = "Wrote {} bytes to {}.".format(bytes_written, key.name)
+ msg = f"Wrote {bytes_written} bytes to {key.name}."
else:
- msg = "File {} already existed in bucket {}.".format(key.name, bucket_name)
+ msg = f"File {key.name} already existed in bucket {bucket_name}."
print(msg)
diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py
index 34b4968040..8f7e528ddd 100644
--- a/pavelib/utils/envs.py
+++ b/pavelib/utils/envs.py
@@ -1,7 +1,7 @@
"""
Helper functions for loading environment settings.
"""
-
+import configparser
import json
import os
import sys
@@ -11,7 +11,6 @@ import memcache
from lazy import lazy
from path import Path as path
from paver.easy import BuildFailure, sh
-from six.moves import configparser
from pavelib.utils.cmd import django_cmd
@@ -33,7 +32,7 @@ def repo_root():
absolute_path = file_path.abspath()
break
except OSError:
- print('Attempt {}/180 to get an absolute path failed'.format(attempt))
+ print(f'Attempt {attempt}/180 to get an absolute path failed')
if attempt < 180:
attempt += 1
sleep(1)
@@ -137,7 +136,7 @@ class Env:
'video': {
'port': 8777,
'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
- 'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
+ 'config': f"root_dir={VIDEO_SOURCE_DIR}",
},
'youtube': {
@@ -170,7 +169,7 @@ class Env:
MONGO_HOST = 'edx.devstack.mongo' if USING_DOCKER else 'localhost'
BOK_CHOY_MONGO_DATABASE = "test"
BOK_CHOY_CACHE_HOST = 'edx.devstack.memcached' if USING_DOCKER else '0.0.0.0'
- BOK_CHOY_CACHE = memcache.Client(['{}:11211'.format(BOK_CHOY_CACHE_HOST)], debug=0)
+ BOK_CHOY_CACHE = memcache.Client([f'{BOK_CHOY_CACHE_HOST}:11211'], debug=0)
# Test Ids Directory
TEST_DIR = REPO_ROOT / ".testids"
@@ -269,8 +268,8 @@ class Env:
# else for cases where values are not found & sh returns one None value
return tuple(str(value).splitlines()) if value else tuple(None for _ in range(settings_length))
except BuildFailure:
- print("Unable to print the value of the {} setting:".format(django_settings))
- with open(cls.PRINT_SETTINGS_LOG_FILE, 'r') as f:
+ print(f"Unable to print the value of the {django_settings} setting:")
+ with open(cls.PRINT_SETTINGS_LOG_FILE) as f:
print(f.read())
sys.exit(1)
@@ -315,7 +314,7 @@ class Env:
# Find the env JSON file
if self.SERVICE_VARIANT:
- env_path = self.REPO_ROOT.parent / "{service}.env.json".format(service=self.SERVICE_VARIANT)
+ env_path = self.REPO_ROOT.parent / f"{self.SERVICE_VARIANT}.env.json"
else:
env_path = path("env.json").abspath()
diff --git a/pavelib/utils/passthrough_opts.py b/pavelib/utils/passthrough_opts.py
index 9ae2917a71..013ec40bd5 100644
--- a/pavelib/utils/passthrough_opts.py
+++ b/pavelib/utils/passthrough_opts.py
@@ -10,9 +10,9 @@ Provides:
from optparse import BadOptionError, OptionParser
+from unittest.mock import patch
import paver.tasks
-from mock import patch
class PassthroughOptionParser(OptionParser):
diff --git a/pavelib/utils/process.py b/pavelib/utils/process.py
index c775b4fa21..fd2ee5bfcf 100644
--- a/pavelib/utils/process.py
+++ b/pavelib/utils/process.py
@@ -69,7 +69,7 @@ def run_multi_processes(cmd_list, out_log=None, err_log=None):
# pylint: disable=broad-except
except Exception as err:
- print("Error running process {}".format(err), file=sys.stderr)
+ print(f"Error running process {err}", file=sys.stderr)
finally:
for pid in pids:
diff --git a/pavelib/utils/test/bokchoy_utils.py b/pavelib/utils/test/bokchoy_utils.py
index 801fac296e..3c23a1afd0 100644
--- a/pavelib/utils/test/bokchoy_utils.py
+++ b/pavelib/utils/test/bokchoy_utils.py
@@ -7,8 +7,8 @@ import os
import subprocess
import sys
import time
+from http.client import HTTPConnection
-import six
from paver import tasks
from paver.easy import cmdopts, needs, sh, task
@@ -89,7 +89,7 @@ def wait_for_server(server, port):
while attempts < 120:
try:
- connection = six.moves.http_client.HTTPConnection(server, port, timeout=10)
+ connection = HTTPConnection(server, port, timeout=10)
connection.request('GET', '/')
response = connection.getresponse()
@@ -115,7 +115,7 @@ def wait_for_test_servers():
if not ready:
msg = colorize(
"red",
- "Could not contact {} test server".format(service)
+ f"Could not contact {service} test server"
)
print(msg)
sys.exit(1)
@@ -127,7 +127,7 @@ def is_mongo_running():
"""
# The mongo command will connect to the service,
# failing with a non-zero exit code if it cannot connect.
- output = os.popen('mongo --host {} --eval "print(\'running\')"'.format(Env.MONGO_HOST)).read()
+ output = os.popen(f'mongo --host {Env.MONGO_HOST} --eval "print(\'running\')"').read()
return output and "running" in output
diff --git a/pavelib/utils/test/suites/bokchoy_suite.py b/pavelib/utils/test/suites/bokchoy_suite.py
index e80e9b9a91..1155c8c54f 100644
--- a/pavelib/utils/test/suites/bokchoy_suite.py
+++ b/pavelib/utils/test/suites/bokchoy_suite.py
@@ -68,7 +68,7 @@ def load_courses(options):
`test_root/courses/`.
"""
if 'imports_dir' in options:
- msg = colorize('green', "Importing courses from {}...".format(options.imports_dir))
+ msg = colorize('green', f"Importing courses from {options.imports_dir}...")
print(msg)
sh(
@@ -239,7 +239,7 @@ class BokChoyTestSuite(TestSuite):
# Clean up data we created in the databases
msg = colorize('green', "Cleaning up databases...")
print(msg)
- sh("./manage.py lms --settings {settings} flush --traceback --noinput".format(settings=Env.SETTINGS))
+ sh(f"./manage.py lms --settings {Env.SETTINGS} flush --traceback --noinput")
clear_mongo()
@property
@@ -247,12 +247,12 @@ class BokChoyTestSuite(TestSuite):
"""
Construct the proper combination of multiprocessing, XUnit XML file, color, and verbosity for use with pytest.
"""
- command = ["--junitxml={}".format(self.xunit_report)]
+ command = [f"--junitxml={self.xunit_report}"]
if self.num_processes != 1:
# Construct "multiprocess" pytest command
command += [
- "-n {}".format(self.num_processes),
+ f"-n {self.num_processes}",
"--color=no",
]
if self.verbosity < 1:
@@ -260,7 +260,7 @@ class BokChoyTestSuite(TestSuite):
elif self.verbosity > 1:
command.append("--verbose")
if self.eval_attr:
- command.append("-a '{}'".format(self.eval_attr))
+ command.append(f"-a '{self.eval_attr}'")
return command
@@ -298,13 +298,13 @@ class BokChoyTestSuite(TestSuite):
# Construct the pytest command, specifying where to save
# screenshots and XUnit XML reports
cmd = [
- "DEFAULT_STORE={}".format(self.default_store),
- "SAVED_SOURCE_DIR='{}'".format(self.log_dir),
- "SCREENSHOT_DIR='{}'".format(self.log_dir),
- "BOK_CHOY_HAR_DIR='{}'".format(self.har_dir),
- "BOKCHOY_A11Y_CUSTOM_RULES_FILE='{}'".format(self.a11y_file),
- "SELENIUM_DRIVER_LOG_DIR='{}'".format(self.log_dir),
- "VERIFY_XSS='{}'".format(self.verify_xss),
+ f"DEFAULT_STORE={self.default_store}",
+ f"SAVED_SOURCE_DIR='{self.log_dir}'",
+ f"SCREENSHOT_DIR='{self.log_dir}'",
+ f"BOK_CHOY_HAR_DIR='{self.har_dir}'",
+ f"BOKCHOY_A11Y_CUSTOM_RULES_FILE='{self.a11y_file}'",
+ f"SELENIUM_DRIVER_LOG_DIR='{self.log_dir}'",
+ f"VERIFY_XSS='{self.verify_xss}'",
]
if self.save_screenshots:
cmd.append("NEEDLE_SAVE_BASELINE=True")
@@ -313,7 +313,7 @@ class BokChoyTestSuite(TestSuite):
"coverage",
"run",
]
- cmd.append("--rcfile={}".format(self.coveragerc))
+ cmd.append(f"--rcfile={self.coveragerc}")
else:
cmd += [
"python",
diff --git a/pavelib/utils/test/suites/js_suite.py b/pavelib/utils/test/suites/js_suite.py
index 6f22058c58..a6896e2858 100644
--- a/pavelib/utils/test/suites/js_suite.py
+++ b/pavelib/utils/test/suites/js_suite.py
@@ -68,8 +68,8 @@ class JsTestSubSuite(TestSuite):
except ValueError:
self.test_conf_file = Env.KARMA_CONFIG_FILES[0]
- self.coverage_report = self.report_dir / 'coverage-{suite}.xml'.format(suite=self.test_id)
- self.xunit_report = self.report_dir / 'javascript_xunit-{suite}.xml'.format(suite=self.test_id)
+ self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml'
+ self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml'
@property
def cmd(self):
@@ -84,17 +84,17 @@ class JsTestSubSuite(TestSuite):
self.test_conf_file,
"--single-run={}".format('false' if self.mode == 'dev' else 'true'),
"--capture-timeout=60000",
- "--junitreportpath={}".format(self.xunit_report),
- "--browsers={}".format(Env.KARMA_BROWSER),
+ f"--junitreportpath={self.xunit_report}",
+ f"--browsers={Env.KARMA_BROWSER}",
]
if self.port:
- cmd.append("--port={}".format(self.port))
+ cmd.append(f"--port={self.port}")
if self.run_under_coverage:
cmd.extend([
"--coverage",
- "--coveragereportpath={}".format(self.coverage_report),
+ f"--coveragereportpath={self.coverage_report}",
])
return cmd
diff --git a/pavelib/utils/test/suites/pytest_suite.py b/pavelib/utils/test/suites/pytest_suite.py
index dcf215dcc0..8d49f0cae7 100644
--- a/pavelib/utils/test/suites/pytest_suite.py
+++ b/pavelib/utils/test/suites/pytest_suite.py
@@ -108,9 +108,9 @@ class PytestSuite(TestSuite):
if self.with_wtw:
opts.extend([
'--wtw',
- '{}/{}'.format(COVERAGE_CACHE_BASEPATH, WHO_TESTS_WHAT_DIFF),
+ f'{COVERAGE_CACHE_BASEPATH}/{WHO_TESTS_WHAT_DIFF}',
'--wtwdb',
- '{}/{}'.format(COVERAGE_CACHE_BASEPATH, COVERAGE_CACHE_BASELINE)
+ f'{COVERAGE_CACHE_BASEPATH}/{COVERAGE_CACHE_BASELINE}'
])
return opts
@@ -160,8 +160,8 @@ class SystemTestSuite(PytestSuite):
'-Wd',
'-m',
'pytest',
- '--ds={}'.format('{}.envs.{}'.format(self.root, self.settings)),
- "--junitxml={}".format(self.xunit_report),
+            f'--ds={self.root}.envs.{self.settings}',
+ f"--junitxml={self.xunit_report}",
])
cmd.extend(self.test_options_flags)
if self.verbosity < 1:
@@ -186,7 +186,7 @@ class SystemTestSuite(PytestSuite):
for ip in self.xdist_ip_addresses.split(','):
# Propogate necessary env vars to xdist containers
env_var_cmd = 'export DJANGO_SETTINGS_MODULE={} DISABLE_COURSEENROLLMENT_HISTORY={} PYTHONHASHSEED=0'\
- .format('{}.envs.{}'.format(self.root, self.settings),
+ .format(f'{self.root}.envs.{self.settings}',
self.disable_courseenrollment_history)
xdist_string = '--tx {}*ssh="jenkins@{} -o StrictHostKeyChecking=no"' \
'//python="source edx-venv-{}/edx-venv/bin/activate; {}; python"' \
@@ -194,19 +194,19 @@ class SystemTestSuite(PytestSuite):
.format(xdist_remote_processes, ip, Env.PYTHON_VERSION, env_var_cmd)
cmd.append(xdist_string)
for rsync_dir in Env.rsync_dirs():
- cmd.append('--rsyncdir {}'.format(rsync_dir))
+ cmd.append(f'--rsyncdir {rsync_dir}')
else:
if self.processes == -1:
cmd.append('-n auto')
cmd.append('--dist=loadscope')
elif self.processes != 0:
- cmd.append('-n {}'.format(self.processes))
+ cmd.append(f'-n {self.processes}')
cmd.append('--dist=loadscope')
if not self.randomize:
cmd.append('-p no:randomly')
if self.eval_attr:
- cmd.append("-a '{}'".format(self.eval_attr))
+ cmd.append(f"-a '{self.eval_attr}'")
cmd.extend(self.passthrough_options)
cmd.append(self.test_id)
@@ -226,17 +226,17 @@ class SystemTestSuite(PytestSuite):
# thereby making sure that we load any django models that are
# only defined in test files.
default_test_globs = [
- "{system}/djangoapps/*".format(system=self.root),
+ f"{self.root}/djangoapps/*",
"common/djangoapps/*",
"openedx/core/djangoapps/*",
"openedx/tests/*",
"openedx/core/lib/*",
]
if self.root in ('lms', 'cms'):
- default_test_globs.append("{system}/lib/*".format(system=self.root))
+ default_test_globs.append(f"{self.root}/lib/*")
if self.root == 'lms':
- default_test_globs.append("{system}/tests.py".format(system=self.root))
+ default_test_globs.append(f"{self.root}/tests.py")
default_test_globs.append("openedx/core/djangolib/*")
default_test_globs.append("openedx/core/tests/*")
default_test_globs.append("openedx/features")
@@ -288,7 +288,7 @@ class LibTestSuite(PytestSuite):
'-Wd',
'-m',
'pytest',
- '--junitxml={}'.format(self.xunit_report),
+ f'--junitxml={self.xunit_report}',
])
cmd.extend(self.passthrough_options + self.test_options_flags)
if self.verbosity < 1:
@@ -322,7 +322,7 @@ class LibTestSuite(PytestSuite):
.format(xdist_remote_processes, ip, Env.PYTHON_VERSION, env_var_cmd)
cmd.append(xdist_string)
for rsync_dir in Env.rsync_dirs():
- cmd.append('--rsyncdir {}'.format(rsync_dir))
+ cmd.append(f'--rsyncdir {rsync_dir}')
# "--rsyncdir" throws off the configuration root, set it explicitly
if 'common/lib' in self.test_id:
cmd.append('--rootdir=common/lib')
@@ -334,13 +334,13 @@ class LibTestSuite(PytestSuite):
cmd.append('-n auto')
cmd.append('--dist=loadscope')
elif self.processes != 0:
- cmd.append('-n {}'.format(self.processes))
+ cmd.append(f'-n {self.processes}')
cmd.append('--dist=loadscope')
if not self.randomize:
cmd.append("-p no:randomly")
if self.eval_attr:
- cmd.append("-a '{}'".format(self.eval_attr))
+ cmd.append(f"-a '{self.eval_attr}'")
cmd.append(self.test_id)
diff --git a/pavelib/utils/test/suites/suite.py b/pavelib/utils/test/suites/suite.py
index 0ca243bb5e..9c7e96ce65 100644
--- a/pavelib/utils/test/suites/suite.py
+++ b/pavelib/utils/test/suites/suite.py
@@ -41,7 +41,7 @@ class TestSuite:
i.e. Checking for and defining required directories.
"""
- print("\nSetting up for {suite_name}".format(suite_name=self.root))
+ print(f"\nSetting up for {self.root}")
self.failed_suites = []
def __exit__(self, exc_type, exc_value, traceback):
@@ -54,7 +54,7 @@ class TestSuite:
i.e. Cleaning mongo after the lms tests run.
"""
- print("\nCleaning up after {suite_name}".format(suite_name=self.root))
+ print(f"\nCleaning up after {self.root}")
@property
def cmd(self):
diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py
index ea537f827e..c18e3b8d26 100644
--- a/pavelib/utils/test/utils.py
+++ b/pavelib/utils/test/utils.py
@@ -63,7 +63,7 @@ def clean_dir(directory):
"""
# We delete the files but preserve the directory structure
# so that coverage.py has a place to put the reports.
- sh('find {dir} -type f -delete'.format(dir=directory))
+ sh(f'find {directory} -type f -delete')
@task
diff --git a/pavelib/utils/timer.py b/pavelib/utils/timer.py
index 88000aea58..fc6f300373 100644
--- a/pavelib/utils/timer.py
+++ b/pavelib/utils/timer.py
@@ -55,7 +55,7 @@ def timed(wrapped, instance, args, kwargs): # pylint: disable=unused-argument
log_message = {
'python_version': sys.version,
- 'task': "{}.{}".format(wrapped.__module__, wrapped.__name__),
+ 'task': f"{wrapped.__module__}.{wrapped.__name__}",
'args': [repr(arg) for arg in args],
'kwargs': {key: repr(value) for key, value in kwargs.items()},
'started_at': start.isoformat(' '),
diff --git a/scripts/xblock/xblock_counts.py b/scripts/xblock/xblock_counts.py
index 5f80e4a56e..1514a99f4f 100644
--- a/scripts/xblock/xblock_counts.py
+++ b/scripts/xblock/xblock_counts.py
@@ -1,5 +1,3 @@
-
-
import argparse
import csv
import json
@@ -8,7 +6,6 @@ import sys
from datetime import datetime
import requests
-from six import text_type
# Keys for the CSV and JSON interpretation
PAGINATION_KEY = 'pagination'
@@ -163,14 +160,14 @@ def _get_block_types_from_json_file(xblock_json_file):
if not os.path.isfile(xblock_json_file):
print('xBlock configuration file does not exist: %s' % xblock_json_file)
sys.exit(2)
- with open(xblock_json_file, 'r') as json_file:
+ with open(xblock_json_file) as json_file:
type_set = set()
try:
json_data = json.loads(json_file.read())
except ValueError as e:
print('xBlock configuration file does not match the expected layout and is '
'missing "data" list: %s' % xblock_json_file)
- sys.exit(text_type(e))
+ sys.exit(str(e))
if 'data' in json_data:
xblock_type_list = json_data['data']
for xblock in xblock_type_list:
@@ -213,11 +210,11 @@ def _get_course_block_counts(auth_token, block_url):
Returns:
dict: A dictionary containing the Block counts
"""
- headers = {'Authorization': 'Bearer {}'.format(auth_token)}
+ headers = {'Authorization': f'Bearer {auth_token}'}
response = requests.get(block_url, headers=headers)
if response.status_code != 200:
- print("url {} returned status code {}".format(block_url, response.status_code))
+ print(f"url {block_url} returned status code {response.status_code}")
return {}
response_json = response.json()
@@ -386,4 +383,4 @@ if __name__ == "__main__":
if len(course_data) > 0:
write_block_summary_report(course_data)
write_course_block_detail_report(course_data)
- print('Start time: %s Total run time: %s' % (str(start_time), str(datetime.now() - start_time)))
+    print(f'Start time: {start_time} Total run time: {datetime.now() - start_time}')
diff --git a/scripts/xdist/find_order_dependent_test_failures.py b/scripts/xdist/find_order_dependent_test_failures.py
index a3191602c9..644ec0d4d5 100644
--- a/scripts/xdist/find_order_dependent_test_failures.py
+++ b/scripts/xdist/find_order_dependent_test_failures.py
@@ -78,7 +78,7 @@ def main(log_file, test_suite, fast, verbose):
test_list_with_failures, pytest_command = _find_fewest_tests_with_failures(failing_test_list, 'ALL')
if test_list_with_failures:
print('Found failures running {} tests.'.format(len(test_list_with_failures)))
- print('Use: {}'.format(pytest_command))
+ print(f'Use: {pytest_command}')
return
if fast_option:
@@ -102,9 +102,9 @@ def _strip_console_for_tests_with_failure(log_file, test_suite):
worker_test_dict = {}
test_base_included = {}
failing_worker_num = None
- with io.open(log_file, 'r') as console_file:
+    with open(log_file) as console_file:
for line in console_file:
- regex_search = re.search(r'\[gw(\d+)] (PASSED|FAILED|SKIPPED|ERROR) (\S+)'.format(test_suite), line)
+            regex_search = re.search(r'\[gw(\d+)] (PASSED|FAILED|SKIPPED|ERROR) (\S+)', line)
if regex_search:
worker_num_string = regex_search.group(1)
pass_fail_string = regex_search.group(2)
@@ -113,7 +113,7 @@ def _strip_console_for_tests_with_failure(log_file, test_suite):
test = regex_search.group(3)
if test_suite == "commonlib-unit":
if "pavelib" not in test and not test.startswith('scripts'):
- test = u"common/lib/{}".format(test)
+ test = f"common/lib/{test}"
if fast_option and pass_fail_string == 'PASSED':
# fast option will only take one test per class or module, in case
# the failure is a setup/teardown failure.
@@ -134,7 +134,7 @@ def _get_pytest_command(output_file_name):
"""
Return the pytest command to run.
"""
- return "pytest -p 'no:randomly' `cat {}`".format(output_file_name)
+ return f"pytest -p 'no:randomly' `cat {output_file_name}`"
def _run_tests_and_check_for_failures(output_file_name):
@@ -169,7 +169,7 @@ def _create_and_check_test_files_for_failures(test_list, test_type):
# to the command line, but this keeps the verbose output cleaner.
temp_file = tempfile.NamedTemporaryFile(prefix=output_file_name, dir=OUTPUT_FOLDER_NAME, delete=False)
- with io.open(temp_file.name, 'w') as output_file:
+ with open(temp_file.name, 'w') as output_file:
for line in test_list:
output_file.write(line + "\n")
temp_file.close()
diff --git a/scripts/xdist/get_worker_test_list.py b/scripts/xdist/get_worker_test_list.py
index 62939ffd1f..b365782a33 100644
--- a/scripts/xdist/get_worker_test_list.py
+++ b/scripts/xdist/get_worker_test_list.py
@@ -30,9 +30,9 @@ import click
)
def main(log_file, test_suite):
worker_test_dict = {}
- with io.open(log_file, 'r') as console_file:
+    with open(log_file) as console_file:
for line in console_file:
- regex_search = re.search(r'\[gw(\d+)] (PASSED|FAILED|SKIPPED|ERROR) (\S+)'.format(test_suite), line)
+            regex_search = re.search(r'\[gw(\d+)] (PASSED|FAILED|SKIPPED|ERROR) (\S+)', line)
if regex_search:
worker_num_string = regex_search.group(1)
if worker_num_string not in worker_test_dict:
@@ -40,7 +40,7 @@ def main(log_file, test_suite):
test = regex_search.group(3)
if test_suite == "commonlib-unit":
if "pavelib" not in test and not test.startswith('scripts'):
- test = u"common/lib/{}".format(test)
+ test = f"common/lib/{test}"
worker_test_dict[worker_num_string].append(test)
output_folder_name = "worker_list_files"
@@ -49,8 +49,8 @@ def main(log_file, test_suite):
os.mkdir(output_folder_name)
for worker_num in worker_test_dict:
- output_file_name = "{}/{}_gw{}_test_list.txt".format(output_folder_name, test_suite, worker_num)
- with io.open(output_file_name, 'w') as output_file:
+ output_file_name = f"{output_folder_name}/{test_suite}_gw{worker_num}_test_list.txt"
+ with open(output_file_name, 'w') as output_file:
for line in worker_test_dict[worker_num]:
output_file.write(line + "\n")
diff --git a/scripts/xdist/pytest_worker_manager.py b/scripts/xdist/pytest_worker_manager.py
index 1a1d855729..908ee8a9cf 100644
--- a/scripts/xdist/pytest_worker_manager.py
+++ b/scripts/xdist/pytest_worker_manager.py
@@ -12,7 +12,6 @@ from botocore.config import Config
from botocore.exceptions import ClientError
import socket
from multiprocessing import Pool
-from six.moves import range
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@@ -41,7 +40,7 @@ class PytestWorkerManager():
Spins up workers and generates two .txt files, containing the IP/ arns
of the new workers.
"""
- logging.info("Spinning up {} workers".format(number_of_workers))
+ logging.info(f"Spinning up {number_of_workers} workers")
worker_instance_ids = []
for retry in range(1, self.MAX_RUN_WORKER_RETRIES + 1):
@@ -71,11 +70,11 @@ class PytestWorkerManager():
# Handle AWS throttling with an exponential backoff
if retry == self.MAX_RUN_WORKER_RETRIES:
raise Exception(
- "MAX_RUN_WORKER_RETRIES ({}) reached while spinning up workers due to AWS throttling.".format(self.MAX_RUN_WORKER_RETRIES)
+ f"MAX_RUN_WORKER_RETRIES ({self.MAX_RUN_WORKER_RETRIES}) reached while spinning up workers due to AWS throttling."
)
- logger.info("Hit error: {}. Retrying".format(err))
+ logger.info(f"Hit error: {err}. Retrying")
countdown = 2 ** retry
- logger.info("Sleeping for {} seconds".format(countdown))
+ logger.info(f"Sleeping for {countdown} seconds")
time.sleep(countdown)
else:
break
@@ -113,7 +112,7 @@ class PytestWorkerManager():
raise Exception(
"Timed out waiting to spin up all workers."
)
- logger.info("Successfully booted up {} workers.".format(number_of_workers))
+ logger.info(f"Successfully booted up {number_of_workers} workers.")
not_ready_ip_addresses = ip_addresses[:]
logger.info("Checking ssh connection to workers.")
@@ -140,13 +139,13 @@ class PytestWorkerManager():
# Generate .txt files containing IP addresses and instance ids
ip_list_string = ",".join(ip_addresses)
- logger.info("Worker IP list: {}".format(ip_list_string))
+ logger.info(f"Worker IP list: {ip_list_string}")
ip_list_file = open("pytest_worker_ips.txt", "w")
ip_list_file.write(ip_list_string)
ip_list_file.close()
worker_instance_id_list_string = ",".join(worker_instance_ids)
- logger.info("Worker Instance Id list: {}".format(worker_instance_id_list_string))
+ logger.info(f"Worker Instance Id list: {worker_instance_id_list_string}")
worker_arn_file = open("pytest_worker_instance_ids.txt", "w")
worker_arn_file.write(worker_instance_id_list_string)
worker_arn_file.close()
diff --git a/scripts/xsslint/tests/templates/test.py b/scripts/xsslint/tests/templates/test.py
index 34675ed643..4fb183391f 100644
--- a/scripts/xsslint/tests/templates/test.py
+++ b/scripts/xsslint/tests/templates/test.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-# -*- coding: utf-8 -*-
# Testing encoding on second line does not cause violation
message = ""
x = "{}".format(message)
diff --git a/scripts/xsslint/tests/test_linters.py b/scripts/xsslint/tests/test_linters.py
index 6a9d160fc9..1c15894161 100644
--- a/scripts/xsslint/tests/test_linters.py
+++ b/scripts/xsslint/tests/test_linters.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
Tests for linters.py
"""
@@ -8,7 +7,6 @@ import textwrap
from unittest import TestCase
from ddt import data, ddt
-from six.moves import range, zip
from xsslint.linters import (
JavaScriptLinter, MakoTemplateLinter,
@@ -69,7 +67,7 @@ class TestLinter(TestCase):
# Print violations if the lengths are different.
if len(results.violations) != len(rules):
for violation in results.violations:
- print("Found violation: {}".format(violation.rule))
+ print(f"Found violation: {violation.rule}")
assert len(results.violations) == len(rules)
for violation, rule in zip(results.violations, rules):
diff --git a/scripts/xsslint/tests/test_main.py b/scripts/xsslint/tests/test_main.py
index 3ac34d7ea6..050eb3bdd5 100644
--- a/scripts/xsslint/tests/test_main.py
+++ b/scripts/xsslint/tests/test_main.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
Tests for main.py
"""
@@ -6,10 +5,10 @@ Tests for main.py
import json
import re
-from six import StringIO
+from io import StringIO
from unittest import TestCase
-import mock
+from unittest import mock
from xsslint.linters import JavaScriptLinter, MakoTemplateLinter, PythonLinter, UnderscoreTemplateLinter
from xsslint.main import _build_ruleset, _lint
@@ -79,9 +78,9 @@ class TestXSSLinter(TestCase):
output = self.out.getvalue()
# Assert violation details are displayed.
- assert re.search('test\\.html.*{}'.format(self.ruleset.mako_missing_default.rule_id), output) is not None
- assert re.search('test\\.js.*{}'.format(self.ruleset.javascript_concat_html.rule_id), output) is not None
- assert re.search('test\\.js.*{}'.format(self.ruleset.underscore_not_escaped.rule_id), output) is not None
+ assert re.search(f'test\\.html.*{self.ruleset.mako_missing_default.rule_id}', output) is not None
+ assert re.search(f'test\\.js.*{self.ruleset.javascript_concat_html.rule_id}', output) is not None
+ assert re.search(f'test\\.js.*{self.ruleset.underscore_not_escaped.rule_id}', output) is not None
lines_with_rule = 0
lines_without_rule = 0 # Output with verbose setting only.
for underscore_match in re.finditer(r'test\.underscore:.*\n', output):
@@ -91,8 +90,8 @@ class TestXSSLinter(TestCase):
lines_without_rule += 1
assert lines_with_rule >= 1
assert lines_without_rule == 0
- assert re.search('test\\.py.*{}'.format(self.ruleset.python_parse_error.rule_id), output) is None
- assert re.search('test\\.py.*{}'.format(self.ruleset.python_wrap_html.rule_id), output) is not None
+ assert re.search(f'test\\.py.*{self.ruleset.python_parse_error.rule_id}', output) is None
+ assert re.search(f'test\\.py.*{self.ruleset.python_wrap_html.rule_id}', output) is not None
# Assert no rule totals.
assert re.search('{}:\\s*{} violations'.format(self.ruleset.python_parse_error.rule_id, 0), output) is None
# Assert final total
@@ -150,7 +149,7 @@ class TestXSSLinter(TestCase):
)
output = self.out.getvalue()
- assert re.search('test\\.py.*{}'.format(self.ruleset.python_wrap_html.rule_id), output) is not None
+ assert re.search(f'test\\.py.*{self.ruleset.python_wrap_html.rule_id}', output) is not None
# Assert totals output.
assert re.search('{}:\\s*{} violations'.format(self.ruleset.python_parse_error.rule_id, 0), output) is not None
@@ -176,7 +175,7 @@ class TestXSSLinter(TestCase):
)
output = self.out.getvalue()
- assert re.search('test\\.py.*{}'.format(self.ruleset.python_wrap_html.rule_id), output) is not None
+ assert re.search(f'test\\.py.*{self.ruleset.python_wrap_html.rule_id}', output) is not None
# Find something that looks like pretty-printed JSON
json_match = re.search(r'\n\{.*\n\}', output, re.DOTALL)
@@ -207,7 +206,7 @@ class TestXSSLinter(TestCase):
output = self.out.getvalue()
# Assert file with rule is not output.
- assert re.search('test\\.py.*{}'.format(self.ruleset.python_wrap_html.rule_id), output) is None
+ assert re.search(f'test\\.py.*{self.ruleset.python_wrap_html.rule_id}', output) is None
# Assert file is output.
assert re.search('test\\.py', output) is not None
diff --git a/scripts/xsslint/tests/test_utils.py b/scripts/xsslint/tests/test_utils.py
index 95a468e1de..fe4afc570c 100644
--- a/scripts/xsslint/tests/test_utils.py
+++ b/scripts/xsslint/tests/test_utils.py
@@ -1,5 +1,3 @@
-
-
from unittest import TestCase
from ddt import data, ddt
diff --git a/scripts/xsslint/xsslint/django_linter.py b/scripts/xsslint/xsslint/django_linter.py
index 294b7274e2..ea909a6128 100644
--- a/scripts/xsslint/xsslint/django_linter.py
+++ b/scripts/xsslint/xsslint/django_linter.py
@@ -12,7 +12,7 @@ class TransExpression(Expression):
"""
def __init__(self, ruleset, results, *args, **kwargs):
- super(TransExpression, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.string_lines = StringLines(kwargs['template'])
self.ruleset = ruleset
self.results = results
@@ -202,7 +202,7 @@ class BlockTransExpression(Expression):
The expression handling blocktrans tag
"""
def __init__(self, ruleset, results, *args, **kwargs):
- super(BlockTransExpression, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.string_lines = StringLines(kwargs['template'])
self.ruleset = ruleset
self.results = results
@@ -319,7 +319,7 @@ class HtmlInterpolateExpression(Expression):
The expression handling interplate_html tag
"""
def __init__(self, ruleset, results, *args, **kwargs):
- super(HtmlInterpolateExpression, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.string_lines = StringLines(kwargs['template'])
self.ruleset = ruleset
self.results = results
diff --git a/scripts/xsslint/xsslint/linters.py b/scripts/xsslint/xsslint/linters.py
index 50ef4b0ba4..6274f9cec5 100644
--- a/scripts/xsslint/xsslint/linters.py
+++ b/scripts/xsslint/xsslint/linters.py
@@ -16,7 +16,7 @@ from xsslint.utils import Expression, ParseString, StringLines, is_skip_dir
from xsslint.django_linter import TransExpression, BlockTransExpression, HtmlInterpolateExpression
-class BaseLinter(object):
+class BaseLinter:
"""
BaseLinter provides some helper functions that are used by multiple linters.
@@ -53,7 +53,7 @@ class BaseLinter(object):
A string containing the files contents.
"""
- with io.open(file_full_path, 'r') as input_file:
+        with open(file_full_path) as input_file:
file_contents = input_file.read()
return file_contents
@@ -177,7 +177,7 @@ class BaseLinter(object):
"""
if self.LINE_COMMENT_DELIM is not None:
line_start_index = StringLines(template).index_to_line_start_index(start_index)
- uncommented_line_start_index_regex = re.compile(r"^(?!\s*{})".format(self.LINE_COMMENT_DELIM), re.MULTILINE)
+ uncommented_line_start_index_regex = re.compile(fr"^(?!\s*{self.LINE_COMMENT_DELIM})", re.MULTILINE)
# Finds the line start index of the first uncommented line, including the current line.
match = uncommented_line_start_index_regex.search(template, line_start_index)
if match is None:
@@ -207,7 +207,7 @@ class UnderscoreTemplateLinter(BaseLinter):
"""
Init method.
"""
- super(UnderscoreTemplateLinter, self).__init__()
+ super().__init__()
self._skip_underscore_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
@@ -335,7 +335,7 @@ class JavaScriptLinter(BaseLinter):
"""
Init method.
"""
- super(JavaScriptLinter, self).__init__()
+ super().__init__()
self.underscore_linter = underscore_linter
self.ruleset = self.ruleset + self.underscore_linter.ruleset
self._skip_javascript_dirs = javascript_skip_dirs or ()
@@ -468,7 +468,7 @@ class JavaScriptLinter(BaseLinter):
"""
# Ignores calls starting with "HtmlUtils.", because those are safe
- regex = re.compile(r"(?