BOM-1701: Ran pyupgrade on pavelib (#24230)

M. Zulqarnain authored 2020-06-17 17:09:38 +05:00, committed by GitHub
parent 1743e0e590
commit bc568309ec
37 changed files with 345 additions and 374 deletions
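What pyupgrade changed here, in brief: redundant u'' string prefixes are dropped (str is already Unicode on Python 3), six shims collapse to builtins (six.text_type -> str, isinstance(x, six.string_types) -> isinstance(x, str), six.assertCountEqual(self, ...) -> self.assertCountEqual(...)), super(Class, self) becomes zero-argument super(), io.open becomes the builtin open, and class Foo(object) loses its explicit base. In the hunks below, each old line appears directly above its rewritten replacement. A minimal before/after sketch (illustrative names, not code from this commit):

# before: Python 2/3 compatible via six
import six

class Greeter(object):
    def greet(self, name):
        if isinstance(name, six.string_types):
            return u"Hello, {}".format(six.text_type(name))

# after pyupgrade (equivalent on Python 3); the dead `import six` is also
# deleted in this commit, though pyupgrade itself leaves unused imports alone
class Greeter:
    def greet(self, name):
        if isinstance(name, str):
            return "Hello, {}".format(str(name))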

View File

@@ -11,7 +11,6 @@ from datetime import datetime
from functools import wraps
from threading import Timer
import six
from paver import tasks
from paver.easy import call_task, cmdopts, consume_args, needs, no_help, path, sh, task
from watchdog.events import PatternMatchingEventHandler
@@ -89,7 +88,7 @@ SASS_LOOKUP_DEPENDENCIES = {
COLLECTSTATIC_LOG_DIR_ARG = 'collect_log_dir'
# Webpack command
WEBPACK_COMMAND = u'STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} $(npm bin)/webpack {options}'
WEBPACK_COMMAND = 'STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} $(npm bin)/webpack {options}'
def get_sass_directories(system, theme_dir=None):
@@ -110,7 +109,7 @@ def get_sass_directories(system, theme_dir=None):
:param theme_dir: absolute path of theme for which to compile sass files.
"""
if system not in SYSTEMS:
raise ValueError(u"'system' must be one of ({allowed_values})".format(
raise ValueError("'system' must be one of ({allowed_values})".format(
allowed_values=', '.join(list(SYSTEMS.keys())))
)
system = SYSTEMS[system]
@@ -455,7 +454,7 @@ def compile_sass(options):
for system in systems:
for theme in themes:
print(u"Started compiling '{system}' Sass for '{theme}'.".format(system=system, theme=theme or 'system'))
print("Started compiling '{system}' Sass for '{theme}'.".format(system=system, theme=theme or 'system'))
# Compile sass files
is_successful = _compile_sass(
@@ -467,18 +466,18 @@ def compile_sass(options):
)
if is_successful:
print(u"Finished compiling '{system}' Sass for '{theme}'.".format(
print("Finished compiling '{system}' Sass for '{theme}'.".format(
system=system, theme=theme or 'system'
))
compilation_results['success' if is_successful else 'failure'].append(u'{system} sass for {theme}.'.format(
compilation_results['success' if is_successful else 'failure'].append('{system} sass for {theme}.'.format(
system=system, theme=theme or 'system',
))
print("\t\tFinished compiling Sass:")
if not dry_run:
for sass_dir, css_dir, duration in timing_info:
print(u">> {} -> {} in {}s".format(sass_dir, css_dir, duration))
print(">> {} -> {} in {}s".format(sass_dir, css_dir, duration))
if compilation_results['success']:
print("\033[92m\nSuccessful compilations:\n--- " + "\n--- ".join(compilation_results['success']) + "\n\033[00m")
@@ -524,7 +523,7 @@ def _compile_sass(system, theme, debug, force, timing_info):
lookup_paths = dirs['lookup_paths']
if not sass_source_dir.isdir():
print(u"\033[91m Sass dir '{dir}' does not exists, skipping sass compilation for '{theme}' \033[00m".format(
print("\033[91m Sass dir '{dir}' does not exists, skipping sass compilation for '{theme}' \033[00m".format(
dir=sass_dirs, theme=theme or system,
))
# theme doesn't override sass directory, so skip it
@@ -532,14 +531,14 @@ def _compile_sass(system, theme, debug, force, timing_info):
if force:
if dry_run:
tasks.environment.info(u"rm -rf {css_dir}/*.css".format(
tasks.environment.info("rm -rf {css_dir}/*.css".format(
css_dir=css_dir,
))
else:
sh(u"rm -rf {css_dir}/*.css".format(css_dir=css_dir))
sh("rm -rf {css_dir}/*.css".format(css_dir=css_dir))
if dry_run:
tasks.environment.info(u"libsass {sass_dir}".format(
tasks.environment.info("libsass {sass_dir}".format(
sass_dir=sass_source_dir,
))
else:
@@ -556,7 +555,7 @@ def _compile_sass(system, theme, debug, force, timing_info):
if should_generate_rtl_css_file(sass_file):
source_css_file = sass_file.replace(sass_source_dir, css_dir).replace('.scss', '.css')
target_css_file = source_css_file.replace('.css', '-rtl.css')
sh(u"rtlcss {source_file} {target_file}".format(
sh("rtlcss {source_file} {target_file}".format(
source_file=source_css_file,
target_file=target_css_file,
))
@@ -606,19 +605,19 @@ def process_npm_assets():
else:
vendor_dir = NPM_JS_VENDOR_DIRECTORY
if os.path.exists(library_path):
sh(u'/bin/cp -rf {library_path} {vendor_dir}'.format(
sh('/bin/cp -rf {library_path} {vendor_dir}'.format(
library_path=library_path,
vendor_dir=vendor_dir,
))
elif not skip_if_missing:
raise Exception(u'Missing vendor file {library_path}'.format(library_path=library_path))
raise Exception('Missing vendor file {library_path}'.format(library_path=library_path))
def copy_vendor_library_dir(library_dir, skip_if_missing=False):
"""
Copies all vendor libraries in directory to the shared vendor directory.
"""
library_dir_path = 'node_modules/{library_dir}'.format(library_dir=library_dir)
print(u'Copying vendor library dir: {}'.format(library_dir_path))
print('Copying vendor library dir: {}'.format(library_dir_path))
if os.path.exists(library_dir_path):
for dirpath, _, filenames in os.walk(library_dir_path):
for filename in filenames:
@@ -697,16 +696,16 @@ def collect_assets(systems, settings, **kwargs):
]
ignore_args = " ".join(
u'--ignore "{}"'.format(pattern) for pattern in ignore_patterns
'--ignore "{}"'.format(pattern) for pattern in ignore_patterns
)
for sys in systems:
collectstatic_stdout_str = _collect_assets_cmd(sys, **kwargs)
sh(django_cmd(sys, settings, u"collectstatic {ignore_args} --noinput {logfile_str}".format(
sh(django_cmd(sys, settings, "collectstatic {ignore_args} --noinput {logfile_str}".format(
ignore_args=ignore_args,
logfile_str=collectstatic_stdout_str
)))
print(u"\t\tFinished collecting {} assets.".format(sys))
print("\t\tFinished collecting {} assets.".format(sys))
def _collect_assets_cmd(system, **kwargs):
@@ -719,7 +718,7 @@ def _collect_assets_cmd(system, **kwargs):
if kwargs[COLLECTSTATIC_LOG_DIR_ARG] is None:
collectstatic_stdout_str = ""
else:
collectstatic_stdout_str = u"> {output_dir}/{sys}-collectstatic.log".format(
collectstatic_stdout_str = "> {output_dir}/{sys}-collectstatic.log".format(
output_dir=kwargs[COLLECTSTATIC_LOG_DIR_ARG],
sys=system
)
@@ -745,7 +744,7 @@ def execute_compile_sass(args):
django_cmd(
sys,
args.settings,
u"compile_sass {system} {options}".format(
"compile_sass {system} {options}".format(
system='cms' if sys == 'studio' else sys,
options=options,
),
@@ -767,14 +766,14 @@ def webpack(options):
static_root_lms = Env.get_django_setting("STATIC_ROOT", "lms", settings=settings)
static_root_cms = Env.get_django_setting("STATIC_ROOT", "cms", settings=settings)
config_path = Env.get_django_setting("WEBPACK_CONFIG_PATH", "lms", settings=settings)
environment = u'NODE_ENV={node_env} STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms}'.format(
environment = 'NODE_ENV={node_env} STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms}'.format(
node_env="development" if config_path == 'webpack.dev.config.js' else "production",
static_root_lms=static_root_lms,
static_root_cms=static_root_cms
)
sh(
cmd(
u'{environment} $(npm bin)/webpack --config={config_path}'.format(
'{environment} $(npm bin)/webpack --config={config_path}'.format(
environment=environment,
config_path=config_path
)
@@ -790,8 +789,8 @@ def execute_webpack_watch(settings=None):
# not all JS files, so we use its own watcher instead of subclassing
# from Watchdog like the other watchers do.
run_background_process(
u'STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} $(npm bin)/webpack {options}'.format(
options=u'--watch --config={config_path}'.format(
'STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} $(npm bin)/webpack {options}'.format(
options='--watch --config={config_path}'.format(
config_path=Env.get_django_setting("WEBPACK_CONFIG_PATH", "lms", settings=settings)
),
static_root_lms=Env.get_django_setting("STATIC_ROOT", "lms", settings=settings),
@@ -824,7 +823,7 @@ def listfy(data):
data: data structure to be converted.
"""
if isinstance(data, six.string_types):
if isinstance(data, str):
data = data.split(',')
elif not isinstance(data, list):
data = [data]
@@ -854,7 +853,7 @@ def watch_assets(options):
themes = get_parsed_option(options, 'themes')
theme_dirs = get_parsed_option(options, 'theme_dirs', [])
default_wait = [six.text_type(DEFAULT_OBSERVER_TIMEOUT)]
default_wait = [str(DEFAULT_OBSERVER_TIMEOUT)]
wait = float(get_parsed_option(options, 'wait', default_wait)[0])
if not theme_dirs and themes:
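Most of the assets.py churn above is u-prefix removal; the behavioral shims that disappear are six.string_types and six.text_type, both of which collapse to str on Python 3. A sketch of the rewritten listfy helper shown above (the return is assumed; the diff elides it):

def listfy(data):
    """Coerce a comma-separated string, scalar, or list into a list."""
    if isinstance(data, str):        # was: isinstance(data, six.string_types)
        data = data.split(',')
    elif not isinstance(data, list):
        data = [data]                # wrap a bare scalar
    return data

# listfy('lms,cms') -> ['lms', 'cms']; listfy(3) -> [3]; listfy(['a']) -> ['a']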

View File

@@ -109,7 +109,7 @@ def run_bokchoy(options, passthrough_options):
test_suite = BokChoyTestSuite('bok-choy', passthrough_options=passthrough_options, **options)
msg = colorize(
'green',
u'Running tests using {default_store} modulestore.'.format(
'Running tests using {default_store} modulestore.'.format(
default_store=test_suite.default_store,
)
)
@@ -126,14 +126,14 @@ def parse_coverage(report_dir, coveragerc):
msg = colorize('green', "Combining coverage reports")
print(msg)
sh(u"coverage combine --rcfile={}".format(coveragerc))
sh("coverage combine --rcfile={}".format(coveragerc))
msg = colorize('green', "Generating coverage reports")
print(msg)
sh(u"coverage html --rcfile={}".format(coveragerc))
sh(u"coverage xml --rcfile={}".format(coveragerc))
sh(u"coverage report --rcfile={}".format(coveragerc))
sh("coverage html --rcfile={}".format(coveragerc))
sh("coverage xml --rcfile={}".format(coveragerc))
sh("coverage report --rcfile={}".format(coveragerc))
@task
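parse_coverage above only loses its u prefixes; the command sequence is unchanged. Worth noting: pyupgrade run with --py36-plus would go one step further and turn these simple .format() calls into f-strings. A hypothetical further rewrite, not part of this commit:

# hypothetical `pyupgrade --py36-plus` output for parse_coverage's shell calls
sh(f"coverage combine --rcfile={coveragerc}")
sh(f"coverage html --rcfile={coveragerc}")
sh(f"coverage xml --rcfile={coveragerc}")
sh(f"coverage report --rcfile={coveragerc}")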

View File

@@ -108,7 +108,7 @@ def update_local_bokchoy_db_from_s3(options):
reset_test_db(BOKCHOY_DB_FILES, update_cache_files=False, use_existing_db=True)
else:
msg = u"{} {} {}".format(
msg = "{} {} {}".format(
"Did not find updated bokchoy db files at S3.",
"Loading the bokchoy db files from disk",
"and running migrations."
@@ -121,7 +121,7 @@ def update_local_bokchoy_db_from_s3(options):
if not is_fingerprint_in_bucket(fingerprint, CACHE_BUCKET_NAME):
upload_db_cache_to_s3(fingerprint, BOKCHOY_DB_FILES, CACHE_BUCKET_NAME)
else:
msg = u"{} {}. {}".format(
msg = "{} {}. {}".format(
"Found a matching fingerprint in bucket ",
CACHE_BUCKET_NAME,
"Not pushing to s3"

View File

@@ -40,8 +40,8 @@ def doc_path(options, allow_default=True):
if doc_type == 'default' and not allow_default:
print(
u"You must specify a documentation type using '--type'. "
u"Valid options are: {options}".format(
"You must specify a documentation type using '--type'. "
"Valid options are: {options}".format(
options=valid_doc_types()
)
)
@@ -49,8 +49,8 @@ def doc_path(options, allow_default=True):
if path is None:
print(
u"Invalid documentation type '{doc_type}'. "
u"Valid options are: {options}".format(
"Invalid documentation type '{doc_type}'. "
"Valid options are: {options}".format(
doc_type=doc_type, options=valid_doc_types()
)
)
@@ -73,7 +73,7 @@ def build_docs(options):
"""
verbose = getattr(options, 'verbose', False)
cmd = u"cd {dir}; make html quiet={quiet}".format(
cmd = "cd {dir}; make html quiet={quiet}".format(
dir=doc_path(options),
quiet="false" if verbose else "true"
)

View File

@@ -9,7 +9,6 @@ import sys
from path import Path as path
from paver.easy import cmdopts, needs, sh, task
from six.moves import input
from .utils.cmd import django_cmd
from .utils.envs import Env
@@ -139,9 +138,9 @@ def i18n_validate_transifex_config():
if not config.isfile or config.getsize == 0:
msg = colorize(
'red',
u"Cannot connect to Transifex, config file is missing"
u" or empty: {config} \nSee "
u"http://help.transifex.com/features/client/#transifexrc \n".format(
"Cannot connect to Transifex, config file is missing"
" or empty: {config} \nSee "
"http://help.transifex.com/features/client/#transifexrc \n".format(
config=config,
)
)

View File

@@ -50,7 +50,7 @@ def test_js(options):
if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS):
sys.stderr.write(
u"Unknown test suite. Please choose from ({suites})\n".format(
"Unknown test suite. Please choose from ({suites})\n".format(
suites=", ".join(Env.JS_TEST_ID_KEYS)
)
)

View File

@@ -6,7 +6,6 @@ from unittest import TestCase
import ddt
import paver.tasks
import six
from mock import patch
from paver.easy import call_task, path
from watchdog.observers import Observer
@@ -58,27 +57,27 @@ class TestPaverAssetTasks(PaverTestCase):
if "lms" in system:
if force:
expected_messages.append(u'rm -rf lms/static/css/*.css')
expected_messages.append(u'libsass lms/static/sass')
expected_messages.append('rm -rf lms/static/css/*.css')
expected_messages.append('libsass lms/static/sass')
expected_messages.append(
u'rtlcss lms/static/css/bootstrap/lms-main.css lms/static/css/bootstrap/lms-main-rtl.css'
'rtlcss lms/static/css/bootstrap/lms-main.css lms/static/css/bootstrap/lms-main-rtl.css'
)
expected_messages.append(
u'rtlcss lms/static/css/discussion/lms-discussion-bootstrap.css'
u' lms/static/css/discussion/lms-discussion-bootstrap-rtl.css'
'rtlcss lms/static/css/discussion/lms-discussion-bootstrap.css'
' lms/static/css/discussion/lms-discussion-bootstrap-rtl.css'
)
if force:
expected_messages.append(u'rm -rf lms/static/certificates/css/*.css')
expected_messages.append(u'libsass lms/static/certificates/sass')
expected_messages.append('rm -rf lms/static/certificates/css/*.css')
expected_messages.append('libsass lms/static/certificates/sass')
if "studio" in system:
if force:
expected_messages.append(u'rm -rf cms/static/css/*.css')
expected_messages.append(u'libsass cms/static/sass')
expected_messages.append('rm -rf cms/static/css/*.css')
expected_messages.append('libsass cms/static/sass')
expected_messages.append(
u'rtlcss cms/static/css/bootstrap/studio-main.css cms/static/css/bootstrap/studio-main-rtl.css'
'rtlcss cms/static/css/bootstrap/studio-main.css cms/static/css/bootstrap/studio-main-rtl.css'
)
six.assertCountEqual(self, self.task_messages, expected_messages)
self.assertCountEqual(self.task_messages, expected_messages)
@ddt.ddt
@@ -125,77 +124,77 @@ class TestPaverThemeAssetTasks(PaverTestCase):
)
expected_messages = []
if force:
expected_messages.append(u'rm -rf common/static/css/*.css')
expected_messages.append(u'libsass common/static/sass')
expected_messages.append('rm -rf common/static/css/*.css')
expected_messages.append('libsass common/static/sass')
if 'lms' in system:
expected_messages.append(u'mkdir_p ' + repr(TEST_THEME_DIR / 'lms/static/css'))
expected_messages.append('mkdir_p ' + repr(TEST_THEME_DIR / 'lms/static/css'))
if force:
expected_messages.append(
u'rm -rf {test_theme_dir}/lms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
'rm -rf {test_theme_dir}/lms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
)
expected_messages.append("libsass lms/static/sass")
expected_messages.append(
u'rtlcss {test_theme_dir}/lms/static/css/bootstrap/lms-main.css'
u' {test_theme_dir}/lms/static/css/bootstrap/lms-main-rtl.css'.format(
'rtlcss {test_theme_dir}/lms/static/css/bootstrap/lms-main.css'
' {test_theme_dir}/lms/static/css/bootstrap/lms-main-rtl.css'.format(
test_theme_dir=str(TEST_THEME_DIR),
)
)
expected_messages.append(
u'rtlcss {test_theme_dir}/lms/static/css/discussion/lms-discussion-bootstrap.css'
u' {test_theme_dir}/lms/static/css/discussion/lms-discussion-bootstrap-rtl.css'.format(
'rtlcss {test_theme_dir}/lms/static/css/discussion/lms-discussion-bootstrap.css'
' {test_theme_dir}/lms/static/css/discussion/lms-discussion-bootstrap-rtl.css'.format(
test_theme_dir=str(TEST_THEME_DIR),
)
)
if force:
expected_messages.append(
u'rm -rf {test_theme_dir}/lms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
'rm -rf {test_theme_dir}/lms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
)
expected_messages.append(
u'libsass {test_theme_dir}/lms/static/sass'.format(test_theme_dir=str(TEST_THEME_DIR))
'libsass {test_theme_dir}/lms/static/sass'.format(test_theme_dir=str(TEST_THEME_DIR))
)
if force:
expected_messages.append(u'rm -rf lms/static/css/*.css')
expected_messages.append(u'libsass lms/static/sass')
expected_messages.append('rm -rf lms/static/css/*.css')
expected_messages.append('libsass lms/static/sass')
expected_messages.append(
u'rtlcss lms/static/css/bootstrap/lms-main.css lms/static/css/bootstrap/lms-main-rtl.css'
'rtlcss lms/static/css/bootstrap/lms-main.css lms/static/css/bootstrap/lms-main-rtl.css'
)
expected_messages.append(
u'rtlcss lms/static/css/discussion/lms-discussion-bootstrap.css'
u' lms/static/css/discussion/lms-discussion-bootstrap-rtl.css'
'rtlcss lms/static/css/discussion/lms-discussion-bootstrap.css'
' lms/static/css/discussion/lms-discussion-bootstrap-rtl.css'
)
if force:
expected_messages.append(u'rm -rf lms/static/certificates/css/*.css')
expected_messages.append(u'libsass lms/static/certificates/sass')
expected_messages.append('rm -rf lms/static/certificates/css/*.css')
expected_messages.append('libsass lms/static/certificates/sass')
if "studio" in system:
expected_messages.append(u'mkdir_p ' + repr(TEST_THEME_DIR / 'cms/static/css'))
expected_messages.append('mkdir_p ' + repr(TEST_THEME_DIR / 'cms/static/css'))
if force:
expected_messages.append(
u'rm -rf {test_theme_dir}/cms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
'rm -rf {test_theme_dir}/cms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
)
expected_messages.append(u'libsass cms/static/sass')
expected_messages.append('libsass cms/static/sass')
expected_messages.append(
u'rtlcss {test_theme_dir}/cms/static/css/bootstrap/studio-main.css'
u' {test_theme_dir}/cms/static/css/bootstrap/studio-main-rtl.css'.format(
'rtlcss {test_theme_dir}/cms/static/css/bootstrap/studio-main.css'
' {test_theme_dir}/cms/static/css/bootstrap/studio-main-rtl.css'.format(
test_theme_dir=str(TEST_THEME_DIR),
)
)
if force:
expected_messages.append(
u'rm -rf {test_theme_dir}/cms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
'rm -rf {test_theme_dir}/cms/static/css/*.css'.format(test_theme_dir=str(TEST_THEME_DIR))
)
expected_messages.append(
u'libsass {test_theme_dir}/cms/static/sass'.format(test_theme_dir=str(TEST_THEME_DIR))
'libsass {test_theme_dir}/cms/static/sass'.format(test_theme_dir=str(TEST_THEME_DIR))
)
if force:
expected_messages.append(u'rm -rf cms/static/css/*.css')
expected_messages.append(u'libsass cms/static/sass')
expected_messages.append('rm -rf cms/static/css/*.css')
expected_messages.append('libsass cms/static/sass')
expected_messages.append(
u'rtlcss cms/static/css/bootstrap/studio-main.css cms/static/css/bootstrap/studio-main-rtl.css'
'rtlcss cms/static/css/bootstrap/studio-main.css cms/static/css/bootstrap/studio-main-rtl.css'
)
six.assertCountEqual(self, self.task_messages, expected_messages)
self.assertCountEqual(self.task_messages, expected_messages)
class TestPaverWatchAssetTasks(TestCase):
@@ -221,11 +220,11 @@ class TestPaverWatchAssetTasks(TestCase):
if 'pavelib.assets.watch_assets' in paver.tasks.environment.options:
del paver.tasks.environment.options['pavelib.assets.watch_assets']
super(TestPaverWatchAssetTasks, self).setUp()
super().setUp()
def tearDown(self):
self.expected_sass_directories = []
super(TestPaverWatchAssetTasks, self).tearDown()
super().tearDown()
def test_watch_assets(self):
"""
@@ -245,7 +244,7 @@ class TestPaverWatchAssetTasks(TestCase):
self.assertIsInstance(sass_watcher_args[0], Observer)
self.assertIsInstance(sass_watcher_args[1], list)
six.assertCountEqual(self, sass_watcher_args[1], self.expected_sass_directories)
self.assertCountEqual(sass_watcher_args[1], self.expected_sass_directories)
def test_watch_theme_assets(self):
"""
@@ -275,7 +274,7 @@ class TestPaverWatchAssetTasks(TestCase):
sass_watcher_args = mock_register.call_args_list[0][0]
self.assertIsInstance(sass_watcher_args[0], Observer)
self.assertIsInstance(sass_watcher_args[1], list)
six.assertCountEqual(self, sass_watcher_args[1], self.expected_sass_directories)
self.assertCountEqual(sass_watcher_args[1], self.expected_sass_directories)
@ddt.ddt
@@ -347,7 +346,7 @@ class TestCollectAssets(PaverTestCase):
"""
for i, sys in enumerate(systems):
msg = self.task_messages[i]
self.assertTrue(msg.startswith(u'python manage.py {}'.format(sys)))
self.assertTrue(msg.startswith('python manage.py {}'.format(sys)))
self.assertIn(' collectstatic ', msg)
self.assertIn('--settings={}'.format(Env.DEVSTACK_SETTINGS), msg)
self.assertTrue(msg.endswith(' {}'.format(log_location)))
@@ -374,7 +373,7 @@ class TestUpdateAssetsTask(PaverTestCase):
call_task('pavelib.assets.update_assets', args=cmd_args)
self.assertTrue(
self._is_substring_in_list(self.task_messages, expected_substring),
msg=u"{substring} not found in messages".format(substring=expected_substring)
msg="{substring} not found in messages".format(substring=expected_substring)
)
def _is_substring_in_list(self, messages_list, expected_substring):
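six.assertCountEqual(self, a, b) existed because Python 2's TestCase spelled this assertion assertItemsEqual; Python 3's TestCase has assertCountEqual natively, so the wrapper drops out everywhere above. A self-contained illustration with assumed data:

import unittest

class OrderInsensitiveTest(unittest.TestCase):
    def test_messages_match_in_any_order(self):
        expected = ['libsass lms/static/sass', 'libsass cms/static/sass']
        actual = ['libsass cms/static/sass', 'libsass lms/static/sass']
        # was: six.assertCountEqual(self, actual, expected)
        self.assertCountEqual(actual, expected)  # same elements, order ignored

if __name__ == '__main__':
    unittest.main()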

View File

@@ -43,7 +43,7 @@ class TestPaverDbUtils(TestCase):
extracted_file = os.path.join(output_dir, 'test.txt')
assert os.path.isfile(extracted_file)
with open(extracted_file, 'r') as test_file:
with open(extracted_file) as test_file:
data = test_file.read()
assert data == 'Test file content'
@@ -65,7 +65,7 @@ class TestPaverDatabaseTasks(PaverTestCase):
"""
def setUp(self):
super(TestPaverDatabaseTasks, self).setUp()
super().setUp()
# This value is the actual sha1 fingerprint calculated for the dummy
# files used in these tests
self.expected_fingerprint = 'ccaa8d8dcc7d030cd6a6768db81f90d0ef976c3d'
@@ -94,8 +94,8 @@ class TestPaverDatabaseTasks(PaverTestCase):
# Make sure that the local cache files are used - NOT downloaded from s3
self.assertFalse(_mock_get_file.called)
calls = [
call(u'{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call(u'{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
]
_mock_sh.assert_has_calls(calls)
@@ -137,8 +137,8 @@ class TestPaverDatabaseTasks(PaverTestCase):
)
calls = [
call(u'{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call(u'{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
]
_mock_sh.assert_has_calls(calls)
@@ -165,8 +165,8 @@ class TestPaverDatabaseTasks(PaverTestCase):
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
calls = [
call(u'{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call(u'{}/scripts/reset-test-db.sh --rebuild_cache --use-existing-db'.format(Env.REPO_ROOT))
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --rebuild_cache --use-existing-db'.format(Env.REPO_ROOT))
]
_mock_sh.assert_has_calls(calls)

View File

@@ -17,7 +17,7 @@ class TestPaverESLint(unittest.TestCase):
"""
def setUp(self):
super(TestPaverESLint, self).setUp()
super().setUp()
# Mock the paver @needs decorator
self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start()

View File

@@ -15,7 +15,6 @@ from unittest import TestCase
from i18n import config, dummy, extract, generate
from polib import pofile
from pytz import UTC
from six.moves import range
class TestGenerate(TestCase):
@@ -26,7 +25,7 @@ class TestGenerate(TestCase):
@classmethod
def setUpClass(cls):
super(TestGenerate, cls).setUpClass()
super().setUpClass()
sys.stderr.write(
"\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
@@ -50,10 +49,10 @@ class TestGenerate(TestCase):
sys.stderr.flush()
returncode = subprocess.call(cmd, shell=True)
assert returncode == 0
super(TestGenerate, cls).tearDownClass()
super().tearDownClass()
def setUp(self):
super(TestGenerate, self).setUp()
super().setUp()
self.configuration = config.Configuration()
@@ -87,11 +86,11 @@ class TestGenerate(TestCase):
mofile = filename + '.mo'
path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
exists = os.path.exists(path)
self.assertTrue(exists, msg=u'Missing file in locale %s: %s' % (locale, mofile))
self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile))
self.assertGreaterEqual(
datetime.fromtimestamp(os.path.getmtime(path), UTC),
self.start_time,
msg=u'File not recently modified: %s' % path
msg='File not recently modified: %s' % path
)
# Segmenting means that the merge headers don't work they way they
# used to, so don't make this check for now. I'm not sure if we'll
@@ -116,7 +115,7 @@ class TestGenerate(TestCase):
self.assertEqual(
len(match),
3,
msg=u"Found %s (should be 3) merge comments in the header for %s" % (len(match), path)
msg="Found %s (should be 3) merge comments in the header for %s" % (len(match), path)
)
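The super(TestGenerate, cls) and super(TestGenerate, self) forms above are the Python 2 spelling; since Python 3, zero-argument super() picks up the enclosing class and the first argument automatically, in classmethods as well as instance methods. A sketch:

class Base:
    @classmethod
    def setUpClass(cls):
        print('base setUpClass ran for', cls.__name__)

class Derived(Base):
    @classmethod
    def setUpClass(cls):
        # was: super(Derived, cls).setUpClass()
        super().setUpClass()  # the compiler supplies __class__ and cls

Derived.setUpClass()  # -> base setUpClass ran for Derived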

View File

@@ -141,7 +141,7 @@ class TestI18nDummy(PaverTestCase):
Test the Paver i18n_dummy task.
"""
def setUp(self):
super(TestI18nDummy, self).setUp()
super().setUp()
# Mock the paver @needs decorator for i18n_extract
self._mock_paver_needs = patch.object(pavelib.i18n.i18n_extract, 'needs').start()
@@ -160,9 +160,9 @@ class TestI18nDummy(PaverTestCase):
self.assertEqual(
self.task_messages,
[
u'i18n_tool extract',
u'i18n_tool dummy',
u'i18n_tool generate',
'i18n_tool extract',
'i18n_tool dummy',
'i18n_tool generate',
]
)
@@ -172,7 +172,7 @@ class TestI18nCompileJS(PaverTestCase):
Test the Paver i18n_compilejs task.
"""
def setUp(self):
super(TestI18nCompileJS, self).setUp()
super().setUp()
# Mock the paver @needs decorator for i18n_extract
self._mock_paver_needs = patch.object(pavelib.i18n.i18n_extract, 'needs').start()
@@ -193,7 +193,7 @@ class TestI18nCompileJS(PaverTestCase):
self.assertEqual(
self.task_messages,
[
u'python manage.py lms --settings={} compilejsi18n'.format(Env.TEST_SETTINGS),
u'python manage.py cms --settings={} compilejsi18n'.format(Env.TEST_SETTINGS),
'python manage.py lms --settings={} compilejsi18n'.format(Env.TEST_SETTINGS),
'python manage.py cms --settings={} compilejsi18n'.format(Env.TEST_SETTINGS),
]
)

View File

@@ -17,30 +17,30 @@ class TestPaverJavaScriptTestTasks(PaverTestCase):
Test the Paver JavaScript testing tasks.
"""
EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = u'find {platform_root}/reports/javascript -type f -delete'
EXPECTED_INSTALL_NPM_ASSETS_COMMAND = u'install npm_assets'
EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete'
EXPECTED_INSTALL_NPM_ASSETS_COMMAND = 'install npm_assets'
EXPECTED_KARMA_OPTIONS = (
u"{config_file} "
u"--single-run={single_run} "
u"--capture-timeout=60000 "
u"--junitreportpath="
u"{platform_root}/reports/javascript/javascript_xunit-{suite}.xml "
u"--browsers={browser}"
"{config_file} "
"--single-run={single_run} "
"--capture-timeout=60000 "
"--junitreportpath="
"{platform_root}/reports/javascript/javascript_xunit-{suite}.xml "
"--browsers={browser}"
)
EXPECTED_COVERAGE_OPTIONS = (
u' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml'
' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml'
)
EXPECTED_COMMANDS = [
u"make report_dir",
u'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads',
u"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;",
u'rm -rf test_root/log/auto_screenshots/*',
u"rm -rf /tmp/mako_[cl]ms",
"make report_dir",
'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads',
"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;",
'rm -rf test_root/log/auto_screenshots/*',
"rm -rf /tmp/mako_[cl]ms",
]
def setUp(self):
super(TestPaverJavaScriptTestTasks, self).setUp()
super().setUp()
# Mock the paver @needs decorator
self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start()
@@ -119,7 +119,7 @@ class TestPaverJavaScriptTestTasks(PaverTestCase):
expected_messages.append(self.EXPECTED_INSTALL_NPM_ASSETS_COMMAND)
command_template = (
u'nodejs --max_old_space_size=4096 node_modules/.bin/karma start {options}'
'nodejs --max_old_space_size=4096 node_modules/.bin/karma start {options}'
)
for suite in suites:
@@ -141,9 +141,9 @@ class TestPaverJavaScriptTestTasks(PaverTestCase):
suite=suite
)
if port:
expected_test_tool_command += u" --port={port}".format(port=port)
expected_test_tool_command += " --port={port}".format(port=port)
else:
expected_test_tool_command = u'jest'
expected_test_tool_command = 'jest'
expected_messages.append(expected_test_tool_command)
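EXPECTED_KARMA_OPTIONS and EXPECTED_COMMANDS rely on implicit concatenation of adjacent string literals, so only the prefixes change, never the assembled value; on Python 3 a u'' literal and a bare literal produce the identical str. A quick check (illustrative):

options = (
    "{config_file} "
    "--single-run={single_run} "
    "--capture-timeout=60000"
)
assert options == "{config_file} --single-run={single_run} --capture-timeout=60000"
assert u'jest' == 'jest' and type(u'jest') is str  # the u prefix is a no-op on Python 3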

View File

@@ -6,12 +6,8 @@ Run just this test with: paver test_lib -t pavelib/paver_tests/test_paver_bok_ch
import os
import unittest
import six
if six.PY2:
from test.test_support import EnvironmentVarGuard
else:
from test.support import EnvironmentVarGuard
from test.support import EnvironmentVarGuard
from pavelib.utils.test.suites import BokChoyTestSuite
@@ -53,7 +49,7 @@ class TestPaverBokChoyCmd(unittest.TestCase):
return expected_statement
def setUp(self):
super(TestPaverBokChoyCmd, self).setUp()
super().setUp()
self.shard = os.environ.get('SHARD')
self.env_var_override = EnvironmentVarGuard()
@@ -147,7 +143,7 @@ class TestPaverBokChoyCmd(unittest.TestCase):
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
u"-n {}".format(process_count),
"-n {}".format(process_count),
"--color=no",
"--verbose",
]
@@ -164,7 +160,7 @@ class TestPaverBokChoyCmd(unittest.TestCase):
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
u"-n {}".format(process_count),
"-n {}".format(process_count),
"--color=no",
"--verbose",
]
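With Python 2 gone, the six.PY2 branch collapses to the single Python 3 import kept above. For reference, a usage sketch of EnvironmentVarGuard from CPython's test.support (note it moved to test.support.os_helper in Python 3.10+):

import os
from test.support import EnvironmentVarGuard  # was gated: test.test_support on six.PY2

env = EnvironmentVarGuard()
env.set('SHARD', '2')    # overrides os.environ now, remembering the original value
with env:
    assert os.environ['SHARD'] == '2'
# the original value (or absence) of SHARD is restored when the block exits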

View File

@@ -9,7 +9,6 @@ import tempfile
import textwrap
import unittest
import six
from ddt import data, ddt, file_data, unpack
from mock import MagicMock, mock_open, patch
from path import Path as path
@@ -18,10 +17,7 @@ from paver.easy import BuildFailure
import pavelib.quality
from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint
if six.PY2:
OPEN_BUILTIN = '__builtin__.open'
else:
OPEN_BUILTIN = 'builtins.open'
OPEN_BUILTIN = 'builtins.open'
@ddt
@@ -30,7 +26,7 @@ class TestPaverQualityViolations(unittest.TestCase):
For testing the paver violations-counting tasks
"""
def setUp(self):
super(TestPaverQualityViolations, self).setUp()
super().setUp()
self.f = tempfile.NamedTemporaryFile(delete=False)
self.f.close()
self.addCleanup(os.remove, self.f.name)
@@ -84,7 +80,7 @@ class TestPaverQualityOptions(unittest.TestCase):
)
@unpack
def test_pylint_parser_other_string(self, options, expected_values):
class PaverOptions(object):
class PaverOptions:
"""
Simple options class to mimick paver's Namespace object.
"""
@@ -102,7 +98,7 @@ class TestPaverReportViolationsCounts(unittest.TestCase):
"""
def setUp(self):
super(TestPaverReportViolationsCounts, self).setUp()
super().setUp()
# Temporary file infrastructure
self.f = tempfile.NamedTemporaryFile(delete=False)
@@ -258,7 +254,7 @@ class TestPrepareReportDir(unittest.TestCase):
"""
def setUp(self):
super(TestPrepareReportDir, self).setUp()
super().setUp()
self.test_dir = tempfile.mkdtemp()
self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir)
self.addCleanup(os.removedirs, self.test_dir)
@@ -280,7 +276,7 @@ class TestPaverRunQuality(PaverTestCase):
"""
def setUp(self):
super(TestPaverRunQuality, self).setUp()
super().setUp()
# mock the @needs decorator to skip it
patcher = patch('pavelib.quality.sh')
@@ -365,7 +361,7 @@ class TestPaverRunDiffQuality(PaverTestCase):
cases weren't tested properly.
"""
def setUp(self):
super(TestPaverRunDiffQuality, self).setUp()
super().setUp()
# mock the @needs decorator to skip it
patcher = patch('pavelib.quality.sh')
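OPEN_BUILTIN collapses to 'builtins.open' because Python 3 renamed the __builtin__ module to builtins. A minimal sketch of how such a patch target is typically used with mock_open (hypothetical function under test; these tests import the mock backport, but unittest.mock behaves the same):

from unittest.mock import mock_open, patch

OPEN_BUILTIN = 'builtins.open'  # was '__builtin__.open' on Python 2

def count_report_lines(path):
    with open(path) as report:
        return len(report.readlines())

with patch(OPEN_BUILTIN, mock_open(read_data='err1\nerr2\n')):
    assert count_report_lines('pylint.report') == 2  # the file never touches disk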

View File

@@ -2,13 +2,10 @@
Tests for Paver's PII checker task.
"""
import io
import shutil
import tempfile
import unittest
import six
from mock import patch
from path import Path as path
from paver.easy import call_task, BuildFailure
@@ -22,7 +19,7 @@ class TestPaverPIICheck(unittest.TestCase):
For testing the paver run_pii_check task
"""
def setUp(self):
super(TestPaverPIICheck, self).setUp()
super().setUp()
self.report_dir = path(tempfile.mkdtemp())
self.addCleanup(shutil.rmtree, self.report_dir)
@@ -39,14 +36,14 @@ class TestPaverPIICheck(unittest.TestCase):
lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n'])
mock_needs.return_value = 0
call_task('pavelib.quality.run_pii_check', options={"report_dir": six.text_type(self.report_dir)})
mock_calls = [six.text_type(call) for call in mock_paver_sh.mock_calls]
call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
assert len(mock_calls) == 2
assert any(['lms.envs.test' in call for call in mock_calls])
assert any(['cms.envs.test' in call for call in mock_calls])
assert all([six.text_type(self.report_dir) in call for call in mock_calls])
assert all([str(self.report_dir) in call for call in mock_calls])
metrics_file = Env.METRICS_DIR / 'pii'
assert io.open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'
assert open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'
@patch.object(pavelib.quality.run_pii_check, 'needs')
@patch('pavelib.quality.sh')
@@ -65,12 +62,12 @@ class TestPaverPIICheck(unittest.TestCase):
mock_needs.return_value = 0
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_pii_check', options={"report_dir": six.text_type(self.report_dir)})
call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
self.assertRaises(BuildFailure)
mock_calls = [six.text_type(call) for call in mock_paver_sh.mock_calls]
mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
assert len(mock_calls) == 2
assert any(['lms.envs.test' in call for call in mock_calls])
assert any(['cms.envs.test' in call for call in mock_calls])
assert all([six.text_type(self.report_dir) in call for call in mock_calls])
assert all([str(self.report_dir) in call for call in mock_calls])
metrics_file = Env.METRICS_DIR / 'pii'
assert io.open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'
assert open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'
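Two pure renames in this file: io.open(metrics_file, 'r') becomes open(metrics_file, 'r') (on Python 3, io.open is the builtin open), and six.text_type(self.report_dir) becomes str(self.report_dir). Demonstration:

import io

assert io.open is open  # io.open is literally the builtin open on Python 3

# 'r' is also the default mode, so open(path, 'r') and open(path) are equivalent
with open(__file__) as handle:
    first_line = handle.readline()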

View File

@@ -31,7 +31,7 @@ class TestPaverPrereqInstall(unittest.TestCase):
self.assertEqual(
pavelib.prereqs.no_prereq_install(),
expected_val,
u'NO_PREREQ_INSTALL is set to {}, but we read it as {}'.format(
'NO_PREREQ_INSTALL is set to {}, but we read it as {}'.format(
set_val, expected_val),
)
@@ -82,7 +82,7 @@ class TestPaverNodeInstall(PaverTestCase):
"""
def setUp(self):
super(TestPaverNodeInstall, self).setUp()
super().setUp()
# Ensure prereqs will be run
os.environ['NO_PREREQ_INSTALL'] = 'false'

View File

@@ -8,49 +8,49 @@ from ..utils.envs import Env
from .utils import PaverTestCase
EXPECTED_SASS_COMMAND = (
u"libsass {sass_directory}"
"libsass {sass_directory}"
)
EXPECTED_COMMON_SASS_DIRECTORIES = [
u"common/static/sass",
"common/static/sass",
]
EXPECTED_LMS_SASS_DIRECTORIES = [
u"lms/static/sass",
u"lms/static/certificates/sass",
"lms/static/sass",
"lms/static/certificates/sass",
]
EXPECTED_CMS_SASS_DIRECTORIES = [
u"cms/static/sass",
"cms/static/sass",
]
EXPECTED_LMS_SASS_COMMAND = [
u"python manage.py lms --settings={asset_settings} compile_sass lms ",
"python manage.py lms --settings={asset_settings} compile_sass lms ",
]
EXPECTED_CMS_SASS_COMMAND = [
u"python manage.py cms --settings={asset_settings} compile_sass cms ",
"python manage.py cms --settings={asset_settings} compile_sass cms ",
]
EXPECTED_COLLECT_STATIC_COMMAND = (
u'python manage.py {system} --settings={asset_settings} collectstatic '
u'--ignore "fixtures" --ignore "karma_*.js" --ignore "spec" '
u'--ignore "spec_helpers" --ignore "spec-helpers" --ignore "xmodule_js" '
u'--ignore "geoip" --ignore "sass" '
u'--noinput {log_string}'
'python manage.py {system} --settings={asset_settings} collectstatic '
'--ignore "fixtures" --ignore "karma_*.js" --ignore "spec" '
'--ignore "spec_helpers" --ignore "spec-helpers" --ignore "xmodule_js" '
'--ignore "geoip" --ignore "sass" '
'--noinput {log_string}'
)
EXPECTED_CELERY_COMMAND = (
u"DJANGO_SETTINGS_MODULE=lms.envs.{settings} celery worker "
u"--app=lms.celery:APP --beat --loglevel=INFO --pythonpath=."
"DJANGO_SETTINGS_MODULE=lms.envs.{settings} celery worker "
"--app=lms.celery:APP --beat --loglevel=INFO --pythonpath=."
)
EXPECTED_RUN_SERVER_COMMAND = (
u"python manage.py {system} --settings={settings} runserver --traceback --pythonpath=. 0.0.0.0:{port}"
"python manage.py {system} --settings={settings} runserver --traceback --pythonpath=. 0.0.0.0:{port}"
)
EXPECTED_INDEX_COURSE_COMMAND = (
u"python manage.py {system} --settings={settings} reindex_course --setup"
"python manage.py {system} --settings={settings} reindex_course --setup"
)
EXPECTED_PRINT_SETTINGS_COMMAND = [
u"python manage.py lms --settings={settings} print_setting STATIC_ROOT 2>{log_file}",
u"python manage.py cms --settings={settings} print_setting STATIC_ROOT 2>{log_file}",
u"python manage.py lms --settings={settings} print_setting WEBPACK_CONFIG_PATH 2>{log_file}"
"python manage.py lms --settings={settings} print_setting STATIC_ROOT 2>{log_file}",
"python manage.py cms --settings={settings} print_setting STATIC_ROOT 2>{log_file}",
"python manage.py lms --settings={settings} print_setting WEBPACK_CONFIG_PATH 2>{log_file}"
]
EXPECTED_WEBPACK_COMMAND = (
u"NODE_ENV={node_env} STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} "
u"$(npm bin)/webpack --config={webpack_config_path}"
"NODE_ENV={node_env} STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} "
"$(npm bin)/webpack --config={webpack_config_path}"
)
@@ -169,7 +169,7 @@ class TestPaverServerTasks(PaverTestCase):
settings = options.get("settings", Env.DEVSTACK_SETTINGS)
call_task("pavelib.servers.update_db", options=options)
# pylint: disable=line-too-long
db_command = u"NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{server} --traceback --pythonpath=. "
db_command = "NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{server} --traceback --pythonpath=. "
self.assertEqual(
self.task_messages,
[
@@ -194,8 +194,8 @@ class TestPaverServerTasks(PaverTestCase):
self.assertEqual(
self.task_messages,
[
u"echo 'import {system}.envs.{settings}' "
u"| python manage.py {system} --settings={settings} shell --plain --pythonpath=.".format(
"echo 'import {system}.envs.{settings}' "
"| python manage.py {system} --settings={settings} shell --plain --pythonpath=.".format(
system=system, settings=settings
),
]
@@ -242,8 +242,8 @@ class TestPaverServerTasks(PaverTestCase):
expected_asset_settings = "test_static_optimized"
expected_collect_static = not is_fast and expected_settings != Env.DEVSTACK_SETTINGS
if not is_fast:
expected_messages.append(u"xmodule_assets common/static/xmodule")
expected_messages.append(u"install npm_assets")
expected_messages.append("xmodule_assets common/static/xmodule")
expected_messages.append("install npm_assets")
expected_messages.extend(
[c.format(settings=expected_asset_settings,
log_file=Env.PRINT_SETTINGS_LOG_FILE) for c in EXPECTED_PRINT_SETTINGS_COMMAND]
@@ -288,8 +288,8 @@ class TestPaverServerTasks(PaverTestCase):
expected_collect_static = not is_fast and expected_settings != Env.DEVSTACK_SETTINGS
expected_messages = []
if not is_fast:
expected_messages.append(u"xmodule_assets common/static/xmodule")
expected_messages.append(u"install npm_assets")
expected_messages.append("xmodule_assets common/static/xmodule")
expected_messages.append("install npm_assets")
expected_messages.extend(
[c.format(settings=expected_asset_settings,
log_file=Env.PRINT_SETTINGS_LOG_FILE) for c in EXPECTED_PRINT_SETTINGS_COMMAND]
@@ -325,7 +325,7 @@ class TestPaverServerTasks(PaverTestCase):
expected_messages.append(EXPECTED_CELERY_COMMAND.format(settings="devstack_with_worker"))
self.assertEqual(self.task_messages, expected_messages)
def expected_sass_commands(self, system=None, asset_settings=u"test_static_optimized"):
def expected_sass_commands(self, system=None, asset_settings="test_static_optimized"):
"""
Returns the expected SASS commands for the specified system.
"""

View File

@@ -28,7 +28,7 @@ class TimedDecoratorTests(TestCase):
Tests of the pavelib.utils.timer:timed decorator.
"""
def setUp(self):
super(TimedDecoratorTests, self).setUp()
super().setUp()
patch_dumps = patch.object(timer.json, 'dump', autospec=True)
self.mock_dump = patch_dumps.start()

View File

@@ -20,7 +20,7 @@ class TestUtils(unittest.TestCase):
@patch('subprocess.check_output')
def test_firefox_version_ok(self, _mock_subprocesss):
test_version = MINIMUM_FIREFOX_VERSION
_mock_subprocesss.return_value = u"Mozilla Firefox {version}".format(
_mock_subprocesss.return_value = "Mozilla Firefox {version}".format(
version=str(test_version)
)
# No exception should be raised
@@ -29,7 +29,7 @@ class TestUtils(unittest.TestCase):
@patch('subprocess.check_output')
def test_firefox_version_below_expected(self, _mock_subprocesss):
test_version = MINIMUM_FIREFOX_VERSION - 1
_mock_subprocesss.return_value = u"Mozilla Firefox {version}".format(
_mock_subprocesss.return_value = "Mozilla Firefox {version}".format(
version=test_version
)
with self.assertRaises(Exception):

View File

@@ -19,7 +19,7 @@ class PaverXSSCommitLintTest(PaverTestCase):
"""
def setUp(self):
super(PaverXSSCommitLintTest, self).setUp()
super().setUp()
self.reset_task_messages()
@patch.object(pavelib.quality, '_write_metric')

View File

@@ -17,7 +17,7 @@ class PaverXSSLintTest(PaverTestCase):
"""
def setUp(self):
super(PaverXSSLintTest, self).setUp()
super().setUp()
self.reset_task_messages()
@patch.object(pavelib.quality, '_write_metric')

View File

@@ -5,7 +5,6 @@ import os
from unittest import TestCase
from uuid import uuid4
import six
from paver import tasks
from paver.easy import BuildFailure
@@ -15,7 +14,7 @@ class PaverTestCase(TestCase):
Base class for Paver test cases.
"""
def setUp(self):
super(PaverTestCase, self).setUp()
super().setUp()
# Show full length diffs upon test failure
self.maxDiff = None # pylint: disable=invalid-name
@@ -27,7 +26,7 @@ class PaverTestCase(TestCase):
os.environ['NO_PREREQ_INSTALL'] = 'true'
def tearDown(self):
super(PaverTestCase, self).tearDown()
super().tearDown()
tasks.environment = tasks.Environment()
del os.environ['NO_PREREQ_INSTALL']
@@ -51,7 +50,7 @@ class MockEnvironment(tasks.Environment):
Mock environment that collects information about Paver commands.
"""
def __init__(self):
super(MockEnvironment, self).__init__()
super().__init__()
self.dry_run = True
self.messages = []
@@ -62,7 +61,7 @@ class MockEnvironment(tasks.Environment):
else:
output = message
if not output.startswith("--->"):
self.messages.append(six.text_type(output))
self.messages.append(str(output))
def fail_on_eslint(*args, **kwargs):

View File

@@ -4,16 +4,13 @@ Install Python and Node prerequisites.
import hashlib
import io
import os
import re
import subprocess
import sys
from distutils import sysconfig
import six
from paver.easy import BuildFailure, sh, task
from six.moves import range
from .utils.envs import Env
from .utils.timer import timed
@@ -84,7 +81,7 @@ def compute_fingerprint(path_list):
# For files, hash the contents of the file
if os.path.isfile(path_item):
with io.open(path_item, "rb") as file_handle:
with open(path_item, "rb") as file_handle:
hasher.update(file_handle.read())
return hasher.hexdigest()
@@ -103,7 +100,7 @@ def prereq_cache(cache_name, paths, install_func):
cache_file_path = os.path.join(PREREQS_STATE_DIR, "{}.sha1".format(cache_filename))
old_hash = None
if os.path.isfile(cache_file_path):
with io.open(cache_file_path, "r") as cache_file:
with open(cache_file_path, "r") as cache_file:
old_hash = cache_file.read()
# Compare the old hash to the new hash
@@ -117,13 +114,13 @@ def prereq_cache(cache_name, paths, install_func):
# If the code executed within the context fails (throws an exception),
# then this step won't get executed.
create_prereqs_cache_dir()
with io.open(cache_file_path, "wb") as cache_file:
with open(cache_file_path, "wb") as cache_file:
# Since the pip requirement files are modified during the install
# process, we need to store the hash generated AFTER the installation
post_install_hash = compute_fingerprint(paths)
cache_file.write(post_install_hash.encode('utf-8'))
else:
print(u'{cache} unchanged, skipping...'.format(cache=cache_name))
print('{cache} unchanged, skipping...'.format(cache=cache_name))
def node_prereqs_installation():
@@ -138,7 +135,7 @@ def node_prereqs_installation():
npm_log_file_path = '{}/npm-install.{}.log'.format(Env.GEN_LOG_DIR, shard_str)
else:
npm_log_file_path = '{}/npm-install.log'.format(Env.GEN_LOG_DIR)
npm_log_file = io.open(npm_log_file_path, 'wb')
npm_log_file = open(npm_log_file_path, 'wb')
npm_command = 'npm install --verbose'.split()
cb_error_text = "Subprocess return code: 1"
@@ -153,13 +150,13 @@ def node_prereqs_installation():
proc = subprocess.Popen(npm_command, stderr=npm_log_file)
proc.wait()
except BuildFailure as error:
if cb_error_text in six.text_type(error):
if cb_error_text in str(error):
print("npm install error detected. Retrying...")
proc = subprocess.Popen(npm_command, stderr=npm_log_file)
proc.wait()
else:
raise
print(u"Successfully installed NPM packages. Log found at {}".format(
print("Successfully installed NPM packages. Log found at {}".format(
npm_log_file_path
))
@@ -175,7 +172,7 @@ def python_prereqs_installation():
def pip_install_req_file(req_file):
"""Pip install the requirements file."""
pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'
sh(u"{pip_cmd} -r {req_file}".format(pip_cmd=pip_cmd, req_file=req_file))
sh("{pip_cmd} -r {req_file}".format(pip_cmd=pip_cmd, req_file=req_file))
@task
@@ -234,7 +231,7 @@ def uninstall_python_packages():
create_prereqs_cache_dir()
if os.path.isfile(state_file_path):
with io.open(state_file_path) as state_file:
with open(state_file_path) as state_file:
version = state_file.read()
if version == expected_version:
print('Python uninstalls unchanged, skipping...')
@@ -250,7 +247,7 @@ def uninstall_python_packages():
for package_name in PACKAGES_TO_UNINSTALL:
if package_in_frozen(package_name, frozen):
# Uninstall the pacakge
sh(u"pip uninstall --disable-pip-version-check -y {}".format(package_name))
sh("pip uninstall --disable-pip-version-check -y {}".format(package_name))
uninstalled = True
if not uninstalled:
break
@@ -260,7 +257,7 @@ def uninstall_python_packages():
return
# Write our version.
with io.open(state_file_path, "wb") as state_file:
with open(state_file_path, "wb") as state_file:
state_file.write(expected_version.encode('utf-8'))
@@ -344,8 +341,7 @@ def install_prereqs():
def log_installed_python_prereqs():
""" Logs output of pip freeze for debugging. """
sh(u"pip freeze > {}".format(Env.GEN_LOG_DIR + "/pip_freeze.log"))
return
sh("pip freeze > {}".format(Env.GEN_LOG_DIR + "/pip_freeze.log"))
def print_devstack_warning():
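compute_fingerprint above swaps io.open for the builtin and keeps the hashing logic intact. A self-contained sketch of the pattern (function shape inferred from the hunk; the real function also folds in more than file contents):

import hashlib
import os

def compute_fingerprint(path_list):
    """Return a SHA1 hex digest over the contents of every file in path_list."""
    hasher = hashlib.sha1()
    for path_item in path_list:
        if os.path.isfile(path_item):
            with open(path_item, "rb") as file_handle:  # was: io.open(..., "rb")
                hasher.update(file_handle.read())
    return hasher.hexdigest()

# identical inputs -> identical digest, which is what the prereq cache compares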

View File

@@ -1,18 +1,13 @@
# coding=utf-8
"""
Check code quality using pycodestyle, pylint, and diff_quality.
"""
import io
import json
import os
import re
from datetime import datetime
from xml.sax.saxutils import quoteattr
import six
from paver.easy import BuildFailure, cmdopts, needs, sh, task
from openedx.core.djangolib.markup import HTML
@@ -21,12 +16,12 @@ from .utils.envs import Env
from .utils.timer import timed
ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts'
JUNIT_XML_TEMPLATE = u"""<?xml version="1.0" encoding="UTF-8"?>
JUNIT_XML_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="{name}" tests="1" errors="0" failures="{failure_count}" skip="0">
<testcase classname="pavelib.quality" name="{name}" time="{seconds}">{failure_element}</testcase>
</testsuite>
"""
JUNIT_XML_FAILURE_TEMPLATE = u'<failure message={message}/>'
JUNIT_XML_FAILURE_TEMPLATE = '<failure message={message}/>'
START_TIME = datetime.utcnow()
@@ -100,7 +95,7 @@ def find_fixme(options):
apps_list = ' '.join(top_python_dirs(system))
cmd = (
u"pylint --disable all --enable=fixme "
"pylint --disable all --enable=fixme "
"--output-format=parseable {apps} "
"> {report_dir}/pylint_fixme.report".format(
apps=apps_list,
@@ -142,7 +137,7 @@ def _get_pylint_violations(systems=ALL_SYSTEMS.split(','), errors_only=False, cl
system_report = report_dir / 'pylint.report'
if clean or not system_report.exists():
sh(
u"pylint {flags} --output-format=parseable {apps} "
"pylint {flags} --output-format=parseable {apps} "
"> {report_dir}/pylint.report".format(
flags=" ".join(flags),
apps=apps_list,
@@ -193,7 +188,7 @@ def run_pylint(options):
if num_violations < lower_violations_limit > -1:
fail_quality(
result_name,
u"FAILURE: Too few pylint violations. "
"FAILURE: Too few pylint violations. "
"Expected to see at least {lower_limit} pylint violations. "
"Either pylint is not running correctly -or- "
"the limits should be lowered and/or the lower limit should be removed.".format(
@@ -205,7 +200,7 @@ def run_pylint(options):
if num_violations > upper_violations_limit > -1:
fail_quality(
result_name,
u"FAILURE: Too many pylint violations. "
"FAILURE: Too many pylint violations. "
"The limit is {upper_limit}.".format(upper_limit=upper_violations_limit)
)
else:
@@ -267,7 +262,7 @@ def _get_pep8_violations(clean=True):
Env.METRICS_DIR.makedirs_p()
if not report.exists():
sh(u'pycodestyle . | tee {} -a'.format(report))
sh('pycodestyle . | tee {} -a'.format(report))
violations_list = _pep8_violations(report)
@@ -297,7 +292,7 @@ def run_pep8(options): # pylint: disable=unused-argument
violations_list = ''.join(violations_list)
# Print number of violations to log
violations_count_str = u"Number of PEP 8 violations: {count}".format(count=count)
violations_count_str = "Number of PEP 8 violations: {count}".format(count=count)
print(violations_count_str)
print(violations_list)
@@ -333,7 +328,7 @@ def run_complexity():
print("--> Calculating cyclomatic complexity of python files...")
try:
sh(
u"radon cc {system_string} --total-average > {complexity_report}".format(
"radon cc {system_string} --total-average > {complexity_report}".format(
system_string=system_string,
complexity_report=complexity_report
)
@@ -344,7 +339,7 @@ def run_complexity():
(Env.METRICS_DIR / "python_complexity")
)
print("--> Python cyclomatic complexity report complete.")
print(u"radon cyclomatic complexity score: {metric}".format(metric=str(complexity_metric)))
print("radon cyclomatic complexity score: {metric}".format(metric=str(complexity_metric)))
except BuildFailure:
print("FAILURE: Unable to calculate python-only code-complexity.")
@@ -371,7 +366,7 @@ def run_eslint(options):
violations_limit = int(getattr(options, 'limit', -1))
sh(
u"nodejs --max_old_space_size=4096 node_modules/.bin/eslint "
"nodejs --max_old_space_size=4096 node_modules/.bin/eslint "
"--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format(
eslint_report=eslint_report
),
@@ -383,7 +378,7 @@ def run_eslint(options):
except TypeError:
fail_quality(
'eslint',
u"FAILURE: Number of eslint violations could not be found in {eslint_report}".format(
"FAILURE: Number of eslint violations could not be found in {eslint_report}".format(
eslint_report=eslint_report
)
)
@@ -395,7 +390,7 @@ def run_eslint(options):
if num_violations > violations_limit > -1:
fail_quality(
'eslint',
u"FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format(
"FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format(
count=num_violations, violations_limit=violations_limit
)
)
@@ -413,7 +408,7 @@ def _get_stylelint_violations():
formatter = 'node_modules/stylelint-formatter-pretty'
sh(
u"stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format(
"stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format(
formatter=formatter,
stylelint_report=stylelint_report,
),
@@ -425,7 +420,7 @@ def _get_stylelint_violations():
except TypeError:
fail_quality(
'stylelint',
u"FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format(
"FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format(
stylelint_report=stylelint_report
)
)
@@ -452,7 +447,7 @@ def run_stylelint(options):
if num_violations > violations_limit > -1:
fail_quality(
'stylelint',
u"FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format(
"FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format(
count=num_violations,
violations_limit=violations_limit,
)
@@ -495,7 +490,7 @@ def run_xsslint(options):
_prepare_report_dir(xsslint_report_dir)
sh(
u"{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format(
"{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format(
repo_root=Env.REPO_ROOT,
xsslint_script=xsslint_script,
xsslint_report=xsslint_report,
@@ -507,21 +502,21 @@ def run_xsslint(options):
xsslint_counts = _get_xsslint_counts(xsslint_report)
try:
metrics_str = u"Number of {xsslint_script} violations: {num_violations}\n".format(
metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format(
xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total'])
)
if 'rules' in xsslint_counts and any(xsslint_counts['rules']):
metrics_str += "\n"
rule_keys = sorted(xsslint_counts['rules'].keys())
for rule in rule_keys:
metrics_str += u"{rule} violations: {count}\n".format(
metrics_str += "{rule} violations: {count}\n".format(
rule=rule,
count=int(xsslint_counts['rules'][rule])
)
except TypeError:
fail_quality(
'xsslint',
u"FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format(
"FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format(
xsslint_script=xsslint_script, xsslint_report=xsslint_report
)
)
@@ -530,14 +525,14 @@ def run_xsslint(options):
# Record the metric
_write_metric(metrics_str, metrics_report)
# Print number of violations to log.
sh(u"cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
error_message = ""
# Test total violations against threshold.
if 'total' in list(violation_thresholds.keys()):
if violation_thresholds['total'] < xsslint_counts['total']:
error_message = u"Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
count=xsslint_counts['total'], violations_limit=violation_thresholds['total']
)
@@ -547,14 +542,14 @@ def run_xsslint(options):
for threshold_key in threshold_keys:
if threshold_key not in xsslint_counts['rules']:
error_message += (
u"\nNumber of {xsslint_script} violations for {rule} could not be found in "
"\nNumber of {xsslint_script} violations for {rule} could not be found in "
"{xsslint_report}."
).format(
xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report
)
elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]:
error_message += \
u"\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
"\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
rule=threshold_key, count=xsslint_counts['rules'][threshold_key],
violations_limit=violation_thresholds['rules'][threshold_key],
)
@@ -562,7 +557,7 @@ def run_xsslint(options):
if error_message:
fail_quality(
'xsslint',
u"FAILURE: XSSLinter Failed.\n{error_message}\n"
"FAILURE: XSSLinter Failed.\n{error_message}\n"
"See {xsslint_report} or run the following command to hone in on the problem:\n"
" ./scripts/xss-commit-linter.sh -h".format(
error_message=error_message, xsslint_report=xsslint_report
@@ -585,7 +580,7 @@ def run_xsscommitlint():
_prepare_report_dir(xsscommitlint_report_dir)
sh(
u"{repo_root}/scripts/{xsscommitlint_script} | tee {xsscommitlint_report}".format(
"{repo_root}/scripts/{xsscommitlint_script} | tee {xsscommitlint_report}".format(
repo_root=Env.REPO_ROOT,
xsscommitlint_script=xsscommitlint_script,
xsscommitlint_report=xsscommitlint_report,
@@ -600,13 +595,13 @@ def run_xsscommitlint():
except TypeError:
fail_quality(
'xsscommitlint',
u"FAILURE: Number of {xsscommitlint_script} violations could not be found in {xsscommitlint_report}".format(
"FAILURE: Number of {xsscommitlint_script} violations could not be found in {xsscommitlint_report}".format(
xsscommitlint_script=xsscommitlint_script, xsscommitlint_report=xsscommitlint_report
)
)
# Print number of violations to log.
violations_count_str = u"Number of {xsscommitlint_script} violations: {num_violations}\n".format(
violations_count_str = "Number of {xsscommitlint_script} violations: {num_violations}\n".format(
xsscommitlint_script=xsscommitlint_script, num_violations=num_violations
)
@@ -614,10 +609,10 @@ def run_xsscommitlint():
metrics_report = (Env.METRICS_DIR / "xsscommitlint")
_write_metric(violations_count_str, metrics_report)
# Output report to console.
sh(u"cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
if num_violations:
fail_quality(
u'xsscommitlint',
'xsscommitlint',
"FAILURE: XSSCommitLinter Failed.\n{error_message}\n"
"See {xsscommitlint_report} or run the following command to hone in on the problem:\n"
" ./scripts/xss-commit-linter.sh -h".format(
@@ -636,7 +631,7 @@ def _write_metric(metric, filename):
Env.METRICS_DIR.makedirs_p()
with open(filename, "w") as metric_file:
metric_file.write(six.text_type(metric))
metric_file.write(str(metric))
def _prepare_report_dir(dir_name):
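For the _write_metric change above: on Python 3, six.text_type is simply an alias for the built-in str, so the rewrite is behavior-preserving. A quick check (illustrative, not from this repo):

    import six
    assert six.text_type is str
    assert six.text_type(42) == str(42) == '42'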
@@ -662,7 +657,7 @@ def _get_report_contents(filename, report_name, last_line_only=False):
"""
if os.path.isfile(filename):
with open(filename, 'r') as report_file:
with open(filename) as report_file:
if last_line_only:
lines = report_file.readlines()
for line in reversed(lines):
@@ -672,7 +667,7 @@ def _get_report_contents(filename, report_name, last_line_only=False):
else:
return report_file.read()
else:
file_not_found_message = u"FAILURE: The following log file could not be found: {file}".format(file=filename)
file_not_found_message = "FAILURE: The following log file could not be found: {file}".format(file=filename)
fail_quality(report_name, file_not_found_message)
@@ -779,7 +774,7 @@ def _extract_missing_pii_annotations(filename):
uncovered_models = 0
pii_check_passed = True
if os.path.isfile(filename):
with io.open(filename, 'r') as report_file:
with open(filename, 'r') as report_file:
lines = report_file.readlines()
# Find the count of uncovered models.
@@ -801,7 +796,7 @@ def _extract_missing_pii_annotations(filename):
# Each line in lines already contains a newline.
full_log = ''.join(lines)
else:
fail_quality('pii', u'FAILURE: Log file could not be found: {}'.format(filename))
fail_quality('pii', 'FAILURE: Log file could not be found: {}'.format(filename))
return (uncovered_models, pii_check_passed, full_log)
@@ -827,7 +822,7 @@ def run_pii_check(options):
print()
print("Running {} PII Annotation check and report".format(env_name))
print("-" * 45)
run_output_file = six.text_type(output_file).format(env_name.lower())
run_output_file = str(output_file).format(env_name.lower())
sh(
"mkdir -p {} && "
"export DJANGO_SETTINGS_MODULE={}; "
@@ -855,7 +850,7 @@ def run_pii_check(options):
# Write metric file.
if uncovered_count is None:
uncovered_count = 0
metrics_str = u"Number of PII Annotation violations: {}\n".format(uncovered_count)
metrics_str = "Number of PII Annotation violations: {}\n".format(uncovered_count)
_write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name))
# Finally, fail the paver task if code_annotations suggests that the check failed.
@@ -933,20 +928,20 @@ def run_quality(options):
if is_html:
lines = ['<body>\n']
sep = '-------------<br/>\n'
title = HTML(u"<h1>Quality Report: {}</h1>\n").format(linter)
title = HTML("<h1>Quality Report: {}</h1>\n").format(linter)
violations_bullets = ''.join(
[HTML('<li>{violation}</li><br/>\n').format(violation=violation) for violation in violations_list]
)
violations_str = HTML('<ul>\n{bullets}</ul>\n').format(bullets=HTML(violations_bullets))
violations_count_str = HTML(u"<b>Violations</b>: {count}<br/>\n")
fail_line = HTML(u"<b>FAILURE</b>: {} count should be 0<br/>\n").format(linter)
violations_count_str = HTML("<b>Violations</b>: {count}<br/>\n")
fail_line = HTML("<b>FAILURE</b>: {} count should be 0<br/>\n").format(linter)
else:
lines = []
sep = '-------------\n'
title = u"Quality Report: {}\n".format(linter)
title = "Quality Report: {}\n".format(linter)
violations_str = ''.join(violations_list)
violations_count_str = u"Violations: {count}\n"
fail_line = u"FAILURE: {} count should be {}\n".format(linter, limit)
violations_count_str = "Violations: {count}\n"
fail_line = "FAILURE: {} count should be {}\n".format(linter, limit)
violations_count_str = violations_count_str.format(count=count)
@@ -973,19 +968,19 @@ def run_quality(options):
# ----- Set up for diff-quality pylint call -----
# Set the string to be used for the diff-quality --compare-branch switch.
compare_branch = getattr(options, 'compare_branch', u'origin/master')
compare_commit = sh(u'git merge-base HEAD {}'.format(compare_branch), capture=True).strip()
compare_branch = getattr(options, 'compare_branch', 'origin/master')
compare_commit = sh('git merge-base HEAD {}'.format(compare_branch), capture=True).strip()
if sh('git rev-parse HEAD', capture=True).strip() != compare_commit:
compare_branch_string = u'--compare-branch={0}'.format(compare_commit)
compare_branch_string = '--compare-branch={}'.format(compare_commit)
# Set the string, if needed, to be used for the diff-quality --fail-under switch.
diff_threshold = int(getattr(options, 'percentage', -1))
percentage_string = u''
percentage_string = ''
if diff_threshold > -1:
percentage_string = u'--fail-under={0}'.format(diff_threshold)
percentage_string = '--fail-under={}'.format(diff_threshold)
pylint_files = get_violations_reports("pylint")
pylint_reports = u' '.join(pylint_files)
pylint_reports = ' '.join(pylint_files)
if not run_diff_quality(
violations_type="pylint",
reports=pylint_reports,
@@ -997,7 +992,7 @@ def run_quality(options):
failure_reasons.append('Pylint violation(s) were found in the lines of code that were added or changed.')
eslint_files = get_violations_reports("eslint")
eslint_reports = u' '.join(eslint_files)
eslint_reports = ' '.join(eslint_files)
if not run_diff_quality(
violations_type="eslint",
reports=eslint_reports,
@@ -1026,7 +1021,7 @@ def run_diff_quality(
"""
try:
sh(
u"diff-quality --violations={type} "
"diff-quality --violations={type} "
"{reports} {percentage_string} {compare_branch_string} "
"--html-report {dquality_dir}/diff_quality_{type}.html ".format(
type=violations_type,

View File

@@ -259,7 +259,7 @@ def update_db(options):
fake = "--fake-initial" if getattr(options, 'fake_initial', False) else ""
for system in ('lms', 'cms'):
# pylint: disable=line-too-long
sh(u"NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{system} --traceback --pythonpath=. {fake}".format(
sh("NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{system} --traceback --pythonpath=. {fake}".format(
settings=settings,
system=system,
fake=fake))
@@ -282,9 +282,9 @@ def check_settings(args):
settings = args.settings[0]
try:
import_cmd = u"echo 'import {system}.envs.{settings}'".format(system=system, settings=settings)
import_cmd = "echo 'import {system}.envs.{settings}'".format(system=system, settings=settings)
django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
sh(u"{import_cmd} | {shell_cmd}".format(import_cmd=import_cmd, shell_cmd=django_shell_cmd))
sh("{import_cmd} | {shell_cmd}".format(import_cmd=import_cmd, shell_cmd=django_shell_cmd))
except: # pylint: disable=bare-except
print("Failed to import settings", file=sys.stderr)

View File

@@ -337,7 +337,7 @@ def coverage(options):
if not combined_report_file.isfile():
# It may be that the coverage files were generated using -p;
# try to combine them into the one file that we need.
sh(u"coverage combine --rcfile={}".format(rcfile))
sh("coverage combine --rcfile={}".format(rcfile))
if not os.path.getsize(combined_report_file) > 50:
# Check if the .coverage data file is larger than the base file,
@@ -352,9 +352,9 @@ def coverage(options):
return
# Generate the coverage.py XML report
sh(u"coverage xml --rcfile={}".format(rcfile))
sh("coverage xml --rcfile={}".format(rcfile))
# Generate the coverage.py HTML report
sh(u"coverage html --rcfile={}".format(rcfile))
sh("coverage html --rcfile={}".format(rcfile))
diff_coverage() # pylint: disable=no-value-for-parameter
@@ -390,8 +390,8 @@ def diff_coverage(options):
# Generate the diff coverage reports (HTML and console)
sh(
u"diff-cover {xml_report_str} --compare-branch={compare_branch} "
u"--html-report {diff_html_path}".format(
"diff-cover {xml_report_str} --compare-branch={compare_branch} "
"--html-report {diff_html_path}".format(
xml_report_str=xml_report_str,
compare_branch=compare_branch,
diff_html_path=diff_html_path,

View File

@@ -25,9 +25,9 @@ def remove_files_from_folder(files, folder):
file_with_path = os.path.join(folder, file_name)
try:
os.remove(file_with_path)
print(u'\tRemoved {}'.format(file_with_path))
print('\tRemoved {}'.format(file_with_path))
except OSError:
print(u'\tCould not remove {}. Continuing.'.format(file_with_path))
print('\tCould not remove {}. Continuing.'.format(file_with_path))
continue
@@ -41,9 +41,9 @@ def reset_test_db(db_cache_files, update_cache_files=True, use_existing_db=False
"""
cmd = '{}/scripts/reset-test-db.sh'.format(Env.REPO_ROOT)
if update_cache_files:
cmd = u'{} --rebuild_cache'.format(cmd)
cmd = '{} --rebuild_cache'.format(cmd)
if use_existing_db:
cmd = u'{} --use-existing-db'.format(cmd)
cmd = '{} --use-existing-db'.format(cmd)
sh(cmd)
verify_files_exist(db_cache_files)
@@ -78,7 +78,7 @@ def fingerprint_bokchoy_db_files(migration_output_files, all_db_files):
msg = "Computing the fingerprint."
print(msg)
fingerprint = compute_fingerprint(file_paths)
print(u"The fingerprint for bokchoy db files is: {}".format(fingerprint))
print("The fingerprint for bokchoy db files is: {}".format(fingerprint))
return fingerprint
@@ -101,7 +101,7 @@ def verify_files_exist(files):
for file_name in files:
file_path = os.path.join(CACHE_FOLDER, file_name)
if not os.path.isfile(file_path):
msg = u"Did not find expected file: {}".format(file_path)
msg = "Did not find expected file: {}".format(file_path)
raise BuildFailure(msg)
@@ -113,7 +113,7 @@ def calculate_bokchoy_migrations(migration_output_files):
NOTE: the script first clears out the database, then calculates
what migrations need to be run, which is all of them.
"""
sh(u'{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT))
sh('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT))
verify_files_exist(migration_output_files)
@@ -137,7 +137,7 @@ def is_fingerprint_in_bucket(fingerprint, bucket_name):
conn = boto.connect_s3(anon=True)
bucket = conn.get_bucket(bucket_name)
except Exception as e: # pylint: disable=broad-except
print(u"Exception caught trying to reach S3 bucket {}: {}".format(bucket_name, e))
print("Exception caught trying to reach S3 bucket {}: {}".format(bucket_name, e))
return False
key = boto.s3.key.Key(bucket=bucket, name=zipfile_name)
return key.exists()
@@ -148,9 +148,9 @@ def get_bokchoy_db_fingerprint_from_file():
Return the value recorded in the fingerprint file.
"""
try:
with open(FINGERPRINT_FILEPATH, 'r') as fingerprint_file:
with open(FINGERPRINT_FILEPATH) as fingerprint_file:
cached_fingerprint = fingerprint_file.read().strip()
except IOError:
except OSError:
return None
return cached_fingerprint
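Two more mechanical rewrites appear in this hunk: open(filename) is identical to open(filename, 'r') because 'r' is the default mode, and since Python 3.3 (PEP 3151) IOError is merely an alias of OSError, making OSError the canonical name to catch. A one-line sanity check, assuming Python 3:

    assert IOError is OSError  # PEP 3151: IOError aliases OSError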
@@ -159,12 +159,12 @@ def get_file_from_s3(bucket_name, zipfile_name, path):
"""
Get the file from s3 and save it to disk.
"""
print(u"Retrieving {} from bucket {}.".format(zipfile_name, bucket_name))
print("Retrieving {} from bucket {}.".format(zipfile_name, bucket_name))
conn = boto.connect_s3(anon=True)
bucket = conn.get_bucket(bucket_name)
key = boto.s3.key.Key(bucket=bucket, name=zipfile_name)
if not key.exists():
msg = u"Did not find expected file {} in the S3 bucket {}".format(
msg = "Did not find expected file {} in the S3 bucket {}".format(
zipfile_name, bucket_name
)
raise BuildFailure(msg)
@@ -215,7 +215,7 @@ def upload_to_s3(file_name, file_path, bucket_name, replace=False):
"""
Upload the specified files to an s3 bucket.
"""
print(u"Uploading {} to s3 bucket {}".format(file_name, bucket_name))
print("Uploading {} to s3 bucket {}".format(file_name, bucket_name))
try:
conn = boto.connect_s3()
except boto.exception.NoAuthHandlerFound:
@@ -231,9 +231,9 @@ def upload_to_s3(file_name, file_path, bucket_name, replace=False):
key = boto.s3.key.Key(bucket=bucket, name=file_name)
bytes_written = key.set_contents_from_filename(file_path, replace=replace, policy='public-read')
if bytes_written:
msg = u"Wrote {} bytes to {}.".format(bytes_written, key.name)
msg = "Wrote {} bytes to {}.".format(bytes_written, key.name)
else:
msg = u"File {} already existed in bucket {}.".format(key.name, bucket_name)
msg = "File {} already existed in bucket {}.".format(key.name, bucket_name)
print(msg)

View File

@@ -2,15 +2,12 @@
Helper functions for loading environment settings.
"""
import io
import json
import os
import sys
from time import sleep
import memcache
import six
from lazy import lazy
from path import Path as path
from paver.easy import BuildFailure, sh
@@ -36,7 +33,7 @@ def repo_root():
absolute_path = file_path.abspath()
break
except OSError:
print(u'Attempt {}/180 to get an absolute path failed'.format(attempt))
print('Attempt {}/180 to get an absolute path failed'.format(attempt))
if attempt < 180:
attempt += 1
sleep(1)
@@ -46,7 +43,7 @@ def repo_root():
return absolute_path.parent.parent.parent
class Env(object):
class Env:
"""
Load information about the execution environment.
"""
@@ -257,17 +254,17 @@ class Env(object):
django_cmd(
system,
settings,
u"print_setting {django_setting} 2>{log_file}".format(
"print_setting {django_setting} 2>{log_file}".format(
django_setting=django_setting,
log_file=cls.PRINT_SETTINGS_LOG_FILE
)
),
capture=True
)
return six.text_type(value).strip()
return str(value).strip()
except BuildFailure:
print(u"Unable to print the value of the {} setting:".format(django_setting))
with io.open(cls.PRINT_SETTINGS_LOG_FILE, 'r') as f:
print("Unable to print the value of the {} setting:".format(django_setting))
with open(cls.PRINT_SETTINGS_LOG_FILE, 'r') as f:
print(f.read())
sys.exit(1)
@@ -306,7 +303,7 @@ class Env(object):
env_path = env_path.parent.parent / env_path.basename()
if not env_path.isfile():
print(
u"Warning: could not find environment JSON file "
"Warning: could not find environment JSON file "
"at '{path}'".format(path=env_path),
file=sys.stderr,
)
@@ -319,7 +316,7 @@ class Env(object):
except ValueError:
print(
u"Error: Could not parse JSON "
"Error: Could not parse JSON "
"in {path}".format(path=env_path),
file=sys.stderr,
)
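The class Env(object) -> class Env: rewrite reflects that all Python 3 classes are new-style, so the explicit object base adds nothing. A minimal sketch:

    class WithBase(object):
        pass

    class WithoutBase:
        pass

    # both ultimately inherit from object
    assert WithBase.__mro__[-1] is object and WithoutBase.__mro__[-1] is object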

View File

@@ -59,9 +59,9 @@ class PassthroughOptionParser(OptionParser):
if len(rargs) < nargs:
if nargs == 1:
self.error(u"%s option requires an argument" % opt)
self.error("%s option requires an argument" % opt)
else:
self.error(u"%s option requires %d arguments"
self.error("%s option requires %d arguments"
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
@@ -70,7 +70,7 @@ class PassthroughOptionParser(OptionParser):
del rargs[0:nargs]
elif had_explicit_value:
self.error(u"%s option does not take a value" % opt)
self.error("%s option does not take a value" % opt)
else:
value = None
@@ -104,9 +104,9 @@ class PassthroughOptionParser(OptionParser):
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(u"%s option requires an argument" % opt)
self.error("%s option requires an argument" % opt)
else:
self.error(u"%s option requires %d arguments"
self.error("%s option requires %d arguments"
% (opt, nargs))
elif nargs == 1:
@@ -136,11 +136,11 @@ class PassthroughTask(paver.tasks.Task):
@property
def parser(self):
with patch.object(paver.tasks.optparse, 'OptionParser', PassthroughOptionParser):
return super(PassthroughTask, self).parser
return super().parser
def __call__(self, *args, **kwargs):
paver.tasks.environment.passthrough_options = self._parser.passthrough_options # pylint: disable=no-member
try:
return super(PassthroughTask, self).__call__(*args, **kwargs)
return super().__call__(*args, **kwargs)
finally:
del paver.tasks.environment.passthrough_options
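The zero-argument super() used in PassthroughTask is Python 3 shorthand: inside a method it binds to the same class and instance as the explicit two-argument form. A small sketch (hypothetical classes, not from this codebase):

    class Base:
        def name(self):
            return 'base'

    class Child(Base):
        def name(self):
            # equivalent to super(Child, self).name()
            return super().name()

    assert Child().name() == 'base'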

View File

@@ -69,7 +69,7 @@ def run_multi_processes(cmd_list, out_log=None, err_log=None):
# pylint: disable=broad-except
except Exception as err:
print(u"Error running process {}".format(err), file=sys.stderr)
print("Error running process {}".format(err), file=sys.stderr)
finally:
for pid in pids:

View File

@@ -41,16 +41,16 @@ def start_servers(options):
print(cmd, logfile)
run_background_process(cmd, out_log=logfile, err_log=logfile, cwd=cwd)
for service, info in six.iteritems(Env.BOK_CHOY_SERVERS):
for service, info in Env.BOK_CHOY_SERVERS.items():
address = "0.0.0.0:{}".format(info['port'])
cmd = (u"DEFAULT_STORE={default_store} ").format(default_store=options.default_store)
cmd = ("DEFAULT_STORE={default_store} ").format(default_store=options.default_store)
if coveragerc:
cmd += (u"coverage run --rcfile={coveragerc} -m ").format(coveragerc=coveragerc)
cmd += ("coverage run --rcfile={coveragerc} -m ").format(coveragerc=coveragerc)
else:
cmd += "python -m "
cmd += (
u"manage {service} --settings {settings} runserver "
u"{address} --traceback --noreload".format(
"manage {service} --settings {settings} runserver "
"{address} --traceback --noreload".format(
service=service,
settings=Env.SETTINGS,
address=address,
@@ -58,9 +58,9 @@ def start_servers(options):
)
start_server(cmd, info['log'])
for service, info in six.iteritems(Env.BOK_CHOY_STUBS):
for service, info in Env.BOK_CHOY_STUBS.items():
cmd = (
u"python -m stubs.start {service} {port} "
"python -m stubs.start {service} {port} "
"{config}".format(
service=service,
port=info['port'],
@@ -75,7 +75,7 @@ def wait_for_server(server, port):
Wait for a server to respond with status 200
"""
print(
u"Checking server {server} on port {port}".format(
"Checking server {server} on port {port}".format(
server=server,
port=port,
)
@@ -110,12 +110,12 @@ def wait_for_test_servers():
Wait until we get a successful response from the servers or time out
"""
for service, info in six.iteritems(Env.BOK_CHOY_SERVERS):
for service, info in Env.BOK_CHOY_SERVERS.items():
ready = wait_for_server(info['host'], info['port'])
if not ready:
msg = colorize(
"red",
u"Could not contact {} test server".format(service)
"Could not contact {} test server".format(service)
)
print(msg)
sys.exit(1)
@@ -127,7 +127,7 @@ def is_mongo_running():
"""
# The mongo command will connect to the service,
# failing with a non-zero exit code if it cannot connect.
output = os.popen(u'mongo --host {} --eval "print(\'running\')"'.format(Env.MONGO_HOST)).read()
output = os.popen('mongo --host {} --eval "print(\'running\')"'.format(Env.MONGO_HOST)).read()
return output and "running" in output
@@ -159,7 +159,7 @@ def clear_mongo():
Clears mongo database.
"""
sh(
u"mongo --host {} {} --eval 'db.dropDatabase()' > /dev/null".format(
"mongo --host {} {} --eval 'db.dropDatabase()' > /dev/null".format(
Env.MONGO_HOST,
Env.BOK_CHOY_MONGO_DATABASE,
)
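six.iteritems(d) becomes d.items() throughout this file because Python 3's dict.items() already returns a lazy view, so the six shim is redundant. A rough illustration with a made-up server mapping:

    servers = {'lms': {'port': 8003}, 'cms': {'port': 8031}}
    # was: for service, info in six.iteritems(servers):
    for service, info in servers.items():
        print(service, info['port'])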

View File

@@ -43,7 +43,7 @@ def load_bok_choy_data(options):
"""
print('Loading data from json fixtures in db_fixtures directory')
sh(
u"DEFAULT_STORE={default_store}"
"DEFAULT_STORE={default_store}"
" ./manage.py lms --settings {settings} loaddata --traceback"
" common/test/db_fixtures/*.json".format(
default_store=options.default_store,
@@ -68,11 +68,11 @@ def load_courses(options):
`test_root/courses/`.
"""
if 'imports_dir' in options:
msg = colorize('green', u"Importing courses from {}...".format(options.imports_dir))
msg = colorize('green', "Importing courses from {}...".format(options.imports_dir))
print(msg)
sh(
u"DEFAULT_STORE={default_store}"
"DEFAULT_STORE={default_store}"
" ./manage.py cms --settings={settings} import {import_dir}".format(
default_store=options.default_store,
import_dir=options.imports_dir,
@@ -95,7 +95,7 @@ def update_fixtures():
print(msg)
sh(
u" ./manage.py lms --settings={settings} update_fixtures".format(
" ./manage.py lms --settings={settings} update_fixtures".format(
settings=Env.SETTINGS
)
)
@@ -159,7 +159,7 @@ class BokChoyTestSuite(TestSuite):
See pytest documentation: https://docs.pytest.org/en/latest/
"""
def __init__(self, *args, **kwargs):
super(BokChoyTestSuite, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.test_dir = Env.BOK_CHOY_DIR / kwargs.get('test_dir', 'tests')
self.log_dir = Env.BOK_CHOY_LOG_DIR
self.report_dir = kwargs.get('report_dir', Env.BOK_CHOY_REPORT_DIR)
@@ -182,7 +182,7 @@ class BokChoyTestSuite(TestSuite):
self.save_screenshots = kwargs.get('save_screenshots', False)
def __enter__(self):
super(BokChoyTestSuite, self).__enter__()
super().__enter__()
# Ensure that we have a directory to put logs and reports
self.log_dir.makedirs_p()
@@ -229,7 +229,7 @@ class BokChoyTestSuite(TestSuite):
self.run_servers_continuously()
def __exit__(self, exc_type, exc_value, traceback):
super(BokChoyTestSuite, self).__exit__(exc_type, exc_value, traceback)
super().__exit__(exc_type, exc_value, traceback)
# Using testsonly will leave all fixtures in place (Note: the db will also be dirtier.)
if self.testsonly:
@@ -239,7 +239,7 @@ class BokChoyTestSuite(TestSuite):
# Clean up data we created in the databases
msg = colorize('green', "Cleaning up databases...")
print(msg)
sh(u"./manage.py lms --settings {settings} flush --traceback --noinput".format(settings=Env.SETTINGS))
sh("./manage.py lms --settings {settings} flush --traceback --noinput".format(settings=Env.SETTINGS))
clear_mongo()
@property
@@ -252,7 +252,7 @@ class BokChoyTestSuite(TestSuite):
if self.num_processes != 1:
# Construct "multiprocess" pytest command
command += [
u"-n {}".format(self.num_processes),
"-n {}".format(self.num_processes),
"--color=no",
]
if self.verbosity < 1:
@@ -260,7 +260,7 @@ class BokChoyTestSuite(TestSuite):
elif self.verbosity > 1:
command.append("--verbose")
if self.eval_attr:
command.append(u"-a '{}'".format(self.eval_attr))
command.append("-a '{}'".format(self.eval_attr))
return command

View File

@@ -18,7 +18,7 @@ class JsTestSuite(TestSuite):
A class for running JavaScript tests.
"""
def __init__(self, *args, **kwargs):
super(JsTestSuite, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.run_under_coverage = kwargs.get('with_coverage', True)
self.mode = kwargs.get('mode', 'run')
self.report_dir = Env.JS_REPORT_DIR
@@ -28,7 +28,7 @@ class JsTestSuite(TestSuite):
self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)]
def __enter__(self):
super(JsTestSuite, self).__enter__()
super().__enter__()
if tasks.environment.dry_run:
tasks.environment.info("make report_dir")
else:
@@ -55,7 +55,7 @@ class JsTestSubSuite(TestSuite):
common-requirejs and xmodule
"""
def __init__(self, *args, **kwargs):
super(JsTestSubSuite, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.test_id = args[0]
self.run_under_coverage = kwargs.get('with_coverage', True)
self.mode = kwargs.get('mode', 'run')

View File

@@ -20,7 +20,7 @@ class PytestSuite(TestSuite):
to pytest tests
"""
def __init__(self, *args, **kwargs):
super(PytestSuite, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.failed_only = kwargs.get('failed_only', False)
self.fail_fast = kwargs.get('fail_fast', False)
self.run_under_coverage = kwargs.get('with_coverage', True)
@@ -50,14 +50,14 @@ class PytestSuite(TestSuite):
self.with_wtw = kwargs.get('with_wtw', False)
def __enter__(self):
super(PytestSuite, self).__enter__()
super().__enter__()
self.report_dir.makedirs_p()
def __exit__(self, exc_type, exc_value, traceback):
"""
Cleans mongo after the tests run.
"""
super(PytestSuite, self).__exit__(exc_type, exc_value, traceback)
super().__exit__(exc_type, exc_value, traceback)
test_utils.clean_mongo()
def _under_coverage_cmd(self, cmd):
@@ -121,7 +121,7 @@ class SystemTestSuite(PytestSuite):
TestSuite for lms and cms python unit tests
"""
def __init__(self, *args, **kwargs):
super(SystemTestSuite, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.eval_attr = kwargs.get('eval_attr', None)
self.test_id = kwargs.get('test_id', self._default_test_id)
self.fasttest = kwargs.get('fasttest', False)
@@ -185,28 +185,28 @@ class SystemTestSuite(PytestSuite):
xdist_remote_processes = self.processes
for ip in self.xdist_ip_addresses.split(','):
# Propagate necessary env vars to xdist containers
env_var_cmd = u'export DJANGO_SETTINGS_MODULE={} DISABLE_COURSEENROLLMENT_HISTORY={} PYTHONHASHSEED=0'\
env_var_cmd = 'export DJANGO_SETTINGS_MODULE={} DISABLE_COURSEENROLLMENT_HISTORY={} PYTHONHASHSEED=0'\
.format('{}.envs.{}'.format(self.root, self.settings),
self.disable_courseenrollment_history)
xdist_string = u'--tx {}*ssh="jenkins@{} -o StrictHostKeyChecking=no"' \
xdist_string = '--tx {}*ssh="jenkins@{} -o StrictHostKeyChecking=no"' \
'//python="source edx-venv-{}/edx-venv/bin/activate; {}; python"' \
'//chdir="edx-platform"' \
.format(xdist_remote_processes, ip, Env.PYTHON_VERSION, env_var_cmd)
cmd.append(xdist_string)
for rsync_dir in Env.rsync_dirs():
cmd.append(u'--rsyncdir {}'.format(rsync_dir))
cmd.append('--rsyncdir {}'.format(rsync_dir))
else:
if self.processes == -1:
cmd.append('-n auto')
cmd.append('--dist=loadscope')
elif self.processes != 0:
cmd.append(u'-n {}'.format(self.processes))
cmd.append('-n {}'.format(self.processes))
cmd.append('--dist=loadscope')
if not self.randomize:
cmd.append('-p no:randomly')
if self.eval_attr:
cmd.append(u"-a '{}'".format(self.eval_attr))
cmd.append("-a '{}'".format(self.eval_attr))
cmd.extend(self.passthrough_options)
cmd.append(self.test_id)
@@ -263,7 +263,7 @@ class LibTestSuite(PytestSuite):
TestSuite for edx-platform/common/lib python unit tests
"""
def __init__(self, *args, **kwargs):
super(LibTestSuite, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.append_coverage = kwargs.get('append_coverage', False)
self.test_id = kwargs.get('test_id', self.root)
self.eval_attr = kwargs.get('eval_attr', None)
@@ -313,16 +313,16 @@ class LibTestSuite(PytestSuite):
else:
django_env_var_cmd = "export DJANGO_SETTINGS_MODULE='openedx.tests.settings'"
env_var_cmd = u'{} DISABLE_COURSEENROLLMENT_HISTORY={}' \
env_var_cmd = '{} DISABLE_COURSEENROLLMENT_HISTORY={}' \
.format(django_env_var_cmd, self.disable_courseenrollment_history)
xdist_string = u'--tx {}*ssh="jenkins@{} -o StrictHostKeyChecking=no"' \
xdist_string = '--tx {}*ssh="jenkins@{} -o StrictHostKeyChecking=no"' \
'//python="source edx-venv-{}/edx-venv/bin/activate; {}; python"' \
'//chdir="edx-platform"' \
.format(xdist_remote_processes, ip, Env.PYTHON_VERSION, env_var_cmd)
cmd.append(xdist_string)
for rsync_dir in Env.rsync_dirs():
cmd.append(u'--rsyncdir {}'.format(rsync_dir))
cmd.append('--rsyncdir {}'.format(rsync_dir))
# "--rsyncdir" throws off the configuration root, set it explicitly
if 'common/lib' in self.test_id:
cmd.append('--rootdir=common/lib')
@@ -334,13 +334,13 @@ class LibTestSuite(PytestSuite):
cmd.append('-n auto')
cmd.append('--dist=loadscope')
elif self.processes != 0:
cmd.append(u'-n {}'.format(self.processes))
cmd.append('-n {}'.format(self.processes))
cmd.append('--dist=loadscope')
if not self.randomize:
cmd.append("-p no:randomly")
if self.eval_attr:
cmd.append(u"-a '{}'".format(self.eval_attr))
cmd.append("-a '{}'".format(self.eval_attr))
cmd.append(self.test_id)

View File

@@ -18,14 +18,14 @@ class PythonTestSuite(TestSuite):
A subclass of TestSuite with extra setup for python tests
"""
def __init__(self, *args, **kwargs):
super(PythonTestSuite, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.opts = kwargs
self.disable_migrations = kwargs.get('disable_migrations', True)
self.fasttest = kwargs.get('fasttest', False)
self.subsuites = kwargs.get('subsuites', self._default_subsuites)
def __enter__(self):
super(PythonTestSuite, self).__enter__()
super().__enter__()
if self.disable_migrations:
os.environ['DISABLE_MIGRATIONS'] = '1'

View File

@@ -19,7 +19,7 @@ except ImportError:
__test__ = False # do not collect
class TestSuite(object):
class TestSuite:
"""
TestSuite is a class that defines how groups of tests run.
"""
@@ -41,7 +41,7 @@ class TestSuite(object):
i.e. Checking for and defining required directories.
"""
print(u"\nSetting up for {suite_name}".format(suite_name=self.root))
print("\nSetting up for {suite_name}".format(suite_name=self.root))
self.failed_suites = []
def __exit__(self, exc_type, exc_value, traceback):
@@ -54,7 +54,7 @@ class TestSuite(object):
i.e. Cleaning mongo after the lms tests run.
"""
print(u"\nCleaning up after {suite_name}".format(suite_name=self.root))
print("\nCleaning up after {suite_name}".format(suite_name=self.root))
@property
def cmd(self):
@@ -127,10 +127,10 @@ class TestSuite(object):
Writes a list of failed_suites to sys.stderr
"""
if self.failed_suites:
msg = colorize('red', u"\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48))
msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48))
msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n')
else:
msg = colorize('green', u"\n\n{bar}\nNo test failures ".format(bar="=" * 48))
msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48))
print(msg)

View File

@@ -7,7 +7,6 @@ import os
import re
import subprocess
import six
from paver.easy import cmdopts, sh, task
from pavelib.utils.envs import Env
@@ -64,7 +63,7 @@ def clean_dir(directory):
"""
# We delete the files but preserve the directory structure
# so that coverage.py has a place to put the reports.
sh(u'find {dir} -type f -delete'.format(dir=directory))
sh('find {dir} -type f -delete'.format(dir=directory))
@task
@@ -93,7 +92,7 @@ def clean_mongo():
"""
Clean mongo test databases
"""
sh(u"mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format(
sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format(
host=Env.MONGO_HOST,
port=MONGO_PORT_NUM,
repo_root=Env.REPO_ROOT,
@@ -120,10 +119,10 @@ def check_firefox_version():
driver.close()
if firefox_ver < MINIMUM_FIREFOX_VERSION:
raise Exception(
u'Required firefox version not found.\n'
u'Expected: {expected_version}; Actual: {actual_version}.\n\n'
u'Make sure that the edx.devstack.firefox container is up-to-date and running\n'
u'\t{expected_version}'.format(
'Required firefox version not found.\n'
'Expected: {expected_version}; Actual: {actual_version}.\n\n'
'Make sure that the edx.devstack.firefox container is up-to-date and running\n'
'\t{expected_version}'.format(
actual_version=firefox_ver,
expected_version=MINIMUM_FIREFOX_VERSION
)
@@ -133,7 +132,7 @@ def check_firefox_version():
# Firefox will be run as a local process
expected_firefox_ver = "Mozilla Firefox " + str(MINIMUM_FIREFOX_VERSION)
firefox_ver_string = subprocess.check_output("firefox --version", shell=True).strip()
if isinstance(firefox_ver_string, six.binary_type):
if isinstance(firefox_ver_string, bytes):
firefox_ver_string = firefox_ver_string.decode('utf-8')
firefox_version_regex = re.compile(r"Mozilla Firefox (\d+.\d+)")
try:
@@ -143,8 +142,8 @@ def check_firefox_version():
if firefox_ver < MINIMUM_FIREFOX_VERSION:
raise Exception(
u'Required firefox version not found.\n'
u'Expected: {expected_version}; Actual: {actual_version}.'.format(
'Required firefox version not found.\n'
'Expected: {expected_version}; Actual: {actual_version}.'.format(
actual_version=firefox_ver,
expected_version=expected_firefox_ver
)
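The isinstance check above swaps six.binary_type for bytes, which is exactly what that alias points to on Python 3; subprocess.check_output returns bytes, hence the decode. A sketch of the pattern with a hypothetical version string:

    import six
    assert six.binary_type is bytes

    ver = b'Mozilla Firefox 61.0'  # hypothetical check_output result
    if isinstance(ver, bytes):
        ver = ver.decode('utf-8')
    assert ver.startswith('Mozilla Firefox')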
@@ -166,7 +165,7 @@ def fetch_coverage_test_selection_data(options):
except OSError:
pass # Directory already exists
sh(u'git diff $(git merge-base {} HEAD) > {}/{}'.format(
sh('git diff $(git merge-base {} HEAD) > {}/{}'.format(
getattr(options, 'compare_branch', 'origin/master'),
COVERAGE_CACHE_BASEPATH,
WHO_TESTS_WHAT_DIFF