refactor: pyupgrade on docs & pavelib (#26768)

This commit is contained in:
M. Zulqarnain
2021-03-05 15:24:59 +05:00
committed by GitHub
parent 41e5403f4e
commit e35d3de376
27 changed files with 139 additions and 140 deletions

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*- # lint-amnesty, pylint: disable=missing-module-docstring
# lint-amnesty, pylint: disable=missing-module-docstring
#
# Configuration file for the Sphinx documentation builder.
#
@@ -24,14 +24,14 @@ import edx_theme
# -- Project information -----------------------------------------------------
project = u'Open edX REST APIs'
project = 'Open edX REST APIs'
copyright = edx_theme.COPYRIGHT # lint-amnesty, pylint: disable=redefined-builtin
author = edx_theme.AUTHOR
# The short X.Y version
version = u''
version = ''
# The full version, including alpha/beta/rc tags
release = u''
release = ''
# -- General configuration ---------------------------------------------------
@@ -72,7 +72,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
@@ -143,8 +143,8 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'api-docs.tex', u'api-docs Documentation',
u'Nobody', 'manual'),
(master_doc, 'api-docs.tex', 'api-docs Documentation',
'Nobody', 'manual'),
]
@@ -153,7 +153,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'api-docs', u'api-docs Documentation',
(master_doc, 'api-docs', 'api-docs Documentation',
[author], 1)
]
@@ -164,7 +164,7 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'api-docs', u'api-docs Documentation',
(master_doc, 'api-docs', 'api-docs Documentation',
author, 'api-docs', 'One line description of project.',
'Miscellaneous'),
]

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*- # lint-amnesty, pylint: disable=missing-module-docstring
#
# Configuration file for the Sphinx documentation builder.
#
@@ -14,7 +13,6 @@ from subprocess import check_call
import django
import edx_theme
import six
from path import Path
root = Path('../..').abspath()
@@ -44,14 +42,14 @@ django.setup()
# -- Project information -----------------------------------------------------
project = u'edx-platform'
project = 'edx-platform'
copyright = edx_theme.COPYRIGHT # lint-amnesty, pylint: disable=redefined-builtin
author = edx_theme.AUTHOR
# The short X.Y version
version = u''
version = ''
# The full version, including alpha/beta/rc tags
release = u''
release = ''
# -- General configuration ---------------------------------------------------
@@ -95,7 +93,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
@@ -165,7 +163,7 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'edx-platform.tex', u'edx-platform Documentation',
(master_doc, 'edx-platform.tex', 'edx-platform Documentation',
author, 'manual'),
]
@@ -175,7 +173,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'edx-platform', u'edx-platform Documentation',
(master_doc, 'edx-platform', 'edx-platform Documentation',
[author], 1)
]
@@ -186,7 +184,7 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'edx-platform', u'edx-platform Documentation',
(master_doc, 'edx-platform', 'edx-platform Documentation',
author, 'edx-platform', 'The Open edX platform, the software that powers edX!',
'Miscellaneous'),
]
@@ -249,9 +247,9 @@ for app in cms_djangoapps:
modules[path] = path
# The Django apps under common must be imported directly, not under their path
for app in os.listdir(six.text_type(root / 'common' / 'djangoapps')):
for app in os.listdir(str(root / 'common' / 'djangoapps')):
path = os.path.join('common', 'djangoapps', app)
if os.path.isdir(six.text_type(root / path)) and app != 'terrain':
if os.path.isdir(str(root / path)) and app != 'terrain':
modules[path] = path
@@ -272,9 +270,9 @@ def update_settings_module(service='lms'):
for the module sphinx-apidoc is about to be run on.
"""
if os.environ['EDX_PLATFORM_SETTINGS'] == 'devstack_docker':
settings_module = '{}.envs.devstack_docker'.format(service)
settings_module = f'{service}.envs.devstack_docker'
else:
settings_module = '{}.envs.devstack'.format(service)
settings_module = f'{service}.envs.devstack'
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
@@ -297,8 +295,8 @@ def on_init(app): # lint-amnesty, pylint: disable=redefined-outer-name, unused-
exclude_files = ['admin.py', 'test.py', 'testing.py', 'tests.py', 'testutils.py', 'wsgi.py']
for module in modules:
module_path = six.text_type(root / module)
output_path = six.text_type(docs_path / modules[module])
module_path = str(root / module)
output_path = str(docs_path / modules[module])
args = [apidoc_path, '--ext-intersphinx', '-o',
output_path, module_path]
exclude = []

View File

@@ -35,7 +35,7 @@ def rst_header(text, level, anchor=None):
"""
rst = []
if anchor:
rst.append(".. _{}:".format(anchor))
rst.append(f".. _{anchor}:")
rst.append("")
char = " #=-"[level]
if level == 1:
@@ -102,10 +102,10 @@ def convert_swagger_to_sphinx(swagger_file, output_dir):
with open(os.path.join(output_dir, outfile + '.rst'), 'w') as outf:
pr_outf = functools.partial(print, file=outf)
pr_outf(rst_header(segment, level=1, anchor="gen_" + outfile))
pr_outf(".. openapi:: {}".format(rel_swagger_path))
pr_outf(f".. openapi:: {rel_swagger_path}")
pr_outf(" :format: markdown")
pr_outf(" :include:")
pr_outf(" {}.*".format(segment))
pr_outf(f" {segment}.*")
pr_index(rst_header(segment, level=2))
@@ -115,7 +115,7 @@ def convert_swagger_to_sphinx(swagger_file, output_dir):
summary = ''
if 'summary' in op_data:
summary = " --- {}".format(op_data['summary'])
pr_index(":ref:`{} {}<gen_{}>`{}\n".format(method.upper(), uri, outfile, summary))
pr_index(f":ref:`{method.upper()} {uri}<gen_{outfile}>`{summary}\n")
def main(args):

View File

@@ -490,7 +490,7 @@ def compile_sass(options):
print("\t\tFinished compiling Sass:")
if not dry_run:
for sass_dir, css_dir, duration in timing_info:
print(">> {} -> {} in {}s".format(sass_dir, css_dir, duration))
print(f">> {sass_dir} -> {css_dir} in {duration}s")
if compilation_results['success']:
print("\033[92m\nSuccessful compilations:\n--- " + "\n--- ".join(compilation_results['success']) + "\n\033[00m")
@@ -548,7 +548,7 @@ def _compile_sass(system, theme, debug, force, timing_info):
css_dir=css_dir,
))
else:
sh("rm -rf {css_dir}/*.css".format(css_dir=css_dir))
sh(f"rm -rf {css_dir}/*.css")
if dry_run:
tasks.environment.info("libsass {sass_dir}".format(
@@ -611,7 +611,7 @@ def process_npm_assets():
if library.startswith('node_modules/'):
library_path = library
else:
library_path = 'node_modules/{library}'.format(library=library)
library_path = f'node_modules/{library}'
if library.endswith('.css') or library.endswith('.css.map'):
vendor_dir = NPM_CSS_VENDOR_DIRECTORY
@@ -623,14 +623,14 @@ def process_npm_assets():
vendor_dir=vendor_dir,
))
elif not skip_if_missing:
raise Exception('Missing vendor file {library_path}'.format(library_path=library_path))
raise Exception(f'Missing vendor file {library_path}')
def copy_vendor_library_dir(library_dir, skip_if_missing=False):
"""
Copies all vendor libraries in directory to the shared vendor directory.
"""
library_dir_path = 'node_modules/{library_dir}'.format(library_dir=library_dir)
print('Copying vendor library dir: {}'.format(library_dir_path))
library_dir_path = f'node_modules/{library_dir}'
print(f'Copying vendor library dir: {library_dir_path}')
if os.path.exists(library_dir_path):
for dirpath, _, filenames in os.walk(library_dir_path):
for filename in filenames:
@@ -709,7 +709,7 @@ def collect_assets(systems, settings, **kwargs):
]
ignore_args = " ".join(
'--ignore "{}"'.format(pattern) for pattern in ignore_patterns
f'--ignore "{pattern}"' for pattern in ignore_patterns
)
for sys in systems:
@@ -718,7 +718,7 @@ def collect_assets(systems, settings, **kwargs):
ignore_args=ignore_args,
logfile_str=collectstatic_stdout_str
)))
print("\t\tFinished collecting {} assets.".format(sys))
print(f"\t\tFinished collecting {sys} assets.")
def _collect_assets_cmd(system, **kwargs):

View File

@@ -126,14 +126,14 @@ def parse_coverage(report_dir, coveragerc):
msg = colorize('green', "Combining coverage reports")
print(msg)
sh("coverage combine --rcfile={}".format(coveragerc))
sh(f"coverage combine --rcfile={coveragerc}")
msg = colorize('green', "Generating coverage reports")
print(msg)
sh("coverage html --rcfile={}".format(coveragerc))
sh("coverage xml --rcfile={}".format(coveragerc))
sh("coverage report --rcfile={}".format(coveragerc))
sh(f"coverage html --rcfile={coveragerc}")
sh(f"coverage xml --rcfile={coveragerc}")
sh(f"coverage report --rcfile={coveragerc}")
@task

View File

@@ -3,10 +3,10 @@
import os
from unittest import TestCase
from unittest.mock import patch
import ddt
import paver.tasks
from mock import patch
from paver.easy import call_task, path
from watchdog.observers import Observer
@@ -346,10 +346,10 @@ class TestCollectAssets(PaverTestCase):
"""
for i, sys in enumerate(systems):
msg = self.task_messages[i]
assert msg.startswith('python manage.py {}'.format(sys))
assert msg.startswith(f'python manage.py {sys}')
assert ' collectstatic ' in msg
assert '--settings={}'.format(Env.DEVSTACK_SETTINGS) in msg
assert msg.endswith(' {}'.format(log_location))
assert f'--settings={Env.DEVSTACK_SETTINGS}' in msg
assert msg.endswith(f' {log_location}')
@ddt.ddt
@@ -373,7 +373,7 @@ class TestUpdateAssetsTask(PaverTestCase):
call_task('pavelib.assets.update_assets', args=cmd_args)
self.assertTrue(
self._is_substring_in_list(self.task_messages, expected_substring),
msg="{substring} not found in messages".format(substring=expected_substring)
msg=f"{expected_substring} not found in messages"
)
def _is_substring_in_list(self, messages_list, expected_substring):

View File

@@ -8,9 +8,9 @@ import shutil
import tarfile
from tempfile import mkdtemp
from unittest import TestCase
from unittest.mock import call, patch, Mock
import boto
from mock import call, patch, Mock
from pavelib import database
from pavelib.utils import db_utils
@@ -69,7 +69,7 @@ class TestPaverDatabaseTasks(PaverTestCase):
# This value is the actual sha1 fingerprint calculated for the dummy
# files used in these tests
self.expected_fingerprint = 'ccaa8d8dcc7d030cd6a6768db81f90d0ef976c3d'
self.fingerprint_filename = '{}.tar.gz'.format(self.expected_fingerprint)
self.fingerprint_filename = f'{self.expected_fingerprint}.tar.gz'
self.bucket = Mock(name='test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@@ -94,8 +94,8 @@ class TestPaverDatabaseTasks(PaverTestCase):
# Make sure that the local cache files are used - NOT downloaded from s3
assert not _mock_get_file.called
calls = [
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --calculate_migrations'),
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --use-existing-db')
]
_mock_sh.assert_has_calls(calls)
@@ -137,8 +137,8 @@ class TestPaverDatabaseTasks(PaverTestCase):
)
calls = [
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --calculate_migrations'),
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --use-existing-db')
]
_mock_sh.assert_has_calls(calls)
@@ -165,8 +165,8 @@ class TestPaverDatabaseTasks(PaverTestCase):
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
calls = [
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --rebuild_cache --use-existing-db'.format(Env.REPO_ROOT))
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --calculate_migrations'),
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --rebuild_cache --use-existing-db')
]
_mock_sh.assert_has_calls(calls)

View File

@@ -4,9 +4,9 @@ Tests for Paver's Stylelint tasks.
import unittest
import pytest
from unittest.mock import patch
from mock import patch
import pytest
from paver.easy import BuildFailure, call_task
import pavelib.quality

View File

@@ -86,7 +86,7 @@ class TestGenerate(TestCase):
mofile = filename + '.mo'
path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
exists = os.path.exists(path)
assert exists, ('Missing file in locale %s: %s' % (locale, mofile))
assert exists, (f'Missing file in locale {locale}: {mofile}')
assert datetime.fromtimestamp(os.path.getmtime(path), UTC) >= \
self.start_time, ('File not recently modified: %s' % path)
# Segmenting means that the merge headers don't work they way they
@@ -109,7 +109,7 @@ class TestGenerate(TestCase):
pof = pofile(path)
pattern = re.compile('^#-#-#-#-#', re.M)
match = pattern.findall(pof.header)
assert len(match) == 3, ('Found %s (should be 3) merge comments in the header for %s' % (len(match), path))
assert len(match) == 3, ('Found {} (should be 3) merge comments in the header for {}'.format(len(match), path))
def random_name(size=6):

View File

@@ -6,9 +6,9 @@ Tests for pavelib/i18n.py.
import os
import textwrap
import unittest
import pytest
from unittest.mock import mock_open, patch
from mock import mock_open, patch
import pytest
from paver.easy import call_task, task
import pavelib.i18n
@@ -182,5 +182,5 @@ class TestI18nCompileJS(PaverTestCase):
self.reset_task_messages()
os.environ['NO_PREREQ_INSTALL'] = "true"
call_task('pavelib.i18n.i18n_compilejs', options={"settings": Env.TEST_SETTINGS})
assert self.task_messages == ['python manage.py lms --settings={} compilejsi18n'.format(Env.TEST_SETTINGS),
'python manage.py cms --settings={} compilejsi18n'.format(Env.TEST_SETTINGS)]
assert self.task_messages == [f'python manage.py lms --settings={Env.TEST_SETTINGS} compilejsi18n',
f'python manage.py cms --settings={Env.TEST_SETTINGS} compilejsi18n']

View File

@@ -1,8 +1,8 @@
"""Unit tests for the Paver JavaScript testing tasks."""
from unittest.mock import patch
import ddt
from mock import patch
from paver.easy import call_task
import pavelib.js_test
@@ -141,7 +141,7 @@ class TestPaverJavaScriptTestTasks(PaverTestCase):
suite=suite
)
if port:
expected_test_tool_command += " --port={port}".format(port=port)
expected_test_tool_command += f" --port={port}"
else:
expected_test_tool_command = 'jest'

View File

@@ -28,22 +28,22 @@ class TestPaverBokChoyCmd(unittest.TestCase):
shard_str = '/shard_' + self.shard if self.shard else ''
expected_statement = [
"DEFAULT_STORE={}".format(store),
"SAVED_SOURCE_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"SCREENSHOT_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"BOK_CHOY_HAR_DIR='{}/test_root/log{}/hars'".format(REPO_DIR, shard_str),
f"DEFAULT_STORE={store}",
f"SAVED_SOURCE_DIR='{REPO_DIR}/test_root/log{shard_str}'",
f"SCREENSHOT_DIR='{REPO_DIR}/test_root/log{shard_str}'",
f"BOK_CHOY_HAR_DIR='{REPO_DIR}/test_root/log{shard_str}/hars'",
"BOKCHOY_A11Y_CUSTOM_RULES_FILE='{}/{}'".format(
REPO_DIR,
'node_modules/edx-custom-a11y-rules/lib/custom_a11y_rules.js'
),
"SELENIUM_DRIVER_LOG_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"VERIFY_XSS='{}'".format(verify_xss),
f"SELENIUM_DRIVER_LOG_DIR='{REPO_DIR}/test_root/log{shard_str}'",
f"VERIFY_XSS='{verify_xss}'",
"python",
"-Wd",
"-m",
"pytest",
"{}/common/test/acceptance/{}".format(REPO_DIR, name),
"--junitxml={}/reports/bok_choy{}/xunit.xml".format(REPO_DIR, shard_str),
f"{REPO_DIR}/common/test/acceptance/{name}",
f"--junitxml={REPO_DIR}/reports/bok_choy{shard_str}/xunit.xml",
"--verbose",
]
return expected_statement
@@ -61,25 +61,25 @@ class TestPaverBokChoyCmd(unittest.TestCase):
def test_suite_spec(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
name = f'tests/{spec}'
assert suite.cmd == self._expected_command(name=name)
def test_class_spec(self):
spec = 'test_foo.py:FooTest'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
name = f'tests/{spec}'
assert suite.cmd == self._expected_command(name=name)
def test_testcase_spec(self):
spec = 'test_foo.py:FooTest.test_bar'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
name = f'tests/{spec}'
assert suite.cmd == self._expected_command(name=name)
def test_spec_with_draft_default_store(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec, default_store='draft')
name = 'tests/{}'.format(spec)
name = f'tests/{spec}'
assert suite.cmd == self._expected_command(name=name, store='draft')
def test_invalid_default_store(self):
@@ -134,7 +134,7 @@ class TestPaverBokChoyCmd(unittest.TestCase):
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
"-n {}".format(process_count),
f"-n {process_count}",
"--color=no",
"--verbose",
]
@@ -151,7 +151,7 @@ class TestPaverBokChoyCmd(unittest.TestCase):
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
"-n {}".format(process_count),
f"-n {process_count}",
"--color=no",
"--verbose",
]

View File

@@ -5,7 +5,7 @@ Tests to ensure only the report files we want are returned as part of run_qualit
import unittest
from mock import patch
from unittest.mock import patch
import pavelib.quality

View File

@@ -8,10 +8,10 @@ import shutil
import tempfile
import textwrap
import unittest
import pytest
from unittest.mock import MagicMock, mock_open, patch
import pytest
from ddt import data, ddt, file_data, unpack
from mock import MagicMock, mock_open, patch
from path import Path as path
from paver.easy import BuildFailure

View File

@@ -5,9 +5,9 @@ Tests for Paver's PII checker task.
import shutil
import tempfile
import unittest
import pytest
from unittest.mock import patch
from mock import patch
import pytest
from path import Path as path
from paver.easy import call_task, BuildFailure
@@ -44,7 +44,7 @@ class TestPaverPIICheck(unittest.TestCase):
assert any('cms.envs.test' in call for call in mock_calls)
assert all(str(self.report_dir) in call for call in mock_calls)
metrics_file = Env.METRICS_DIR / 'pii'
assert open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'
assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'
@patch.object(pavelib.quality.run_pii_check, 'needs')
@patch('pavelib.quality.sh')
@@ -71,4 +71,4 @@ class TestPaverPIICheck(unittest.TestCase):
assert any('cms.envs.test' in call for call in mock_calls)
assert all(str(self.report_dir) in call for call in mock_calls)
metrics_file = Env.METRICS_DIR / 'pii'
assert open(metrics_file, 'r').read() == 'Number of PII Annotation violations: 66\n'
assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'

View File

@@ -5,10 +5,10 @@ Tests covering the Open edX Paver prequisites installation workflow
import os
import unittest
import pytest
from unittest import mock
from unittest.mock import patch
import mock
from mock import patch
import pytest
from paver.easy import BuildFailure
import pavelib.prereqs
@@ -31,7 +31,7 @@ class TestPaverPrereqInstall(unittest.TestCase):
_orig_environ = dict(os.environ)
os.environ['NO_PREREQ_INSTALL'] = set_val
assert pavelib.prereqs.no_prereq_install() == expected_val,\
'NO_PREREQ_INSTALL is set to {}, but we read it as {}'.format(set_val, expected_val)
f'NO_PREREQ_INSTALL is set to {set_val}, but we read it as {expected_val}'
# Reset Environment back to original state
os.environ.clear()

View File

@@ -214,9 +214,9 @@ class TestPaverServerTasks(PaverTestCase):
if task_name == "devstack":
args = ["studio" if system == "cms" else system]
if settings:
args.append("--settings={settings}".format(settings=settings))
args.append(f"--settings={settings}")
if asset_settings:
args.append("--asset-settings={asset_settings}".format(asset_settings=asset_settings))
args.append(f"--asset-settings={asset_settings}")
if is_optimized:
args.append("--optimized")
if is_fast:
@@ -225,7 +225,7 @@ class TestPaverServerTasks(PaverTestCase):
args.append("--no-contracts")
call_task("pavelib.servers.devstack", args=args)
else:
call_task("pavelib.servers.{task_name}".format(task_name=task_name), options=options)
call_task(f"pavelib.servers.{task_name}", options=options)
expected_messages = options.get("expected_messages", [])
expected_settings = settings if settings else Env.DEVSTACK_SETTINGS
expected_asset_settings = asset_settings if asset_settings else expected_settings

View File

@@ -2,9 +2,10 @@
Tests for Paver's Stylelint tasks.
"""
from unittest.mock import MagicMock, patch
import pytest
import ddt
from mock import MagicMock, patch
from paver.easy import call_task
from .utils import PaverTestCase

View File

@@ -6,7 +6,7 @@ Tests of the pavelib.utils.timer module.
from datetime import datetime, timedelta
from unittest import TestCase
from mock import MagicMock, patch
from unittest.mock import MagicMock, patch
from pavelib.utils import timer

View File

@@ -4,9 +4,9 @@ Tests for pavelib/utils/test/utils
import unittest
import pytest
from unittest.mock import patch
from mock import patch
import pytest
from pavelib.utils.envs import Env
from pavelib.utils.test.utils import MINIMUM_FIREFOX_VERSION, check_firefox_version

View File

@@ -1,9 +1,9 @@
"""
Tests for paver xsscommitlint quality tasks
"""
from unittest.mock import patch
import pytest
from mock import patch
from paver.easy import call_task
import pavelib.quality

View File

@@ -1,9 +1,9 @@
"""
Tests for paver xsslint quality tasks
"""
from unittest.mock import patch
import pytest
from mock import patch
from paver.easy import call_task
import pavelib.quality

View File

@@ -97,10 +97,10 @@ def prereq_cache(cache_name, paths, install_func):
"""
# Retrieve the old hash
cache_filename = cache_name.replace(" ", "_")
cache_file_path = os.path.join(PREREQS_STATE_DIR, "{}.sha1".format(cache_filename))
cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1")
old_hash = None
if os.path.isfile(cache_file_path):
with open(cache_file_path, "r") as cache_file:
with open(cache_file_path) as cache_file:
old_hash = cache_file.read()
# Compare the old hash to the new hash
@@ -120,7 +120,7 @@ def prereq_cache(cache_name, paths, install_func):
post_install_hash = compute_fingerprint(paths)
cache_file.write(post_install_hash.encode('utf-8'))
else:
print('{cache} unchanged, skipping...'.format(cache=cache_name))
print(f'{cache_name} unchanged, skipping...')
def node_prereqs_installation():
@@ -132,9 +132,9 @@ def node_prereqs_installation():
# determine if any packages are chronic offenders.
shard_str = os.getenv('SHARD', None)
if shard_str:
npm_log_file_path = '{}/npm-install.{}.log'.format(Env.GEN_LOG_DIR, shard_str)
npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.{shard_str}.log'
else:
npm_log_file_path = '{}/npm-install.log'.format(Env.GEN_LOG_DIR)
npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log'
npm_log_file = open(npm_log_file_path, 'wb')
npm_command = 'npm install --verbose'.split()
@@ -151,7 +151,7 @@ def node_prereqs_installation():
proc = subprocess.Popen(npm_command, stderr=npm_log_file)
retcode = proc.wait()
if retcode == 1:
raise Exception("npm install failed: See {}".format(npm_log_file_path))
raise Exception(f"npm install failed: See {npm_log_file_path}")
print("Successfully installed NPM packages. Log found at {}".format(
npm_log_file_path
))
@@ -168,7 +168,7 @@ def python_prereqs_installation():
def pip_install_req_file(req_file):
"""Pip install the requirements file."""
pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'
sh("{pip_cmd} -r {req_file}".format(pip_cmd=pip_cmd, req_file=req_file))
sh(f"{pip_cmd} -r {req_file}")
@task
@@ -243,7 +243,7 @@ def uninstall_python_packages():
for package_name in PACKAGES_TO_UNINSTALL:
if package_in_frozen(package_name, frozen):
# Uninstall the pacakge
sh("pip uninstall --disable-pip-version-check -y {}".format(package_name))
sh(f"pip uninstall --disable-pip-version-check -y {package_name}")
uninstalled = True
if not uninstalled:
break

View File

@@ -40,7 +40,7 @@ def write_junit_xml(name, message=None):
'seconds': (datetime.utcnow() - START_TIME).total_seconds(),
}
Env.QUALITY_DIR.makedirs_p()
filename = Env.QUALITY_DIR / '{}.xml'.format(name)
filename = Env.QUALITY_DIR / f'{name}.xml'
with open(filename, 'w') as f:
f.write(JUNIT_XML_TEMPLATE.format(**data))
@@ -106,7 +106,7 @@ def find_fixme(options):
sh(cmd, ignore_error=True)
num_fixme += _count_pylint_violations(
"{report_dir}/pylint_fixme.report".format(report_dir=report_dir))
f"{report_dir}/pylint_fixme.report")
print("Number of pylint fixmes: " + str(num_fixme))
@@ -264,7 +264,7 @@ def _get_pep8_violations(clean=True):
Env.METRICS_DIR.makedirs_p()
if not report.exists():
sh('pycodestyle . | tee {} -a'.format(report))
sh(f'pycodestyle . | tee {report} -a')
violations_list = _pep8_violations(report)
@@ -294,7 +294,7 @@ def run_pep8(options): # pylint: disable=unused-argument
violations_list = ''.join(violations_list)
# Print number of violations to log
violations_count_str = "Number of PEP 8 violations: {count}".format(count=count)
violations_count_str = f"Number of PEP 8 violations: {count}"
print(violations_count_str)
print(violations_list)
@@ -306,7 +306,7 @@ def run_pep8(options): # pylint: disable=unused-argument
# Fail if any violations are found
if count:
failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str
failure_string += "\n\nViolations:\n{violations_list}".format(violations_list=violations_list)
failure_string += f"\n\nViolations:\n{violations_list}"
fail_quality('pep8', failure_string)
else:
write_junit_xml('pep8')
@@ -492,7 +492,7 @@ def run_xsslint(options):
# Record the metric
_write_metric(metrics_str, metrics_report)
# Print number of violations to log.
sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
sh(f"cat {metrics_report}", ignore_error=True)
error_message = ""
@@ -576,7 +576,7 @@ def run_xsscommitlint():
metrics_report = (Env.METRICS_DIR / "xsscommitlint")
_write_metric(violations_count_str, metrics_report)
# Output report to console.
sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
sh(f"cat {metrics_report}", ignore_error=True)
if num_violations:
fail_quality(
'xsscommitlint',
@@ -634,7 +634,7 @@ def _get_report_contents(filename, report_name, last_line_only=False):
else:
return report_file.read()
else:
file_not_found_message = "FAILURE: The following log file could not be found: {file}".format(file=filename)
file_not_found_message = f"FAILURE: The following log file could not be found: {filename}"
fail_quality(report_name, file_not_found_message)
@@ -736,7 +736,7 @@ def _extract_missing_pii_annotations(filename):
uncovered_models = 0
pii_check_passed = True
if os.path.isfile(filename):
with open(filename, 'r') as report_file:
with open(filename) as report_file:
lines = report_file.readlines()
# Find the count of uncovered models.
@@ -758,7 +758,7 @@ def _extract_missing_pii_annotations(filename):
# Each line in lines already contains a newline.
full_log = ''.join(lines)
else:
fail_quality('pii', 'FAILURE: Log file could not be found: {}'.format(filename))
fail_quality('pii', f'FAILURE: Log file could not be found: {filename}')
return (uncovered_models, pii_check_passed, full_log)
@@ -782,7 +782,7 @@ def run_pii_check(options):
for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")):
try:
print()
print("Running {} PII Annotation check and report".format(env_name))
print(f"Running {env_name} PII Annotation check and report")
print("-" * 45)
run_output_file = str(output_file).format(env_name.lower())
sh(
@@ -801,7 +801,7 @@ def run_pii_check(options):
))
except BuildFailure as error_message:
fail_quality(pii_report_name, 'FAILURE: {}'.format(error_message))
fail_quality(pii_report_name, f'FAILURE: {error_message}')
if not pii_check_passed_env:
pii_check_passed = False
@@ -812,7 +812,7 @@ def run_pii_check(options):
# Write metric file.
if uncovered_count is None:
uncovered_count = 0
metrics_str = "Number of PII Annotation violations: {}\n".format(uncovered_count)
metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n"
_write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name))
# Finally, fail the paver task if code_annotations suggests that the check failed.
@@ -828,11 +828,11 @@ def check_keywords():
Check Django model fields for names that conflict with a list of reserved keywords
"""
report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords')
sh("mkdir -p {}".format(report_path))
sh(f"mkdir -p {report_path}")
overall_status = True
for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]:
report_file = "{}_reserved_keyword_report.csv".format(env)
report_file = f"{env}_reserved_keyword_report.csv"
override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml")
try:
sh(
@@ -900,10 +900,10 @@ def run_quality(options):
else:
lines = []
sep = '-------------\n'
title = "Quality Report: {}\n".format(linter)
title = f"Quality Report: {linter}\n"
violations_str = ''.join(violations_list)
violations_count_str = "Violations: {count}\n"
fail_line = "FAILURE: {} count should be {}\n".format(linter, limit)
fail_line = f"FAILURE: {linter} count should be {limit}\n"
violations_count_str = violations_count_str.format(count=count)
@@ -931,15 +931,15 @@ def run_quality(options):
# ----- Set up for diff-quality pylint call -----
# Set the string to be used for the diff-quality --compare-branch switch.
compare_branch = getattr(options, 'compare_branch', 'origin/master')
compare_commit = sh('git merge-base HEAD {}'.format(compare_branch), capture=True).strip()
compare_commit = sh(f'git merge-base HEAD {compare_branch}', capture=True).strip()
if sh('git rev-parse HEAD', capture=True).strip() != compare_commit:
compare_branch_string = '--compare-branch={}'.format(compare_commit)
compare_branch_string = f'--compare-branch={compare_commit}'
# Set the string, if needed, to be used for the diff-quality --fail-under switch.
diff_threshold = int(getattr(options, 'percentage', -1))
percentage_string = ''
if diff_threshold > -1:
percentage_string = '--fail-under={}'.format(diff_threshold)
percentage_string = f'--fail-under={diff_threshold}'
pylint_files = get_violations_reports("pylint")
pylint_reports = ' '.join(pylint_files)
@@ -1000,7 +1000,7 @@ def run_diff_quality(
else:
fail_quality(
'diff_quality',
'FAILURE: See "Diff Quality Report" in Jenkins left-sidebar for details. {}'.format(failure)
f'FAILURE: See "Diff Quality Report" in Jenkins left-sidebar for details. {failure}'
)
@@ -1023,6 +1023,6 @@ def get_violations_reports(violations_type):
violations_files = []
for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR)):
for f in files:
if f == "{violations_type}.report".format(violations_type=violations_type):
if f == f"{violations_type}.report":
violations_files.append(os.path.join(subdir, f))
return violations_files

View File

@@ -45,7 +45,7 @@ def run_server(
settings = DEFAULT_SETTINGS
if not fast and asset_settings:
args = [system, '--settings={}'.format(asset_settings), '--watch']
args = [system, f'--settings={asset_settings}', '--watch']
# The default settings use DEBUG mode for running the server which means that
# the optimized assets are ignored, so we skip collectstatic in that case
# to save time.
@@ -56,7 +56,7 @@ def run_server(
if port is None:
port = DEFAULT_PORT[system]
args = [settings, 'runserver', '--traceback', '--pythonpath=.', '0.0.0.0:{}'.format(port)]
args = [settings, 'runserver', '--traceback', '--pythonpath=.', f'0.0.0.0:{port}']
if contracts:
args.append("--contracts")
@@ -159,7 +159,7 @@ def celery(options):
Runs Celery workers.
"""
settings = getattr(options, 'settings', 'devstack_with_worker')
run_process(cmd('DJANGO_SETTINGS_MODULE=lms.envs.{}'.format(settings),
run_process(cmd(f'DJANGO_SETTINGS_MODULE=lms.envs.{settings}',
'celery', 'worker', '--app=lms.celery:APP',
'--beat', '--loglevel=INFO', '--pythonpath=.'))
@@ -207,7 +207,7 @@ def run_all_servers(options):
# First update assets for both LMS and Studio but don't collect static yet
args = [
'lms', 'studio',
'--settings={}'.format(asset_settings),
f'--settings={asset_settings}',
'--skip-collect'
]
call_task('pavelib.assets.update_assets', args=args)
@@ -226,8 +226,8 @@ def run_all_servers(options):
# Start up LMS, CMS and Celery
lms_port = DEFAULT_PORT['lms']
cms_port = DEFAULT_PORT['studio']
lms_runserver_args = ["0.0.0.0:{}".format(lms_port)]
cms_runserver_args = ["0.0.0.0:{}".format(cms_port)]
lms_runserver_args = [f"0.0.0.0:{lms_port}"]
cms_runserver_args = [f"0.0.0.0:{cms_port}"]
run_multi_processes([
django_cmd(
@@ -237,7 +237,7 @@ def run_all_servers(options):
'studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', *cms_runserver_args
),
cmd(
'DJANGO_SETTINGS_MODULE=lms.envs.{}'.format(worker_settings),
f'DJANGO_SETTINGS_MODULE=lms.envs.{worker_settings}',
'celery', 'worker', '--app=lms.celery:APP',
'--beat', '--loglevel=INFO', '--pythonpath=.'
)
@@ -282,9 +282,9 @@ def check_settings(args):
settings = args.settings[0]
try:
import_cmd = "echo 'import {system}.envs.{settings}'".format(system=system, settings=settings)
import_cmd = f"echo 'import {system}.envs.{settings}'"
django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
sh("{import_cmd} | {shell_cmd}".format(import_cmd=import_cmd, shell_cmd=django_shell_cmd))
sh(f"{import_cmd} | {django_shell_cmd}")
except: # pylint: disable=bare-except
print("Failed to import settings", file=sys.stderr)

View File

@@ -337,7 +337,7 @@ def coverage(options):
if not combined_report_file.isfile():
# This may be that the coverage files were generated using -p,
# try to combine them to the one file that we need.
sh("coverage combine --rcfile={}".format(rcfile))
sh(f"coverage combine --rcfile={rcfile}")
if not os.path.getsize(combined_report_file) > 50:
# Check if the .coverage data file is larger than the base file,
@@ -352,9 +352,9 @@ def coverage(options):
return
# Generate the coverage.py XML report
sh("coverage xml --rcfile={}".format(rcfile))
sh(f"coverage xml --rcfile={rcfile}")
# Generate the coverage.py HTML report
sh("coverage html --rcfile={}".format(rcfile))
sh(f"coverage html --rcfile={rcfile}")
diff_coverage() # pylint: disable=no-value-for-parameter

View File

@@ -21,4 +21,4 @@ def django_cmd(sys, settings, *args):
# Maintain backwards compatibility with manage.py,
# which calls "studio" "cms"
sys = 'cms' if sys == 'studio' else sys
return cmd("python manage.py", sys, "--settings={}".format(settings), *args)
return cmd("python manage.py", sys, f"--settings={settings}", *args)