feat!: remove paver docs, i18n, servers, and pylint

This is the first step of Paver removal.
These are the parts that we are confident are unused.

Part of: https://github.com/openedx/edx-platform/issues/34467
This commit is contained in:
Kyle D. McCormick
2024-05-16 19:14:47 -04:00
committed by Kyle McCormick
parent 5c3dd11c6a
commit c4a1f800e9
12 changed files with 1 additions and 1317 deletions

View File

@@ -3,4 +3,4 @@ paver commands
"""
from . import assets, docs, i18n, js_test, prereqs, quality, servers
from . import assets, js_test, prereqs, quality

View File

@@ -1,81 +0,0 @@
"""
Open edX Documentation Builder
Ties into Sphinx to generate files at the specified location(s)
"""
import sys
from paver.easy import cmdopts, needs, sh, task
from .utils.timer import timed
# Mapping of documentation type (the value accepted by the task's `--type`
# option) to the directory containing that documentation's Sphinx sources.
# "default" is used when no --type is given.
DOC_PATHS = {
    "dev": "docs/en_us/developers",
    "author": "docs/en_us/course_authors",
    "data": "docs/en_us/data",
    "default": "docs/en_us"
}
def valid_doc_types():
    """
    Return a comma-separated string of valid doc types.

    The order follows the insertion order of DOC_PATHS.
    """
    # Iterating the dict yields its keys directly; the original
    # `list(DOC_PATHS.keys())` wrapped that in two redundant calls.
    return ", ".join(DOC_PATHS)
def doc_path(options, allow_default=True):
    """
    Parse `options` (from the Paver task args) to determine the path
    to the documentation directory.

    If the specified path is not one of the valid options, print an error
    message and exit.

    If `allow_default` is False, then require that a type is specified,
    and exit with an error message if it isn't.
    """
    doc_type = getattr(options, 'type', 'default')
    path = DOC_PATHS.get(doc_type)

    # Reject an implicit "default" when the caller demands an explicit type.
    if doc_type == 'default' and not allow_default:
        print(
            "You must specify a documentation type using '--type'. "
            "Valid options are: {options}".format(options=valid_doc_types())
        )
        sys.exit(1)

    # Unknown type: report and bail out.
    if path is None:
        print(
            "Invalid documentation type '{doc_type}'. "
            "Valid options are: {options}".format(doc_type=doc_type, options=valid_doc_types())
        )
        sys.exit(1)

    return path
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("type=", "t", "Type of docs to compile"),
    ("verbose", "v", "Display verbose output"),
])
@timed
def build_docs(options):
    """
    Invoke sphinx 'make build' to generate docs.
    """
    # Sphinx is quiet unless --verbose was passed.
    quiet_flag = "false" if getattr(options, 'verbose', False) else "true"
    sh("cd {dir}; make html quiet={quiet}".format(
        dir=doc_path(options),
        quiet=quiet_flag
    ))

View File

@@ -1,106 +0,0 @@
"""
Internationalization tasks
"""
import subprocess
import sys
from paver.easy import cmdopts, needs, sh, task
from .utils.envs import Env
from .utils.timer import timed
# Colorized output is purely cosmetic: if Pygments isn't installed, fall
# back to returning the text unchanged.
try:
    from pygments.console import colorize
except ImportError:
    colorize = lambda color, text: text

# Default Django settings module (devstack) used by these tasks.
DEFAULT_SETTINGS = Env.DEVSTACK_SETTINGS
@task
@needs(
    "pavelib.prereqs.install_prereqs",
    "pavelib.i18n.i18n_validate_gettext",
)
@cmdopts([
    ("verbose", "v", "Sets 'verbose' to True"),
])
@timed
def i18n_extract(options):
    """
    Extract localizable strings from sources
    """
    command = "i18n_tool extract"
    # Forward verbosity to i18n_tool when requested.
    if getattr(options, "verbose", None):
        command = command + " -v"
    sh(command)
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_generate():
    """
    Compile localizable strings from sources, extracting strings first.
    """
    # Extraction runs first via the @needs dependency on i18n_extract.
    sh("i18n_tool generate")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_generate_strict():
    """
    Compile localizable strings from sources, extracting strings first.
    Complains if files are missing.
    """
    # --strict makes i18n_tool fail on missing translation files instead
    # of silently skipping them.
    sh("i18n_tool generate --strict")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_dummy():
    """
    Simulate international translation by generating dummy strings
    corresponding to source strings.
    """
    # Generate the dummy catalogs, then compile them; order matters.
    for tool_command in ("i18n_tool dummy", "i18n_tool generate"):
        sh(tool_command)
@task
@timed
def i18n_validate_gettext():
    """
    Make sure GNU gettext utilities are available
    """
    # `which xgettext` returns non-zero when the binary isn't on PATH.
    if subprocess.call(['which', 'xgettext']) != 0:
        sys.stderr.write(colorize(
            'red',
            "Cannot locate GNU gettext utilities, which are "
            "required by django for internationalization.\n (see "
            "https://docs.djangoproject.com/en/dev/topics/i18n/"
            "translation/#message-files)\nTry downloading them from "
            "http://www.gnu.org/software/gettext/ \n"
        ))
        sys.exit(1)
@task
@timed
def i18n_clean():
    """
    Clean the i18n directory of artifacts
    """
    # git clean flags: -f force, -d include directories, -X remove only
    # git-ignored files; scoped to conf/locale so tracked files are safe.
    sh('git clean -fdX conf/locale')

View File

@@ -1,127 +0,0 @@
"""
This test tests that i18n extraction (`paver i18n_extract -v`) works properly.
"""
import os
import random
import re
import string
import subprocess
import sys
from datetime import datetime, timedelta
from unittest import TestCase
from i18n import config, dummy, extract, generate
from polib import pofile
from pytz import UTC
class TestGenerate(TestCase):
    """
    Tests functionality of i18n/generate.py
    """
    # Files that the extraction step is expected to produce.
    generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po')

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        sys.stderr.write(
            "\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
            "If you experience failures, please check that all instances of `gettext` and "
            "`ngettext` are used correctly. You can also try running `paver i18n_extract -v` "
            "locally for more detail.\n"
        )
        sys.stderr.write(
            "\nExtracting i18n strings and generating dummy translations; "
            "this may take a few minutes\n"
        )
        sys.stderr.flush()
        # Extraction and dummy generation are slow, so run them once for
        # the whole class rather than per test.
        extract.main(verbose=0)
        dummy.main(verbose=0)

    @classmethod
    def tearDownClass(cls):
        # Clear the Esperanto & RTL directories of any test artifacts
        cmd = "git checkout conf/locale/eo conf/locale/rtl"
        sys.stderr.write("Cleaning up dummy language directories: " + cmd)
        sys.stderr.flush()
        returncode = subprocess.call(cmd, shell=True)
        assert returncode == 0
        super().tearDownClass()

    def setUp(self):
        super().setUp()
        self.configuration = config.Configuration()
        # Subtract 1 second to help comparisons with file-modify time succeed,
        # since os.path.getmtime() is not millisecond-accurate
        self.start_time = datetime.now(UTC) - timedelta(seconds=1)

    def test_merge(self):
        """
        Tests merge script on English source files.
        """
        filename = os.path.join(self.configuration.source_messages_dir, random_name())
        generate.merge(self.configuration, self.configuration.source_locale, target=filename)
        assert os.path.exists(filename)
        os.remove(filename)

    def test_main(self):
        """
        Runs generate.main() which should merge source files,
        then compile all sources in all configured languages.
        Validates output by checking all .mo files in all configured languages.
        .mo files should exist, and be recently created (modified
        after start of test suite)
        """
        # Change dummy_locales to only contain Esperanto.
        self.configuration.dummy_locales = ['eo']
        # Clear previous files.
        for locale in self.configuration.dummy_locales:
            for filename in ('django', 'djangojs'):
                mofile = filename + '.mo'
                path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
                if os.path.exists(path):
                    os.remove(path)
        # Regenerate files.
        generate.main(verbosity=0, strict=False)
        for locale in self.configuration.dummy_locales:
            for filename in ('django', 'djangojs'):
                mofile = filename + '.mo'
                path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
                exists = os.path.exists(path)
                assert exists, (f'Missing file in locale {locale}: {mofile}')
                # Freshly compiled .mo files must be newer than the suite's
                # start time (minus the 1-second slack set up in setUp).
                assert datetime.fromtimestamp(os.path.getmtime(path), UTC) >= \
                    self.start_time, ('File not recently modified: %s' % path)
            # Segmenting means that the merge headers don't work they way they
            # used to, so don't make this check for now. I'm not sure if we'll
            # get the merge header back eventually, or delete this code eventually.
            # self.assert_merge_headers(locale)

    def assert_merge_headers(self, locale):
        """
        This is invoked by test_main to ensure that it runs after
        calling generate.main().
        There should be exactly three merge comment headers
        in our merged .po file. This counts them to be sure.
        A merge comment looks like this:
        # #-#-#-#-# django-partial.po (0.1a) #-#-#-#-#
        """
        path = os.path.join(self.configuration.get_messages_dir(locale), 'django.po')
        pof = pofile(path)
        pattern = re.compile('^#-#-#-#-#', re.M)
        match = pattern.findall(pof.header)
        assert len(match) == 3, (f'Found {len(match)} (should be 3) merge comments in the header for {path}')
def random_name(size=6):
    """Returns random filename as string, like test-4BZ81W"""
    alphabet = string.ascii_uppercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(size))
    return 'test-' + suffix

View File

@@ -1,35 +0,0 @@
"""
Tests for pavelib/i18n.py.
"""
import os
from unittest.mock import patch
from paver.easy import call_task
import pavelib.i18n
from pavelib.paver_tests.utils import PaverTestCase
class TestI18nDummy(PaverTestCase):
    """
    Test the Paver i18n_dummy task.
    """

    def setUp(self):
        super().setUp()
        # Stub out the @needs dependency resolution on i18n_extract so
        # the task runs without installing prerequisites.
        patcher = patch.object(pavelib.i18n.i18n_extract, 'needs')
        self._mock_paver_needs = patcher.start()
        self._mock_paver_needs.return_value = 0
        self.addCleanup(self._mock_paver_needs.stop)

    def test_i18n_dummy(self):
        """
        Test the "i18n_dummy" task.
        """
        self.reset_task_messages()
        os.environ['NO_PREREQ_INSTALL'] = "true"
        call_task('pavelib.i18n.i18n_dummy')
        expected = ['i18n_tool extract', 'i18n_tool dummy', 'i18n_tool generate']
        assert self.task_messages == expected

View File

@@ -1,48 +0,0 @@
"""
Tests to ensure only the report files we want are returned as part of run_quality.
"""
import unittest
from unittest.mock import patch
import pavelib.quality
class TestGetReportFiles(unittest.TestCase):
    """
    Ensure only the report files we want are returned as part of run_quality.
    """

    @patch('os.walk')
    def test_get_pylint_reports(self, my_mock):
        # Two directories, each holding one pylint report.
        walk_entries = [
            ('/foo', (None,), ('pylint.report',)),
            ('/bar', ('/baz',), ('pylint.report',)),
        ]
        my_mock.return_value = iter(walk_entries)
        assert len(pavelib.quality.get_violations_reports("pylint")) == 2

    @patch('os.walk')
    def test_get_pep8_reports(self, my_mock):
        walk_entries = [
            ('/foo', (None,), ('pep8.report',)),
            ('/bar', ('/baz',), ('pep8.report',)),
        ]
        my_mock.return_value = iter(walk_entries)
        assert len(pavelib.quality.get_violations_reports("pep8")) == 2

    @patch('os.walk')
    def test_get_pep8_reports_noisy(self, my_mock):
        """ Several conditions: different report types, different files, multiple files """
        walk_entries = [
            ('/foo', (None,), ('pep8.report',)),
            ('/fooz', ('/ball',), ('pylint.report',)),
            ('/fooz', ('/ball',), ('non.report',)),
            ('/fooz', ('/ball',), ('lms.xml',)),
            ('/bar', ('/baz',), ('pep8.report',)),
        ]
        my_mock.return_value = iter(walk_entries)
        assert len(pavelib.quality.get_violations_reports("pep8")) == 2

View File

@@ -32,31 +32,6 @@ class TestPaverQualityViolations(unittest.TestCase):
self.f.close()
self.addCleanup(os.remove, self.f.name)
def test_pylint_parser_other_string(self):
    """Lines that don't match pylint's violation format are not counted."""
    with open(self.f.name, 'w') as f:
        f.write("hello")
    num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
    assert num == 0
def test_pylint_parser_pep8(self):
    """Pycodestyle-format output must not be miscounted as pylint violations."""
    # Pep8 violations should be ignored.
    with open(self.f.name, 'w') as f:
        f.write("foo/hello/test.py:304:15: E203 whitespace before ':'")
    num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
    assert num == 0
@file_data('pylint_test_list.json')
def test_pylint_parser_count_violations(self, value):
    """
    Tests:
    - Different types of violations
    - One violation covering multiple lines
    """
    # Each ddt-supplied sample from pylint_test_list.json should parse as
    # exactly one violation, even when it spans multiple lines.
    with open(self.f.name, 'w') as f:
        f.write(value)
    num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
    assert num == 1
def test_pep8_parser(self):
with open(self.f.name, 'w') as f:
f.write("hello\nhithere")

View File

@@ -1,120 +0,0 @@
"""
Tests covering the Open edX Paver prequisites installation workflow
"""
import os
import unittest
from unittest import mock
from unittest.mock import patch
import pytest
from paver.easy import BuildFailure
import pavelib.prereqs
from pavelib.paver_tests.utils import PaverTestCase, fail_on_npm_install, unexpected_fail_on_npm_install
class TestPaverPrereqInstall(unittest.TestCase):
    """
    Test the status of the NO_PREREQ_INSTALL variable, its presence and how
    paver handles it.
    """

    def check_val(self, set_val, expected_val):
        """
        Verify that setting the variable to a certain value returns
        the expected boolean for it.

        As environment variables are only stored as strings, we have to cast
        whatever it's set at to a boolean that does not violate expectations.
        """
        _orig_environ = dict(os.environ)
        try:
            os.environ['NO_PREREQ_INSTALL'] = set_val
            assert pavelib.prereqs.no_prereq_install() == expected_val,\
                f'NO_PREREQ_INSTALL is set to {set_val}, but we read it as {expected_val}'
        finally:
            # Reset Environment back to original state. The try/finally
            # guarantees restoration even when the assertion fails, so one
            # failing case can't leak NO_PREREQ_INSTALL into later tests
            # (the original only restored on success).
            os.environ.clear()
            os.environ.update(_orig_environ)

    def test_no_prereq_install_true_lowercase(self):
        """
        Ensure that 'true' will be True.
        """
        self.check_val('true', True)

    def test_no_prereq_install_false_lowercase(self):
        """
        Ensure that 'false' will be False.
        """
        self.check_val('false', False)

    def test_no_prereq_install_true(self):
        """
        Ensure that 'True' will be True.
        """
        self.check_val('True', True)

    def test_no_prereq_install_false(self):
        """
        Ensure that 'False' will be False.
        """
        self.check_val('False', False)

    def test_no_prereq_install_0(self):
        """
        Ensure that '0' will be False.
        """
        self.check_val('0', False)

    def test_no_prereq_install_1(self):
        """
        Ensure that '1' will be True.
        """
        self.check_val('1', True)
class TestPaverNodeInstall(PaverTestCase):
    """
    Test node install logic
    """

    def setUp(self):
        super().setUp()
        # Ensure prereqs will be run
        os.environ['NO_PREREQ_INSTALL'] = 'false'

    def test_npm_install_with_subprocess_error(self):
        """
        Test an error in 'npm ci' execution
        """
        with patch('subprocess.Popen') as popen_mock:
            process_mock = mock.Mock()
            process_mock.configure_mock(**{'wait': fail_on_npm_install})
            popen_mock.return_value = process_mock
            with pytest.raises(Exception):
                pavelib.prereqs.node_prereqs_installation()
            # npm ci will be called twice
            assert popen_mock.call_count == 2

    def test_npm_install_called_once_when_successful(self):
        """
        Vanilla npm ci should only be calling npm ci one time
        """
        with patch('subprocess.Popen') as popen_mock:
            pavelib.prereqs.node_prereqs_installation()
            # when there's no failure, npm ci is only called once
            assert popen_mock.call_count == 1

    def test_npm_install_with_unexpected_subprocess_error(self):
        """
        If there's some other error, only call npm ci once, and raise a failure
        """
        with patch('subprocess.Popen') as popen_mock:
            popen_mock.side_effect = unexpected_fail_on_npm_install
            with pytest.raises(BuildFailure):
                pavelib.prereqs.node_prereqs_installation()
            assert popen_mock.call_count == 1

View File

@@ -1,307 +0,0 @@
"""Unit tests for the Paver server tasks."""
import json
import ddt
from paver.easy import call_task
from ..utils.envs import Env
from .utils import PaverTestCase
# Expected shell commands emitted by the server tasks under test. Each
# template is .format()-ed by the assertions with the settings/paths in play.
EXPECTED_SASS_COMMAND = (
    "libsass {sass_directory}"
)
# Sass source directories shared by both LMS and CMS.
EXPECTED_COMMON_SASS_DIRECTORIES = [
    "common/static/sass",
]
EXPECTED_LMS_SASS_DIRECTORIES = [
    "lms/static/sass",
    "lms/static/certificates/sass",
]
EXPECTED_CMS_SASS_DIRECTORIES = [
    "cms/static/sass",
]
# Management commands expected for sass compilation, one list per system.
EXPECTED_LMS_SASS_COMMAND = [
    "python manage.py lms --settings={asset_settings} compile_sass lms ",
]
EXPECTED_CMS_SASS_COMMAND = [
    "python manage.py cms --settings={asset_settings} compile_sass cms ",
]
# collectstatic invocation; {log_string} carries the output redirection.
EXPECTED_COLLECT_STATIC_COMMAND = (
    'python manage.py {system} --settings={asset_settings} collectstatic '
    '--ignore "fixtures" --ignore "karma_*.js" --ignore "spec" '
    '--ignore "spec_helpers" --ignore "spec-helpers" --ignore "xmodule_js" '
    '--ignore "geoip" --ignore "sass" '
    '--noinput {log_string}'
)
EXPECTED_CELERY_COMMAND = (
    "DJANGO_SETTINGS_MODULE=lms.envs.{settings} celery worker "
    "--app=lms.celery:APP --beat --loglevel=INFO --pythonpath=."
)
EXPECTED_RUN_SERVER_COMMAND = (
    "python manage.py {system} --settings={settings} runserver --traceback --pythonpath=. 0.0.0.0:{port}"
)
EXPECTED_INDEX_COURSE_COMMAND = (
    "python manage.py {system} --settings={settings} reindex_course --setup"
)
# print_setting calls used to discover static roots and webpack config.
EXPECTED_PRINT_SETTINGS_COMMAND = [
    "python manage.py lms --settings={settings} print_setting STATIC_ROOT WEBPACK_CONFIG_PATH 2>{log_file}",
    "python manage.py cms --settings={settings} print_setting STATIC_ROOT 2>{log_file}",
    "python manage.py cms --settings={settings} print_setting JS_ENV_EXTRA_CONFIG 2>{log_file} --json",
]
EXPECTED_WEBPACK_COMMAND = (
    "NODE_ENV={node_env} STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} "
    "JS_ENV_EXTRA_CONFIG={js_env_extra_config} "
    "$(npm bin)/webpack --config={webpack_config_path}"
)
@ddt.ddt
class TestPaverServerTasks(PaverTestCase):
    """
    Test the Paver server tasks.
    """

    @ddt.data(
        [{}],
        [{"settings": "aws"}],
        [{"asset-settings": "test_static_optimized"}],
        [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
        [{"fast": True}],
        [{"port": 8030}],
    )
    @ddt.unpack
    def test_lms(self, options):
        """
        Test the "devstack" task.
        """
        self.verify_server_task("lms", options)

    @ddt.data(
        [{}],
        [{"settings": "aws"}],
        [{"asset-settings": "test_static_optimized"}],
        [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
        [{"fast": True}],
        [{"port": 8031}],
    )
    @ddt.unpack
    def test_studio(self, options):
        """
        Test the "devstack" task.
        """
        self.verify_server_task("studio", options)

    @ddt.data(
        [{}],
        [{"settings": "aws"}],
        [{"asset-settings": "test_static_optimized"}],
        [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
        [{"fast": True}],
        [{"optimized": True}],
        [{"optimized": True, "fast": True}],
    )
    @ddt.unpack
    def test_devstack(self, server_options):
        """
        Test the "devstack" task.
        """
        options = server_options.copy()
        is_optimized = options.get("optimized", False)
        expected_settings = "devstack_optimized" if is_optimized else options.get("settings", Env.DEVSTACK_SETTINGS)
        # First test with LMS
        options["system"] = "lms"
        # devstack always reindexes courses via cms, regardless of system.
        options["expected_messages"] = [
            EXPECTED_INDEX_COURSE_COMMAND.format(
                system="cms",
                settings=expected_settings,
            )
        ]
        self.verify_server_task("devstack", options)
        # Then test with Studio
        options["system"] = "cms"
        options["expected_messages"] = [
            EXPECTED_INDEX_COURSE_COMMAND.format(
                system="cms",
                settings=expected_settings,
            )
        ]
        self.verify_server_task("devstack", options)

    @ddt.data(
        [{}],
        [{"settings": "aws"}],
        [{"asset_settings": "test_static_optimized"}],
        [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
        [{"fast": True}],
        [{"optimized": True}],
        [{"optimized": True, "fast": True}],
    )
    @ddt.unpack
    def test_run_all_servers(self, options):
        """
        Test the "run_all_servers" task.
        """
        self.verify_run_all_servers_task(options)

    @ddt.data(
        [{}],
        [{"settings": "aws"}],
    )
    @ddt.unpack
    def test_celery(self, options):
        """
        Test the "celery" task.
        """
        settings = options.get("settings", "devstack_with_worker")
        call_task("pavelib.servers.celery", options=options)
        assert self.task_messages == [EXPECTED_CELERY_COMMAND.format(settings=settings)]

    @ddt.data(
        ["lms", {}],
        ["lms", {"settings": "aws"}],
        ["cms", {}],
        ["cms", {"settings": "aws"}],
    )
    @ddt.unpack
    def test_check_settings(self, system, options):
        """
        Test the "check_settings" task.
        """
        settings = options.get("settings", Env.DEVSTACK_SETTINGS)
        call_task("pavelib.servers.check_settings", args=[system, settings])
        assert self.task_messages == \
            ["echo 'import {system}.envs.{settings}' | python manage.py {system} "
             "--settings={settings} shell --plain --pythonpath=.".format(system=system, settings=settings)]

    def verify_server_task(self, task_name, options):
        """
        Verify the output of a server task.

        Builds the list of shell commands the task is expected to emit
        (asset pipeline, collectstatic, runserver) and compares it against
        the messages recorded by PaverTestCase.
        """
        log_string = options.get("log_string", "> /dev/null")
        settings = options.get("settings", None)
        asset_settings = options.get("asset-settings", None)
        is_optimized = options.get("optimized", False)
        is_fast = options.get("fast", False)
        # Resolve which Django system the task targets.
        if task_name == "devstack":
            system = options.get("system")
        elif task_name == "studio":
            system = "cms"
        else:
            system = "lms"
        port = options.get("port", "8000" if system == "lms" else "8001")
        self.reset_task_messages()
        # devstack is invoked with positional args; other tasks take options.
        if task_name == "devstack":
            args = ["studio" if system == "cms" else system]
            if settings:
                args.append(f"--settings={settings}")
            if asset_settings:
                args.append(f"--asset-settings={asset_settings}")
            if is_optimized:
                args.append("--optimized")
            if is_fast:
                args.append("--fast")
            call_task("pavelib.servers.devstack", args=args)
        else:
            call_task(f"pavelib.servers.{task_name}", options=options)
        expected_messages = options.get("expected_messages", [])
        expected_settings = settings if settings else Env.DEVSTACK_SETTINGS
        expected_asset_settings = asset_settings if asset_settings else expected_settings
        # --optimized overrides both settings values.
        if is_optimized:
            expected_settings = "devstack_optimized"
            expected_asset_settings = "test_static_optimized"
        # collectstatic is skipped under devstack settings (DEBUG serves assets).
        expected_collect_static = not is_fast and expected_settings != Env.DEVSTACK_SETTINGS
        if not is_fast:
            expected_messages.append("install npm_assets")
            expected_messages.extend(
                [c.format(settings=expected_asset_settings,
                          log_file=Env.PRINT_SETTINGS_LOG_FILE) for c in EXPECTED_PRINT_SETTINGS_COMMAND]
            )
            expected_messages.append(EXPECTED_WEBPACK_COMMAND.format(
                node_env="production",
                static_root_lms=None,
                static_root_cms=None,
                js_env_extra_config=json.dumps("{}"),
                webpack_config_path=None
            ))
            expected_messages.extend(self.expected_sass_commands(system=system, asset_settings=expected_asset_settings))
        if expected_collect_static:
            expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
                system=system, asset_settings=expected_asset_settings, log_string=log_string
            ))
        expected_run_server_command = EXPECTED_RUN_SERVER_COMMAND.format(
            system=system,
            settings=expected_settings,
            port=port,
        )
        expected_messages.append(expected_run_server_command)
        assert self.task_messages == expected_messages

    def verify_run_all_servers_task(self, options):
        """
        Verify the output of a server task.

        Like verify_server_task, but run_all_servers starts LMS, CMS, and
        the celery worker, so both systems' commands are expected.
        """
        log_string = options.get("log_string", "> /dev/null")
        settings = options.get("settings", None)
        asset_settings = options.get("asset_settings", None)
        is_optimized = options.get("optimized", False)
        is_fast = options.get("fast", False)
        self.reset_task_messages()
        call_task("pavelib.servers.run_all_servers", options=options)
        expected_settings = settings if settings else Env.DEVSTACK_SETTINGS
        expected_asset_settings = asset_settings if asset_settings else expected_settings
        if is_optimized:
            expected_settings = "devstack_optimized"
            expected_asset_settings = "test_static_optimized"
        expected_collect_static = not is_fast and expected_settings != Env.DEVSTACK_SETTINGS
        expected_messages = []
        if not is_fast:
            expected_messages.append("install npm_assets")
            expected_messages.extend(
                [c.format(settings=expected_asset_settings,
                          log_file=Env.PRINT_SETTINGS_LOG_FILE) for c in EXPECTED_PRINT_SETTINGS_COMMAND]
            )
            expected_messages.append(EXPECTED_WEBPACK_COMMAND.format(
                node_env="production",
                static_root_lms=None,
                static_root_cms=None,
                js_env_extra_config=json.dumps("{}"),
                webpack_config_path=None
            ))
            expected_messages.extend(self.expected_sass_commands(asset_settings=expected_asset_settings))
        if expected_collect_static:
            expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
                system="lms", asset_settings=expected_asset_settings, log_string=log_string
            ))
            expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
                system="cms", asset_settings=expected_asset_settings, log_string=log_string
            ))
        expected_messages.append(
            EXPECTED_RUN_SERVER_COMMAND.format(
                system="lms",
                settings=expected_settings,
                port=8000,
            )
        )
        expected_messages.append(
            EXPECTED_RUN_SERVER_COMMAND.format(
                system="cms",
                settings=expected_settings,
                port=8001,
            )
        )
        expected_messages.append(EXPECTED_CELERY_COMMAND.format(settings="devstack_with_worker"))
        assert self.task_messages == expected_messages

    def expected_sass_commands(self, system=None, asset_settings="test_static_optimized"):
        """
        Returns the expected SASS commands for the specified system.

        With system=None (run_all_servers), both LMS and CMS commands are
        returned; otherwise the other system's commands are omitted.
        """
        expected_sass_commands = []
        if system != 'cms':
            expected_sass_commands.extend(EXPECTED_LMS_SASS_COMMAND)
        if system != 'lms':
            expected_sass_commands.extend(EXPECTED_CMS_SASS_COMMAND)
        return [command.format(asset_settings=asset_settings) for command in expected_sass_commands]

View File

@@ -10,8 +10,6 @@ from xml.sax.saxutils import quoteattr
from paver.easy import BuildFailure, cmdopts, needs, sh, task
from openedx.core.djangolib.markup import HTML
from .utils.envs import Env
from .utils.timer import timed
@@ -79,143 +77,6 @@ def top_python_dirs(dirname):
return top_dirs
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
    ("system=", "s", "System to act on"),
])
@timed
def find_fixme(options):
    """
    Run pylint on system code, only looking for fixme items.
    """
    total_fixmes = 0
    for system in getattr(options, 'system', ALL_SYSTEMS).split(','):
        # Directory to put the pylint report in.
        # This makes the folder if it doesn't already exist.
        report_dir = (Env.REPORT_DIR / system).makedirs_p()
        sh(
            "pylint --disable all --enable=fixme "
            "--output-format=parseable {apps} "
            "> {report_dir}/pylint_fixme.report".format(
                apps=' '.join(top_python_dirs(system)),
                report_dir=report_dir
            ),
            ignore_error=True,
        )
        total_fixmes += _count_pylint_violations(f"{report_dir}/pylint_fixme.report")
    print("Number of pylint fixmes: " + str(total_fixmes))
def _get_pylint_violations(systems=None, errors_only=False, clean=True):
    """
    Runs pylint. Returns a tuple of (number_of_violations, list_of_violations)
    where list_of_violations is a list of all pylint violations found, separated
    by new lines.

    Args:
        systems: list of systems to lint; defaults to all systems. A None
            sentinel replaces the original mutable default argument
            (`ALL_SYSTEMS.split(',')`), which was shared across calls and
            evaluated at import time.
        errors_only: if True, pass --errors-only to pylint.
        clean: if True, regenerate reports even when one already exists.
    """
    if systems is None:
        systems = ALL_SYSTEMS.split(',')
    # Make sure the metrics subdirectory exists
    Env.METRICS_DIR.makedirs_p()
    num_violations = 0
    violations_list = []
    for system in systems:
        # Directory to put the pylint report in.
        # This makes the folder if it doesn't already exist.
        report_dir = (Env.REPORT_DIR / system).makedirs_p()
        flags = []
        if errors_only:
            flags.append("--errors-only")
        apps_list = ' '.join(top_python_dirs(system))
        system_report = report_dir / 'pylint.report'
        if clean or not system_report.exists():
            sh(
                "export DJANGO_SETTINGS_MODULE={env}.envs.test; "
                "pylint {flags} --output-format=parseable {apps} "
                "> {report_dir}/pylint.report".format(
                    flags=" ".join(flags),
                    apps=apps_list,
                    report_dir=report_dir,
                    env=('cms' if system == 'cms' else 'lms')
                ),
                ignore_error=True,
            )
        num_violations += _count_pylint_violations(system_report)
        with open(system_report) as report_contents:
            violations_list.extend(report_contents)
    # Print number of violations to log
    return num_violations, violations_list
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
    ("system=", "s", "System to act on"),
    ("errors", "e", "Check for errors only"),
])
@timed
def run_pylint(options):
    """
    Run pylint on system code. When violations limit is passed in,
    fail the task if too many violations are found.
    """
    errors_only = getattr(options, 'errors', False)
    systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
    result_name = 'pylint_{}'.format('_'.join(systems))
    num_violations, violations_list = _get_pylint_violations(systems, errors_only)
    # Record the violation total on stdout and in the metrics file.
    violations_count_str = "Number of pylint violations: " + str(num_violations)
    print(violations_count_str)
    with open(Env.METRICS_DIR / "pylint", "w") as metrics_file:
        metrics_file.write(violations_count_str)
    if num_violations > 0:
        # Fail if there are violations found in pylint report.
        failure_message = "FAILURE: Pylint violations found.\n"
        for violation in violations_list:
            failure_message += violation  # lint-amnesty, pylint: disable=consider-using-join
        fail_quality(result_name, failure_message)
    else:
        write_junit_xml(result_name)
def _count_pylint_violations(report_file):
"""
Parses a pylint report line-by-line and determines the number of violations reported
"""
num_violations_report = 0
# An example string:
# xmodule/xmodule/tests/test_conditional.py:21: [C0111(missing-docstring), DummySystem] Missing docstring
# More examples can be found in the unit tests for this method
pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")
for line in open(report_file):
violation_list_for_line = pylint_pattern.split(line)
# If the string is parsed into four parts, then we've found a violation. Example of split parts:
# test file, line number, violation name, violation details
if len(violation_list_for_line) == 4:
num_violations_report += 1
return num_violations_report
def _get_pep8_violations(clean=True):
"""
Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string)
@@ -248,7 +109,6 @@ def _pep8_violations(report_file):
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
("system=", "s", "System to act on"),
])
@@ -740,70 +600,3 @@ def check_keywords():
report_path
)
)
@task
@needs('pavelib.prereqs.install_python_prereqs')
@timed
# pylint: disable=too-many-statements
def run_quality():
    """
    Build the html quality reports, and print the reports to the console.

    Fails the task (via fail_quality) only when at least one quality check
    actually found violations.
    """
    failure_reasons = []

    def _lint_output(linter, count, violations_list, is_html=False):
        """
        Given a count & list of pylint violations, pretty-print the output.
        If `is_html`, will print out with HTML markup.
        """
        if is_html:
            lines = ['<body>\n']
            sep = '-------------<br/>\n'
            title = HTML("<h1>Quality Report: {}</h1>\n").format(linter)
            violations_bullets = ''.join(
                [HTML('<li>{violation}</li><br/>\n').format(violation=violation) for violation in violations_list]
            )
            violations_str = HTML('<ul>\n{bullets}</ul>\n').format(bullets=HTML(violations_bullets))
            violations_count_str = HTML("<b>Violations</b>: {count}<br/>\n")
            fail_line = HTML("<b>FAILURE</b>: {} count should be 0<br/>\n").format(linter)
        else:
            lines = []
            sep = '-------------\n'
            title = f"Quality Report: {linter}\n"
            violations_str = ''.join(violations_list)
            violations_count_str = "Violations: {count}\n"
            fail_line = f"FAILURE: {linter} count should be 0\n"
        violations_count_str = violations_count_str.format(count=count)
        lines.extend([sep, title, sep, violations_str, sep, violations_count_str])
        if count > 0:
            lines.append(fail_line)
        lines.append(sep + '\n')
        if is_html:
            lines.append('</body>')
        return ''.join(lines)

    # Reuse existing pylint reports where possible (clean=False).
    (count, violations_list) = _get_pylint_violations(clean=False)
    # Print total number of violations to log
    print(_lint_output('pylint', count, violations_list))
    if count > 0:
        failure_reasons.append('Too many total pylint violations.')
    # Only fail when a check actually failed; without this guard the task
    # called fail_quality unconditionally, failing even a clean run with
    # the message "FAILURE: " and no reasons.
    if failure_reasons:
        msg = "FAILURE: " + " ".join(failure_reasons)
        fail_quality('Quality Failure', msg)
def get_violations_reports(violations_type):
    """
    Finds violations reports files by naming convention (e.g., all "pep8.report" files)
    """
    target_name = f"{violations_type}.report"
    return [
        os.path.join(subdir, candidate)
        for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR))
        for candidate in files
        if candidate == target_name
    ]

View File

@@ -1,258 +0,0 @@
"""
Run and manage servers for local development.
"""
import argparse
import sys
from paver.easy import call_task, cmdopts, consume_args, needs, sh, task
from .assets import collect_assets
from .utils.cmd import cmd, django_cmd
from .utils.envs import Env
from .utils.process import run_multi_processes, run_process
from .utils.timer import timed
# Default runserver port for each system when --port is not given.
DEFAULT_PORT = {"lms": 8000, "studio": 8001}
# Django settings module used when none is supplied on the command line.
DEFAULT_SETTINGS = Env.DEVSTACK_SETTINGS
# Settings modules substituted when the --optimized flag is passed.
OPTIMIZED_SETTINGS = "devstack_optimized"
OPTIMIZED_ASSETS_SETTINGS = "test_static_optimized"
# Shared help text for the --asset-settings option of several tasks.
ASSET_SETTINGS_HELP = (
    "Settings file used for updating assets. Defaults to the value of the settings variable if not provided."
)
def run_server(
        system, fast=False, settings=None, asset_settings=None, port=None
):
    """Start the server for LMS or Studio.

    Args:
        system (str): The system to be run (lms or studio).
        fast (bool): If true, then start the server immediately without updating assets (defaults to False).
        settings (str): The Django settings module to use; if not provided, use the default.
        asset_settings (str): The settings to use when generating assets. If not provided, assets are not generated.
        port (str): The port number to run the server on. If not provided, uses the default port for the system.
    """
    if system not in ['lms', 'studio']:
        print("System must be either lms or studio", file=sys.stderr)
        # sys.exit replaces the interactive-only builtin exit(): exit() is
        # injected by the site module and may be absent (python -S); the
        # original line carried a lint-amnesty tag acknowledging this.
        sys.exit(1)
    if not settings:
        settings = DEFAULT_SETTINGS
    if not fast and asset_settings:
        args = [system, f'--settings={asset_settings}', '--watch']
        # The default settings use DEBUG mode for running the server which means that
        # the optimized assets are ignored, so we skip collectstatic in that case
        # to save time.
        if settings == DEFAULT_SETTINGS:
            args.append('--skip-collect')
        call_task('pavelib.assets.update_assets', args=args)
    if port is None:
        port = DEFAULT_PORT[system]
    args = [settings, 'runserver', '--traceback', '--pythonpath=.', f'0.0.0.0:{port}']
    run_process(django_cmd(system, *args))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings"),
    ("asset-settings=", "a", ASSET_SETTINGS_HELP),
    ("port=", "p", "Port"),
    ("fast", "f", "Skip updating assets"),
])
def lms(options):
    """
    Run the LMS server.
    """
    settings = getattr(options, 'settings', DEFAULT_SETTINGS)
    # optparse stores the dashed "asset-settings" option under the attribute
    # name "asset_settings" (dashes become underscores; run_all_servers in
    # this module reads its options the same way). The previous hyphenated
    # getattr could never match, so the option was silently ignored and
    # always fell back to `settings`.
    asset_settings = getattr(options, 'asset_settings', settings)
    port = getattr(options, 'port', None)
    fast = getattr(options, 'fast', False)
    run_server(
        'lms',
        fast=fast,
        settings=settings,
        asset_settings=asset_settings,
        port=port,
    )
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings"),
    ("asset-settings=", "a", ASSET_SETTINGS_HELP),
    ("port=", "p", "Port"),
    ("fast", "f", "Skip updating assets"),
])
def studio(options):
    """
    Run the Studio server.
    """
    settings = getattr(options, 'settings', DEFAULT_SETTINGS)
    # optparse stores the dashed "asset-settings" option under the attribute
    # name "asset_settings" (dashes become underscores; run_all_servers in
    # this module reads its options the same way). The previous hyphenated
    # getattr could never match, so the option was silently ignored and
    # always fell back to `settings`.
    asset_settings = getattr(options, 'asset_settings', settings)
    port = getattr(options, 'port', None)
    fast = getattr(options, 'fast', False)
    run_server(
        'studio',
        fast=fast,
        settings=settings,
        asset_settings=asset_settings,
        port=port,
    )
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def devstack(args):
    """
    Start the devstack lms or studio server
    """
    parser = argparse.ArgumentParser(prog='paver devstack')
    parser.add_argument('system', type=str, nargs=1, help="lms or studio")
    parser.add_argument('--fast', action='store_true', default=False, help="Skip updating assets")
    parser.add_argument('--optimized', action='store_true', default=False, help="Run with optimized assets")
    parser.add_argument('--settings', type=str, default=DEFAULT_SETTINGS, help="Settings file")
    parser.add_argument('--asset-settings', type=str, default=None, help=ASSET_SETTINGS_HELP)
    parsed = parser.parse_args(args)
    if parsed.optimized:
        # Optimized runs force dedicated settings and rebuild the course index.
        settings = OPTIMIZED_SETTINGS
        asset_settings = OPTIMIZED_ASSETS_SETTINGS
        sh(django_cmd('cms', settings, 'reindex_course', '--setup'))
    else:
        settings = parsed.settings
        # Asset settings default to the server settings when not given.
        asset_settings = parsed.asset_settings or settings
    run_server(
        parsed.system[0],
        fast=parsed.fast,
        settings=settings,
        asset_settings=asset_settings,
    )
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings"),
])
def celery(options):
    """
    Runs Celery workers.
    """
    worker_settings = getattr(options, 'settings', 'devstack_with_worker')
    worker_command = cmd(
        f'DJANGO_SETTINGS_MODULE=lms.envs.{worker_settings}',
        'celery', 'worker', '--app=lms.celery:APP',
        '--beat', '--loglevel=INFO', '--pythonpath=.',
    )
    run_process(worker_command)
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings for both LMS and Studio"),
    ("asset-settings=", "a", "Django settings for updating assets for both LMS and Studio (defaults to settings)"),
    ("worker-settings=", "w", "Celery worker Django settings"),
    ("fast", "f", "Skip updating assets"),
    ("optimized", "o", "Run with optimized assets"),
    ("settings-lms=", "l", "Set LMS only, overriding the value from --settings (if provided)"),
    ("asset-settings-lms=", None, "Set LMS only, overriding the value from --asset-settings (if provided)"),
    ("settings-cms=", "c", "Set Studio only, overriding the value from --settings (if provided)"),
    ("asset-settings-cms=", None, "Set Studio only, overriding the value from --asset-settings (if provided)"),
    ("asset_settings=", None, "deprecated in favor of asset-settings"),
    ("asset_settings_cms=", None, "deprecated in favor of asset-settings-cms"),
    ("asset_settings_lms=", None, "deprecated in favor of asset-settings-lms"),
    ("settings_cms=", None, "deprecated in favor of settings-cms"),
    ("settings_lms=", None, "deprecated in favor of settings-lms"),
    ("worker_settings=", None, "deprecated in favor of worker-settings"),
])
def run_all_servers(options):
    """
    Runs Celery workers, Studio, and LMS.

    Resolves shared settings plus optional per-system (-lms/-cms) overrides,
    applies the --optimized settings if requested, updates/collects assets
    unless --fast is given, then launches LMS, Studio, and a Celery worker
    as parallel processes.
    """
    # NOTE(review): only the underscore spellings are read here; paver/optparse
    # appears to normalize dashed option names to underscores so this resolves
    # both the dashed and the deprecated underscore option forms -- confirm.
    settings = getattr(options, 'settings', DEFAULT_SETTINGS)
    asset_settings = getattr(options, 'asset_settings', settings)
    worker_settings = getattr(options, 'worker_settings', 'devstack_with_worker')
    fast = getattr(options, 'fast', False)
    optimized = getattr(options, 'optimized', False)
    # --optimized overrides any explicitly provided settings values.
    if optimized:
        settings = OPTIMIZED_SETTINGS
        asset_settings = OPTIMIZED_ASSETS_SETTINGS
    # Per-system overrides default to the shared values resolved above.
    settings_lms = getattr(options, 'settings_lms', settings)
    settings_cms = getattr(options, 'settings_cms', settings)
    asset_settings_lms = getattr(options, 'asset_settings_lms', asset_settings)
    asset_settings_cms = getattr(options, 'asset_settings_cms', asset_settings)
    if not fast:
        # First update assets for both LMS and Studio but don't collect static yet
        args = [
            'lms', 'studio',
            f'--settings={asset_settings}',
            '--skip-collect'
        ]
        call_task('pavelib.assets.update_assets', args=args)
        # Now collect static for each system separately with the appropriate settings.
        # Note that the default settings use DEBUG mode for running the server which
        # means that the optimized assets are ignored, so we skip collectstatic in that
        # case to save time.
        if settings != DEFAULT_SETTINGS:
            collect_assets(['lms'], asset_settings_lms)
            collect_assets(['studio'], asset_settings_cms)
        # Install an asset watcher to regenerate files that change
        call_task('pavelib.assets.watch_assets', options={'background': True})
    # Start up LMS, CMS and Celery
    lms_port = DEFAULT_PORT['lms']
    cms_port = DEFAULT_PORT['studio']
    lms_runserver_args = [f"0.0.0.0:{lms_port}"]
    cms_runserver_args = [f"0.0.0.0:{cms_port}"]
    # Run all three processes concurrently; this blocks until they stop.
    run_multi_processes([
        django_cmd(
            'lms', settings_lms, 'runserver', '--traceback', '--pythonpath=.', *lms_runserver_args
        ),
        django_cmd(
            'studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', *cms_runserver_args
        ),
        cmd(
            f'DJANGO_SETTINGS_MODULE=lms.envs.{worker_settings}',
            'celery', 'worker', '--app=lms.celery:APP',
            '--beat', '--loglevel=INFO', '--pythonpath=.'
        )
    ])
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
@timed
def check_settings(args):
    """
    Checks that a Django settings module imports cleanly.

    Positional args (via paver): the system ('lms' or 'studio') and the
    name of the Django settings module to check. Prints a message to
    stderr on failure but does not abort the build.
    """
    parser = argparse.ArgumentParser(prog='paver check_settings')
    parser.add_argument('system', type=str, nargs=1, help="lms or studio")
    parser.add_argument('settings', type=str, nargs=1, help='Django settings')
    args = parser.parse_args(args)
    system = args.system[0]
    settings = args.settings[0]
    try:
        # Pipe a bare import statement into the Django shell; any error in
        # the settings module surfaces as a failed shell command.
        import_cmd = f"echo 'import {system}.envs.{settings}'"
        django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
        sh(f"{import_cmd} | {django_shell_cmd}")
    except Exception:  # pylint: disable=broad-except
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; the best-effort report-and-continue behavior
        # for real failures is preserved.
        print("Failed to import settings", file=sys.stderr)

View File

@@ -77,8 +77,6 @@ case "$TEST_SUITE" in
mkdir -p reports
echo "Finding fixme's and storing report..."
run_paver_quality find_fixme || { EXIT=1; }
echo "Finding pycodestyle violations and storing report..."
run_paver_quality run_pep8 || { EXIT=1; }
echo "Finding ESLint violations and storing report..."