refactor: remove all paver code related to python unit tests (#31180)

This commit is contained in:
Zubair Shakoor
2023-01-18 17:41:56 +05:00
committed by GitHub
parent 8ac55ed4af
commit 141d6d4ce6
13 changed files with 54 additions and 1205 deletions

View File

@@ -220,8 +220,7 @@ The report is then saved in reports/xmodule/cover/index.html
To run tests for stub servers, for example for `YouTube stub server`_, you can
run one of these commands::
paver test_system -s cms -t common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
pytest common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
pytest --ds=cms.env.test common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
.. _YouTube stub server: https://github.com/openedx/edx-platform/blob/master/common/djangoapps/terrain/stubs/tests/test_youtube_stub.py

View File

@@ -3,4 +3,4 @@ paver commands
"""
from . import assets, docs, i18n, js_test, prereqs, quality, servers, tests
from . import assets, docs, i18n, js_test, prereqs, quality, servers

View File

@@ -3,6 +3,8 @@ Javascript test tasks
"""
import os
import re
import sys
from paver.easy import cmdopts, needs, task
@@ -10,6 +12,12 @@ from paver.easy import cmdopts, needs, task
from pavelib.utils.envs import Env
from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite
from pavelib.utils.timer import timed
from paver.easy import cmdopts, needs, sh, task
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text
__test__ = False # do not collect
@@ -91,3 +99,47 @@ def test_js_dev(options):
"""
options.mode = 'dev'
test_js(options)
@task
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
    ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
], share_with=['coverage'])
@timed
def diff_coverage(options):
    """
    Build the diff coverage reports
    """
    branch = options.get('compare_branch', 'origin/master')

    # Collect every coverage XML report under the report dir (Python and JS alike).
    reports = [
        path for path in Env.REPORT_DIR.walk()
        if re.match(r'^coverage.*\.xml$', path.basename())
    ]

    if not reports:
        sys.stderr.write(colorize(
            'red',
            "No coverage info found. Run `paver test` before running "
            "`paver coverage`.\n"
        ))
        return

    combined_html = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')
    # Generate the diff coverage reports (HTML and console)
    # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153
    sh(
        f"diff-cover {' '.join(reports)} --diff-range-notation '..' "
        f"--compare-branch={branch} --html-report {combined_html}"
    )
    print("\n")

View File

@@ -1,389 +0,0 @@
"""
Unit test tasks
"""
import os
import re
import sys
from optparse import make_option # pylint: disable=deprecated-module
from paver.easy import cmdopts, needs, sh, task, call_task
from pavelib.utils.envs import Env
from pavelib.utils.passthrough_opts import PassthroughTask
from pavelib.utils.test import suites
from pavelib.utils.timer import timed
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text
__test__ = False # do not collect
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("system=", "s", "System to act on"),
    ("test-id=", "t", "Test id"),
    ("fail-fast", "x", "Fail suite on first failed test"),
    ("fasttest", "a", "Run without collectstatic"),
    make_option(
        "--eval-attr", dest="eval_attr",
        help="Only run tests matching given attribute expression."
    ),
    make_option(
        '-c', '--cov-args', default='',
        help='adds as args to coverage for the test run'
    ),
    ('skip-clean', 'C', 'skip cleaning repository before running tests'),
    make_option('-p', '--processes', dest='processes', default=0, help='number of processes to use running tests'),
    make_option('-r', '--randomize', action='store_true', help='run the tests in a random order'),
    make_option('--no-randomize', action='store_false', dest='randomize', help="don't run the tests in a random order"),
    # Verbosity is a single counter driven by three flags: --verbose pins it
    # to 2, -q/--quiet pins it to 0, and repeated -v increments from 1.
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
    make_option(
        "--disable_capture", action="store_true", dest="disable_capture",
        help="Disable capturing of stdout/stderr"
    ),
    make_option(
        "--disable-coverage", action="store_false", dest="with_coverage",
        help="Run the unit tests directly through pytest, NOT coverage"
    ),
    # --disable-migrations / --enable-migrations toggle the same dest.
    make_option(
        '--disable-migrations',
        action='store_true',
        dest='disable_migrations',
        help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1."
    ),
    make_option(
        '--enable-migrations',
        action='store_false',
        dest='disable_migrations',
        help="Create tables by applying migrations."
    ),
    # NOTE(review): "CourseEnrollent" typo in this user-facing help text;
    # left as-is here since it is a runtime string.
    make_option(
        '--disable_courseenrollment_history',
        action='store_true',
        dest='disable_courseenrollment_history',
        help="Disable history on student.CourseEnrollent. Can also be used by exporting"
        "DISABLE_COURSEENROLLMENT_HISTORY=1."
    ),
    make_option(
        '--enable_courseenrollment_history',
        action='store_false',
        dest='disable_courseenrollment_history',
        help="Enable django-simple-history on student.CourseEnrollment."
    ),
    make_option(
        '--xdist_ip_addresses',
        dest='xdist_ip_addresses',
        help="Comma separated string of ip addresses to shard tests to via xdist."
    ),
    make_option(
        '--with-wtw',
        dest='with_wtw',
        action='store',
        help="Only run tests based on the lines changed relative to the specified branch"
    ),
], share_with=[
    'pavelib.utils.test.utils.clean_reports_dir',
])
@PassthroughTask
@timed
def test_system(options, passthrough_options):
    """
    Run tests on our djangoapps for lms and cms

    Builds one SystemTestSuite per targeted system (lms/cms) and runs them
    all under a single PythonTestSuite wrapper. Unknown CLI options are
    forwarded verbatim to pytest via ``passthrough_options``.
    """
    system = getattr(options, 'system', None)
    test_id = getattr(options, 'test_id', None)
    # Only lms, cms, or "all systems" (None) are valid targets.
    assert system in (None, 'lms', 'cms')

    if hasattr(options.test_system, 'with_wtw'):
        # "Who tests what": precompute coverage-based test selection data
        # relative to the given branch before running the suite.
        call_task('fetch_coverage_test_selection_data', options={
            'compare_branch': options.test_system.with_wtw
        })

    if test_id:
        # Testing a single test ID.
        # Ensure the proper system for the test id.
        if not system:
            system = test_id.split('/')[0]
            # Shared code (common/, openedx/) is exercised under lms settings.
            if system in ['common', 'openedx']:
                system = 'lms'
        system_tests = [suites.SystemTestSuite(
            system,
            passthrough_options=passthrough_options,
            **options.test_system
        )]
    else:
        # Testing a single system -or- both systems.
        if system:
            systems = [system]
        else:
            # No specified system or test_id, so run all tests of both systems.
            systems = ['cms', 'lms']

        system_tests = []
        for syst in systems:
            system_tests.append(suites.SystemTestSuite(
                syst,
                passthrough_options=passthrough_options,
                **options.test_system
            ))

    test_suite = suites.PythonTestSuite(
        'python tests',
        subsuites=system_tests,
        passthrough_options=passthrough_options,
        **options.test_system
    )
    test_suite.run()
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("lib=", "l", "lib to test"),
    ("test-id=", "t", "Test id"),
    ("failed", "f", "Run only failed tests"),
    ("fail-fast", "x", "Run only failed tests"),
    make_option(
        "--eval-attr", dest="eval_attr",
        help="Only run tests matching given attribute expression."
    ),
    make_option(
        '-c', '--cov-args', default='',
        help='adds as args to coverage for the test run'
    ),
    ('skip-clean', 'C', 'skip cleaning repository before running tests'),
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
    make_option(
        "--disable_capture", action="store_true", dest="disable_capture",
        help="Disable capturing of stdout/stderr"
    ),
    make_option(
        "--disable-coverage", action="store_false", dest="with_coverage",
        help="Run the unit tests directly through pytest, NOT coverage"
    ),
    make_option(
        '--xdist_ip_addresses',
        dest='xdist_ip_addresses',
        help="Comma separated string of ip addresses to shard tests to via xdist."
    ),
    make_option('-p', '--processes', dest='processes', default=0, help='number of processes to use running tests'),
    make_option('-r', '--randomize', action='store_true', help='run the tests in a random order'),
], share_with=['pavelib.utils.test.utils.clean_reports_dir'])
@PassthroughTask
@timed
def test_lib(options, passthrough_options):
    """
    Run tests for pavelib/ (paver-tests)

    With a --lib or --test-id, runs a single LibTestSuite; otherwise runs
    one LibTestSuite per directory in Env.LIB_TEST_DIRS, appending coverage
    data after the first so the reports accumulate into one data set.
    """
    lib = getattr(options, 'lib', None)
    # --test-id wins; fall back to --lib if no explicit test id was given.
    test_id = getattr(options, 'test_id', lib)

    if test_id:
        # Testing a single test id.
        if '/' in test_id:
            # Path-style id: keep only the first three path components.
            lib = '/'.join(test_id.split('/')[0:3])
        else:
            # Dotted-style id: map module name onto the paver_tests dir.
            # NOTE(review): no '/' between 'pavelib/paver_tests' and the
            # module name — this looks like it should be
            # 'pavelib/paver_tests/' + ...; confirm intended behavior.
            lib = 'pavelib/paver_tests' + test_id.split('.')[0]
        options.test_lib['test_id'] = test_id
        lib_tests = [suites.LibTestSuite(
            lib,
            passthrough_options=passthrough_options,
            **options.test_lib
        )]
    else:
        # Testing all tests within pavelib/paver_tests dir.
        # append_coverage is False only for the first suite so the first run
        # starts a fresh coverage file and later runs append to it.
        lib_tests = [
            suites.LibTestSuite(
                d,
                passthrough_options=passthrough_options,
                append_coverage=(i != 0),
                **options.test_lib
            ) for i, d in enumerate(Env.LIB_TEST_DIRS)
        ]

    test_suite = suites.PythonTestSuite(
        'python tests',
        subsuites=lib_tests,
        passthrough_options=passthrough_options,
        **options.test_lib
    )
    test_suite.run()
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("failed", "f", "Run only failed tests"),
    ("fail-fast", "x", "Run only failed tests"),
    make_option(
        '-c', '--cov-args', default='',
        help='adds as args to coverage for the test run'
    ),
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
    make_option(
        '--disable-migrations',
        action='store_true',
        dest='disable_migrations',
        help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1."
    ),
    # NOTE(review): "CourseEnrollent" typo in this user-facing help text;
    # left as-is here since it is a runtime string.
    make_option(
        '--disable_courseenrollment_history',
        action='store_true',
        dest='disable_courseenrollment_history',
        help="Disable history on student.CourseEnrollent. Can also be used by exporting"
        "DISABLE_COURSEENROLLMENT_HISTORY=1."
    ),
    make_option(
        '--enable_courseenrollment_history',
        action='store_false',
        dest='disable_courseenrollment_history',
        help="Enable django-simple-history on student.CourseEnrollment."
    ),
])
@PassthroughTask
@timed
def test_python(options, passthrough_options):
    """
    Run all python tests

    Runs a PythonTestSuite with its default subsuites (presumably lms, cms
    and the pavelib libs — determined inside PythonTestSuite; confirm there).
    """
    python_suite = suites.PythonTestSuite(
        'Python Tests',
        passthrough_options=passthrough_options,
        **options.test_python
    )
    python_suite.run()
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("suites", "s", "List of unit test suites to run. (js, lib, cms, lms)"),
    make_option(
        '-c', '--cov-args', default='',
        help='adds as args to coverage for the test run'
    ),
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
])
@PassthroughTask
@timed
def test(options, passthrough_options):
    """
    Run all tests

    Wraps the JS and Python suites into one top-level TestSuite and runs it.
    """
    # Subsuites to be added to the main suite
    python_suite = suites.PythonTestSuite(
        'Python Tests',
        passthrough_options=passthrough_options,
        **options.test
    )
    js_suite = suites.JsTestSuite('JS Tests', mode='run', with_coverage=True)

    # Main suite to be run
    all_unittests_suite = suites.TestSuite('All Tests', subsuites=[js_suite, python_suite])
    all_unittests_suite.run()
@task
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
    ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
    ("rcfile=", "c", "Coveragerc file to use, defaults to .coveragerc"),
])
@timed
def coverage(options):
    """
    Build the html, xml, and diff coverage reports

    Expects coverage data files under Env.REPORT_DIR; combines parallel-mode
    data files if needed, then emits XML/HTML reports and chains into the
    diff_coverage task.
    """
    report_dir = Env.REPORT_DIR
    rcfile = getattr(options.coverage, 'rcfile', Env.PYTHON_COVERAGERC)
    # Combined data file name is keyed by TEST_SUITE (empty string if unset).
    combined_report_file = report_dir / '{}.coverage'.format(os.environ.get('TEST_SUITE', ''))

    if not combined_report_file.isfile():
        # This may be that the coverage files were generated using -p,
        # try to combine them to the one file that we need.
        sh(f"coverage combine --rcfile={rcfile}")

    if not os.path.getsize(combined_report_file) > 50:
        # Check if the .coverage data file is larger than the base file,
        # because coverage combine will always at least make the "empty" data
        # file even when there isn't any data to be combined.
        err_msg = colorize(
            'red',
            "No coverage info found. Run `paver test` before running "
            "`paver coverage`.\n"
        )
        sys.stderr.write(err_msg)
        return

    # Generate the coverage.py XML report
    sh(f"coverage xml --rcfile={rcfile}")
    # Generate the coverage.py HTML report
    sh(f"coverage html --rcfile={rcfile}")
    # Chain into the diff report; paver tasks take options implicitly,
    # hence the pylint suppression.
    diff_coverage()  # pylint: disable=no-value-for-parameter
@task
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
    ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
], share_with=['coverage'])
@timed
def diff_coverage(options):
    """
    Build the diff coverage reports

    Feeds every coverage*.xml report found under Env.REPORT_DIR to
    diff-cover, comparing against --compare-branch (origin/master by
    default), and writes a combined HTML report.
    """
    compare_branch = options.get('compare_branch', 'origin/master')

    # Find all coverage XML files (both Python and JavaScript)
    xml_reports = []
    for filepath in Env.REPORT_DIR.walk():
        if bool(re.match(r'^coverage.*\.xml$', filepath.basename())):
            xml_reports.append(filepath)

    if not xml_reports:
        err_msg = colorize(
            'red',
            "No coverage info found. Run `paver test` before running "
            "`paver coverage`.\n"
        )
        sys.stderr.write(err_msg)
    else:
        xml_report_str = ' '.join(xml_reports)
        diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')

        # Generate the diff coverage reports (HTML and console)
        # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153
        sh(
            "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} "
            "--html-report {diff_html_path}".format(
                xml_report_str=xml_report_str,
                compare_branch=compare_branch,
                diff_html_path=diff_html_path,
            )
        )
        print("\n")

View File

@@ -1,146 +0,0 @@
"""
Provides:
PassthroughOptionParser:
A subclass of :class:`optparse.OptionParser` that captures unknown options
into its ``passthrough_options`` attribute.
PassthroughTask:
A subclass of :class:`paver.tasks.Task` that supplies unknown options
as the `passthrough_options` argument to the decorated function
"""
from optparse import BadOptionError, OptionParser # pylint: disable=deprecated-module
from unittest.mock import patch
import paver.tasks
class PassthroughOptionParser(OptionParser):
    """
    An :class:`optparse.OptionParser` which captures any unknown options into
    the ``passthrough_options`` attribute. Handles both "--long-options" and
    "-s" short options.

    Both ``_process_long_opt`` and ``_process_short_opts`` are near-verbatim
    copies of the optparse originals, modified only to record unrecognized
    options instead of raising :class:`optparse.BadOptionError`.
    """
    def __init__(self, *args, **kwargs):
        # Unknown options (and their explicit =values) accumulate here.
        self.passthrough_options = []

        # N.B. OptionParser is an old-style class, which is why
        # this isn't using super()
        OptionParser.__init__(self, *args, **kwargs)

    def _process_long_opt(self, rargs, values):
        # This is a copy of the OptionParser._process_long_opt method,
        # modified to capture arguments that aren't understood
        arg = rargs.pop(0)

        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False

        try:
            opt = self._match_long_opt(opt)
        except BadOptionError:
            # Unknown option: record it (and drop its already-split value)
            # rather than erroring out, so it can be passed through.
            self.passthrough_options.append(arg)
            if had_explicit_value:
                rargs.pop(0)
            return

        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                if nargs == 1:
                    self.error("%s option requires an argument" % opt)
                else:
                    self.error("%s option requires %d arguments"
                               % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]

        elif had_explicit_value:
            self.error("%s option does not take a value" % opt)

        else:
            value = None

        option.process(opt, value, values, self)

    def _process_short_opts(self, rargs, values):
        # Copy of OptionParser._process_short_opts; unknown single-letter
        # flags are collected and re-emitted as one bundled "-xyz" token.
        arg = rargs.pop(0)
        stop = False
        i = 1
        passthrough_opts = []
        for char in arg[1:]:
            opt = "-" + char
            option = self._short_opt.get(opt)
            i += 1                      # we have consumed a character

            if not option:
                # Unknown short flag: remember the letter, keep scanning.
                passthrough_opts.append(char)
                continue
            if option.takes_value():
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    rargs.insert(0, arg[i:])
                    stop = True

                nargs = option.nargs
                if len(rargs) < nargs:
                    if nargs == 1:
                        self.error("%s option requires an argument" % opt)
                    else:
                        self.error("%s option requires %d arguments"
                                   % (opt, nargs))
                elif nargs == 1:
                    value = rargs.pop(0)
                else:
                    value = tuple(rargs[0:nargs])
                    del rargs[0:nargs]

            else:                       # option doesn't take a value
                value = None

            option.process(opt, value, values, self)

            if stop:
                break

        if passthrough_opts:
            self.passthrough_options.append('-{}'.format("".join(passthrough_opts)))
class PassthroughTask(paver.tasks.Task):
    """
    A :class:`paver.tasks.Task` subclass that supplies any options that it doesn't
    understand to the task function as the ``passthrough_options`` argument.
    """
    @property
    def parser(self):
        # Swap optparse's OptionParser for our capturing subclass while the
        # base class builds its parser, so unknown options are recorded
        # instead of raising.
        with patch.object(paver.tasks.optparse, 'OptionParser', PassthroughOptionParser):
            return super().parser

    def __call__(self, *args, **kwargs):
        # Expose the captured options on the paver environment for the
        # duration of the task call, then clean up even on error.
        paver.tasks.environment.passthrough_options = self._parser.passthrough_options  # pylint: disable=no-member
        try:
            return super().__call__(*args, **kwargs)
        finally:
            del paver.tasks.environment.passthrough_options

View File

@@ -2,6 +2,4 @@
TestSuite class and subclasses
"""
from .js_suite import JestSnapshotTestSuite, JsTestSuite
from .pytest_suite import LibTestSuite, PytestSuite, SystemTestSuite
from .python_suite import PythonTestSuite
from .suite import TestSuite

View File

@@ -1,341 +0,0 @@
"""
Classes used for defining and running pytest test suites
"""
import os
from glob import glob
from pavelib.utils.envs import Env
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites.suite import TestSuite
from pavelib.utils.test.utils import COVERAGE_CACHE_BASELINE, COVERAGE_CACHE_BASEPATH, WHO_TESTS_WHAT_DIFF
__test__ = False # do not collect
class PytestSuite(TestSuite):
    """
    A subclass of TestSuite with extra methods that are specific
    to pytest tests.

    Handles report-directory setup (per-SHARD subdirectories when the SHARD
    env var is set), coverage toggling, and translation of suite options
    into pytest command-line flags.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.failed_only = kwargs.get('failed_only', False)
        self.fail_fast = kwargs.get('fail_fast', False)
        self.run_under_coverage = kwargs.get('with_coverage', True)
        # Truthy default: history is disabled unless explicitly overridden.
        self.disable_courseenrollment_history = kwargs.get('disable_courseenrollment_history', '1')
        self.disable_capture = kwargs.get('disable_capture', None)
        self.report_dir = Env.REPORT_DIR / self.root

        # If set, put reports for run in "unique" directories.
        # The main purpose of this is to ensure that the reports can be 'slurped'
        # in the main jenkins flow job without overwriting the reports from other
        # build steps. For local development/testing, this shouldn't be needed.
        if os.environ.get("SHARD", None):
            shard_str = "shard_{}".format(os.environ.get("SHARD"))
            self.report_dir = self.report_dir / shard_str

        if self.disable_courseenrollment_history:
            os.environ['DISABLE_COURSEENROLLMENT_HISTORY'] = '1'

        self.xunit_report = self.report_dir / "nosetests.xml"
        self.cov_args = kwargs.get('cov_args', '')
        self.with_wtw = kwargs.get('with_wtw', False)

    def __enter__(self):
        super().__enter__()
        self.report_dir.makedirs_p()

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Cleans mongo after the tests run.
        """
        super().__exit__(exc_type, exc_value, traceback)
        test_utils.clean_mongo()

    def _under_coverage_cmd(self, cmd):
        """
        If self.run_under_coverage is True, it returns the arg 'cmd'
        altered to be run under coverage. It returns the command
        unaltered otherwise.
        """
        if self.run_under_coverage:
            cmd.append('--cov')
            # Suppress terminal coverage output; reports are generated later.
            cmd.append('--cov-report=')
        return cmd

    @staticmethod
    def is_success(exit_code):
        """
        An exit code of zero means all tests passed, 5 means no tests were
        found.
        """
        return exit_code in [0, 5]

    @property
    def test_options_flags(self):
        """
        Takes the test options and returns the appropriate flags
        for the command.
        """
        opts = []

        # Handle "--failed" as a special case: we want to re-run only
        # the tests that failed within our Django apps
        # This sets the --last-failed flag for the pytest command, so this
        # functionality is the same as described in the pytest documentation
        if self.failed_only:
            opts.append("--last-failed")

        # This makes it so we use pytest's fail-fast feature in two cases.
        # Case 1: --fail-fast is passed as an arg in the paver command
        # Case 2: The environment variable TESTS_FAIL_FAST is set as True
        # BUGFIX: previously this checked 'TESTS_FAIL_FAST' for membership
        # but then read os.environ['TEST_FAIL_FAST'] (note the missing S),
        # raising KeyError whenever only TESTS_FAIL_FAST was set. Read the
        # same variable that is documented and checked.
        env_fail_fast_set = bool(os.environ.get('TESTS_FAIL_FAST'))

        if self.fail_fast or env_fail_fast_set:
            opts.append("--exitfirst")

        if self.with_wtw:
            # "Who tests what": point the wtw plugin at the precomputed
            # diff and baseline coverage database.
            opts.extend([
                '--wtw',
                f'{COVERAGE_CACHE_BASEPATH}/{WHO_TESTS_WHAT_DIFF}',
                '--wtwdb',
                f'{COVERAGE_CACHE_BASEPATH}/{COVERAGE_CACHE_BASELINE}'
            ])

        return opts
class SystemTestSuite(PytestSuite):
    """
    TestSuite for lms and cms python unit tests
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_attr = kwargs.get('eval_attr', None)
        self.test_id = kwargs.get('test_id', self._default_test_id)
        self.fasttest = kwargs.get('fasttest', False)
        self.disable_migrations = kwargs.get('disable_migrations', True)
        self.processes = kwargs.get('processes', None)
        self.randomize = kwargs.get('randomize', None)
        self.settings = kwargs.get('settings', Env.TEST_SETTINGS)
        self.xdist_ip_addresses = kwargs.get('xdist_ip_addresses', None)

        if self.processes is None:
            # Don't use multiprocessing by default
            self.processes = 0

        self.processes = int(self.processes)

    def _under_coverage_cmd(self, cmd):
        """
        If self.run_under_coverage is True, it returns the arg 'cmd'
        altered to be run under coverage. It returns the command
        unaltered otherwise.
        """
        if self.run_under_coverage:
            cmd.append('--cov')
            # Suppress terminal coverage output; reports are generated later.
            cmd.append('--cov-report=')

        return cmd

    @property
    def cmd(self):
        # Assemble the pytest invocation: warnings enabled (-Wd), Django
        # settings chosen from the suite root (lms/cms) and settings name.
        cmd = [
            'python',
            '-Wd',
            '-m',
            'pytest',
            '--ds={}'.format(f'{self.root}.envs.{self.settings}'),
            f"--junitxml={self.xunit_report}",
        ]
        cmd.extend(self.test_options_flags)
        if self.verbosity < 1:
            cmd.append("--quiet")
        elif self.verbosity > 1:
            # currently only two verbosity settings are supported, so using `-vvv`
            # in place of `--verbose`, because it is needed to see migrations.
            cmd.append("-vvv")
        if self.disable_capture:
            cmd.append("-s")

        if not self.disable_migrations:
            cmd.append("--migrations")

        if self.xdist_ip_addresses:
            # Remote sharding: distribute tests to the given hosts via
            # pytest-xdist's ssh/rsync transport.
            cmd.append('--dist=loadscope')
            if self.processes <= 0:
                xdist_remote_processes = 1
            else:
                xdist_remote_processes = self.processes
            for ip in self.xdist_ip_addresses.split(','):
                # Propagate necessary env vars to xdist containers
                env_var_cmd = 'export DJANGO_SETTINGS_MODULE={} DISABLE_COURSEENROLLMENT_HISTORY={} PYTHONHASHSEED=0'\
                    .format(f'{self.root}.envs.{self.settings}',
                            self.disable_courseenrollment_history)

                xdist_string = '--tx {}*ssh="jenkins@{} -o StrictHostKeyChecking=no"' \
                               '//python="source edx-venv-{}/edx-venv/bin/activate; {}; python"' \
                               '//chdir="edx-platform"' \
                    .format(xdist_remote_processes, ip, Env.PYTHON_VERSION, env_var_cmd)
                cmd.append(xdist_string)
            for rsync_dir in Env.rsync_dirs():
                cmd.append(f'--rsyncdir {rsync_dir}')
        else:
            # Local parallelism: -1 lets xdist pick the worker count.
            if self.processes == -1:
                cmd.append('-n auto')
                cmd.append('--dist=loadscope')
            elif self.processes != 0:
                cmd.append(f'-n {self.processes}')
                cmd.append('--dist=loadscope')

        if not self.randomize:
            # pytest-randomly is installed; opt out unless randomization asked for.
            cmd.append('-p no:randomly')
        if self.eval_attr:
            cmd.append(f"-a '{self.eval_attr}'")

        cmd.extend(self.passthrough_options)
        cmd.append(self.test_id)

        return self._under_coverage_cmd(cmd)

    @property
    def _default_test_id(self):
        """
        If no test id is provided, we need to limit the test runner
        to the Djangoapps we want to test. Otherwise, it will
        run tests on all installed packages. We do this by
        using a default test id.
        """
        # We need to use $DIR/*, rather than just $DIR so that
        # pytest will import them early in the test process,
        # thereby making sure that we load any django models that are
        # only defined in test files.
        default_test_globs = [
            f"{self.root}/djangoapps/*",
            "common/djangoapps/*",
            "openedx/core/djangoapps/*",
            "openedx/tests/*",
            "openedx/core/lib/*",
        ]
        if self.root in ('lms', 'cms'):
            default_test_globs.append(f"{self.root}/lib/*")

        if self.root == 'lms':
            # Extra LMS-only test locations.
            default_test_globs.append(f"{self.root}/tests.py")
            default_test_globs.append("openedx/core/djangolib/*")
            default_test_globs.append("openedx/core/tests/*")
            default_test_globs.append("openedx/features")

        def included(path):
            """
            Should this path be included in the pytest arguments?
            """
            if path.endswith(Env.IGNORED_TEST_DIRS):
                return False
            return path.endswith('.py') or os.path.isdir(path)

        default_test_paths = []
        for path_glob in default_test_globs:
            if '*' in path_glob:
                default_test_paths += [path for path in glob(path_glob) if included(path)]
            else:
                default_test_paths += [path_glob]
        # pytest receives the paths as one space-separated string.
        return ' '.join(default_test_paths)
class LibTestSuite(PytestSuite):
    """
    TestSuite for edx-platform/pavelib/paver_tests python unit tests
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # When True, --cov-append is used so multiple lib suites share
        # one coverage data file.
        self.append_coverage = kwargs.get('append_coverage', False)
        self.test_id = kwargs.get('test_id', self.root)
        self.eval_attr = kwargs.get('eval_attr', None)
        self.xdist_ip_addresses = kwargs.get('xdist_ip_addresses', None)
        self.randomize = kwargs.get('randomize', None)
        self.processes = kwargs.get('processes', None)

        if self.processes is None:
            # Don't use multiprocessing by default
            self.processes = 0

        self.processes = int(self.processes)

    @property
    def cmd(self):
        # Assemble the pytest invocation (no Django settings needed for
        # pavelib tests, unlike SystemTestSuite).
        cmd = [
            'python',
            '-Wd',
            '-m',
            'pytest',
            f'--junitxml={self.xunit_report}',
        ]
        cmd.extend(self.passthrough_options + self.test_options_flags)
        if self.verbosity < 1:
            cmd.append("--quiet")
        elif self.verbosity > 1:
            # currently only two verbosity settings are supported, so using `-vvv`
            # in place of `--verbose`, because it is needed to see migrations.
            cmd.append("-vvv")
        if self.disable_capture:
            cmd.append("-s")

        if self.xdist_ip_addresses:
            # Remote sharding via pytest-xdist's ssh/rsync transport.
            cmd.append('--dist=loadscope')
            if self.processes <= 0:
                xdist_remote_processes = 1
            else:
                xdist_remote_processes = self.processes
            for ip in self.xdist_ip_addresses.split(','):
                # Propagate necessary env vars to xdist containers
                django_env_var_cmd = "export DJANGO_SETTINGS_MODULE='lms.envs.test'"
                env_var_cmd = '{} DISABLE_COURSEENROLLMENT_HISTORY={}' \
                    .format(django_env_var_cmd, self.disable_courseenrollment_history)

                xdist_string = '--tx {}*ssh="jenkins@{} -o StrictHostKeyChecking=no"' \
                               '//python="source edx-venv-{}/edx-venv/bin/activate; {}; python"' \
                               '//chdir="edx-platform"' \
                    .format(xdist_remote_processes, ip, Env.PYTHON_VERSION, env_var_cmd)
                cmd.append(xdist_string)
            for rsync_dir in Env.rsync_dirs():
                cmd.append(f'--rsyncdir {rsync_dir}')
            # "--rsyncdir" throws off the configuration root, set it explicitly
            cmd.append('--rootdir=pavelib/paver_tests')
        else:
            # Local parallelism: -1 lets xdist pick the worker count.
            if self.processes == -1:
                cmd.append('-n auto')
                cmd.append('--dist=loadscope')
            elif self.processes != 0:
                cmd.append(f'-n {self.processes}')
                cmd.append('--dist=loadscope')

        if not self.randomize:
            # pytest-randomly is installed; opt out unless randomization asked for.
            cmd.append("-p no:randomly")
        if self.eval_attr:
            cmd.append(f"-a '{self.eval_attr}'")

        cmd.append(self.test_id)

        return self._under_coverage_cmd(cmd)

    def _under_coverage_cmd(self, cmd):
        """
        If self.run_under_coverage is True, it returns the arg 'cmd'
        altered to be run under coverage. It returns the command
        unaltered otherwise.
        """
        if self.run_under_coverage:
            cmd.append('--cov')
            if self.append_coverage:
                # Accumulate into the existing coverage data file rather
                # than overwriting it.
                cmd.append('--cov-append')
            # Suppress terminal coverage output; reports are generated later.
            cmd.append('--cov-report=')

        return cmd

View File

@@ -1,51 +0,0 @@
"""
Classes used for defining and running python test suites
"""
import os
from pavelib.utils.envs import Env
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites.pytest_suite import LibTestSuite, SystemTestSuite
from pavelib.utils.test.suites.suite import TestSuite
__test__ = False # do not collect
class PythonTestSuite(TestSuite):
    """
    A subclass of TestSuite with extra setup for python tests
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep the raw kwargs so they can be forwarded to default subsuites.
        self.opts = kwargs
        self.disable_migrations = kwargs.get('disable_migrations', True)
        self.fasttest = kwargs.get('fasttest', False)
        self.subsuites = kwargs.get('subsuites', self._default_subsuites)

    def __enter__(self):
        super().__enter__()
        if self.disable_migrations:
            # Django test runner creates tables directly from models when set.
            os.environ['DISABLE_MIGRATIONS'] = '1'
        # self.skip_clean is presumably set by the TestSuite base class —
        # confirm against pavelib/utils/test/suites/suite.py.
        if not (self.fasttest or self.skip_clean):
            test_utils.clean_test_files()

    @property
    def _default_subsuites(self):
        """
        The default subsuites to be run. They include lms, cms,
        and all of the libraries in pavelib/paver_tests.
        """
        lib_suites = [
            LibTestSuite(d, **self.opts) for d in Env.LIB_TEST_DIRS
        ]

        system_suites = [
            SystemTestSuite('cms', **self.opts),
            SystemTestSuite('lms', **self.opts),
        ]

        return system_suites + lib_suites

View File

@@ -16,18 +16,6 @@ source scripts/thresholds.sh
XSSLINT_THRESHOLDS=`cat scripts/xsslint_thresholds.json`
export XSSLINT_THRESHOLDS=${XSSLINT_THRESHOLDS//[[:space:]]/}
doCheckVars() {
if [ -n "$CIRCLECI" ] ; then
SCRIPT_TO_RUN=scripts/circle-ci-tests.sh
elif [ -n "$JENKINS_HOME" ] ; then
source scripts/jenkins-common.sh
SCRIPT_TO_RUN=scripts/generic-ci-tests.sh
fi
}
# Determine the CI system for the environment
doCheckVars
# Run appropriate CI system script
if [ -n "$SCRIPT_TO_RUN" ] ; then

View File

@@ -1,95 +0,0 @@
#!/usr/bin/env bash
###############################################################################
#
#   circle-ci-tests.sh
#
#   Execute tests for edx-platform on circleci.com
#
#   Forks should configure parallelism, and use this script
#   to define which tests to run in each of the containers.
#
###############################################################################

# From the sh(1) man page of FreeBSD:
# Exit immediately if any untested command fails. in non-interactive
# mode.  The exit status of a command is considered to be explicitly
# tested if the command is part of the list used to control an if,
# elif, while, or until; if the command is the left hand operand of
# an “&&” or “||” operator; or if the command is a pipeline preceded
# by the ! operator.  If a shell function is executed and its exit
# status is explicitly tested, all commands of the function are con
# sidered to be tested as well.
set -e

# Return status is that of the last command to fail in a
# piped command, or a zero if they all succeed.
set -o pipefail

# There is no need to install the prereqs, as this was already
# just done via the dependencies override section of circle.yml.
export NO_PREREQ_INSTALL='true'

EXIT=0

if [ "$CIRCLE_NODE_TOTAL" == "1" ] ; then
    echo "Only 1 container is being used to run the tests."
    echo "To run in more containers, configure parallelism for this repo's settings "
    echo "via the CircleCI UI and adjust scripts/circle-ci-tests.sh to match."

    # Single-container mode: run every suite sequentially, collecting
    # failures into EXIT so all suites run even if one fails.
    echo "Running tests for pavelib/"
    paver test_lib --cov-args="-p" || EXIT=1
    echo "Running python tests for Studio"
    paver test_system -s cms --cov-args="-p" || EXIT=1
    echo "Running python tests for lms"
    paver test_system -s lms --cov-args="-p" || EXIT=1

    exit $EXIT
else
    # Split up the tests to run in parallel on 4 containers
    case $CIRCLE_NODE_INDEX in
        0)  # run the quality metrics
            echo "Finding fixme's and storing report..."
            paver find_fixme > fixme.log || { cat fixme.log; EXIT=1; }
            echo "Finding PEP 8 violations and storing report..."
            paver run_pep8 > pep8.log || { cat pep8.log; EXIT=1; }
            echo "Finding pylint violations and storing in report..."
            # HACK: we need to print something to the console, otherwise circleci
            # fails and aborts the job because nothing is displayed for > 10 minutes.
            paver run_pylint -l $LOWER_PYLINT_THRESHOLD:$UPPER_PYLINT_THRESHOLD | tee pylint.log || EXIT=1

            mkdir -p reports
            PATH=$PATH:node_modules/.bin

            echo "Finding ESLint violations and storing report..."
            paver run_eslint -l $ESLINT_THRESHOLD > eslint.log || { cat eslint.log; EXIT=1; }
            echo "Finding Stylelint violations and storing report..."
            paver run_stylelint -l $STYLELINT_THRESHOLD > stylelint.log || { cat stylelint.log; EXIT=1; }

            # Run quality task. Pass in the 'fail-under' percentage to diff-quality
            paver run_quality -p 100 || EXIT=1

            exit $EXIT
            ;;

        1)  # run all of the lms unit tests
            # No `|| EXIT=1` here: with `set -e` a failing paver call
            # terminates the script with a nonzero status directly.
            paver test_system -s lms --cov-args="-p"
            ;;

        2)  # run all of the cms unit tests
            paver test_system -s cms --cov-args="-p"
            ;;

        3)  # run the pavelib unit tests
            paver test_lib --cov-args="-p"
            ;;

        *)  # Parallelism was raised past 4 without updating this script.
            echo "No tests were executed in this container."
            echo "Please adjust scripts/circle-ci-tests.sh to match your parallelism."
            exit 1
            ;;
    esac
fi

View File

@@ -147,10 +147,6 @@ case "$TEST_SUITE" in
exit $EXIT
;;
"lms-unit"|"cms-unit"|"pavelib-unit")
$TOX bash scripts/unit-tests.sh
;;
"js-unit")
$TOX paver test_js --coverage
$TOX paver diff_coverage

View File

@@ -56,16 +56,6 @@ _paver()
if [[ -n $subcmd ]]
then
case $subcmd in
test_system)
_test_system_args
if [[ -n $COMPREPLY ]]
then
return 0
fi
;;
esac
if [[ ${#COMP_WORDS[*]} == 3 ]]
then
@@ -88,22 +78,6 @@ _paver()
fi
}
# Completion helper for the `paver test_system` subcommand: completes the
# value of the -s/--system flag with "lms" or "cms".
# NOTE(review): paver test_system was deleted in the python unit-test paver
# cleanup, so this helper (and its dispatch entry in _paver) is dead code --
# confirm and remove it together with the rest of the test_system wiring.
_test_system_args()
{
local cur prev
# The word currently being completed and the word immediately before it.
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD - 1]}"
case "$prev" in
-s|--system)
# Offer the two systems, filtered by the prefix typed so far.
COMPREPLY=( $(compgen -W "lms cms" -- "$cur") )
return 0
;;
*)
# Any other preceding word: no suggestions from this helper.
;;
esac
}
# Assign the auto-completion function for our command.
# `-o default` falls back to bash's default (filename) completion whenever
# _paver produces no matches.
complete -F _paver -o default paver

View File

@@ -1,136 +0,0 @@
#!/bin/bash
set -e
###############################################################################
#
# unit-tests.sh
#
# Execute Python unit tests for edx-platform.
#
# This script is typically called from generic-ci-tests.sh, which defines
# these environment variables:
#
# `TEST_SUITE` defines which kind of test to run.
# Possible values are:
#
# - "lms-unit": Run the LMS Python unit tests
# - "cms-unit": Run the CMS Python unit tests
# - "pavelib-unit": Run Python unit tests from the pavelib/paver_tests directory
#
# `SHARD` is a number indicating which subset of the tests to build.
#
# For "lms-unit", the tests are put into shard groups
# using the 'attr' decorator (e.g. "@attr(shard=1)"). Anything with
# the 'shard=n' attribute will run in the nth shard. If there isn't a
# shard explicitly assigned, the test will run in the last shard.
#
# This script is broken out so it can be run by tox and redirect stderr to
# the specified file before tox gets a chance to redirect it to stdout.
#
###############################################################################
# NOTE(review): every suite below shells out to `paver test_system` /
# `paver test_lib`, which are removed in the python unit-test paver cleanup.
# This script is therefore dead and should be deleted (or rewritten to call
# pytest directly) as part of that refactor.

# npm packages are not needed for python-only unit tests.
export SKIP_NPM_INSTALL="True"
# Skip re-installation of Python prerequisites inside a tox execution.
if [[ -n "$TOXENV" ]]; then
export NO_PREREQ_INSTALL="True"
fi
# When remote xdist workers are requested, provision them and pass their IPs
# (read from pytest_worker_ips.txt, presumably written by
# prepare_xdist_nodes.sh -- TODO confirm) to paver; all shards run in one pass.
if [[ -n "$XDIST_NUM_WORKERS" ]]; then
bash scripts/xdist/prepare_xdist_nodes.sh
# Note the nested quotes: bash concatenates the adjacent quoted pieces, so
# this expands to a single assignment containing the file's contents.
PAVER_ARGS="-v --xdist_ip_addresses="$(<pytest_worker_ips.txt)""
export SHARD="all"
if [[ -n "$XDIST_REMOTE_NUM_PROCESSES" ]]; then
PARALLEL="--processes=$XDIST_REMOTE_NUM_PROCESSES"
else
PARALLEL="--processes=1"
fi
else
PAVER_ARGS="-v"
# NOTE(review): -1 presumably means "auto-detect process count" -- confirm
# against the paver test task's option handling.
PARALLEL="--processes=-1"
fi
# Optional "who tests what" coverage-attribution modes.
if [[ -n "$FILTER_WHO_TESTS_WHAT" ]]; then
PAVER_ARGS="$PAVER_ARGS --with-wtw=origin/master"
fi
if [[ -n "$COLLECT_WHO_TESTS_WHAT" ]]; then
PAVER_ARGS="$PAVER_ARGS --pytest-contexts"
fi
# Dispatch on TEST_SUITE/SHARD. Each shard redirects stderr to its own log
# file and renames the coverage data file so shards can later be combined.
# NOTE(review): if TEST_SUITE is unset or unrecognized, no arm matches and
# the script exits 0 without running anything -- confirm that is intended.
case "${TEST_SUITE}" in
"lms-unit")
case "$SHARD" in
"all")
paver test_system -s lms --disable_capture ${PAVER_ARGS} ${PARALLEL} 2> lms-tests.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.lms
;;
[1-9])
# Run only the tests explicitly tagged with this shard number.
paver test_system -s lms --disable_capture --eval-attr="shard==$SHARD" ${PAVER_ARGS} ${PARALLEL} 2> lms-tests.${SHARD}.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.lms.${SHARD}
;;
10|"noshard")
# Last shard: everything at/above this shard number plus any test
# without an explicit shard attribute.
paver test_system -s lms --disable_capture --eval-attr="shard>=$SHARD or not shard" ${PAVER_ARGS} ${PARALLEL} 2> lms-tests.10.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.lms.10
;;
*)
# If no shard is specified, rather than running all tests, create an empty xunit file. This is a
# backwards compatibility feature. If a new shard (e.g., shard n) is introduced in the build
# system, but the tests are called with the old code, then builds will not fail because the
# code is out of date. Instead, there will be an instantly-passing shard.
mkdir -p reports/lms
# emptyxunit is not defined in this script -- presumably sourced from a
# sibling CI helper; TODO confirm it is still available here.
emptyxunit "lms/nosetests"
;;
esac
;;
"cms-unit")
case "$SHARD" in
"all")
paver test_system -s cms --disable_capture ${PAVER_ARGS} ${PARALLEL} 2> cms-tests.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.cms
;;
1)
# NOTE(review): unlike the lms shards, cms shards 1 and 2 do not pass
# ${PARALLEL} -- confirm whether serial execution is intentional here.
paver test_system -s cms --disable_capture --eval-attr="shard==$SHARD" ${PAVER_ARGS} 2> cms-tests.${SHARD}.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.cms.${SHARD}
;;
2|"noshard")
paver test_system -s cms --disable_capture --eval-attr="shard>=$SHARD or not shard" ${PAVER_ARGS} 2> cms-tests.2.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.cms.2
;;
*)
# If no shard is specified, rather than running all tests, create an empty xunit file. This is a
# backwards compatibility feature. If a new shard (e.g., shard n) is introduced in the build
# system, but the tests are called with the old code, then builds will not fail because the
# code is out of date. Instead, there will be an instantly-passing shard.
mkdir -p reports/cms
emptyxunit "cms/nosetests"
;;
esac
;;
"pavelib-unit")
case "$SHARD" in
"all")
paver test_lib --disable_capture ${PAVER_ARGS} ${PARALLEL} 2> pavelib-tests.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.pavelib
;;
[1-2])
# NOTE(review): shards 1-2 of the "pavelib" suite actually point at
# ./xmodule via -l -- the suite name no longer matches its contents.
paver test_lib -l ./xmodule --disable_capture --eval-attr="shard==$SHARD" ${PAVER_ARGS} 2> pavelib-tests.${SHARD}.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.pavelib.${SHARD}
;;
3|"noshard")
paver test_lib --disable_capture --eval-attr="shard>=$SHARD or not shard" ${PAVER_ARGS} 2> pavelib-tests.3.log
mv reports/${TEST_SUITE}.coverage reports/.coverage.pavelib.3
;;
*)
# If no shard is specified, rather than running all tests, create an empty xunit file. This is a
# backwards compatibility feature. If a new shard (e.g., shard n) is introduced in the build
# system, but the tests are called with the old code, then builds will not fail because the
# code is out of date. Instead, there will be an instantly-passing shard.
mkdir -p reports/pavelib
emptyxunit "pavelib/nosetests"
;;
esac
;;
esac