build: replace paver quality tests and js commands (#35159)
@@ -142,8 +142,6 @@ workflow.AssessmentWorkflowStep:

# Via edx-celeryutils
celery_utils.ChordData:
    ".. no_pii:": "No PII"
celery_utils.FailedTask:
    ".. no_pii:": "No PII"

# Via completion XBlock
completion.BlockCompletion:
.github/workflows/js-tests.yml (10 lines changed)
@@ -64,13 +64,13 @@ jobs:
          make base-requirements

      - uses: c-hive/gha-npm-cache@v1

      - name: Install npm
        run: npm ci

      - name: Run JS Tests
        env:
          TEST_SUITE: js-unit
          SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh
        run: |
          npm install -g jest
          xvfb-run --auto-servernum ./scripts/all-tests.sh
          npm run test

      - name: Save Job Artifacts
        uses: actions/upload-artifact@v4
.github/workflows/quality-checks.yml (23 lines changed)
@@ -60,16 +60,29 @@ jobs:
          PIP_SRC: ${{ runner.temp }}
        run: |
          make test-requirements

      - name: Install npm
        env:
          PIP_SRC: ${{ runner.temp }}
        run: npm ci

      - name: Install python packages
        env:
          PIP_SRC: ${{ runner.temp }}
        run: |
          pip install -e .

      - name: Run Quality Tests
        env:
          TEST_SUITE: quality
          SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh
          PIP_SRC: ${{ runner.temp }}
          TARGET_BRANCH: ${{ github.base_ref }}
        run: |
          ./scripts/all-tests.sh

          make pycodestyle
          npm run lint
          make xsslint
          make pii_check
          make check_keywords

      - name: Save Job Artifacts
        if: always()
        uses: actions/upload-artifact@v4
@@ -1,7 +1,7 @@
 source_path: ./
 report_path: pii_report
 safelist_path: .annotation_safe_list.yml
-coverage_target: 94.5
+coverage_target: 85.3
 # See OEP-30 for more information on these values and what they mean:
 # https://open-edx-proposals.readthedocs.io/en/latest/oep-0030-arch-pii-markup-and-auditing.html#docstring-annotations
 annotations:
@@ -1,5 +0,0 @@
xmodule/css
common/static/sass/bourbon
common/static/xmodule/modules/css
common/test/test-theme
lms/static/sass/vendor
Makefile (34 lines changed)
@@ -204,3 +204,37 @@ migrate: migrate-lms migrate-cms
# Part of https://github.com/openedx/wg-developer-experience/issues/136
ubuntu-requirements: ## Install ubuntu 22.04 system packages needed for `pip install` to work on ubuntu.
	sudo apt install libmysqlclient-dev libxmlsec1-dev

xsslint: ## check xss for quality issues
	python scripts/xsslint/xss_linter.py \
	--rule-totals \
	--config=scripts.xsslint_config \
	--thresholds=scripts/xsslint_thresholds.json

pycodestyle: ## check python files for quality issues
	pycodestyle .

## Re-enable --lint flag when this issue https://github.com/openedx/edx-platform/issues/35775 is resolved
pii_check: ## check django models for pii annotations
	DJANGO_SETTINGS_MODULE=cms.envs.test \
	code_annotations django_find_annotations \
	--config_file .pii_annotations.yml \
	--app_name cms \
	--coverage \
	--lint

	DJANGO_SETTINGS_MODULE=lms.envs.test \
	code_annotations django_find_annotations \
	--config_file .pii_annotations.yml \
	--app_name lms \
	--coverage \
	--lint

check_keywords: ## check django models for reserved keywords
	DJANGO_SETTINGS_MODULE=cms.envs.test \
	python manage.py cms check_reserved_keywords \
	--override_file db_keyword_overrides.yml

	DJANGO_SETTINGS_MODULE=lms.envs.test \
	python manage.py lms check_reserved_keywords \
	--override_file db_keyword_overrides.yml
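For context on what `pii_check` produces: the old paver task parsed the `code_annotations` stdout for an uncovered-model count and a threshold-failure message (see `_extract_missing_pii_annotations` in the deleted `pavelib/quality.py` later in this diff). A minimal standalone sketch of that parsing, with a hypothetical report path:

import re

# Hypothetical path; code_annotations stdout captured via `tee` or shell redirection.
REPORT_PATH = "pii_report/pii_check_lms.report"

def parse_pii_report(filename):
    """Return (uncovered_model_count, passed) parsed from django_find_annotations stdout."""
    uncovered = 0
    passed = True
    with open(filename) as report_file:
        for line in report_file:
            match = re.match(r'^Coverage found (\d+) uncovered', line)
            if match:
                uncovered = int(match.group(1))
            if line.startswith('Coverage threshold not met!'):
                passed = False
    return uncovered, passed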
package.json (21 lines changed)
@@ -12,7 +12,26 @@
     "compile-sass-dev": "scripts/compile_sass.py --env=development",
     "watch": "{ npm run watch-webpack& npm run watch-sass& } && sleep infinity",
     "watch-webpack": "npm run webpack-dev -- --watch",
-    "watch-sass": "scripts/watch_sass.sh"
+    "watch-sass": "scripts/watch_sass.sh",
+    "lint": "python scripts/eslint.py",
+    "test": "npm run test-cms && npm run test-lms && npm run test-xmodule && npm run test-common && npm run test-jest",
+    "test-kind-vanilla": "npm run test-cms-vanilla && npm run test-xmodule-vanilla && npm run test-common-vanilla",
+    "test-kind-require": "npm run test-cms-require && npm run test-common-require",
+    "test-kind-webpack": "npm run test-cms-webpack && npm run test-lms-webpack && npm run test-xmodule-webpack",
+    "test-cms": "npm run test-cms-vanilla && npm run test-cms-require",
+    "test-cms-vanilla": "npm run test-suite -- cms/static/karma_cms.conf.js",
+    "test-cms-require": "npm run test-suite -- cms/static/karma_cms_squire.conf.js",
+    "test-cms-webpack": "npm run test-suite -- cms/static/karma_cms_webpack.conf.js",
+    "test-lms": "echo 'WARNING: Webpack JS tests are disabled. No LMS JS tests will be run. See https://github.com/openedx/edx-platform/issues/35956 for details.'",
+    "test-lms-webpack": "npm run test-suite -- lms/static/karma_lms.conf.js",
+    "test-xmodule": "npm run test-xmodule-vanilla",
+    "test-xmodule-vanilla": "npm run test-suite -- xmodule/js/karma_xmodule.conf.js",
+    "test-xmodule-webpack": "npm run test-suite -- xmodule/js/karma_xmodule_webpack.conf.js",
+    "test-common": "npm run test-common-vanilla && npm run test-common-require",
+    "test-common-vanilla": "npm run test-suite -- common/static/karma_common.conf.js",
+    "test-common-require": "npm run test-suite -- common/static/karma_common_requirejs.conf.js",
+    "test-suite": "${NODE_WRAPPER:-xvfb-run --auto-servernum} node --max_old_space_size=4096 node_modules/.bin/karma start --single-run=true --capture-timeout=60000 --browsers=FirefoxNoUpdates",
+    "test-jest": "jest"
   },
   "dependencies": {
     "@babel/core": "7.26.0",
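The new `lint` script delegates to `scripts/eslint.py`, which is not shown in this diff. A plausible minimal shape for such a wrapper, everything inside the function being an assumption, would simply shell out to eslint the way the old paver task did and propagate the exit code:

# Hypothetical sketch of a scripts/eslint.py-style wrapper; the real file is not part of this diff.
import subprocess
import sys

def main():
    # Mirrors the old paver invocation in run_eslint: raise the node heap
    # ceiling, lint .js/.jsx files, emit compact output.
    result = subprocess.run(
        [
            "node", "--max_old_space_size=4096", "node_modules/.bin/eslint",
            "--ext", ".js", "--ext", ".jsx", "--format=compact", ".",
        ],
        check=False,
    )
    sys.exit(result.returncode)

if __name__ == "__main__":
    main()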
@@ -3,4 +3,4 @@ paver commands
 """
 
-from . import assets, js_test, prereqs, quality
+from . import assets
@@ -1,143 +0,0 @@
"""
Javascript test tasks
"""


import os
import re
import sys

from paver.easy import cmdopts, needs, sh, task

from pavelib.utils.envs import Env
from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite
from pavelib.utils.timer import timed

try:
    from pygments.console import colorize
except ImportError:
    colorize = lambda color, text: text

__test__ = False  # do not collect


@task
@needs(
    'pavelib.prereqs.install_node_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("suite=", "s", "Test suite to run"),
    ("mode=", "m", "dev or run"),
    ("coverage", "c", "Run test under coverage"),
    ("port=", "p", "Port to run test server on (dev mode only)"),
    ('skip-clean', 'C', 'skip cleaning repository before running tests'),
    ('skip_clean', None, 'deprecated in favor of skip-clean'),
], share_with=["pavelib.utils.tests.utils.clean_reports_dir"])
@timed
def test_js(options):
    """
    Run the JavaScript tests
    """
    mode = getattr(options, 'mode', 'run')
    port = None
    skip_clean = getattr(options, 'skip_clean', False)

    if mode == 'run':
        suite = getattr(options, 'suite', 'all')
        coverage = getattr(options, 'coverage', False)
    elif mode == 'dev':
        suite = getattr(options, 'suite', None)
        coverage = False
        port = getattr(options, 'port', None)
    else:
        sys.stderr.write("Invalid mode. Please choose 'dev' or 'run'.")
        return

    if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS):
        sys.stderr.write(
            "Unknown test suite. Please choose from ({suites})\n".format(
                suites=", ".join(Env.JS_TEST_ID_KEYS)
            )
        )
        return

    if suite != 'jest-snapshot':
        test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean)
        test_suite.run()

    if (suite == 'jest-snapshot') or (suite == 'all'):  # lint-amnesty, pylint: disable=consider-using-in
        test_suite = JestSnapshotTestSuite('jest')
        test_suite.run()


@task
@cmdopts([
    ("suite=", "s", "Test suite to run"),
    ("coverage", "c", "Run test under coverage"),
])
@timed
def test_js_run(options):
    """
    Run the JavaScript tests and print results to the console
    """
    options.mode = 'run'
    test_js(options)


@task
@cmdopts([
    ("suite=", "s", "Test suite to run"),
    ("port=", "p", "Port to run test server on"),
])
@timed
def test_js_dev(options):
    """
    Run the JavaScript tests in your default browsers
    """
    options.mode = 'dev'
    test_js(options)


@task
@needs('pavelib.prereqs.install_coverage_prereqs')
@cmdopts([
    ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
], share_with=['coverage'])
@timed
def diff_coverage(options):
    """
    Build the diff coverage reports
    """
    compare_branch = options.get('compare_branch', 'origin/master')

    # Find all coverage XML files (both Python and JavaScript)
    xml_reports = []

    for filepath in Env.REPORT_DIR.walk():
        if bool(re.match(r'^coverage.*\.xml$', filepath.basename())):
            xml_reports.append(filepath)

    if not xml_reports:
        err_msg = colorize(
            'red',
            "No coverage info found. Run `paver test` before running "
            "`paver coverage`.\n"
        )
        sys.stderr.write(err_msg)
    else:
        xml_report_str = ' '.join(xml_reports)
        diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')

        # Generate the diff coverage reports (HTML and console)
        # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153
        sh(
            "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} "
            "--html-report {diff_html_path}".format(
                xml_report_str=xml_report_str,
                compare_branch=compare_branch,
                diff_html_path=diff_html_path,
            )
        )

        print("\n")
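The deleted paver entry points above map onto the new npm scripts: `paver test_js_run` roughly becomes `npm run test`, and per-suite runs become the `test-*` scripts in package.json. Both ultimately launch the same karma command (compare `EXPECTED_KARMA_OPTIONS` in the deleted test file further below with the `test-suite` script). A sketch of that shared invocation, with an example config path:

# Sketch of the karma invocation that both the old JsTestSuite and the new
# package.json "test-suite" script boil down to; the config path is one example
# of the karma_*.conf.js files listed in package.json.
import subprocess

config_file = "cms/static/karma_cms.conf.js"
subprocess.run(
    [
        "node", "--max_old_space_size=4096", "node_modules/.bin/karma", "start",
        config_file, "--single-run=true", "--capture-timeout=60000",
    ],
    check=True,
)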
@@ -1,22 +0,0 @@
"""
Pytest fixtures for the pavelib unit tests.
"""


import os
from shutil import rmtree

import pytest

from pavelib.utils.envs import Env


@pytest.fixture(autouse=True, scope='session')
def delete_quality_junit_xml():
    """
    Delete the JUnit XML results files for quality check tasks run during the
    unit tests.
    """
    yield
    if os.path.exists(Env.QUALITY_DIR):
        rmtree(Env.QUALITY_DIR, ignore_errors=True)
@@ -1,54 +0,0 @@
"""
Tests for Paver's ESLint task.
"""


import unittest
from unittest.mock import patch

import pytest
from paver.easy import BuildFailure, call_task

import pavelib.quality


class TestPaverESLint(unittest.TestCase):
    """
    For testing run_eslint
    """

    def setUp(self):
        super().setUp()

        # Mock the paver @needs decorator
        self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start()
        self._mock_paver_needs.return_value = 0

        # Mock shell commands
        patcher = patch('pavelib.quality.sh')
        self._mock_paver_sh = patcher.start()

        # Cleanup mocks
        self.addCleanup(patcher.stop)
        self.addCleanup(self._mock_paver_needs.stop)

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_count_from_last_line')
    def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric):  # pylint: disable=unused-argument
        """
        run_eslint encounters an error parsing the eslint output log
        """
        mock_count.return_value = None
        with pytest.raises(BuildFailure):
            call_task('pavelib.quality.run_eslint', args=[''])

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_count_from_last_line')
    def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric):  # pylint: disable=unused-argument
        """
        eslint finds violations, but a limit was not set
        """
        mock_count.return_value = 1
        pavelib.quality.run_eslint("")
@@ -1,148 +0,0 @@
"""Unit tests for the Paver JavaScript testing tasks."""

from unittest.mock import patch

import ddt
from paver.easy import call_task

import pavelib.js_test
from pavelib.utils.envs import Env

from .utils import PaverTestCase


@ddt.ddt
class TestPaverJavaScriptTestTasks(PaverTestCase):
    """
    Test the Paver JavaScript testing tasks.
    """

    EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete'
    EXPECTED_KARMA_OPTIONS = (
        "{config_file} "
        "--single-run={single_run} "
        "--capture-timeout=60000 "
        "--junitreportpath="
        "{platform_root}/reports/javascript/javascript_xunit-{suite}.xml "
        "--browsers={browser}"
    )
    EXPECTED_COVERAGE_OPTIONS = (
        ' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml'
    )

    EXPECTED_COMMANDS = [
        "make report_dir",
        'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads',
        "find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;",
        'rm -rf test_root/log/auto_screenshots/*',
        "rm -rf /tmp/mako_[cl]ms",
    ]

    def setUp(self):
        super().setUp()

        # Mock the paver @needs decorator
        self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start()
        self._mock_paver_needs.return_value = 0

        # Cleanup mocks
        self.addCleanup(self._mock_paver_needs.stop)

    @ddt.data(
        [""],
        ["--coverage"],
        ["--suite=lms"],
        ["--suite=lms --coverage"],
    )
    @ddt.unpack
    def test_test_js_run(self, options_string):
        """
        Test the "test_js_run" task.
        """
        options = self.parse_options_string(options_string)
        self.reset_task_messages()
        call_task("pavelib.js_test.test_js_run", options=options)
        self.verify_messages(options=options, dev_mode=False)

    @ddt.data(
        [""],
        ["--port=9999"],
        ["--suite=lms"],
        ["--suite=lms --port=9999"],
    )
    @ddt.unpack
    def test_test_js_dev(self, options_string):
        """
        Test the "test_js_dev" task.
        """
        options = self.parse_options_string(options_string)
        self.reset_task_messages()
        call_task("pavelib.js_test.test_js_dev", options=options)
        self.verify_messages(options=options, dev_mode=True)

    def parse_options_string(self, options_string):
        """
        Parse a string containing the options for a test run
        """
        parameters = options_string.split(" ")
        suite = "all"
        if "--system=lms" in parameters:
            suite = "lms"
        elif "--system=common" in parameters:
            suite = "common"
        coverage = "--coverage" in parameters
        port = None
        if "--port=9999" in parameters:
            port = 9999
        return {
            "suite": suite,
            "coverage": coverage,
            "port": port,
        }

    def verify_messages(self, options, dev_mode):
        """
        Verify that the messages generated when running tests are as expected
        for the specified options and dev_mode.
        """
        is_coverage = options['coverage']
        port = options['port']
        expected_messages = []
        suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']]

        expected_messages.extend(self.EXPECTED_COMMANDS)
        if not dev_mode and not is_coverage:
            expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format(
                platform_root=self.platform_root
            ))

        command_template = (
            'node --max_old_space_size=4096 node_modules/.bin/karma start {options}'
        )

        for suite in suites:
            # Karma test command
            if suite != 'jest-snapshot':
                karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)]
                expected_test_tool_command = command_template.format(
                    options=self.EXPECTED_KARMA_OPTIONS.format(
                        config_file=karma_config_file,
                        single_run='false' if dev_mode else 'true',
                        suite=suite,
                        platform_root=self.platform_root,
                        browser=Env.KARMA_BROWSER,
                    ),
                )
                if is_coverage:
                    expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format(
                        platform_root=self.platform_root,
                        suite=suite
                    )
                if port:
                    expected_test_tool_command += f" --port={port}"
            else:
                expected_test_tool_command = 'jest'

            expected_messages.append(expected_test_tool_command)

        assert self.task_messages == expected_messages
@@ -1,156 +0,0 @@
"""  # lint-amnesty, pylint: disable=django-not-configured
Tests for paver quality tasks
"""


import os
import shutil  # lint-amnesty, pylint: disable=unused-import
import tempfile
import textwrap
import unittest
from unittest.mock import MagicMock, mock_open, patch  # lint-amnesty, pylint: disable=unused-import

import pytest  # lint-amnesty, pylint: disable=unused-import
from ddt import data, ddt, file_data, unpack  # lint-amnesty, pylint: disable=unused-import
from path import Path as path
from paver.easy import BuildFailure  # lint-amnesty, pylint: disable=unused-import

import pavelib.quality
from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint  # lint-amnesty, pylint: disable=unused-import

OPEN_BUILTIN = 'builtins.open'


@ddt
class TestPaverQualityViolations(unittest.TestCase):
    """
    For testing the paver violations-counting tasks
    """
    def setUp(self):
        super().setUp()
        self.f = tempfile.NamedTemporaryFile(delete=False)  # lint-amnesty, pylint: disable=consider-using-with
        self.f.close()
        self.addCleanup(os.remove, self.f.name)

    def test_pep8_parser(self):
        with open(self.f.name, 'w') as f:
            f.write("hello\nhithere")
        num = len(pavelib.quality._pep8_violations(f.name))  # pylint: disable=protected-access
        assert num == 2


class TestPaverReportViolationsCounts(unittest.TestCase):
    """
    For testing utility functions for getting counts from reports for
    run_eslint and run_xsslint.
    """

    def setUp(self):
        super().setUp()

        # Temporary file infrastructure
        self.f = tempfile.NamedTemporaryFile(delete=False)  # lint-amnesty, pylint: disable=consider-using-with
        self.f.close()

        # Cleanup various mocks and tempfiles
        self.addCleanup(os.remove, self.f.name)

    def test_get_eslint_violations_count(self):
        with open(self.f.name, 'w') as f:
            f.write("3000 violations found")
        actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint")  # pylint: disable=protected-access
        assert actual_count == 3000

    def test_get_eslint_violations_no_number_found(self):
        with open(self.f.name, 'w') as f:
            f.write("Not expected string regex")
        actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint")  # pylint: disable=protected-access
        assert actual_count is None

    def test_get_eslint_violations_count_truncated_report(self):
        """
        A truncated report (i.e. last line is just a violation)
        """
        with open(self.f.name, 'w') as f:
            f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.")
        actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint")  # pylint: disable=protected-access
        assert actual_count is None

    def test_generic_value(self):
        """
        Default behavior is to look for an integer appearing at head of line
        """
        with open(self.f.name, 'w') as f:
            f.write("5.777 good to see you")
        actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo")  # pylint: disable=protected-access
        assert actual_count == 5

    def test_generic_value_none_found(self):
        """
        Default behavior is to look for an integer appearing at head of line
        """
        with open(self.f.name, 'w') as f:
            f.write("hello 5.777 good to see you")
        actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo")  # pylint: disable=protected-access
        assert actual_count is None

    def test_get_xsslint_counts_happy(self):
        """
        Test happy path getting violation counts from xsslint report.
        """
        report = textwrap.dedent("""
            test.html: 30:53: javascript-jquery-append:  $('#test').append(print_tos);

            javascript-concat-html: 310 violations
            javascript-escape: 7 violations

            2608 violations total
        """)
        with open(self.f.name, 'w') as f:
            f.write(report)
        counts = pavelib.quality._get_xsslint_counts(self.f.name)  # pylint: disable=protected-access
        self.assertDictEqual(counts, {
            'rules': {
                'javascript-concat-html': 310,
                'javascript-escape': 7,
            },
            'total': 2608,
        })

    def test_get_xsslint_counts_bad_counts(self):
        """
        Test getting violation counts from truncated and malformed xsslint
        report.
        """
        report = textwrap.dedent("""
            javascript-concat-html: violations
        """)
        with open(self.f.name, 'w') as f:
            f.write(report)
        counts = pavelib.quality._get_xsslint_counts(self.f.name)  # pylint: disable=protected-access
        self.assertDictEqual(counts, {
            'rules': {},
            'total': None,
        })


class TestPrepareReportDir(unittest.TestCase):
    """
    Tests the report directory preparation
    """

    def setUp(self):
        super().setUp()
        self.test_dir = tempfile.mkdtemp()
        self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir)  # lint-amnesty, pylint: disable=consider-using-with
        self.addCleanup(os.removedirs, self.test_dir)

    def test_report_dir_with_files(self):
        assert os.path.exists(self.test_file.name)
        pavelib.quality._prepare_report_dir(path(self.test_dir))  # pylint: disable=protected-access
        assert not os.path.exists(self.test_file.name)

    def test_report_dir_without_files(self):
        os.remove(self.test_file.name)
        pavelib.quality._prepare_report_dir(path(self.test_dir))  # pylint: disable=protected-access
        assert os.listdir(path(self.test_dir)) == []
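The parsing behavior these tests pin down is just a head-of-line integer match over the report's last non-blank line. A minimal standalone illustration of the same rule, using the exact strings from the tests above:

import re

def count_from_line(last_line):
    """Head-of-line integer match, as _get_count_from_last_line expects."""
    match = re.search(r'^\d+', last_line.strip())
    return float(match.group(0)) if match else None

assert count_from_line("3000 violations found") == 3000
assert count_from_line("5.777 good to see you") == 5           # only the integer head is taken
assert count_from_line("hello 5.777 good to see you") is None  # no integer at line start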
@@ -1,79 +0,0 @@
"""
Tests for Paver's PII checker task.
"""

import shutil
import tempfile
import unittest
from unittest.mock import patch

from path import Path as path
from paver.easy import call_task, BuildFailure

import pavelib.quality
from pavelib.utils.envs import Env


class TestPaverPIICheck(unittest.TestCase):
    """
    For testing the paver run_pii_check task
    """
    def setUp(self):
        super().setUp()
        self.report_dir = path(tempfile.mkdtemp())
        self.addCleanup(shutil.rmtree, self.report_dir)

    @patch.object(pavelib.quality.run_pii_check, 'needs')
    @patch('pavelib.quality.sh')
    def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs):
        """
        run_pii_check succeeds with proper report dir
        """
        # Make the expected stdout files.
        cms_stdout_report = self.report_dir / 'pii_check_cms.report'
        cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
        lms_stdout_report = self.report_dir / 'pii_check_lms.report'
        lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n'])

        mock_needs.return_value = 0
        call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
        mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
        assert len(mock_calls) == 2
        assert any('lms.envs.test' in call for call in mock_calls)
        assert any('cms.envs.test' in call for call in mock_calls)
        assert all(str(self.report_dir) in call for call in mock_calls)
        metrics_file = Env.METRICS_DIR / 'pii'
        assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'

    @patch.object(pavelib.quality.run_pii_check, 'needs')
    @patch('pavelib.quality.sh')
    def test_pii_check_failed(self, mock_paver_sh, mock_needs):
        """
        run_pii_check fails due to crossing the threshold.
        """
        # Make the expected stdout files.
        cms_stdout_report = self.report_dir / 'pii_check_cms.report'
        cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
        lms_stdout_report = self.report_dir / 'pii_check_lms.report'
        lms_stdout_report.write_lines([
            'Coverage found 66 uncovered models:',
            'Coverage threshold not met! Needed 100.0, actually 95.0!',
        ])

        mock_needs.return_value = 0
        try:
            with self.assertRaises(BuildFailure):
                call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
        except SystemExit:
            # Sometimes the BuildFailure raises a SystemExit, sometimes it doesn't, not sure why.
            # As a hack, we just wrap it in try-except.
            # This is not good, but these tests weren't even running for years, and we're removing this whole test
            # suite soon anyway.
            pass
        mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
        assert len(mock_calls) == 2
        assert any('lms.envs.test' in call for call in mock_calls)
        assert any('cms.envs.test' in call for call in mock_calls)
        assert all(str(self.report_dir) in call for call in mock_calls)
        metrics_file = Env.METRICS_DIR / 'pii'
        assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'
@@ -1,36 +0,0 @@
"""
Tests for Paver's Stylelint tasks.
"""

from unittest.mock import MagicMock, patch

import pytest
import ddt
from paver.easy import call_task

from .utils import PaverTestCase


@ddt.ddt
class TestPaverStylelint(PaverTestCase):
    """
    Tests for Paver's Stylelint tasks.
    """
    @ddt.data(
        [False],
        [True],
    )
    @ddt.unpack
    def test_run_stylelint(self, should_pass):
        """
        Verify that the quality task fails with Stylelint violations.
        """
        if should_pass:
            _mock_stylelint_violations = MagicMock(return_value=0)
            with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations):
                call_task('pavelib.quality.run_stylelint')
        else:
            _mock_stylelint_violations = MagicMock(return_value=100)
            with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations):
                with pytest.raises(SystemExit):
                    call_task('pavelib.quality.run_stylelint')
@@ -1,168 +0,0 @@
"""
Tests of the pavelib.utils.timer module.
"""


from datetime import datetime, timedelta
from unittest import TestCase

from unittest.mock import MagicMock, patch

from pavelib.utils import timer


@timer.timed
def identity(*args, **kwargs):
    """
    An identity function used as a default task to test the timing of.
    """
    return args, kwargs


MOCK_OPEN = MagicMock(spec=open)


@patch.dict('pavelib.utils.timer.__builtins__', open=MOCK_OPEN)
class TimedDecoratorTests(TestCase):
    """
    Tests of the pavelib.utils.timer:timed decorator.
    """
    def setUp(self):
        super().setUp()

        patch_dumps = patch.object(timer.json, 'dump', autospec=True)
        self.mock_dump = patch_dumps.start()
        self.addCleanup(patch_dumps.stop)

        patch_makedirs = patch.object(timer.os, 'makedirs', autospec=True)
        self.mock_makedirs = patch_makedirs.start()
        self.addCleanup(patch_makedirs.stop)

        patch_datetime = patch.object(timer, 'datetime', autospec=True)
        self.mock_datetime = patch_datetime.start()
        self.addCleanup(patch_datetime.stop)

        patch_exists = patch.object(timer, 'exists', autospec=True)
        self.mock_exists = patch_exists.start()
        self.addCleanup(patch_exists.stop)

        MOCK_OPEN.reset_mock()

    def get_log_messages(self, task=identity, args=None, kwargs=None, raises=None):
        """
        Return all timing messages recorded during the execution of ``task``.
        """
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}

        if raises is None:
            task(*args, **kwargs)
        else:
            self.assertRaises(raises, task, *args, **kwargs)

        return [
            call[0][0]  # log_message
            for call in self.mock_dump.call_args_list
        ]

    @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
    def test_times(self):
        start = datetime(2016, 7, 20, 10, 56, 19)
        end = start + timedelta(seconds=35.6)

        self.mock_datetime.utcnow.side_effect = [start, end]

        messages = self.get_log_messages()
        assert len(messages) == 1

        assert 'duration' in messages[0] and messages[0]['duration'] == 35.6
        assert 'started_at' in messages[0] and messages[0]['started_at'] == start.isoformat(' ')
        assert 'ended_at' in messages[0] and messages[0]['ended_at'] == end.isoformat(' ')

    @patch.object(timer, 'PAVER_TIMER_LOG', None)
    def test_no_logs(self):
        messages = self.get_log_messages()
        assert len(messages) == 0

    @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
    def test_arguments(self):
        messages = self.get_log_messages(args=(1, 'foo'), kwargs=dict(bar='baz'))
        assert len(messages) == 1

        assert 'args' in messages[0] and messages[0]['args'] == [repr(1), repr('foo')]
        assert 'kwargs' in messages[0] and messages[0]['kwargs'] == {'bar': repr('baz')}

    @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
    def test_task_name(self):
        messages = self.get_log_messages()
        assert len(messages) == 1

        assert 'task' in messages[0] and messages[0]['task'] == 'pavelib.paver_tests.test_timer.identity'

    @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
    def test_exceptions(self):
        @timer.timed
        def raises():
            """
            A task used for testing exception handling of the timed decorator.
            """
            raise Exception('The Message!')

        messages = self.get_log_messages(task=raises, raises=Exception)
        assert len(messages) == 1

        assert 'exception' in messages[0] and messages[0]['exception'] == 'Exception: The Message!'

    @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log')
    def test_date_formatting(self):
        start = datetime(2016, 7, 20, 10, 56, 19)
        end = start + timedelta(seconds=35.6)

        self.mock_datetime.utcnow.side_effect = [start, end]

        messages = self.get_log_messages()
        assert len(messages) == 1

        MOCK_OPEN.assert_called_once_with('/tmp/some-log-2016-07-20-10-56-19.log', 'a')

    @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
    def test_nested_tasks(self):

        @timer.timed
        def parent():
            """
            A timed task that calls another task
            """
            identity()

        parent_start = datetime(2016, 7, 20, 10, 56, 19)
        parent_end = parent_start + timedelta(seconds=60)
        child_start = parent_start + timedelta(seconds=10)
        child_end = parent_end - timedelta(seconds=10)

        self.mock_datetime.utcnow.side_effect = [parent_start, child_start, child_end, parent_end]

        messages = self.get_log_messages(task=parent)
        assert len(messages) == 2

        # Child messages first
        assert 'duration' in messages[0]
        assert 40 == messages[0]['duration']

        assert 'started_at' in messages[0]
        assert child_start.isoformat(' ') == messages[0]['started_at']

        assert 'ended_at' in messages[0]
        assert child_end.isoformat(' ') == messages[0]['ended_at']

        # Parent messages after
        assert 'duration' in messages[1]
        assert 60 == messages[1]['duration']

        assert 'started_at' in messages[1]
        assert parent_start.isoformat(' ') == messages[1]['started_at']

        assert 'ended_at' in messages[1]
        assert parent_end.isoformat(' ') == messages[1]['ended_at']
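These tests specify the contract of the `@timed` decorator: record start and end timestamps, a duration in seconds, the task's dotted name, repr'd args and kwargs, and any exception, then append a JSON record to `PAVER_TIMER_LOG` after `strftime` expansion of the path. A minimal sketch satisfying that contract, not the original implementation:

import functools
import json
from datetime import datetime

PAVER_TIMER_LOG = '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log'  # example value

def timed(func):
    """Append a JSON timing record for each call of ``func`` to the log file."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = datetime.utcnow()
        exception = None
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            exception = f"{type(exc).__name__}: {exc}"
            raise
        finally:
            end = datetime.utcnow()
            message = {
                'task': f"{func.__module__}.{func.__name__}",
                'args': [repr(arg) for arg in args],
                'kwargs': {key: repr(value) for key, value in kwargs.items()},
                'started_at': start.isoformat(' '),
                'ended_at': end.isoformat(' '),
                'duration': (end - start).total_seconds(),
            }
            if exception:
                message['exception'] = exception
            with open(start.strftime(PAVER_TIMER_LOG), 'a') as outfile:
                json.dump(message, outfile)
    return wrapper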
@@ -1,120 +0,0 @@
"""
Tests for paver xsslint quality tasks
"""
from unittest.mock import patch

import pytest
from paver.easy import call_task

import pavelib.quality

from .utils import PaverTestCase


class PaverXSSLintTest(PaverTestCase):
    """
    Test run_xsslint with a mocked environment in order to pass in opts
    """

    def setUp(self):
        super().setUp()
        self.reset_task_messages()

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint encounters an error parsing the xsslint output log
        """
        _mock_counts.return_value = {}
        with pytest.raises(SystemExit):
            call_task('pavelib.quality.run_xsslint')

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint finds violations, but a limit was not set
        """
        _mock_counts.return_value = {'total': 0}
        call_task('pavelib.quality.run_xsslint')

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint fails when thresholds option is poorly formatted
        """
        _mock_counts.return_value = {'total': 0}
        with pytest.raises(SystemExit):
            call_task('pavelib.quality.run_xsslint', options={"thresholds": "invalid"})

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint fails when thresholds option is poorly formatted
        """
        _mock_counts.return_value = {'total': 0}
        with pytest.raises(SystemExit):
            call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"invalid": 3}'})

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint finds more violations than are allowed
        """
        _mock_counts.return_value = {'total': 4}
        with pytest.raises(SystemExit):
            call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 3}'})

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint finds fewer violations than are allowed
        """
        _mock_counts.return_value = {'total': 4}
        # No System Exit is expected
        call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 5}'})

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint encounters an error parsing the xsslint output log for a
        given rule threshold that was set.
        """
        _mock_counts.return_value = {'total': 4}
        with pytest.raises(SystemExit):
            call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint finds more rule violations than are allowed
        """
        _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
        with pytest.raises(SystemExit):
            call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_xsslint_counts')
    def test_xsslint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
        """
        run_xsslint finds fewer rule violations than are allowed
        """
        _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
        # No System Exit is expected
        call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'})
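The `thresholds` option exercised above is a JSON object with at most a `total` key and a per-rule `rules` mapping; the Makefile now passes the same structure via `--thresholds=scripts/xsslint_thresholds.json`. A small illustration of the accepted shape (the numbers are examples only):

import json

# Same structure the Makefile now reads from scripts/xsslint_thresholds.json.
thresholds = json.loads('{"total": 100, "rules": {"javascript-escape": 0}}')
assert set(thresholds) <= {"total", "rules"}  # the only keys run_xsslint accepted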
@@ -1,602 +0,0 @@
|
||||
""" # lint-amnesty, pylint: disable=django-not-configured
|
||||
Check code quality using pycodestyle, pylint, and diff_quality.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime
|
||||
from xml.sax.saxutils import quoteattr
|
||||
|
||||
from paver.easy import BuildFailure, cmdopts, needs, sh, task
|
||||
|
||||
from .utils.envs import Env
|
||||
from .utils.timer import timed
|
||||
|
||||
ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts'
|
||||
JUNIT_XML_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
|
||||
<testsuite name="{name}" tests="1" errors="0" failures="{failure_count}" skip="0">
|
||||
<testcase classname="pavelib.quality" name="{name}" time="{seconds}">{failure_element}</testcase>
|
||||
</testsuite>
|
||||
"""
|
||||
JUNIT_XML_FAILURE_TEMPLATE = '<failure message={message}/>'
|
||||
START_TIME = datetime.utcnow()
|
||||
|
||||
|
||||
def write_junit_xml(name, message=None):
|
||||
"""
|
||||
Write a JUnit results XML file describing the outcome of a quality check.
|
||||
"""
|
||||
if message:
|
||||
failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message))
|
||||
else:
|
||||
failure_element = ''
|
||||
data = {
|
||||
'failure_count': 1 if message else 0,
|
||||
'failure_element': failure_element,
|
||||
'name': name,
|
||||
'seconds': (datetime.utcnow() - START_TIME).total_seconds(),
|
||||
}
|
||||
Env.QUALITY_DIR.makedirs_p()
|
||||
filename = Env.QUALITY_DIR / f'{name}.xml'
|
||||
with open(filename, 'w') as f:
|
||||
f.write(JUNIT_XML_TEMPLATE.format(**data))
|
||||
|
||||
|
||||
def fail_quality(name, message):
|
||||
"""
|
||||
Fail the specified quality check by generating the JUnit XML results file
|
||||
and raising a ``BuildFailure``.
|
||||
"""
|
||||
write_junit_xml(name, message)
|
||||
raise BuildFailure(message)
|
||||
|
||||
|
||||
def top_python_dirs(dirname):
|
||||
"""
|
||||
Find the directories to start from in order to find all the Python files in `dirname`.
|
||||
"""
|
||||
top_dirs = []
|
||||
|
||||
dir_init = os.path.join(dirname, "__init__.py")
|
||||
if os.path.exists(dir_init):
|
||||
top_dirs.append(dirname)
|
||||
|
||||
for directory in ['djangoapps', 'lib']:
|
||||
subdir = os.path.join(dirname, directory)
|
||||
subdir_init = os.path.join(subdir, "__init__.py")
|
||||
if os.path.exists(subdir) and not os.path.exists(subdir_init):
|
||||
dirs = os.listdir(subdir)
|
||||
top_dirs.extend(d for d in dirs if os.path.isdir(os.path.join(subdir, d)))
|
||||
|
||||
modules_to_remove = ['__pycache__']
|
||||
for module in modules_to_remove:
|
||||
if module in top_dirs:
|
||||
top_dirs.remove(module)
|
||||
|
||||
return top_dirs
|
||||
|
||||
|
||||
def _get_pep8_violations(clean=True):
|
||||
"""
|
||||
Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string)
|
||||
where violations_string is a string of all PEP 8 violations found, separated
|
||||
by new lines.
|
||||
"""
|
||||
report_dir = (Env.REPORT_DIR / 'pep8')
|
||||
if clean:
|
||||
report_dir.rmtree(ignore_errors=True)
|
||||
report_dir.makedirs_p()
|
||||
report = report_dir / 'pep8.report'
|
||||
|
||||
# Make sure the metrics subdirectory exists
|
||||
Env.METRICS_DIR.makedirs_p()
|
||||
|
||||
if not report.exists():
|
||||
sh(f'pycodestyle . | tee {report} -a')
|
||||
|
||||
violations_list = _pep8_violations(report)
|
||||
|
||||
return len(violations_list), violations_list
|
||||
|
||||
|
||||
def _pep8_violations(report_file):
|
||||
"""
|
||||
Returns the list of all PEP 8 violations in the given report_file.
|
||||
"""
|
||||
with open(report_file) as f:
|
||||
return f.readlines()
|
||||
|
||||
|
||||
@task
|
||||
@cmdopts([
|
||||
("system=", "s", "System to act on"),
|
||||
])
|
||||
@timed
|
||||
def run_pep8(options): # pylint: disable=unused-argument
|
||||
"""
|
||||
Run pycodestyle on system code.
|
||||
Fail the task if any violations are found.
|
||||
"""
|
||||
(count, violations_list) = _get_pep8_violations()
|
||||
violations_list = ''.join(violations_list)
|
||||
|
||||
# Print number of violations to log
|
||||
violations_count_str = f"Number of PEP 8 violations: {count}"
|
||||
print(violations_count_str)
|
||||
print(violations_list)
|
||||
|
||||
# Also write the number of violations to a file
|
||||
with open(Env.METRICS_DIR / "pep8", "w") as f:
|
||||
f.write(violations_count_str + '\n\n')
|
||||
f.write(violations_list)
|
||||
|
||||
# Fail if any violations are found
|
||||
if count:
|
||||
failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str
|
||||
failure_string += f"\n\nViolations:\n{violations_list}"
|
||||
fail_quality('pep8', failure_string)
|
||||
else:
|
||||
write_junit_xml('pep8')
|
||||
|
||||
|
||||
@task
|
||||
@needs(
|
||||
'pavelib.prereqs.install_node_prereqs',
|
||||
'pavelib.utils.test.utils.ensure_clean_package_lock',
|
||||
)
|
||||
@cmdopts([
|
||||
("limit=", "l", "limit for number of acceptable violations"),
|
||||
])
|
||||
@timed
|
||||
def run_eslint(options):
|
||||
"""
|
||||
Runs eslint on static asset directories.
|
||||
If limit option is passed, fails build if more violations than the limit are found.
|
||||
"""
|
||||
|
||||
eslint_report_dir = (Env.REPORT_DIR / "eslint")
|
||||
eslint_report = eslint_report_dir / "eslint.report"
|
||||
_prepare_report_dir(eslint_report_dir)
|
||||
violations_limit = int(getattr(options, 'limit', -1))
|
||||
|
||||
sh(
|
||||
"node --max_old_space_size=4096 node_modules/.bin/eslint "
|
||||
"--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format(
|
||||
eslint_report=eslint_report
|
||||
),
|
||||
ignore_error=True
|
||||
)
|
||||
|
||||
try:
|
||||
num_violations = int(_get_count_from_last_line(eslint_report, "eslint"))
|
||||
except TypeError:
|
||||
fail_quality(
|
||||
'eslint',
|
||||
"FAILURE: Number of eslint violations could not be found in {eslint_report}".format(
|
||||
eslint_report=eslint_report
|
||||
)
|
||||
)
|
||||
|
||||
# Record the metric
|
||||
_write_metric(num_violations, (Env.METRICS_DIR / "eslint"))
|
||||
|
||||
# Fail if number of violations is greater than the limit
|
||||
if num_violations > violations_limit > -1:
|
||||
fail_quality(
|
||||
'eslint',
|
||||
"FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format(
|
||||
count=num_violations, violations_limit=violations_limit
|
||||
)
|
||||
)
|
||||
else:
|
||||
write_junit_xml('eslint')
|
||||
|
||||
|
||||
def _get_stylelint_violations():
|
||||
"""
|
||||
Returns the number of Stylelint violations.
|
||||
"""
|
||||
stylelint_report_dir = (Env.REPORT_DIR / "stylelint")
|
||||
stylelint_report = stylelint_report_dir / "stylelint.report"
|
||||
_prepare_report_dir(stylelint_report_dir)
|
||||
formatter = 'node_modules/stylelint-formatter-pretty'
|
||||
|
||||
sh(
|
||||
"stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format(
|
||||
formatter=formatter,
|
||||
stylelint_report=stylelint_report,
|
||||
),
|
||||
ignore_error=True
|
||||
)
|
||||
|
||||
try:
|
||||
return int(_get_count_from_last_line(stylelint_report, "stylelint"))
|
||||
except TypeError:
|
||||
fail_quality(
|
||||
'stylelint',
|
||||
"FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format(
|
||||
stylelint_report=stylelint_report
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@task
|
||||
@needs('pavelib.prereqs.install_node_prereqs')
|
||||
@cmdopts([
|
||||
("limit=", "l", "limit for number of acceptable violations"),
|
||||
])
|
||||
@timed
|
||||
def run_stylelint(options):
|
||||
"""
|
||||
Runs stylelint on Sass files.
|
||||
If limit option is passed, fails build if more violations than the limit are found.
|
||||
"""
|
||||
violations_limit = 0
|
||||
num_violations = _get_stylelint_violations()
|
||||
|
||||
# Record the metric
|
||||
_write_metric(num_violations, (Env.METRICS_DIR / "stylelint"))
|
||||
|
||||
# Fail if number of violations is greater than the limit
|
||||
if num_violations > violations_limit:
|
||||
fail_quality(
|
||||
'stylelint',
|
||||
"FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format(
|
||||
count=num_violations,
|
||||
violations_limit=violations_limit,
|
||||
)
|
||||
)
|
||||
else:
|
||||
write_junit_xml('stylelint')
|
||||
|
||||
|
||||
@task
|
||||
@needs('pavelib.prereqs.install_python_prereqs')
|
||||
@cmdopts([
|
||||
("thresholds=", "t", "json containing limit for number of acceptable violations per rule"),
|
||||
])
|
||||
@timed
|
||||
def run_xsslint(options):
|
||||
"""
|
||||
Runs xsslint/xss_linter.py on the codebase
|
||||
"""
|
||||
|
||||
thresholds_option = getattr(options, 'thresholds', '{}')
|
||||
try:
|
||||
violation_thresholds = json.loads(thresholds_option)
|
||||
except ValueError:
|
||||
violation_thresholds = None
|
||||
if isinstance(violation_thresholds, dict) is False or \
|
||||
any(key not in ("total", "rules") for key in violation_thresholds.keys()):
|
||||
|
||||
fail_quality(
|
||||
'xsslint',
|
||||
"""FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
|
||||
"""Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
|
||||
"""with property names in double-quotes.""".format(
|
||||
thresholds_option=thresholds_option
|
||||
)
|
||||
)
|
||||
|
||||
xsslint_script = "xss_linter.py"
|
||||
xsslint_report_dir = (Env.REPORT_DIR / "xsslint")
|
||||
xsslint_report = xsslint_report_dir / "xsslint.report"
|
||||
_prepare_report_dir(xsslint_report_dir)
|
||||
|
||||
sh(
|
||||
"{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format(
|
||||
repo_root=Env.REPO_ROOT,
|
||||
xsslint_script=xsslint_script,
|
||||
xsslint_report=xsslint_report,
|
||||
cfg_module='scripts.xsslint_config'
|
||||
),
|
||||
ignore_error=True
|
||||
)
|
||||
|
||||
xsslint_counts = _get_xsslint_counts(xsslint_report)
|
||||
|
||||
try:
|
||||
metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format(
|
||||
xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total'])
|
||||
)
|
||||
if 'rules' in xsslint_counts and any(xsslint_counts['rules']):
|
||||
metrics_str += "\n"
|
||||
rule_keys = sorted(xsslint_counts['rules'].keys())
|
||||
for rule in rule_keys:
|
||||
metrics_str += "{rule} violations: {count}\n".format(
|
||||
rule=rule,
|
||||
count=int(xsslint_counts['rules'][rule])
|
||||
)
|
||||
except TypeError:
|
||||
fail_quality(
|
||||
'xsslint',
|
||||
"FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format(
|
||||
xsslint_script=xsslint_script, xsslint_report=xsslint_report
|
||||
)
|
||||
)
|
||||
|
||||
metrics_report = (Env.METRICS_DIR / "xsslint")
|
||||
# Record the metric
|
||||
_write_metric(metrics_str, metrics_report)
|
||||
# Print number of violations to log.
|
||||
sh(f"cat {metrics_report}", ignore_error=True)
|
||||
|
||||
error_message = ""
|
||||
|
||||
# Test total violations against threshold.
|
||||
if 'total' in list(violation_thresholds.keys()):
|
||||
if violation_thresholds['total'] < xsslint_counts['total']:
|
||||
error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
|
||||
count=xsslint_counts['total'], violations_limit=violation_thresholds['total']
|
||||
)
|
||||
|
||||
# Test rule violations against thresholds.
|
||||
if 'rules' in violation_thresholds:
|
||||
threshold_keys = sorted(violation_thresholds['rules'].keys())
|
||||
for threshold_key in threshold_keys:
|
||||
if threshold_key not in xsslint_counts['rules']:
|
||||
error_message += (
|
||||
"\nNumber of {xsslint_script} violations for {rule} could not be found in "
|
||||
"{xsslint_report}."
|
||||
).format(
|
||||
xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report
|
||||
)
|
||||
elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]:
|
||||
error_message += \
|
||||
"\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
|
||||
rule=threshold_key, count=xsslint_counts['rules'][threshold_key],
|
||||
violations_limit=violation_thresholds['rules'][threshold_key],
|
||||
)
|
||||
|
||||
if error_message:
|
||||
fail_quality(
|
||||
'xsslint',
|
||||
"FAILURE: XSSLinter Failed.\n{error_message}\n"
|
||||
"See {xsslint_report} or run the following command to hone in on the problem:\n"
|
||||
" ./scripts/xss-commit-linter.sh -h".format(
|
||||
error_message=error_message, xsslint_report=xsslint_report
|
||||
)
|
||||
)
|
||||
else:
|
||||
write_junit_xml('xsslint')
|
||||
|
||||
|
||||
def _write_metric(metric, filename):
|
||||
"""
|
||||
Write a given metric to a given file
|
||||
Used for things like reports/metrics/eslint, which will simply tell you the number of
|
||||
eslint violations found
|
||||
"""
|
||||
Env.METRICS_DIR.makedirs_p()
|
||||
|
||||
with open(filename, "w") as metric_file:
|
||||
metric_file.write(str(metric))
|
||||
|
||||
|
||||
def _prepare_report_dir(dir_name):
|
||||
"""
|
||||
Sets a given directory to a created, but empty state
|
||||
"""
|
||||
dir_name.rmtree_p()
|
||||
dir_name.mkdir_p()
|
||||
|
||||
|
||||
def _get_report_contents(filename, report_name, last_line_only=False):
|
||||
"""
|
||||
Returns the contents of the given file. Use last_line_only to only return
|
||||
the last line, which can be used for getting output from quality output
|
||||
files.
|
||||
|
||||
Arguments:
|
||||
last_line_only: True to return the last line only, False to return a
|
||||
string with full contents.
|
||||
|
||||
Returns:
|
||||
String containing full contents of the report, or the last line.
|
||||
|
||||
"""
|
||||
if os.path.isfile(filename):
|
||||
with open(filename) as report_file:
|
||||
if last_line_only:
|
||||
lines = report_file.readlines()
|
||||
for line in reversed(lines):
|
||||
if line != '\n':
|
||||
return line
|
||||
return None
|
||||
else:
|
||||
return report_file.read()
|
||||
else:
|
||||
file_not_found_message = f"FAILURE: The following log file could not be found: {filename}"
|
||||
fail_quality(report_name, file_not_found_message)
|
||||
|
||||
|
||||
def _get_count_from_last_line(filename, file_type):
|
||||
"""
|
||||
This will return the number in the last line of a file.
|
||||
It is returning only the value (as a floating number).
|
||||
"""
|
||||
report_contents = _get_report_contents(filename, file_type, last_line_only=True)
|
||||
|
||||
if report_contents is None:
|
||||
return 0
|
||||
|
||||
last_line = report_contents.strip()
|
||||
# Example of the last line of a compact-formatted eslint report (for example): "62829 problems"
|
||||
regex = r'^\d+'
|
||||
|
||||
try:
|
||||
return float(re.search(regex, last_line).group(0))
|
||||
# An AttributeError will occur if the regex finds no matches.
|
||||
# A ValueError will occur if the returned regex cannot be cast as a float.
|
||||
except (AttributeError, ValueError):
|
||||
return None


def _get_xsslint_counts(filename):
    """
    Returns a dict of violations parsed from the xsslint report.

    Arguments:
        filename: The name of the xsslint report.

    Returns:
        A dict containing the following:
            rules: A dict containing the count for each rule as follows:
                violation-rule-id: N, where N is the number of violations
            total: M, where M is the number of total violations

    """
    report_contents = _get_report_contents(filename, 'xsslint')
    rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
    total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
    violations = {'rules': {}}
    for violation_match in rule_count_regex.finditer(report_contents):
        try:
            violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
        except ValueError:
            violations['rules'][violation_match.group('rule_id')] = None
    try:
        violations['total'] = int(total_count_regex.search(report_contents).group('count'))
    # An AttributeError will occur if the regex finds no matches.
    # A ValueError will occur if the matched text cannot be cast as an int.
    except (AttributeError, ValueError):
        violations['total'] = None
    return violations
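# Illustration only: given hypothetical xsslint output such as
#
#     javascript-escape:  3 violations
#     3 violations total
#
# the parsing above yields {'rules': {'javascript-escape': 3}, 'total': 3}.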


def _extract_missing_pii_annotations(filename):
    """
    Returns the number of uncovered models from the stdout report of django_find_annotations.

    Arguments:
        filename: Filename where stdout of django_find_annotations was captured.

    Returns:
        three-tuple containing:
            1. The number of uncovered models,
            2. A bool indicating whether the coverage is still below the threshold, and
            3. The full report as a string.
    """
    uncovered_models = 0
    pii_check_passed = True
    if os.path.isfile(filename):
        with open(filename) as report_file:
            lines = report_file.readlines()

            # Find the count of uncovered models.
            uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered')
            for line in lines:
                uncovered_match = uncovered_regex.match(line)
                if uncovered_match:
                    uncovered_models = int(uncovered_match.groups()[0])
                    break

            # Find a message which suggests the check failed.
            failure_regex = re.compile(r'^Coverage threshold not met!')
            for line in lines:
                failure_match = failure_regex.match(line)
                if failure_match:
                    pii_check_passed = False
                    break

            # Each line in lines already contains a newline.
            full_log = ''.join(lines)
    else:
        fail_quality('pii', f'FAILURE: Log file could not be found: {filename}')

    return (uncovered_models, pii_check_passed, full_log)
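# Illustration only: the regexes above look for report lines of this shape
# (the count here is hypothetical):
#
#     Coverage found 7 uncovered models:
#     Coverage threshold not met!
#
# The first line sets uncovered_models; the second flips pii_check_passed.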


@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
    ("report-dir=", "r", "Directory in which to put PII reports"),
])
@timed
def run_pii_check(options):
    """
    Guarantee that all Django models are PII-annotated.
    """
    pii_report_name = 'pii'
    default_report_dir = (Env.REPORT_DIR / pii_report_name)
    report_dir = getattr(options, 'report_dir', default_report_dir)
    output_file = os.path.join(report_dir, 'pii_check_{}.report')
    env_report = []
    pii_check_passed = True
    for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")):
        try:
            print()
            print(f"Running {env_name} PII Annotation check and report")
            print("-" * 45)
            run_output_file = str(output_file).format(env_name.lower())
            sh(
                "mkdir -p {} && "  # lint-amnesty, pylint: disable=duplicate-string-formatting-argument
                "export DJANGO_SETTINGS_MODULE={}; "
                "code_annotations django_find_annotations "
                "--config_file .pii_annotations.yml --report_path {} --app_name {} "
                "--lint --report --coverage | tee {}".format(
                    report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file
                )
            )
            uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file)
            env_report.append((
                uncovered_model_count,
                full_log,
            ))

        except BuildFailure as error_message:
            fail_quality(pii_report_name, f'FAILURE: {error_message}')

        if not pii_check_passed_env:
            pii_check_passed = False

    # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count.
    uncovered_count, full_log = max(env_report, key=lambda r: r[0])

    # Write the metric file.
    if uncovered_count is None:
        uncovered_count = 0
    metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n"
    _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name))

    # Finally, fail the paver task if code_annotations suggests that the check failed.
    if not pii_check_passed:
        fail_quality('pii', full_log)


@task
@needs('pavelib.prereqs.install_python_prereqs')
@timed
def check_keywords():
    """
    Check Django model fields for names that conflict with a list of reserved keywords.
    """
    report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords')
    sh(f"mkdir -p {report_path}")

    overall_status = True
    for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]:
        report_file = f"{env}_reserved_keyword_report.csv"
        override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml")
        try:
            sh(
                "export DJANGO_SETTINGS_MODULE={settings_file}; "
                "python manage.py {app} check_reserved_keywords "
                "--override_file {override_file} "
                "--report_path {report_path} "
                "--report_file {report_file}".format(
                    settings_file=env_settings_file, app=env, override_file=override_file,
                    report_path=report_path, report_file=report_file
                )
            )
        except BuildFailure:
            overall_status = False

    if not overall_status:
        fail_quality(
            'keywords',
            'Failure: reserved keyword checker failed. Reports can be found here: {}'.format(
                report_path
            )
        )

@@ -1,5 +0,0 @@
"""
TestSuite class and subclasses
"""
from .js_suite import JestSnapshotTestSuite, JsTestSuite
from .suite import TestSuite

@@ -1,109 +0,0 @@
"""
Javascript test tasks
"""


from paver import tasks

from pavelib.utils.envs import Env
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites.suite import TestSuite

__test__ = False  # do not collect


class JsTestSuite(TestSuite):
    """
    A class for running JavaScript tests.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.run_under_coverage = kwargs.get('with_coverage', True)
        self.mode = kwargs.get('mode', 'run')
        self.report_dir = Env.JS_REPORT_DIR
        self.opts = kwargs

        suite = args[0]
        self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)]

    def __enter__(self):
        super().__enter__()
        if tasks.environment.dry_run:
            tasks.environment.info("make report_dir")
        else:
            self.report_dir.makedirs_p()
        if not self.skip_clean:
            test_utils.clean_test_files()

        if self.mode == 'run' and not self.run_under_coverage:
            test_utils.clean_dir(self.report_dir)

    @property
    def _default_subsuites(self):
        """
        Returns all JS test suites except jest-snapshot.
        """
        return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot']


class JsTestSubSuite(TestSuite):
    """
    Class for JS suites like cms, cms-squire, lms, common,
    common-requirejs and xmodule.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_id = args[0]
        self.run_under_coverage = kwargs.get('with_coverage', True)
        self.mode = kwargs.get('mode', 'run')
        self.port = kwargs.get('port')
        self.root = self.root + ' javascript'
        self.report_dir = Env.JS_REPORT_DIR

        try:
            self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)]
        except ValueError:
            self.test_conf_file = Env.KARMA_CONFIG_FILES[0]

        self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml'
        self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml'

    @property
    def cmd(self):
        """
        Run the tests using the karma runner.
        """
        cmd = [
            "node",
            "--max_old_space_size=4096",
            "node_modules/.bin/karma",
            "start",
            self.test_conf_file,
            "--single-run={}".format('false' if self.mode == 'dev' else 'true'),
            "--capture-timeout=60000",
            f"--junitreportpath={self.xunit_report}",
            f"--browsers={Env.KARMA_BROWSER}",
        ]

        if self.port:
            cmd.append(f"--port={self.port}")

        if self.run_under_coverage:
            cmd.extend([
                "--coverage",
                f"--coveragereportpath={self.coverage_report}",
            ])

        return cmd


class JestSnapshotTestSuite(TestSuite):
    """
    A class for running Jest Snapshot tests.
    """
    @property
    def cmd(self):
        """
        Run the tests using Jest.
        """
        return ["jest"]

@@ -1,149 +0,0 @@
"""
A class used for defining and running test suites
"""


import os
import subprocess
import sys

from paver import tasks

from pavelib.utils.process import kill_process

try:
    from pygments.console import colorize
except ImportError:
    colorize = lambda color, text: text

__test__ = False  # do not collect


class TestSuite:
    """
    TestSuite is a class that defines how groups of tests run.
    """
    def __init__(self, *args, **kwargs):
        self.root = args[0]
        self.subsuites = kwargs.get('subsuites', [])
        self.failed_suites = []
        self.verbosity = int(kwargs.get('verbosity', 1))
        self.skip_clean = kwargs.get('skip_clean', False)
        self.passthrough_options = kwargs.get('passthrough_options', [])

    def __enter__(self):
        """
        This will run before the test suite is run with the run_suite_tests method.
        If self.run_test is called directly, it should be run in a 'with' block to
        ensure that the proper context is created.

        Specific setup tasks should be defined in each subsuite.

        i.e. Checking for and defining required directories.
        """
        print(f"\nSetting up for {self.root}")
        self.failed_suites = []

    def __exit__(self, exc_type, exc_value, traceback):
        """
        This is run after the tests run with the run_suite_tests method finish.
        Specific clean up tasks should be defined in each subsuite.

        If self.run_test is called directly, it should be run in a 'with' block
        to ensure that clean up happens properly.

        i.e. Cleaning mongo after the lms tests run.
        """
        print(f"\nCleaning up after {self.root}")

    @property
    def cmd(self):
        """
        The command to run tests (as a string). For this base class there is none.
        """
        return None

    @staticmethod
    def is_success(exit_code):
        """
        Determine if the given exit code represents a success of the test
        suite. By default, only a zero counts as a success.
        """
        return exit_code == 0

    def run_test(self):
        """
        Runs self.cmd in a subprocess and waits for it to finish.
        It returns False if errors or failures occur. Otherwise, it
        returns True.
        """
        cmd = " ".join(self.cmd)

        if tasks.environment.dry_run:
            tasks.environment.info(cmd)
            return

        sys.stdout.write(cmd)

        msg = colorize(
            'green',
            '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40),
        )

        sys.stdout.write(msg)
        sys.stdout.flush()

        if 'TEST_SUITE' not in os.environ:
            os.environ['TEST_SUITE'] = self.root.replace("/", "_")
        kwargs = {'shell': True, 'cwd': None}
        process = None

        try:
            process = subprocess.Popen(cmd, **kwargs)  # lint-amnesty, pylint: disable=consider-using-with
            return self.is_success(process.wait())
        except KeyboardInterrupt:
            kill_process(process)
            sys.exit(1)

    def run_suite_tests(self):
        """
        Runs each of the suites in self.subsuites while tracking failures
        """
        # Uses __enter__ and __exit__ for context
        with self:
            # run the tests for this class, and for all subsuites
            if self.cmd:
                passed = self.run_test()
                if not passed:
                    self.failed_suites.append(self)

            for suite in self.subsuites:
                suite.run_suite_tests()
                if suite.failed_suites:
                    self.failed_suites.extend(suite.failed_suites)

    def report_test_results(self):
        """
        Writes a list of failed_suites to sys.stderr
        """
        if self.failed_suites:
            msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48))
            msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n')
        else:
            msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48))

        print(msg)

    def run(self):
        """
        Runs the tests in the suite while tracking and reporting failures.
        """
        self.run_suite_tests()

        if tasks.environment.dry_run:
            return

        self.report_test_results()

        if self.failed_suites:
            sys.exit(1)
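A minimal sketch (not from the repository) of how a concrete suite plugs into
this base class: a subclass only needs to supply cmd, and run() drives setup,
execution, and reporting:

    # Hypothetical subclass for illustration only.
    class EchoSuite(TestSuite):
        @property
        def cmd(self):
            return ["echo", "running", self.root]

    EchoSuite("demo").run()  # prints the banner, shells out via run_test(), reports results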

@@ -1,91 +0,0 @@
"""
Helper functions for test tasks
"""


import os

from paver.easy import cmdopts, sh, task

from pavelib.utils.envs import Env
from pavelib.utils.timer import timed

MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))

COVERAGE_CACHE_BUCKET = "edx-tools-coverage-caches"
COVERAGE_CACHE_BASEPATH = "test_root/who_tests_what"
COVERAGE_CACHE_BASELINE = "who_tests_what.{}.baseline".format(os.environ.get('WTW_CONTEXT', 'all'))
WHO_TESTS_WHAT_DIFF = "who_tests_what.diff"


__test__ = False  # do not collect


@task
@timed
def clean_test_files():
    """
    Clean fixture files used by tests and .pyc files
    """
    sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads")
    # This find command removes all the *.pyc files that aren't in the .git
    # directory. See this blog post for more details:
    # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html
    sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;")
    sh("rm -rf test_root/log/auto_screenshots/*")
    sh("rm -rf /tmp/mako_[cl]ms")


@task
@timed
def ensure_clean_package_lock():
    """
    Ensure no untracked changes have been made in the current git context.
    """
    sh("""
      git diff --name-only --exit-code package-lock.json ||
      (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1)
    """)


def clean_dir(directory):
    """
    Delete all the files from the specified directory.
    """
    # We delete the files but preserve the directory structure
    # so that coverage.py has a place to put the reports.
    sh(f'find {directory} -type f -delete')


@task
@cmdopts([
    ('skip-clean', 'C', 'skip cleaning repository before running tests'),
    ('skip_clean', None, 'deprecated in favor of skip-clean'),
])
@timed
def clean_reports_dir(options):
    """
    Clean coverage files, to ensure that we don't use stale data to generate reports.
    """
    if getattr(options, 'skip_clean', False):
        print('--skip-clean is set, skipping...')
        return

    # We delete the files but preserve the directory structure
    # so that coverage.py has a place to put the reports.
    reports_dir = Env.REPORT_DIR.makedirs_p()
    clean_dir(reports_dir)


@task
@timed
def clean_mongo():
    """
    Clean mongo test databases
    """
    sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format(
        host=Env.MONGO_HOST,
        port=MONGO_PORT_NUM,
        repo_root=Env.REPO_ROOT,
    ))

@@ -504,7 +504,7 @@ edx-opaque-keys[django]==2.11.0
    # ora2
edx-organizations==6.13.0
    # via -r requirements/edx/kernel.in
edx-proctoring==5.0.1
edx-proctoring==4.18.4
    # via
    #   -r requirements/edx/kernel.in
    #   edx-proctoring-proctortrack

@@ -795,7 +795,7 @@ edx-organizations==6.13.0
    # via
    #   -r requirements/edx/doc.txt
    #   -r requirements/edx/testing.txt
edx-proctoring==5.0.1
edx-proctoring==4.18.4
    # via
    #   -r requirements/edx/doc.txt
    #   -r requirements/edx/testing.txt

@@ -587,7 +587,7 @@ edx-opaque-keys[django]==2.11.0
    # ora2
edx-organizations==6.13.0
    # via -r requirements/edx/base.txt
edx-proctoring==5.0.1
edx-proctoring==4.18.4
    # via
    #   -r requirements/edx/base.txt
    #   edx-proctoring-proctortrack

@@ -610,7 +610,7 @@ edx-opaque-keys[django]==2.11.0
    # ora2
edx-organizations==6.13.0
    # via -r requirements/edx/base.txt
edx-proctoring==5.0.1
edx-proctoring==4.18.4
    # via
    #   -r requirements/edx/base.txt
    #   edx-proctoring-proctortrack
73 scripts/eslint.py Normal file
@@ -0,0 +1,73 @@
""" # pylint: disable=django-not-configured
Check code quality using eslint.
"""

import re
import subprocess
import shlex
import sys


class BuildFailure(Exception):
    pass


def fail_quality(message):
    """
    Fail the specified quality check.
    """

    raise BuildFailure(message)


def run_eslint():
    """
    Runs eslint on the static asset directories and fails the build if more
    violations than the limit are found.
    """
    violations_limit = 1285

    command = [
        "node",
        "--max_old_space_size=4096",
        "node_modules/.bin/eslint",
        "--ext", ".js",
        "--ext", ".jsx",
        "--format=compact",
        "lms",
        "cms",
        "common",
        "openedx",
        "xmodule",
    ]
    print("Running command:", shlex.join(command))
    result = subprocess.run(
        command,
        text=True,
        check=False,
        capture_output=True
    )

    print(result.stdout)
    last_line = result.stdout.strip().splitlines()[-1] if result.stdout.strip().splitlines() else ""
    regex = r'^\d+'
    try:
        num_violations = int(re.search(regex, last_line).group(0)) if last_line else 0
        # Fail if the number of violations is greater than the limit.
        if num_violations > violations_limit:
            fail_quality(
                "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format(
                    count=num_violations, violations_limit=violations_limit
                )
            )
        else:
            print(f"Successfully ran eslint with {num_violations} violations")

    # An AttributeError will occur if the regex finds no matches.
    except (AttributeError, ValueError):
        fail_quality(f"FAILURE: Number of eslint violations could not be found in '{last_line}'")


if __name__ == "__main__":
    try:
        run_eslint()
    except BuildFailure as e:
        print(e)
        sys.exit(1)
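A minimal sketch of how a CI step can consume the new script: run_eslint()
raises BuildFailure when the violation count exceeds the limit, and the
__main__ block turns that into a non-zero exit code (the wrapper below is
illustrative, not part of this diff):

    # Hypothetical wrapper; any non-zero return code fails the job.
    import subprocess
    import sys

    proc = subprocess.run([sys.executable, "scripts/eslint.py"], check=False)
    sys.exit(proc.returncode)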

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
set -e

###############################################################################
#
#   generic-ci-tests.sh
#
#   Execute some tests for edx-platform.
#   (Most other tests are run by invoking `pytest`, `pylint`, etc. directly)
#
#   This script can be called from CI jobs that define
#   these environment variables:
#
#   `TEST_SUITE` defines which kind of test to run.
#   Possible values are:
#
#       - "quality": Run the quality (pycodestyle/pylint) checks
#       - "js-unit": Run the JavaScript tests
#       - "pavelib-js-unit": Run the JavaScript tests and the Python unit
#         tests from the pavelib/lib directory
#
###############################################################################

# Clean up previous builds
git clean -qxfd

function emptyxunit {

    cat > "reports/$1.xml" <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="$1" tests="1" errors="0" failures="0" skip="0">
<testcase classname="pavelib.quality" name="$1" time="0.604"></testcase>
</testsuite>
END

}

# if specified tox environment is supported, prepend paver commands
# with tox env invocation
if [ -z ${TOX_ENV+x} ] || [[ ${TOX_ENV} == 'null' ]]; then
    echo "TOX_ENV: ${TOX_ENV}"
    TOX=""
elif tox -l |grep -q "${TOX_ENV}"; then
    if [[ "${TOX_ENV}" == 'quality' ]]; then
        TOX=""
    else
        TOX="tox -r -e ${TOX_ENV} --"
    fi
else
    echo "${TOX_ENV} is not currently supported. Please review the"
    echo "tox.ini file to see which environments are supported"
    exit 1
fi

PAVER_ARGS="-v"
export SUBSET_JOB=$JOB_NAME

function run_paver_quality {
    QUALITY_TASK=$1
    shift
    mkdir -p test_root/log/
    LOG_PREFIX="test_root/log/$QUALITY_TASK"
    $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || {
        echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):";
        tail -n 100 "$LOG_PREFIX.out.log"
        echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):";
        tail -n 100 "$LOG_PREFIX.err.log"
        return 1;
    }
    return 0;
}

case "$TEST_SUITE" in

    "quality")
        EXIT=0

        mkdir -p reports

        echo "Finding pycodestyle violations and storing report..."
        run_paver_quality run_pep8 || { EXIT=1; }
        echo "Finding ESLint violations and storing report..."
        run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; }
        echo "Finding Stylelint violations and storing report..."
        run_paver_quality run_stylelint || { EXIT=1; }
        echo "Running xss linter report."
        run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; }
        echo "Running PII checker on all Django models..."
        run_paver_quality run_pii_check || { EXIT=1; }
        echo "Running reserved keyword checker on all Django models..."
        run_paver_quality check_keywords || { EXIT=1; }

        # Need to create an empty test result so the post-build
        # action doesn't fail the build.
        emptyxunit "stub"
        exit "$EXIT"
        ;;

    "js-unit")
        $TOX paver test_js --coverage
        $TOX paver diff_coverage
        ;;

    "pavelib-js-unit")
        EXIT=0
        $TOX paver test_js --coverage --skip-clean || { EXIT=1; }
        paver test_lib --skip-clean $PAVER_ARGS || { EXIT=1; }

        # This is to ensure that the build status of the shard is properly set.
        # Because we are running two paver commands in a row, we need to capture
        # their return codes in order to exit with a non-zero code if either of
        # them fail. We put the || clause there because otherwise, when a paver
        # command fails, this entire script will exit, and not run the second
        # paver command in this case statement. So instead of exiting, the value
        # of a variable named EXIT will be set to 1 if either of the paver
        # commands fail. We then use this variable's value as our exit code.
        # Note that by default the value of this variable EXIT is not set, so if
        # neither command fails then the exit command resolves to simply exit
        # which is considered successful.
        exit "$EXIT"
        ;;
esac

@@ -4,6 +4,316 @@ A linting tool to check for xss vulnerabilities.
"""


import argparse
import importlib
import json
import os
import re
import sys

from functools import reduce
from io import StringIO
from xsslint.reporting import SummaryResults
from xsslint.rules import RuleSet
from xsslint.utils import is_skip_dir


class BuildFailure(Exception):
    pass


def fail_quality(message):
    """
    Fail the specified quality check.
    """

    raise BuildFailure(message)


def _load_config_module(module_path):
    cwd = os.getcwd()
    if cwd not in sys.path:
        # Enable config module to be imported relative to wherever the script was run from.
        sys.path.append(cwd)
    return importlib.import_module(module_path)


def _build_ruleset(template_linters):
    """
    Combines the RuleSets from the provided template_linters into a single, aggregate RuleSet.

    Arguments:
        template_linters: A list of linting objects.

    Returns:
        The combined RuleSet.
    """
    return reduce(
        lambda combined, current: combined + current.ruleset,
        template_linters,
        RuleSet()
    )


def _process_file(full_path, template_linters, options, summary_results, out):
    """
    For each linter, lints the provided file. This means finding and printing
    violations.

    Arguments:
        full_path: The full path of the file to lint.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """
    num_violations = 0
    directory = os.path.dirname(full_path)
    file_name = os.path.basename(full_path)
    try:
        for template_linter in template_linters:
            results = template_linter.process_file(directory, file_name)
            results.print_results(options, summary_results, out)
    except BaseException as e:
        raise Exception(f"Failed to process path: {full_path}") from e


def _process_os_dir(directory, files, template_linters, options, summary_results, out):
    """
    Calls out to lint each file in the passed list of files.

    Arguments:
        directory: Directory being linted.
        files: All files in the directory to be linted.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """
    for current_file in sorted(files, key=lambda s: s.lower()):
        full_path = os.path.join(directory, current_file)
        _process_file(full_path, template_linters, options, summary_results, out)


def _process_os_dirs(starting_dir, template_linters, options, summary_results, out):
    """
    For each linter, lints all the directories in the starting directory.

    Arguments:
        starting_dir: The initial directory to begin the walk.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """
    skip_dirs = options.get('skip_dirs', ())
    for root, dirs, files in os.walk(starting_dir):
        if is_skip_dir(skip_dirs, root):
            del dirs
            continue
        dirs.sort(key=lambda s: s.lower())
        _process_os_dir(root, files, template_linters, options, summary_results, out)


def _get_xsslint_counts(result_contents):
    """
    Returns a dict of violations parsed from the xsslint output.

    Arguments:
        result_contents: The contents of the xsslint output.

    Returns:
        A dict containing the following:
            rules: A dict containing the count for each rule as follows:
                violation-rule-id: N, where N is the number of violations
            total: M, where M is the number of total violations

    """

    rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
    total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
    violations = {'rules': {}}
    for violation_match in rule_count_regex.finditer(result_contents):
        try:
            violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
        except ValueError:
            violations['rules'][violation_match.group('rule_id')] = None
    try:
        violations['total'] = int(total_count_regex.search(result_contents).group('count'))
    # An AttributeError will occur if the regex finds no matches.
    # A ValueError will occur if the matched text cannot be cast as an int.
    except (AttributeError, ValueError):
        violations['total'] = None
    return violations


def _check_violations(options, results):
    xsslint_script = "xss_linter.py"
    try:
        thresholds_option = options['thresholds']
        # Read the JSON file
        with open(thresholds_option, 'r') as file:
            violation_thresholds = json.load(file)

    except ValueError:
        violation_thresholds = None
    if isinstance(violation_thresholds, dict) is False or \
            any(key not in ("total", "rules") for key in violation_thresholds.keys()):
        print('xsslint')
        fail_quality("""FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
                     """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
                     """with property names in double-quotes.""".format(thresholds_option=thresholds_option))

    try:
        metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format(
            xsslint_script=xsslint_script, num_violations=int(results['total'])
        )
        if 'rules' in results and any(results['rules']):
            metrics_str += "\n"
            rule_keys = sorted(results['rules'].keys())
            for rule in rule_keys:
                metrics_str += "{rule} violations: {count}\n".format(
                    rule=rule,
                    count=int(results['rules'][rule])
                )
    except TypeError:
        print('xsslint')
        fail_quality("FAILURE: Number of {xsslint_script} violations could not be found".format(
            xsslint_script=xsslint_script
        ))

    error_message = ""
    # Test total violations against threshold.
    if 'total' in list(violation_thresholds.keys()):
        if violation_thresholds['total'] < results['total']:
            error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
                count=results['total'], violations_limit=violation_thresholds['total']
            )

    # Test rule violations against thresholds.
    if 'rules' in violation_thresholds:
        threshold_keys = sorted(violation_thresholds['rules'].keys())
        for threshold_key in threshold_keys:
            if threshold_key not in results['rules']:
                error_message += (
                    "\nNumber of {xsslint_script} violations for {rule} could not be found"
                ).format(
                    xsslint_script=xsslint_script, rule=threshold_key
                )
            elif violation_thresholds['rules'][threshold_key] < results['rules'][threshold_key]:
                error_message += \
                    "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
                        rule=threshold_key, count=results['rules'][threshold_key],
                        violations_limit=violation_thresholds['rules'][threshold_key],
                    )

    if error_message:
        print('xsslint')
        fail_quality("FAILURE: XSSLinter Failed.\n{error_message}\n"
                     "run the following command to hone in on the problem:\n"
                     "./scripts/xss-commit-linter.sh -h".format(error_message=error_message))
    else:
        print("Successfully ran xsslint")


def _lint(file_or_dir, template_linters, options, summary_results, out):
    """
    For each linter, lints the provided file or directory.

    Arguments:
        file_or_dir: The file or initial directory to lint.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """

    if file_or_dir is not None and os.path.isfile(file_or_dir):
        _process_file(file_or_dir, template_linters, options, summary_results, out)
    else:
        directory = "."
        if file_or_dir is not None:
            if os.path.exists(file_or_dir):
                directory = file_or_dir
            else:
                raise ValueError(f"Path [{file_or_dir}] is not a valid file or directory.")
        _process_os_dirs(directory, template_linters, options, summary_results, out)

    summary_results.print_results(options, out)
    result_output = _get_xsslint_counts(out.getvalue())
    _check_violations(options, result_output)


def main():
    """
    Used to execute the linter. Use --help option for help.

    Prints all violations.
    """
    epilog = "For more help using the xss linter, including details on how to\n"
    epilog += "understand and fix any violations, read the docs here:\n"
    epilog += "\n"
    # pylint: disable=line-too-long
    epilog += "  https://edx.readthedocs.org/projects/edx-developer-guide/en/latest/conventions/preventing_xss.html#xss-linter\n"

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Checks that templates are safe.',
        epilog=epilog,
    )
    parser.add_argument(
        '--list-files', dest='list_files', action='store_true',
        help='Only display the filenames that contain violations.'
    )
    parser.add_argument(
        '--rule-totals', dest='rule_totals', action='store_true',
        help='Display the totals for each rule.'
    )
    parser.add_argument(
        '--summary-format', dest='summary_format',
        choices=['eslint', 'json'], default='eslint',
        help='Choose the display format for the summary.'
    )
    parser.add_argument(
        '--verbose', dest='verbose', action='store_true',
        help='Print multiple lines where possible for additional context of violations.'
    )
    parser.add_argument(
        '--config', dest='config', action='store', default='xsslint.default_config',
        help='Specifies the config module to use. The config module should be in Python package syntax.'
    )
    parser.add_argument(
        '--thresholds', dest='thresholds', action='store',
        help='Specifies the path to a JSON file containing the violation thresholds.'
    )
    parser.add_argument('path', nargs="?", default=None, help='A file to lint or directory to recursively lint.')

    args = parser.parse_args()
    config = _load_config_module(args.config)
    options = {
        'list_files': args.list_files,
        'rule_totals': args.rule_totals,
        'summary_format': args.summary_format,
        'verbose': args.verbose,
        'skip_dirs': getattr(config, 'SKIP_DIRS', ()),
        'thresholds': args.thresholds
    }
    template_linters = getattr(config, 'LINTERS', ())
    if not template_linters:
        raise ValueError(f"LINTERS is empty or undefined in the config module ({args.config}).")

    ruleset = _build_ruleset(template_linters)
    summary_results = SummaryResults(ruleset)
    _lint(args.path, template_linters, options, summary_results, out=StringIO())


if __name__ == "__main__":
    from xsslint.main import main
    main()
    try:
        main()
    except BuildFailure as e:
        print(e)
        sys.exit(1)
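For reference, the file passed via --thresholds (the Makefile points it at
scripts/xsslint_thresholds.json) must be a JSON object containing only "total"
and/or "rules" keys, as _check_violations enforces. A minimal sketch with
hypothetical numbers, matching the format shown in the failure message above:

    # Hypothetical thresholds file contents; the actual numbers in
    # scripts/xsslint_thresholds.json are not shown in this diff.
    import json

    thresholds = json.loads('{"total": 100, "rules": {"javascript-escape": 0}}')
    assert set(thresholds) <= {"total", "rules"}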

@@ -1,187 +0,0 @@
"""
The main function for the XSS linter.
"""


import argparse
import importlib
import os
import sys
from functools import reduce

from xsslint.reporting import SummaryResults
from xsslint.rules import RuleSet
from xsslint.utils import is_skip_dir


def _load_config_module(module_path):
    cwd = os.getcwd()
    if cwd not in sys.path:
        # Enable config module to be imported relative to wherever the script was run from.
        sys.path.append(cwd)
    return importlib.import_module(module_path)


def _build_ruleset(template_linters):
    """
    Combines the RuleSets from the provided template_linters into a single, aggregate RuleSet.

    Arguments:
        template_linters: A list of linting objects.

    Returns:
        The combined RuleSet.
    """
    return reduce(
        lambda combined, current: combined + current.ruleset,
        template_linters,
        RuleSet()
    )


def _process_file(full_path, template_linters, options, summary_results, out):
    """
    For each linter, lints the provided file. This means finding and printing
    violations.

    Arguments:
        full_path: The full path of the file to lint.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """
    num_violations = 0
    directory = os.path.dirname(full_path)
    file_name = os.path.basename(full_path)
    try:
        for template_linter in template_linters:
            results = template_linter.process_file(directory, file_name)
            results.print_results(options, summary_results, out)
    except BaseException as e:
        raise Exception(f"Failed to process path: {full_path}") from e


def _process_os_dir(directory, files, template_linters, options, summary_results, out):
    """
    Calls out to lint each file in the passed list of files.

    Arguments:
        directory: Directory being linted.
        files: All files in the directory to be linted.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """
    for current_file in sorted(files, key=lambda s: s.lower()):
        full_path = os.path.join(directory, current_file)
        _process_file(full_path, template_linters, options, summary_results, out)


def _process_os_dirs(starting_dir, template_linters, options, summary_results, out):
    """
    For each linter, lints all the directories in the starting directory.

    Arguments:
        starting_dir: The initial directory to begin the walk.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """
    skip_dirs = options.get('skip_dirs', ())
    for root, dirs, files in os.walk(starting_dir):
        if is_skip_dir(skip_dirs, root):
            del dirs
            continue
        dirs.sort(key=lambda s: s.lower())
        _process_os_dir(root, files, template_linters, options, summary_results, out)


def _lint(file_or_dir, template_linters, options, summary_results, out):
    """
    For each linter, lints the provided file or directory.

    Arguments:
        file_or_dir: The file or initial directory to lint.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file

    """

    if file_or_dir is not None and os.path.isfile(file_or_dir):
        _process_file(file_or_dir, template_linters, options, summary_results, out)
    else:
        directory = "."
        if file_or_dir is not None:
            if os.path.exists(file_or_dir):
                directory = file_or_dir
            else:
                raise ValueError(f"Path [{file_or_dir}] is not a valid file or directory.")
        _process_os_dirs(directory, template_linters, options, summary_results, out)

    summary_results.print_results(options, out)


def main():
    """
    Used to execute the linter. Use --help option for help.

    Prints all violations.
    """
    epilog = "For more help using the xss linter, including details on how to\n"
    epilog += "understand and fix any violations, read the docs here:\n"
    epilog += "\n"
    # pylint: disable=line-too-long
    epilog += "  https://edx.readthedocs.org/projects/edx-developer-guide/en/latest/conventions/preventing_xss.html#xss-linter\n"

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Checks that templates are safe.',
        epilog=epilog,
    )
    parser.add_argument(
        '--list-files', dest='list_files', action='store_true',
        help='Only display the filenames that contain violations.'
    )
    parser.add_argument(
        '--rule-totals', dest='rule_totals', action='store_true',
        help='Display the totals for each rule.'
    )
    parser.add_argument(
        '--summary-format', dest='summary_format',
        choices=['eslint', 'json'], default='eslint',
        help='Choose the display format for the summary.'
    )
    parser.add_argument(
        '--verbose', dest='verbose', action='store_true',
        help='Print multiple lines where possible for additional context of violations.'
    )
    parser.add_argument(
        '--config', dest='config', action='store', default='xsslint.default_config',
        help='Specifies the config module to use. The config module should be in Python package syntax.'
    )
    parser.add_argument('path', nargs="?", default=None, help='A file to lint or directory to recursively lint.')

    args = parser.parse_args()
    config = _load_config_module(args.config)
    options = {
        'list_files': args.list_files,
        'rule_totals': args.rule_totals,
        'summary_format': args.summary_format,
        'verbose': args.verbose,
        'skip_dirs': getattr(config, 'SKIP_DIRS', ())
    }
    template_linters = getattr(config, 'LINTERS', ())
    if not template_linters:
        raise ValueError(f"LINTERS is empty or undefined in the config module ({args.config}).")

    ruleset = _build_ruleset(template_linters)
    summary_results = SummaryResults(ruleset)
    _lint(args.path, template_linters, options, summary_results, out=sys.stdout)

@@ -1,3 +0,0 @@
module.exports = {
    extends: '@edx/stylelint-config-edx'
};