Fix paver quality tests to reflect removed diff-quality call

This commit is contained in:
Sarina Canelake
2015-03-20 16:20:44 -04:00
parent a51fbc8123
commit bd090a6e58
2 changed files with 58 additions and 52 deletions

View File

@@ -1,7 +1,10 @@
"""
Tests for paver quality tasks
"""
import os
import tempfile
import unittest
from mock import patch
from mock import patch, MagicMock
from ddt import ddt, file_data
import pavelib.quality
@@ -11,22 +14,26 @@ from paver.easy import BuildFailure
@ddt
class TestPaverQualityViolations(unittest.TestCase):
"""
For testing the paver violations-counting tasks
"""
def setUp(self):
super(TestPaverQualityViolations, self).setUp()
self.f = tempfile.NamedTemporaryFile(delete=False)
self.f.close()
self.addCleanup(os.remove, self.f.name)
def test_pylint_parser_other_string(self):
with open(self.f.name, 'w') as f:
f.write("hello")
num = pavelib.quality._count_pylint_violations(f.name)
num = pavelib.quality._count_pylint_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 0)
def test_pylint_parser_pep8(self):
# Pep8 violations should be ignored.
with open(self.f.name, 'w') as f:
f.write("foo/hello/test.py:304:15: E203 whitespace before ':'")
num = pavelib.quality._count_pylint_violations(f.name)
num = pavelib.quality._count_pylint_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 0)
@file_data('pylint_test_list.json')
@@ -38,18 +45,15 @@ class TestPaverQualityViolations(unittest.TestCase):
"""
with open(self.f.name, 'w') as f:
f.write(value)
num = pavelib.quality._count_pylint_violations(f.name)
num = pavelib.quality._count_pylint_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 1)
def test_pep8_parser(self):
with open(self.f.name, 'w') as f:
f.write("hello\nhithere")
num = pavelib.quality._count_pep8_violations(f.name)
num, _violations = pavelib.quality._pep8_violations(f.name) # pylint: disable=protected-access
self.assertEqual(num, 2)
def tearDown(self):
os.remove(self.f.name)
class TestPaverRunQuality(unittest.TestCase):
"""
@@ -57,27 +61,33 @@ class TestPaverRunQuality(unittest.TestCase):
"""
def setUp(self):
super(TestPaverRunQuality, self).setUp()
# mock the @needs decorator to skip it
self._mock_paver_needs = patch.object(pavelib.quality.run_quality, 'needs').start()
self._mock_paver_needs.return_value = 0
self._mock_paver_sh = patch('pavelib.quality.sh').start()
self.addCleanup(self._mock_paver_sh.stop())
self.addCleanup(self._mock_paver_needs.stop())
patcher = patch('pavelib.quality.sh')
self._mock_paver_sh = patcher.start()
self.addCleanup(patcher.stop)
self.addCleanup(self._mock_paver_needs.stop)
def test_failure_on_diffquality_pep8(self):
"""
If pep8 diff-quality fails due to the percentage threshold, pylint
should still be run
If pep8 finds errors, pylint should still be run
"""
# Mock _get_pep8_violations to return a violation
_mock_pep8_violations = MagicMock(
return_value=(1, ['lms/envs/common.py:32:2: E225 missing whitespace around operator'])
)
with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
with self.assertRaises(SystemExit):
pavelib.quality.run_quality("")
self.assertRaises(BuildFailure)
# Underlying sh call must fail when it is running the pep8 diff-quality task
self._mock_paver_sh.side_effect = CustomShMock().fail_on_pep8
with self.assertRaises(SystemExit):
pavelib.quality.run_quality("")
self.assertRaises(BuildFailure)
# Test that both pep8 and pylint were called by counting the calls
self.assertEqual(self._mock_paver_sh.call_count, 2)
# Test that both pep8 and pylint were called by counting the calls to _get_pep8_violations
# (for pep8) and sh (for diff-quality pylint)
self.assertEqual(_mock_pep8_violations.call_count, 1)
self.assertEqual(self._mock_paver_sh.call_count, 1)
def test_failure_on_diffquality_pylint(self):
"""
@@ -90,7 +100,8 @@ class TestPaverRunQuality(unittest.TestCase):
pavelib.quality.run_quality("")
self.assertRaises(BuildFailure)
# Test that both pep8 and pylint were called by counting the calls
self.assertEqual(self._mock_paver_sh.call_count, 2)
# Pep8 is called twice (for lms and cms), then pylint is called an additional time.
self.assertEqual(self._mock_paver_sh.call_count, 3)
def test_other_exception(self):
"""
@@ -105,8 +116,13 @@ class TestPaverRunQuality(unittest.TestCase):
def test_no_diff_quality_failures(self):
# Assert nothing is raised
pavelib.quality.run_quality("")
self.assertEqual(self._mock_paver_sh.call_count, 2)
_mock_pep8_violations = MagicMock(return_value=(0, []))
with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
pavelib.quality.run_quality("")
# Assert that _get_pep8_violations (which calls "pep8") is called once
self.assertEqual(_mock_pep8_violations.call_count, 1)
# And assert that sh was called once (for the call to "pylint")
self.assertEqual(self._mock_paver_sh.call_count, 1)
class CustomShMock(object):
@@ -115,17 +131,6 @@ class CustomShMock(object):
of them need to have certain responses.
"""
def fail_on_pep8(self, arg):
"""
For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what
is going to fail when we pass in a percentage ("p") requirement.
"""
if "pep8" in arg:
# Essentially mock diff-quality exiting with 1
paver.easy.sh("exit 1")
else:
return
def fail_on_pylint(self, arg):
"""
For our tests, we need the call for diff-quality running pylint reports to fail, since that is what

View File

@@ -56,7 +56,7 @@ def find_fixme(options):
num_fixme += _count_pylint_violations(
"{report_dir}/pylint_fixme.report".format(report_dir=report_dir))
print("Number of pylint fixmes: " + str(num_fixme))
print "Number of pylint fixmes: " + str(num_fixme)
@task
@@ -75,7 +75,7 @@ def run_pylint(options):
violations_limit = int(getattr(options, 'limit', -1))
errors = getattr(options, 'errors', False)
systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
# Make sure the metrics subdirectory exists
Env.METRICS_DIR.makedirs_p()
@@ -119,7 +119,7 @@ def run_pylint(options):
# Print number of violations to log
violations_count_str = "Number of pylint violations: " + str(num_violations)
print(violations_count_str)
print violations_count_str
# Also write the number of violations to a file
with open(Env.METRICS_DIR / "pylint", "w") as f:
@@ -139,7 +139,7 @@ def _count_pylint_violations(report_file):
# An example string:
# common/lib/xmodule/xmodule/tests/test_conditional.py:21: [C0111(missing-docstring), DummySystem] Missing docstring
# More examples can be found in the unit tests for this method
pylint_pattern = re.compile(".(\d+):\ \[(\D\d+.+\]).")
pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")
for line in open(report_file):
violation_list_for_line = pylint_pattern.split(line)
@@ -161,7 +161,7 @@ def _get_pep8_violations():
report_dir = (Env.REPORT_DIR / 'pep8')
report_dir.rmtree(ignore_errors=True)
report_dir.makedirs_p()
# Make sure the metrics subdirectory exists
Env.METRICS_DIR.makedirs_p()
@@ -191,31 +191,29 @@ def _pep8_violations(report_file):
@cmdopts([
("system=", "s", "System to act on"),
])
def run_pep8(options):
def run_pep8(options): # pylint: disable=unused-argument
"""
Run pep8 on system code.
Fail the task if any violations are found.
"""
(count, violations_list) = _get_pep8_violations()
violations_str = ''.join(violations_list)
violations_list = ''.join(violations_list)
# Print number of violations to log
violations_count_str = "Number of pep8 violations: {count}".format(count=count)
print(violations_count_str)
print(violations_str)
print violations_count_str
print violations_list
# Also write the number of violations to a file
with open(Env.METRICS_DIR / "pep8", "w") as f:
f.write(violations_count_str + '\n\n')
f.write(violations_str)
f.write(violations_list)
# Fail if any violations are found
if count:
raise Exception(
"Too many pep8 violations. Number of violations found: {count}.\n\n{violations}".format(
count=count, violations=violations_str
)
)
failure_string = "Too many pep8 violations. " + violations_count_str
failure_string += "\n\nViolations:\n{violations_list}".format(violations_list=violations_list)
raise Exception(failure_string)
@task
@@ -257,9 +255,11 @@ def run_quality(options):
dquality_dir = (Env.REPORT_DIR / "diff_quality").makedirs_p()
diff_quality_percentage_failure = False
# Run pep8 directly since we have 0 violations on master
def _pep8_output(count, violations_list, is_html=False):
"""
Given a count & list of pep8 violations, pretty-print the pep8 output.
If `is_html`, will print out with HTML markup.
"""
if is_html:
lines = ['<body>\n']
sep = '-------------<br/>\n'
@@ -290,6 +290,7 @@ def run_quality(options):
return ''.join(lines)
# Run pep8 directly since we have 0 violations on master
(count, violations_list) = _get_pep8_violations()
# Print number of violations to log