From 705c8b915e6d70b1fe2a0839e45645fb8adf3532 Mon Sep 17 00:00:00 2001 From: Ben Patterson Date: Thu, 17 Nov 2016 11:31:57 -0500 Subject: [PATCH] Updates based on feedback. --- pavelib/paver_tests/test_paver_quality.py | 6 +- pavelib/paver_tests/test_prereqs.py | 17 +++-- pavelib/paver_tests/utils.py | 79 +++++++++++------------ 3 files changed, 52 insertions(+), 50 deletions(-) diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py index 2f36a26e0a..2ae4bdfee3 100644 --- a/pavelib/paver_tests/test_paver_quality.py +++ b/pavelib/paver_tests/test_paver_quality.py @@ -14,7 +14,7 @@ from path import Path as path from paver.easy import BuildFailure import pavelib.quality -from pavelib.paver_tests.utils import CustomShMock +from pavelib.paver_tests.utils import fail_on_pylint, fail_on_eslint @ddt @@ -298,7 +298,7 @@ class TestPaverRunQuality(unittest.TestCase): """ # Underlying sh call must fail when it is running the pylint diff-quality task - self._mock_paver_sh.side_effect = CustomShMock().fail_on_pylint + self._mock_paver_sh.side_effect = fail_on_pylint _mock_pep8_violations = MagicMock(return_value=(0, [])) with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations): with self.assertRaises(SystemExit): @@ -318,7 +318,7 @@ class TestPaverRunQuality(unittest.TestCase): """ # Underlying sh call must fail when it is running the eslint diff-quality task - self._mock_paver_sh.side_effect = CustomShMock().fail_on_eslint + self._mock_paver_sh.side_effect = fail_on_eslint _mock_pep8_violations = MagicMock(return_value=(0, [])) with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations): with self.assertRaises(SystemExit): diff --git a/pavelib/paver_tests/test_prereqs.py b/pavelib/paver_tests/test_prereqs.py index 409a6fa163..dba0c748a6 100644 --- a/pavelib/paver_tests/test_prereqs.py +++ b/pavelib/paver_tests/test_prereqs.py @@ -7,8 +7,9 @@ import unittest from mock import call, patch from paver.easy
import BuildFailure from pavelib.prereqs import no_prereq_install, node_prereqs_installation -from pavelib.paver_tests.utils import PaverTestCase, CustomShMock -from pavelib.paver_tests.test_paver_quality import CustomShMock +from pavelib.paver_tests.utils import ( + PaverTestCase, unexpected_fail_on_npm_install, fail_on_npm_install +) class TestPaverPrereqInstall(unittest.TestCase): @@ -81,17 +82,21 @@ class TestPaverNodeInstall(PaverTestCase): def setUp(self): super(TestPaverNodeInstall, self).setUp() + + # Ensure prereqs will be run os.environ['NO_PREREQ_INSTALL'] = 'false' + patcher = patch('pavelib.prereqs.sh', return_value=True) self._mock_paver_sh = patcher.start() self.addCleanup(patcher.stop) def test_npm_install_with_subprocess_error(self): """ - Test that we handle a subprocess 1 (proxy for cb() never called error) - TE-1767 + An exit with subprocess exit 1 is what paver receives when there is + an npm install error ("cb() never called!"). Test that we can handle + this kind of failure. For more info see TE-1767. 
""" - self._mock_paver_sh.side_effect = CustomShMock().fail_on_npm_install + self._mock_paver_sh.side_effect = fail_on_npm_install with self.assertRaises(BuildFailure): node_prereqs_installation() actual_calls = self._mock_paver_sh.mock_calls @@ -113,7 +118,7 @@ class TestPaverNodeInstall(PaverTestCase): """ If there's some other error, only call npm install once, and raise a failure """ - self._mock_paver_sh.side_effect = CustomShMock().unexpected_fail_on_npm_install + self._mock_paver_sh.side_effect = unexpected_fail_on_npm_install with self.assertRaises(BuildFailure): node_prereqs_installation() actual_calls = self._mock_paver_sh.mock_calls diff --git a/pavelib/paver_tests/utils.py b/pavelib/paver_tests/utils.py index 5cdfcd3fd3..168b9c4556 100644 --- a/pavelib/paver_tests/utils.py +++ b/pavelib/paver_tests/utils.py @@ -63,50 +63,47 @@ class MockEnvironment(tasks.Environment): self.messages.append(unicode(output)) -class CustomShMock(object): +def fail_on_eslint(arg): """ - Diff-quality makes a number of sh calls. None of those calls should be made during tests; however, some - of them need to have certain responses. + For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what + is going to fail when we pass in a percentage ("p") requirement. """ + if "eslint" in arg: + # Essentially mock diff-quality exiting with 1 + paver.easy.sh("exit 1") + else: + return - def fail_on_pylint(self, arg): - """ - For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what - is going to fail when we pass in a percentage ("p") requirement. - """ - if "pylint" in arg: - # Essentially mock diff-quality exiting with 1 - paver.easy.sh("exit 1") - else: - return - def fail_on_eslint(self, arg): - """ - For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what - is going to fail when we pass in a percentage ("p") requirement. 
- """ - if "eslint" in arg: - # Essentially mock diff-quality exiting with 1 - paver.easy.sh("exit 1") - else: - return +def fail_on_pylint(arg): + """ + For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what + is going to fail when we pass in a percentage ("p") requirement. + """ + if "pylint" in arg: + # Essentially mock diff-quality exiting with 1 + paver.easy.sh("exit 1") + else: + return - def fail_on_npm_install(self, arg): - """ - For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what - is going to fail when we pass in a percentage ("p") requirement. - """ - if "npm install" in arg: - raise BuildFailure('Subprocess return code: 1') - else: - return - def unexpected_fail_on_npm_install(self, arg): - """ - For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what - is going to fail when we pass in a percentage ("p") requirement. - """ - if "npm install" in arg: - raise BuildFailure('Subprocess return code: 50') - else: - return +def fail_on_npm_install(arg): + """ + For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what + is going to fail when we pass in a percentage ("p") requirement. + """ + if "npm install" in arg: + raise BuildFailure('Subprocess return code: 1') + else: + return + + +def unexpected_fail_on_npm_install(arg): + """ + For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what + is going to fail when we pass in a percentage ("p") requirement. + """ + if "npm install" in arg: + raise BuildFailure('Subprocess return code: 50') + else: + return