diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py index 7c2810c621..2ae4bdfee3 100644 --- a/pavelib/paver_tests/test_paver_quality.py +++ b/pavelib/paver_tests/test_paver_quality.py @@ -1,19 +1,21 @@ """ Tests for paver quality tasks """ -import os -from path import Path as path import tempfile import textwrap import unittest -from mock import patch, MagicMock, mock_open -from ddt import ddt, file_data -import pavelib.quality +import os import paver.easy import paver.tasks +from ddt import ddt, file_data +from mock import patch, MagicMock, mock_open +from path import Path as path from paver.easy import BuildFailure +import pavelib.quality +from pavelib.paver_tests.utils import fail_on_pylint, fail_on_eslint + @ddt class TestPaverQualityViolations(unittest.TestCase): @@ -296,7 +298,7 @@ class TestPaverRunQuality(unittest.TestCase): """ # Underlying sh call must fail when it is running the pylint diff-quality task - self._mock_paver_sh.side_effect = CustomShMock().fail_on_pylint + self._mock_paver_sh.side_effect = fail_on_pylint _mock_pep8_violations = MagicMock(return_value=(0, [])) with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations): with self.assertRaises(SystemExit): @@ -316,7 +318,7 @@ class TestPaverRunQuality(unittest.TestCase): """ # Underlying sh call must fail when it is running the eslint diff-quality task - self._mock_paver_sh.side_effect = CustomShMock().fail_on_eslint + self._mock_paver_sh.side_effect = fail_on_eslint _mock_pep8_violations = MagicMock(return_value=(0, [])) with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations): with self.assertRaises(SystemExit): @@ -351,32 +353,3 @@ class TestPaverRunQuality(unittest.TestCase): self.assertEqual(_mock_pep8_violations.call_count, 1) # And assert that sh was called twice (for the call to "pylint" & "eslint") self.assertEqual(self._mock_paver_sh.call_count, 2) - - -class CustomShMock(object): - """ - Diff-quality 
makes a number of sh calls. None of those calls should be made during tests; however, some - of them need to have certain responses. - """ - - def fail_on_pylint(self, arg): - """ - For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what - is going to fail when we pass in a percentage ("p") requirement. - """ - if "pylint" in arg: - # Essentially mock diff-quality exiting with 1 - paver.easy.sh("exit 1") - else: - return - - def fail_on_eslint(self, arg): - """ - For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what - is going to fail when we pass in a percentage ("p") requirement. - """ - if "eslint" in arg: - # Essentially mock diff-quality exiting with 1 - paver.easy.sh("exit 1") - else: - return diff --git a/pavelib/paver_tests/test_prereqs.py b/pavelib/paver_tests/test_prereqs.py index 4c7ed9ba10..dba0c748a6 100644 --- a/pavelib/paver_tests/test_prereqs.py +++ b/pavelib/paver_tests/test_prereqs.py @@ -4,7 +4,12 @@ Tests covering the Open edX Paver prequisites installation workflow import os import unittest -from pavelib.prereqs import no_prereq_install +from mock import call, patch +from paver.easy import BuildFailure +from pavelib.prereqs import no_prereq_install, node_prereqs_installation +from pavelib.paver_tests.utils import ( + PaverTestCase, unexpected_fail_on_npm_install, fail_on_npm_install +) class TestPaverPrereqInstall(unittest.TestCase): @@ -68,3 +73,54 @@ class TestPaverPrereqInstall(unittest.TestCase): Ensure that '1' will be True. 
""" self.check_val('1', True) + + +class TestPaverNodeInstall(PaverTestCase): + """ + Test node install logic + """ + + def setUp(self): + super(TestPaverNodeInstall, self).setUp() + + # Ensure prereqs will be run + os.environ['NO_PREREQ_INSTALL'] = 'false' + + patcher = patch('pavelib.prereqs.sh', return_value=True) + self._mock_paver_sh = patcher.start() + self.addCleanup(patcher.stop) + + def test_npm_install_with_subprocess_error(self): + """ + An exit with subprocess exit 1 is what paver receives when there is + an npm install error ("cb() never called!"). Test that we can handle + this kind of failure. For more info see TE-1767. + """ + self._mock_paver_sh.side_effect = fail_on_npm_install + with self.assertRaises(BuildFailure): + node_prereqs_installation() + actual_calls = self._mock_paver_sh.mock_calls + + # npm install will be called twice + self.assertEqual(actual_calls.count(call('npm install')), 2) + + def test_npm_install_called_once_when_successful(self): + """ + Vanilla npm install should only be calling npm install one time + """ + node_prereqs_installation() + actual_calls = self._mock_paver_sh.mock_calls + + # when there's no failure, npm install is only called once + self.assertEqual(actual_calls.count(call('npm install')), 1) + + def test_npm_install_with_unexpected_subprocess_error(self): + """ + If there's some other error, only call npm install once, and raise a failure + """ + self._mock_paver_sh.side_effect = unexpected_fail_on_npm_install + with self.assertRaises(BuildFailure): + node_prereqs_installation() + actual_calls = self._mock_paver_sh.mock_calls + + self.assertEqual(actual_calls.count(call('npm install')), 1) diff --git a/pavelib/paver_tests/utils.py b/pavelib/paver_tests/utils.py index e4ca1b88f7..168b9c4556 100644 --- a/pavelib/paver_tests/utils.py +++ b/pavelib/paver_tests/utils.py @@ -1,9 +1,12 @@ """Unit tests for the Paver server tasks.""" import os +import paver.easy from paver import tasks from unittest import TestCase 
+from paver.easy import BuildFailure
+
 
 class PaverTestCase(TestCase):
     """
@@ -58,3 +61,49 @@ class MockEnvironment(tasks.Environment):
         output = message
         if not output.startswith("--->"):
             self.messages.append(unicode(output))
+
+
+def fail_on_eslint(arg):
+    """
+    For our tests, we need the call for diff-quality running eslint to fail, since that is what
+    is going to fail when we pass in a percentage ("p") requirement.
+    """
+    if "eslint" in arg:
+        # Essentially mock diff-quality exiting with 1
+        paver.easy.sh("exit 1")
+    else:
+        return
+
+
+def fail_on_pylint(arg):
+    """
+    For our tests, we need the call for diff-quality running pylint to fail, since that is what
+    is going to fail when we pass in a percentage ("p") requirement.
+    """
+    if "pylint" in arg:
+        # Essentially mock diff-quality exiting with 1
+        paver.easy.sh("exit 1")
+    else:
+        return
+
+
+def fail_on_npm_install(arg):
+    """
+    For our tests, the call to 'npm install' must fail with the "cb() never called!"
+    subprocess error (return code 1) so we can verify the retry logic handles it.
+    """
+    if "npm install" in arg:
+        raise BuildFailure('Subprocess return code: 1')
+    else:
+        return
+
+
+def unexpected_fail_on_npm_install(arg):
+    """
+    For our tests, the call to 'npm install' must fail with an unexpected subprocess
+    error (return code 50) so we can verify it is re-raised instead of retried.
+ """ + if "npm install" in arg: + raise BuildFailure('Subprocess return code: 50') + else: + return diff --git a/pavelib/prereqs.py b/pavelib/prereqs.py index 9612dd022e..56974e0c1d 100644 --- a/pavelib/prereqs.py +++ b/pavelib/prereqs.py @@ -8,7 +8,7 @@ import os import re import sys -from paver.easy import sh, task +from paver.easy import sh, task, BuildFailure from .utils.envs import Env from .utils.timer import timed @@ -132,10 +132,22 @@ def node_prereqs_installation(): """ Configures npm and installs Node prerequisites """ + cb_error_text = "Subprocess return code: 1" sh("test `npm config get registry` = \"{reg}\" || " "(echo setting registry; npm config set registry" " {reg})".format(reg=NPM_REGISTRY)) - sh('npm install') + + # Error handling around a race condition that produces "cb() never called" error. This + # evinces itself as `cb_error_text` and it ought to disappear when we upgrade + # npm to 3 or higher. TODO: clean this up when we do that. + try: + sh('npm install') + except BuildFailure, error_text: + if cb_error_text in error_text: + print "npm install error detected. Retrying..." + sh('npm install') + else: + raise BuildFailure(error_text) def python_prereqs_installation():