Add tests. Enjoy your coverage and feel confident.
This commit is contained in:
@@ -1,19 +1,21 @@
|
||||
"""
|
||||
Tests for paver quality tasks
|
||||
"""
|
||||
import os
|
||||
from path import Path as path
|
||||
import tempfile
|
||||
import textwrap
|
||||
import unittest
|
||||
from mock import patch, MagicMock, mock_open
|
||||
from ddt import ddt, file_data
|
||||
|
||||
import pavelib.quality
|
||||
import os
|
||||
import paver.easy
|
||||
import paver.tasks
|
||||
from ddt import ddt, file_data
|
||||
from mock import patch, MagicMock, mock_open
|
||||
from path import Path as path
|
||||
from paver.easy import BuildFailure
|
||||
|
||||
import pavelib.quality
|
||||
from pavelib.paver_tests.utils import CustomShMock
|
||||
|
||||
|
||||
@ddt
|
||||
class TestPaverQualityViolations(unittest.TestCase):
|
||||
@@ -351,32 +353,3 @@ class TestPaverRunQuality(unittest.TestCase):
|
||||
self.assertEqual(_mock_pep8_violations.call_count, 1)
|
||||
# And assert that sh was called twice (for the call to "pylint" & "eslint")
|
||||
self.assertEqual(self._mock_paver_sh.call_count, 2)
|
||||
|
||||
|
||||
class CustomShMock(object):
    """
    Diff-quality makes a number of sh calls. None of those calls should be made during tests; however, some
    of them need to have certain responses.

    Instances of this class supply `side_effect` callables for a mocked
    `paver.easy.sh`, so the quality tasks under test see realistic failures
    without any real subprocess being spawned.
    """

    def fail_on_pylint(self, arg):
        """
        For our tests, we need the call for diff-quality running pylint to fail, since that is what
        is going to fail when we pass in a percentage ("p") requirement.

        Any non-pylint command is silently treated as a success (returns None).
        """
        if "pylint" in arg:
            # Essentially mock diff-quality exiting with 1
            paver.easy.sh("exit 1")
        else:
            return

    def fail_on_eslint(self, arg):
        """
        For our tests, we need the call for diff-quality running eslint to fail, since that is what
        is going to fail when we pass in a percentage ("p") requirement.

        Any non-eslint command is silently treated as a success (returns None).
        """
        if "eslint" in arg:
            # Essentially mock diff-quality exiting with 1
            paver.easy.sh("exit 1")
        else:
            return
|
||||
|
||||
@@ -4,7 +4,11 @@ Tests covering the Open edX Paver prequisites installation workflow
|
||||
|
||||
import os
|
||||
import unittest
|
||||
from pavelib.prereqs import no_prereq_install
|
||||
from mock import call, patch
|
||||
from paver.easy import BuildFailure
|
||||
from pavelib.prereqs import no_prereq_install, node_prereqs_installation
|
||||
from pavelib.paver_tests.utils import PaverTestCase, CustomShMock
|
||||
from pavelib.paver_tests.test_paver_quality import CustomShMock
|
||||
|
||||
|
||||
class TestPaverPrereqInstall(unittest.TestCase):
|
||||
@@ -68,3 +72,50 @@ class TestPaverPrereqInstall(unittest.TestCase):
|
||||
Ensure that '1' will be True.
|
||||
"""
|
||||
self.check_val('1', True)
|
||||
|
||||
|
||||
class TestPaverNodeInstall(PaverTestCase):
    """
    Test node install logic

    Verifies the retry-on-"cb() never called" behavior of
    `node_prereqs_installation`: a return-code-1 failure triggers exactly one
    retry of `npm install`, while any other failure is raised immediately.
    """

    def setUp(self):
        super(TestPaverNodeInstall, self).setUp()
        # Make sure the prereq-install code path is not short-circuited.
        os.environ['NO_PREREQ_INSTALL'] = 'false'
        # Stub out shell execution so no real npm process is ever launched;
        # individual tests override side_effect to simulate failures.
        patcher = patch('pavelib.prereqs.sh', return_value=True)
        self._mock_paver_sh = patcher.start()
        self.addCleanup(patcher.stop)

    def test_npm_install_with_subprocess_error(self):
        """
        Test that we handle a subprocess 1 (proxy for cb() never called error)
        TE-1767
        """
        self._mock_paver_sh.side_effect = CustomShMock().fail_on_npm_install
        with self.assertRaises(BuildFailure):
            node_prereqs_installation()
        actual_calls = self._mock_paver_sh.mock_calls

        # npm install will be called twice
        self.assertEqual(actual_calls.count(call('npm install')), 2)

    def test_npm_install_called_once_when_successful(self):
        """
        Vanilla npm install should only be calling npm install one time
        """
        node_prereqs_installation()
        actual_calls = self._mock_paver_sh.mock_calls

        # when there's no failure, npm install is only called once
        self.assertEqual(actual_calls.count(call('npm install')), 1)

    def test_npm_install_with_unexpected_subprocess_error(self):
        """
        If there's some other error, only call npm install once, and raise a failure
        """
        # Return code 50 does not match the retryable "cb() never called"
        # error, so no retry should happen.
        self._mock_paver_sh.side_effect = CustomShMock().unexpected_fail_on_npm_install
        with self.assertRaises(BuildFailure):
            node_prereqs_installation()
        actual_calls = self._mock_paver_sh.mock_calls

        self.assertEqual(actual_calls.count(call('npm install')), 1)
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
"""Unit tests for the Paver server tasks."""
|
||||
|
||||
import os
|
||||
import paver.easy
|
||||
from paver import tasks
|
||||
from unittest import TestCase
|
||||
|
||||
from paver.easy import BuildFailure
|
||||
|
||||
|
||||
class PaverTestCase(TestCase):
|
||||
"""
|
||||
@@ -58,3 +61,52 @@ class MockEnvironment(tasks.Environment):
|
||||
output = message
|
||||
if not output.startswith("--->"):
|
||||
self.messages.append(unicode(output))
|
||||
|
||||
|
||||
class CustomShMock(object):
    """
    Diff-quality makes a number of sh calls. None of those calls should be made during tests; however, some
    of them need to have certain responses.

    Instances supply `side_effect` callables for a mocked `sh` /
    `paver.easy.sh`, simulating specific command failures without spawning
    real subprocesses.
    """

    def fail_on_pylint(self, arg):
        """
        For our tests, we need the call for diff-quality running pylint to fail, since that is what
        is going to fail when we pass in a percentage ("p") requirement.

        Any non-pylint command is silently treated as a success (returns None).
        """
        if "pylint" in arg:
            # Essentially mock diff-quality exiting with 1
            paver.easy.sh("exit 1")
        else:
            return

    def fail_on_eslint(self, arg):
        """
        For our tests, we need the call for diff-quality running eslint to fail, since that is what
        is going to fail when we pass in a percentage ("p") requirement.

        Any non-eslint command is silently treated as a success (returns None).
        """
        if "eslint" in arg:
            # Essentially mock diff-quality exiting with 1
            paver.easy.sh("exit 1")
        else:
            return

    def fail_on_npm_install(self, arg):
        """
        For our tests, we need the call to 'npm install' to fail with a return
        code of 1, simulating the retryable "cb() never called" subprocess
        error handled by node_prereqs_installation.

        Any other command is silently treated as a success (returns None).
        """
        if "npm install" in arg:
            raise BuildFailure('Subprocess return code: 1')
        else:
            return

    def unexpected_fail_on_npm_install(self, arg):
        """
        For our tests, we need the call to 'npm install' to fail with a return
        code other than 1 (here 50), simulating an unexpected error that
        node_prereqs_installation should re-raise without retrying.

        Any other command is silently treated as a success (returns None).
        """
        if "npm install" in arg:
            raise BuildFailure('Subprocess return code: 50')
        else:
            return
|
||||
|
||||
@@ -138,13 +138,16 @@ def node_prereqs_installation():
|
||||
" {reg})".format(reg=NPM_REGISTRY))
|
||||
|
||||
# Error handling around a race condition that produces "cb() never called" error. This
|
||||
# ought to disappear when we upgrade npm to 3 or higher. TODO: clean this up when we do that.
|
||||
# evinces itself as `cb_error_text` and it ought to disappear when we upgrade
|
||||
# npm to 3 or higher. TODO: clean this up when we do that.
|
||||
try:
|
||||
sh('npm install')
|
||||
except BuildFailure, error_text:
|
||||
if cb_error_text in error_text:
|
||||
print "npm install error detected. Retrying..."
|
||||
sh('npm install')
|
||||
else:
|
||||
raise BuildFailure(error_text)
|
||||
|
||||
|
||||
def python_prereqs_installation():
|
||||
|
||||
Reference in New Issue
Block a user