@@ -166,7 +167,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
mock.call('textline.html', expected_textline_context),
mock.call('solutionspan.html', expected_solution_context)]
- self.assertEqual(test_system.render_template.call_args_list,
+ self.assertEqual(the_system.render_template.call_args_list,
expected_calls)
@@ -184,7 +185,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
xml_str = CustomResponseXMLFactory().build_xml(**kwargs)
# Create the problem and render the html
- problem = LoncapaProblem(xml_str, '1', system=test_system)
+ problem = new_loncapa_problem(xml_str)
# Grade the problem
correctmap = problem.grade_answers({'1_2_1': 'test'})
@@ -219,7 +220,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
""")
# Create the problem and render the HTML
- problem = LoncapaProblem(xml_str, '1', system=test_system)
+ problem = new_loncapa_problem(xml_str)
rendered_html = etree.XML(problem.get_html())
# Expect that the variable $test has been replaced with its value
@@ -227,7 +228,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
self.assertEqual(span_element.get('attr'), "TEST")
def _create_test_file(self, path, content_str):
- test_fp = test_system.filestore.open(path, "w")
+ test_fp = self.system.filestore.open(path, "w")
test_fp.write(content_str)
test_fp.close()
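
These tests now use a `new_loncapa_problem` helper from `capa/tests/__init__.py` (imported in test_responsetypes.py below) instead of constructing `LoncapaProblem` by hand. The helper's body is not part of this diff; here is a minimal sketch consistent with its call sites, where the default seed value is an assumption:

    # Hypothetical sketch -- the real helper lives in capa/tests/__init__.py.
    from capa.capa_problem import LoncapaProblem

    def new_loncapa_problem(xml, system=None, seed=723):
        """Build a LoncapaProblem against a fresh test ModuleSystem."""
        # A fixed default seed keeps randomized problems reproducible in tests.
        return LoncapaProblem(xml, id='1', seed=seed,
                              system=system or test_system())
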
diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py
index 54edb5bf9f..313eb28249 100644
--- a/common/lib/capa/capa/tests/test_inputtypes.py
+++ b/common/lib/capa/capa/tests/test_inputtypes.py
@@ -45,7 +45,7 @@ class OptionInputTest(unittest.TestCase):
state = {'value': 'Down',
'id': 'sky_input',
'status': 'answered'}
- option_input = lookup_tag('optioninput')(test_system, element, state)
+ option_input = lookup_tag('optioninput')(test_system(), element, state)
context = option_input._get_render_context()
@@ -92,7 +92,7 @@ class ChoiceGroupTest(unittest.TestCase):
'id': 'sky_input',
'status': 'answered'}
- the_input = lookup_tag(tag)(test_system, element, state)
+ the_input = lookup_tag(tag)(test_system(), element, state)
context = the_input._get_render_context()
@@ -142,7 +142,7 @@ class JavascriptInputTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': '3', }
- the_input = lookup_tag('javascriptinput')(test_system, element, state)
+ the_input = lookup_tag('javascriptinput')(test_system(), element, state)
context = the_input._get_render_context()
@@ -170,7 +170,7 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', }
- the_input = lookup_tag('textline')(test_system, element, state)
+ the_input = lookup_tag('textline')(test_system(), element, state)
context = the_input._get_render_context()
@@ -198,7 +198,7 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', }
- the_input = lookup_tag('textline')(test_system, element, state)
+ the_input = lookup_tag('textline')(test_system(), element, state)
context = the_input._get_render_context()
@@ -236,7 +236,7 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', }
- the_input = lookup_tag('textline')(test_system, element, state)
+ the_input = lookup_tag('textline')(test_system(), element, state)
context = the_input._get_render_context()
@@ -274,7 +274,7 @@ class FileSubmissionTest(unittest.TestCase):
'status': 'incomplete',
'feedback': {'message': '3'}, }
input_class = lookup_tag('filesubmission')
- the_input = input_class(test_system, element, state)
+ the_input = input_class(test_system(), element, state)
context = the_input._get_render_context()
@@ -319,7 +319,7 @@ class CodeInputTest(unittest.TestCase):
'feedback': {'message': '3'}, }
input_class = lookup_tag('codeinput')
- the_input = input_class(test_system, element, state)
+ the_input = input_class(test_system(), element, state)
context = the_input._get_render_context()
@@ -368,7 +368,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, }
self.input_class = lookup_tag('matlabinput')
- self.the_input = self.input_class(test_system, elt, state)
+ self.the_input = self.input_class(test_system(), elt, state)
def test_rendering(self):
context = self.the_input._get_render_context()
@@ -396,7 +396,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, }
elt = etree.fromstring(self.xml)
- the_input = self.input_class(test_system, elt, state)
+ the_input = self.input_class(test_system(), elt, state)
context = the_input._get_render_context()
expected = {'id': 'prob_1_2',
@@ -423,7 +423,7 @@ class MatlabTest(unittest.TestCase):
}
elt = etree.fromstring(self.xml)
- the_input = self.input_class(test_system, elt, state)
+ the_input = self.input_class(test_system(), elt, state)
context = the_input._get_render_context()
expected = {'id': 'prob_1_2',
'value': 'print "good evening"',
@@ -448,7 +448,7 @@ class MatlabTest(unittest.TestCase):
}
elt = etree.fromstring(self.xml)
- the_input = self.input_class(test_system, elt, state)
+ the_input = self.input_class(test_system(), elt, state)
context = the_input._get_render_context()
expected = {'id': 'prob_1_2',
'value': 'print "good evening"',
@@ -470,7 +470,7 @@ class MatlabTest(unittest.TestCase):
get = {'submission': 'x = 1234;'}
response = self.the_input.handle_ajax("plot", get)
- test_system.xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY)
+ test_system().xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY)
self.assertTrue(response['success'])
self.assertTrue(self.the_input.input_state['queuekey'] is not None)
@@ -479,13 +479,12 @@ class MatlabTest(unittest.TestCase):
def test_plot_data_failure(self):
get = {'submission': 'x = 1234;'}
error_message = 'Error message!'
- test_system.xqueue['interface'].send_to_queue.return_value = (1, error_message)
+ test_system().xqueue['interface'].send_to_queue.return_value = (1, error_message)
response = self.the_input.handle_ajax("plot", get)
self.assertFalse(response['success'])
self.assertEqual(response['message'], error_message)
self.assertTrue('queuekey' not in self.the_input.input_state)
self.assertTrue('queuestate' not in self.the_input.input_state)
- test_system.xqueue['interface'].send_to_queue.return_value = (0, 'Success!')
def test_ungraded_response_success(self):
queuekey = 'abcd'
@@ -496,7 +495,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, }
elt = etree.fromstring(self.xml)
- the_input = self.input_class(test_system, elt, state)
+ the_input = self.input_class(test_system(), elt, state)
inner_msg = 'hello!'
queue_msg = json.dumps({'msg': inner_msg})
@@ -514,7 +513,7 @@ class MatlabTest(unittest.TestCase):
'feedback': {'message': '3'}, }
elt = etree.fromstring(self.xml)
- the_input = self.input_class(test_system, elt, state)
+ the_input = self.input_class(test_system(), elt, state)
inner_msg = 'hello!'
queue_msg = json.dumps({'msg': inner_msg})
@@ -553,7 +552,7 @@ class SchematicTest(unittest.TestCase):
state = {'value': value,
'status': 'unsubmitted'}
- the_input = lookup_tag('schematic')(test_system, element, state)
+ the_input = lookup_tag('schematic')(test_system(), element, state)
context = the_input._get_render_context()
@@ -592,7 +591,7 @@ class ImageInputTest(unittest.TestCase):
state = {'value': value,
'status': 'unsubmitted'}
- the_input = lookup_tag('imageinput')(test_system, element, state)
+ the_input = lookup_tag('imageinput')(test_system(), element, state)
context = the_input._get_render_context()
@@ -643,7 +642,7 @@ class CrystallographyTest(unittest.TestCase):
state = {'value': value,
'status': 'unsubmitted'}
- the_input = lookup_tag('crystallography')(test_system, element, state)
+ the_input = lookup_tag('crystallography')(test_system(), element, state)
context = the_input._get_render_context()
@@ -681,7 +680,7 @@ class VseprTest(unittest.TestCase):
state = {'value': value,
'status': 'unsubmitted'}
- the_input = lookup_tag('vsepr_input')(test_system, element, state)
+ the_input = lookup_tag('vsepr_input')(test_system(), element, state)
context = the_input._get_render_context()
@@ -708,7 +707,7 @@ class ChemicalEquationTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'H2OYeah', }
- self.the_input = lookup_tag('chemicalequationinput')(test_system, element, state)
+ self.the_input = lookup_tag('chemicalequationinput')(test_system(), element, state)
def test_rendering(self):
''' Verify that the render context matches the expected render context'''
@@ -783,7 +782,7 @@ class DragAndDropTest(unittest.TestCase):
]
}
- the_input = lookup_tag('drag_and_drop_input')(test_system, element, state)
+ the_input = lookup_tag('drag_and_drop_input')(test_system(), element, state)
context = the_input._get_render_context()
expected = {'id': 'prob_1_2',
@@ -832,7 +831,7 @@ class AnnotationInputTest(unittest.TestCase):
tag = 'annotationinput'
- the_input = lookup_tag(tag)(test_system, element, state)
+ the_input = lookup_tag(tag)(test_system(), element, state)
context = the_input._get_render_context()
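
Note the pattern throughout this file: `test_system` changes from a shared module-level ModuleSystem to a factory function (its definition appears in the xmodule/tests/__init__.py hunk further down), so each test builds a private system and per-test stubbing cannot leak between tests. A usage sketch, assuming the mocked xqueue interface is shared across factory calls (which is what the assert_called_with in test_plot_data relies on):

    # Sketch only: construct a fresh system per test, then stub it freely.
    system = test_system()
    system.render_template = my_render_func   # override dies with this test
    the_input = lookup_tag('textline')(system, element, state)
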
diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py
index 5fbc7f8c87..8bf6954139 100644
--- a/common/lib/capa/capa/tests/test_responsetypes.py
+++ b/common/lib/capa/capa/tests/test_responsetypes.py
@@ -2,7 +2,6 @@
Tests of responsetypes
"""
-
from datetime import datetime
import json
from nose.plugins.skip import SkipTest
@@ -10,10 +9,11 @@ import os
import random
import unittest
import textwrap
+import mock
-from . import test_system
+from . import new_loncapa_problem, test_system
-import capa.capa_problem as lcp
from capa.responsetypes import LoncapaProblemError, \
StudentInputError, ResponseError
from capa.correctmap import CorrectMap
@@ -30,9 +30,9 @@ class ResponseTest(unittest.TestCase):
if self.xml_factory_class:
self.xml_factory = self.xml_factory_class()
- def build_problem(self, **kwargs):
+ def build_problem(self, system=None, **kwargs):
xml = self.xml_factory.build_xml(**kwargs)
- return lcp.LoncapaProblem(xml, '1', system=test_system)
+ return new_loncapa_problem(xml, system=system)
def assert_grade(self, problem, submission, expected_correctness, msg=None):
input_dict = {'1_2_1': submission}
@@ -184,94 +184,151 @@ class ImageResponseTest(ResponseTest):
self.assert_answer_format(problem)
-class SymbolicResponseTest(unittest.TestCase):
- def test_sr_grade(self):
- raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test
- symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml"
- test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system)
- correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
- '1_2_1_dynamath': '''
-
- ''',
- }
- wrong_answers = {'1_2_1': '2',
- '1_2_1_dynamath': '''
-
- ''',
- }
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
- self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
+class SymbolicResponseTest(ResponseTest):
+ from response_xml_factory import SymbolicResponseXMLFactory
+ xml_factory_class = SymbolicResponseXMLFactory
+
+ def test_grade_single_input(self):
+ problem = self.build_problem(math_display=True,
+ expect="2*x+3*y")
+
+ # Correct answers
+ correct_inputs = [
+ ('2x+3y', textwrap.dedent("""
+
""")),
+
+ ('x+x+3y', textwrap.dedent("""
+
""")),
+ ]
+
+ for (input_str, input_mathml) in correct_inputs:
+ self._assert_symbolic_grade(problem, input_str, input_mathml, 'correct')
+
+ # Incorrect answers
+ incorrect_inputs = [
+ ('0', ''),
+ ('4x+3y', textwrap.dedent("""
+
""")),
+ ]
+
+ for (input_str, input_mathml) in incorrect_inputs:
+ self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
+
+
+ def test_complex_number_grade(self):
+ problem = self.build_problem(math_display=True,
+ expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
+ options=["matrix", "imaginary"])
+
+ # For LaTeX-style inputs, symmath_check() will try to contact
+ # a server to convert the input to MathML.
+ # We mock out the server, simulating the response that it would give
+ # for this input.
+ import requests
+ dirpath = os.path.dirname(__file__)
+ correct_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_correct.html")).read().decode('utf8')
+ wrong_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_wrong.html")).read().decode('utf8')
+
+ # Correct answer
+ with mock.patch.object(requests, 'post') as mock_post:
+
+ # Simulate what the LaTeX-to-MathML server would
+ # send for the correct response input
+ mock_post.return_value.text = correct_snuggletex_response
+
+ self._assert_symbolic_grade(problem,
+ "cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]",
+ textwrap.dedent("""
+
+ """),
+ 'correct')
+
+ # Incorrect answer
+ with mock.patch.object(requests, 'post') as mock_post:
+
+ # Simulate what the LaTeX-to-MathML server would
+ # send for the incorrect response input
+ mock_post.return_value.text = wrong_snuggletex_response
+
+ self._assert_symbolic_grade(problem, "2",
+ textwrap.dedent("""
+
+ """),
+ 'incorrect')
+
+ def test_multiple_inputs_exception(self):
+
+ # Should not allow multiple inputs, since we specify
+ # only one "expect" value
+ with self.assertRaises(Exception):
+ problem = self.build_problem(math_display=True,
+ expect="2*x+3*y",
+ num_inputs=3)
+
+ def _assert_symbolic_grade(self, problem,
+ student_input,
+ dynamath_input,
+ expected_correctness):
+ input_dict = {'1_2_1': str(student_input),
+ '1_2_1_dynamath': str(dynamath_input) }
+
+ correct_map = problem.grade_answers(input_dict)
+
+ self.assertEqual(correct_map.get_correctness('1_2_1'),
+ expected_correctness)
class OptionResponseTest(ResponseTest):
@@ -531,6 +588,22 @@ class StringResponseTest(ResponseTest):
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "")
+ def test_computed_hints(self):
+ problem = self.build_problem(
+ answer="Michigan",
+ hintfn="gimme_a_hint",
+ script=textwrap.dedent("""
+ def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
+ aid = answer_ids[0]
+ answer = student_answers[aid]
+ new_cmap.set_hint_and_mode(aid, answer+"??", "always")
+ """)
+ )
+
+ input_dict = {'1_2_1': 'Hello'}
+ correct_map = problem.grade_answers(input_dict)
+ self.assertEquals(correct_map.get_hint('1_2_1'), "Hello??")
+
class CodeResponseTest(ResponseTest):
from response_xml_factory import CodeResponseXMLFactory
@@ -710,16 +783,37 @@ class JavascriptResponseTest(ResponseTest):
coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
os.system("node_modules/.bin/coffee -c %s" % (coffee_file_path))
- problem = self.build_problem(generator_src="test_problem_generator.js",
- grader_src="test_problem_grader.js",
- display_class="TestProblemDisplay",
- display_src="test_problem_display.js",
- param_dict={'value': '4'})
+ system = test_system()
+ system.can_execute_unsafe_code = lambda: True
+ problem = self.build_problem(
+ system=system,
+ generator_src="test_problem_generator.js",
+ grader_src="test_problem_grader.js",
+ display_class="TestProblemDisplay",
+ display_src="test_problem_display.js",
+ param_dict={'value': '4'},
+ )
# Test that we get graded correctly
self.assert_grade(problem, json.dumps({0: 4}), "correct")
self.assert_grade(problem, json.dumps({0: 5}), "incorrect")
+ def test_cant_execute_javascript(self):
+ # If the system says to disallow unsafe code execution, then making
+ # this problem will raise an exception.
+ system = test_system()
+ system.can_execute_unsafe_code = lambda: False
+
+ with self.assertRaises(LoncapaProblemError):
+ problem = self.build_problem(
+ system=system,
+ generator_src="test_problem_generator.js",
+ grader_src="test_problem_grader.js",
+ display_class="TestProblemDisplay",
+ display_src="test_problem_display.js",
+ param_dict={'value': '4'},
+ )
+
class NumericalResponseTest(ResponseTest):
from response_xml_factory import NumericalResponseXMLFactory
@@ -853,9 +947,8 @@ class CustomResponseTest(ResponseTest):
#
# 'answer_given' is the answer the student gave (if there is just one input)
# or an ordered list of answers (if there are multiple inputs)
- #
- #
- # The function should return a dict of the form
+ #
+ # The function should return a dict of the form
# { 'ok': BOOL, 'msg': STRING }
#
script = textwrap.dedent("""
@@ -964,6 +1057,35 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
+ def test_function_code_with_extra_args(self):
+ script = textwrap.dedent("""\
+ def check_func(expect, answer_given, options, dynamath):
+ assert options == "xyzzy", "Options was %r" % options
+ return {'ok': answer_given == expect, 'msg': 'Message text'}
+ """)
+
+ problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath")
+
+ # Correct answer
+ input_dict = {'1_2_1': '42'}
+ correct_map = problem.grade_answers(input_dict)
+
+ correctness = correct_map.get_correctness('1_2_1')
+ msg = correct_map.get_msg('1_2_1')
+
+ self.assertEqual(correctness, 'correct')
+ self.assertEqual(msg, "Message text")
+
+ # Incorrect answer
+ input_dict = {'1_2_1': '0'}
+ correct_map = problem.grade_answers(input_dict)
+
+ correctness = correct_map.get_correctness('1_2_1')
+ msg = correct_map.get_msg('1_2_1')
+
+ self.assertEqual(correctness, 'incorrect')
+ self.assertEqual(msg, "Message text")
+
def test_multiple_inputs_return_one_status(self):
# When given multiple inputs, the 'answer_given' argument
# to the check_func() is a list of inputs
diff --git a/common/lib/capa/capa/util.py b/common/lib/capa/capa/util.py
index 8b05ea717e..ec43da6093 100644
--- a/common/lib/capa/capa/util.py
+++ b/common/lib/capa/capa/util.py
@@ -1,4 +1,4 @@
-from .calc import evaluator, UndefinedVariable
+from calc import evaluator, UndefinedVariable
from cmath import isinf
#-----------------------------------------------------------------------------
diff --git a/common/lib/capa/setup.py b/common/lib/capa/setup.py
index 7719626c3e..2e73701060 100644
--- a/common/lib/capa/setup.py
+++ b/common/lib/capa/setup.py
@@ -4,5 +4,5 @@ setup(
name="capa",
version="0.1",
packages=find_packages(exclude=["tests"]),
- install_requires=['distribute==0.6.28', 'pyparsing==1.5.6'],
+ install_requires=["distribute==0.6.28"],
)
diff --git a/lms/lib/symmath/README.md b/common/lib/capa/symmath/README.md
similarity index 100%
rename from lms/lib/symmath/README.md
rename to common/lib/capa/symmath/README.md
diff --git a/lms/lib/symmath/__init__.py b/common/lib/capa/symmath/__init__.py
similarity index 100%
rename from lms/lib/symmath/__init__.py
rename to common/lib/capa/symmath/__init__.py
diff --git a/lms/lib/symmath/formula.py b/common/lib/capa/symmath/formula.py
similarity index 99%
rename from lms/lib/symmath/formula.py
rename to common/lib/capa/symmath/formula.py
index 604941ffdd..8369baa27c 100644
--- a/lms/lib/symmath/formula.py
+++ b/common/lib/capa/symmath/formula.py
@@ -736,4 +736,4 @@ def test6(): # imaginary numbers
'''
- return formula(xmlstr, options='imaginaryi')
+ return formula(xmlstr, options='imaginary')
diff --git a/lms/lib/symmath/symmath_check.py b/common/lib/capa/symmath/symmath_check.py
similarity index 99%
rename from lms/lib/symmath/symmath_check.py
rename to common/lib/capa/symmath/symmath_check.py
index 151debee71..65a17883f5 100644
--- a/lms/lib/symmath/symmath_check.py
+++ b/common/lib/capa/symmath/symmath_check.py
@@ -324,4 +324,5 @@ def symmath_check(expect, ans, dynamath=None, options=None, debug=None, xml=None
msg += "
Difference: %s
" % to_latex(diff)
msg += '
'
- return {'ok': False, 'msg': msg, 'ex': fexpect, 'got': fsym}
+ # Used to return more keys: 'ex': fexpect, 'got': fsym
+ return {'ok': False, 'msg': msg}
diff --git a/common/lib/capa/capa/chem/__init__.py b/common/lib/chem/chem/__init__.py
similarity index 100%
rename from common/lib/capa/capa/chem/__init__.py
rename to common/lib/chem/chem/__init__.py
diff --git a/common/lib/capa/capa/chem/chemcalc.py b/common/lib/chem/chem/chemcalc.py
similarity index 100%
rename from common/lib/capa/capa/chem/chemcalc.py
rename to common/lib/chem/chem/chemcalc.py
diff --git a/common/lib/capa/capa/chem/chemtools.py b/common/lib/chem/chem/chemtools.py
similarity index 100%
rename from common/lib/capa/capa/chem/chemtools.py
rename to common/lib/chem/chem/chemtools.py
diff --git a/common/lib/capa/capa/chem/miller.py b/common/lib/chem/chem/miller.py
similarity index 100%
rename from common/lib/capa/capa/chem/miller.py
rename to common/lib/chem/chem/miller.py
diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/chem/chem/tests.py
similarity index 100%
rename from common/lib/capa/capa/chem/tests.py
rename to common/lib/chem/chem/tests.py
diff --git a/common/lib/chem/setup.py b/common/lib/chem/setup.py
new file mode 100644
index 0000000000..4f2b24ddee
--- /dev/null
+++ b/common/lib/chem/setup.py
@@ -0,0 +1,13 @@
+from setuptools import setup
+
+setup(
+ name="chem",
+ version="0.1",
+ packages=["chem"],
+ install_requires=[
+ "pyparsing==1.5.6",
+ "numpy",
+ "scipy",
+ "nltk==2.0.4",
+ ],
+)
diff --git a/common/lib/sandbox-packages/README b/common/lib/sandbox-packages/README
new file mode 100644
index 0000000000..706998b08e
--- /dev/null
+++ b/common/lib/sandbox-packages/README
@@ -0,0 +1 @@
+This directory is in the Python path for sandboxed Python execution.
diff --git a/common/lib/capa/capa/eia.py b/common/lib/sandbox-packages/eia.py
similarity index 100%
rename from common/lib/capa/capa/eia.py
rename to common/lib/sandbox-packages/eia.py
diff --git a/common/lib/sandbox-packages/setup.py b/common/lib/sandbox-packages/setup.py
new file mode 100644
index 0000000000..1b99118aca
--- /dev/null
+++ b/common/lib/sandbox-packages/setup.py
@@ -0,0 +1,14 @@
+from setuptools import setup
+
+setup(
+ name="sandbox-packages",
+ version="0.1",
+ packages=[
+ "verifiers",
+ ],
+ py_modules=[
+ "eia",
+ ],
+ install_requires=[
+ ],
+)
diff --git a/common/lib/capa/capa/verifiers/__init__.py b/common/lib/sandbox-packages/verifiers/__init__.py
similarity index 100%
rename from common/lib/capa/capa/verifiers/__init__.py
rename to common/lib/sandbox-packages/verifiers/__init__.py
diff --git a/common/lib/capa/capa/verifiers/draganddrop.py b/common/lib/sandbox-packages/verifiers/draganddrop.py
similarity index 100%
rename from common/lib/capa/capa/verifiers/draganddrop.py
rename to common/lib/sandbox-packages/verifiers/draganddrop.py
diff --git a/common/lib/capa/capa/verifiers/tests_draganddrop.py b/common/lib/sandbox-packages/verifiers/tests_draganddrop.py
similarity index 100%
rename from common/lib/capa/capa/verifiers/tests_draganddrop.py
rename to common/lib/sandbox-packages/verifiers/tests_draganddrop.py
diff --git a/common/lib/xmodule/test_files/symbolicresponse.xml b/common/lib/xmodule/test_files/symbolicresponse.xml
index 4dc2bc9d7b..8443366ffe 100644
--- a/common/lib/xmodule/test_files/symbolicresponse.xml
+++ b/common/lib/xmodule/test_files/symbolicresponse.xml
@@ -13,13 +13,10 @@ real time, next to the input box.
This is a correct answer which may be entered below:
cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]
-
Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax]
and give the resulting \(2 \times 2\) matrix.
Your input should be typed in as a list of lists, eg [[1,2],[3,4]].
- [mathjax]U=[/mathjax]
+ [mathjax]U=[/mathjax]
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index 479cd5a759..eb6bdc18c9 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -3,7 +3,9 @@ import datetime
import hashlib
import json
import logging
+import os
import traceback
+import struct
import sys
from pkg_resources import resource_string
@@ -23,8 +25,10 @@ from xmodule.util.date_utils import time_to_datetime
log = logging.getLogger("mitx.courseware")
-# Generated this many different variants of problems with rerandomize=per_student
+# Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20
+# Never produce more than this many different seeds, no matter what.
+MAX_RANDOMIZATION_BINS = 1000
def randomization_bin(seed, problem_id):
@@ -109,11 +113,7 @@ class CapaModule(CapaFields, XModule):
self.close_date = due_date
if self.seed is None:
- if self.rerandomize == 'never':
- self.seed = 1
- elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'):
- # see comment on randomization_bin
- self.seed = randomization_bin(system.seed, self.location.url)
+ self.choose_new_seed()
# Need the problem location in openendedresponse to send out. Adding
# it to the system here seems like the least clunky way to get it
@@ -157,6 +157,22 @@ class CapaModule(CapaFields, XModule):
self.set_state_from_lcp()
+ assert self.seed is not None
+
+ def choose_new_seed(self):
+ """Choose a new seed."""
+ if self.rerandomize == 'never':
+ self.seed = 1
+ elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'):
+ # see comment on randomization_bin
+ self.seed = randomization_bin(self.system.seed, self.location.url)
+ else:
+ self.seed = struct.unpack('i', os.urandom(4))[0]
+
+ # So that sandboxed code execution can be cached, but still have an interesting
+ # number of possibilities, cap the number of different random seeds.
+ self.seed %= MAX_RANDOMIZATION_BINS
+
def new_lcp(self, state, text=None):
if text is None:
text = self.data
@@ -165,6 +181,7 @@ class CapaModule(CapaFields, XModule):
problem_text=text,
id=self.location.html_id(),
state=state,
+ seed=self.seed,
system=self.system,
)
@@ -832,14 +849,11 @@ class CapaModule(CapaFields, XModule):
'error': "Refresh the page and make an attempt before resetting."}
if self.rerandomize in ["always", "onreset"]:
- # reset random number generator seed (note the self.lcp.get_state()
- # in next line)
- seed = None
- else:
- seed = self.lcp.seed
+ # Reset random number generator seed.
+ self.choose_new_seed()
# Generate a new problem with either the previous seed or a new seed
- self.lcp = self.new_lcp({'seed': seed})
+ self.lcp = self.new_lcp(None)
# Pull in the new problem seed
self.set_state_from_lcp()
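
The new fallback branch of choose_new_seed() draws four random bytes and folds them into a bounded range so that sandboxed-execution results stay cacheable. A standalone sketch of just that arithmetic:

    import os
    import struct

    MAX_RANDOMIZATION_BINS = 1000

    def fresh_seed():
        # struct.unpack('i', ...) may yield a negative 32-bit int, but with a
        # positive modulus Python's % is always non-negative, so seeds land
        # in [0, MAX_RANDOMIZATION_BINS).
        return struct.unpack('i', os.urandom(4))[0] % MAX_RANDOMIZATION_BINS

    assert all(0 <= fresh_seed() < 1000 for _ in range(100))
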
diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py
index 0a2f22aa68..6af11a3ac8 100644
--- a/common/lib/xmodule/xmodule/tests/__init__.py
+++ b/common/lib/xmodule/xmodule/tests/__init__.py
@@ -14,7 +14,7 @@ import fs.osfs
import numpy
-import capa.calc as calc
+import calc
import xmodule
from xmodule.x_module import ModuleSystem
from mock import Mock
@@ -33,15 +33,14 @@ def test_system():
"""
Construct a test ModuleSystem instance.
- By default, the render_template() method simply returns
- the context it is passed as a string.
- You can override this behavior by monkey patching:
+ By default, the render_template() method simply returns the context it is
+ passed as a string. You can override this behavior by monkey patching::
- system = test_system()
- system.render_template = my_render_func
+ system = test_system()
+ system.render_template = my_render_func
+
+ where `my_render_func` is a function of the form my_render_func(template, context).
- where my_render_func is a function of the form
- my_render_func(template, context)
"""
return ModuleSystem(
ajax_url='courses/course_id/modx/a_location',
@@ -86,10 +85,12 @@ class ModelsTest(unittest.TestCase):
self.assertTrue(abs(calc.evaluator(variables, functions, "e^(j*pi)") + 1) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "j||1") - 0.5 - 0.5j) < 0.00001)
variables['t'] = 1.0
+ # Use self.assertAlmostEqual here...
self.assertTrue(abs(calc.evaluator(variables, functions, "t") - 1.0) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "T") - 1.0) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "t", cs=True) - 1.0) < 0.00001)
self.assertTrue(abs(calc.evaluator(variables, functions, "T", cs=True) - 298) < 0.2)
+ # Use self.assertRaises here...
exception_happened = False
try:
calc.evaluator({}, {}, "5+7 QWSEKO")
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index f948f5bdfe..61de21b129 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -550,6 +550,7 @@ class CapaModuleTest(unittest.TestCase):
def test_reset_problem(self):
module = CapaFactory.create(done=True)
module.new_lcp = Mock(wraps=module.new_lcp)
+ module.choose_new_seed = Mock(wraps=module.choose_new_seed)
# Stub out HTML rendering
with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
@@ -567,7 +568,8 @@ class CapaModuleTest(unittest.TestCase):
self.assertEqual(result['html'], "<div>Test HTML</div>")
# Expect that the problem was reset
- module.new_lcp.assert_called_once_with({'seed': None})
+ module.new_lcp.assert_called_once_with(None)
+ module.choose_new_seed.assert_called_once_with()
def test_reset_problem_closed(self):
module = CapaFactory.create()
@@ -1033,3 +1035,13 @@ class CapaModuleTest(unittest.TestCase):
self.assertTrue(module.seed is not None)
msg = 'Could not get a new seed from reset after 5 tries'
self.assertTrue(success, msg)
+
+ def test_random_seed_bins(self):
+ # Assert that we are limiting the number of possible seeds.
+
+ # Check the conditions that generate random seeds
+ for rerandomize in ['always', 'per_student', 'true', 'onreset']:
+ # Get a bunch of seeds, they should all be in 0-999.
+ for i in range(200):
+ module = CapaFactory.create(rerandomize=rerandomize)
+ assert 0 <= module.seed < 1000
diff --git a/common/lib/xmodule/xmodule/tests/test_progress.py b/common/lib/xmodule/xmodule/tests/test_progress.py
index 0114ba4ad3..4bb663ad85 100644
--- a/common/lib/xmodule/xmodule/tests/test_progress.py
+++ b/common/lib/xmodule/xmodule/tests/test_progress.py
@@ -134,6 +134,6 @@ class ModuleProgressTest(unittest.TestCase):
'''
def test_xmodule_default(self):
'''Make sure default get_progress exists, returns None'''
- xm = x_module.XModule(test_system, 'a://b/c/d/e', None, {})
+ xm = x_module.XModule(test_system(), 'a://b/c/d/e', None, {})
p = xm.get_progress()
self.assertEqual(p, None)
diff --git a/common/lib/xmodule/xmodule/tests/test_randomize_module.py b/common/lib/xmodule/xmodule/tests/test_randomize_module.py
index 59cf5a59f3..81935c4013 100644
--- a/common/lib/xmodule/xmodule/tests/test_randomize_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_randomize_module.py
@@ -14,7 +14,6 @@ START = '2013-01-01T01:00:00'
from .test_course_module import DummySystem as DummyImportSystem
-from . import test_system
class RandomizeModuleTestCase(unittest.TestCase):
diff --git a/common/lib/xmodule/xmodule/x_module.py b/common/lib/xmodule/xmodule/x_module.py
index 7c24d593e3..76ac6a1ff6 100644
--- a/common/lib/xmodule/xmodule/x_module.py
+++ b/common/lib/xmodule/xmodule/x_module.py
@@ -737,7 +737,10 @@ class ModuleSystem(object):
anonymous_student_id='',
course_id=None,
open_ended_grading_interface=None,
- s3_interface=None):
+ s3_interface=None,
+ cache=None,
+ can_execute_unsafe_code=None,
+ ):
'''
Create a closure around the system environment.
@@ -779,6 +782,14 @@ class ModuleSystem(object):
xblock_model_data - A dict-like object containing the all data available to this
xblock
+
+ cache - A cache object with two methods:
+ .get(key) returns an object from the cache or None.
+ .set(key, value, timeout_secs=None) stores a value in the cache with a timeout.
+
+ can_execute_unsafe_code - A function returning a boolean: whether or
+ not to allow the execution of unsafe, unsandboxed code.
+
'''
self.ajax_url = ajax_url
self.xqueue = xqueue
@@ -803,6 +814,9 @@ class ModuleSystem(object):
self.open_ended_grading_interface = open_ended_grading_interface
self.s3_interface = s3_interface
+ self.cache = cache or DoNothingCache()
+ self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
+
def get(self, attr):
''' provide uniform access to attributes (like etree).'''
return self.__dict__.get(attr)
@@ -816,3 +830,12 @@ class ModuleSystem(object):
def __str__(self):
return str(self.__dict__)
+
+
+class DoNothingCache(object):
+ """A duck-compatible object to use in ModuleSystem when there's no cache."""
+ def get(self, key):
+ return None
+
+ def set(self, key, value, timeout=None):
+ pass
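
Since ModuleSystem only assumes the two-method cache contract documented above, callers need not care whether they received Django's cache or the DoNothingCache default. A hypothetical caller (the function name is illustrative, not an actual capa API):

    def cached_value(system, key, compute):
        # Works identically with Django's cache and with DoNothingCache:
        # a miss returns None, and set() takes a timeout in seconds.
        value = system.cache.get(key)
        if value is None:
            value = compute()
            system.cache.set(key, value, 60)
        return value
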
diff --git a/common/test/data/embedded_python/course.xml b/common/test/data/embedded_python/course.xml
new file mode 100644
index 0000000000..1662543b4d
--- /dev/null
+++ b/common/test/data/embedded_python/course.xml
@@ -0,0 +1 @@
+<course org="edX" course="embedded_python" url_name="2013_Spring"/>
diff --git a/common/test/data/embedded_python/course/2013_Spring.xml b/common/test/data/embedded_python/course/2013_Spring.xml
new file mode 100644
index 0000000000..fa6881c37b
--- /dev/null
+++ b/common/test/data/embedded_python/course/2013_Spring.xml
@@ -0,0 +1,111 @@
+
+
+
+
+
+
+
+
+
+
+# for a schematic response, submission[i] is the json representation
+# of the diagram and analysis results for the i-th schematic tag
+
+def get_tran(json,signal):
+ for element in json:
+ if element[0] == 'transient':
+ return element[1].get(signal,[])
+ return []
+
+def get_value(at,output):
+ for (t,v) in output:
+ if at == t: return v
+ return None
+
+output = get_tran(submission[0],'Z')
+okay = True
+
+# output should be 1, 1, 1, 1, 1, 0, 0, 0
+if get_value(0.0000004,output) < 2.7: okay = False
+if get_value(0.0000009,output) < 2.7: okay = False
+if get_value(0.0000014,output) < 2.7: okay = False
+if get_value(0.0000019,output) < 2.7: okay = False
+if get_value(0.0000024,output) < 2.7: okay = False
+if get_value(0.0000029,output) > 0.25: okay = False
+if get_value(0.0000034,output) > 0.25: okay = False
+if get_value(0.0000039,output) > 0.25: okay = False
+
+correct = ['correct' if okay else 'incorrect']
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -
+
+num = 0
+while num <= 5:
+ print(num)
+ num += 1
+
+print("Outside of loop")
+print(num)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+if submission[0] == "Xyzzy":
+ correct = ['correct']
+else:
+ correct = ['incorrect']
+
+
+
+
+
+
+
+
diff --git a/common/test/data/embedded_python/roots/2013_Spring.xml b/common/test/data/embedded_python/roots/2013_Spring.xml
new file mode 100644
index 0000000000..1662543b4d
--- /dev/null
+++ b/common/test/data/embedded_python/roots/2013_Spring.xml
@@ -0,0 +1 @@
+<course org="edX" course="embedded_python" url_name="2013_Spring"/>
diff --git a/common/test/data/full/problem/test_files/symbolicresponse.xml b/common/test/data/full/problem/test_files/symbolicresponse.xml
index 4dc2bc9d7b..85945b1d8c 100644
--- a/common/test/data/full/problem/test_files/symbolicresponse.xml
+++ b/common/test/data/full/problem/test_files/symbolicresponse.xml
@@ -19,7 +19,7 @@ from symmath import *
Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax]
and give the resulting \(2 \times 2\) matrix.
Your input should be typed in as a list of lists, eg [[1,2],[3,4]].
- [mathjax]U=[/mathjax]
+ [mathjax]U=[/mathjax]
diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py
index 6f05b32778..d6c104a83c 100644
--- a/lms/djangoapps/courseware/module_render.py
+++ b/lms/djangoapps/courseware/module_render.py
@@ -1,6 +1,7 @@
import json
import logging
import pyparsing
+import re
import sys
import static_replace
@@ -8,6 +9,7 @@ from functools import partial
from django.conf import settings
from django.contrib.auth.models import User
+from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
@@ -273,6 +275,14 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
statsd.increment("lms.courseware.question_answered", tags=tags)
+ def can_execute_unsafe_code():
+ # To decide if we can run unsafe code, we check the course id against
+ # a list of regexes configured on the server.
+ for regex in settings.COURSES_WITH_UNSAFE_CODE:
+ if re.match(regex, course_id):
+ return True
+ return False
+
# TODO (cpennington): When modules are shared between courses, the static
# prefix is going to have to be specific to the module, not the directory
# that the xml was loaded from
@@ -299,6 +309,8 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
course_id=course_id,
open_ended_grading_interface=open_ended_grading_interface,
s3_interface=s3_interface,
+ cache=cache,
+ can_execute_unsafe_code=can_execute_unsafe_code,
)
# pass position specified in URL to module through ModuleSystem
system.set('position', position)
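
The can_execute_unsafe_code closure above is evaluated lazily, per request. An equivalent standalone version of the check (the course ids and the regex are made-up examples):

    import re

    COURSES_WITH_UNSAFE_CODE = [r"edX/full/.*"]   # example setting

    def can_execute_unsafe_code(course_id):
        # re.match anchors at the start of the string, so each regex must
        # match the course id from its beginning.
        return any(re.match(regex, course_id)
                   for regex in COURSES_WITH_UNSAFE_CODE)

    assert can_execute_unsafe_code("edX/full/2012_Fall")
    assert not can_execute_unsafe_code("edX/graded/2012_Fall")
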
diff --git a/lms/djangoapps/courseware/tests/load_tests/README.md b/lms/djangoapps/courseware/tests/load_tests/README.md
new file mode 100644
index 0000000000..09d8797947
--- /dev/null
+++ b/lms/djangoapps/courseware/tests/load_tests/README.md
@@ -0,0 +1,4 @@
+# Load Testing
+
+Scripts for load testing the courseware app,
+mostly using [multimechanize](http://testutils.org/multi-mechanize/).
diff --git a/lms/djangoapps/courseware/tests/load_tests/custom_response/README.md b/lms/djangoapps/courseware/tests/load_tests/custom_response/README.md
new file mode 100644
index 0000000000..e3fae8c817
--- /dev/null
+++ b/lms/djangoapps/courseware/tests/load_tests/custom_response/README.md
@@ -0,0 +1,51 @@
+# Custom Response Load Test
+
+## Optional Installations
+
+* [memcached](http://pypi.python.org/pypi/python-memcached/): Install this
+and make sure it is running, or the Capa problem will not cache results.
+
+* [AppArmor](http://wiki.apparmor.net): Follow the instructions in
+`common/lib/codejail/README` to set up the Python sandbox environment.
+If you do not set up the sandbox, the CustomResponse code will still execute
+(just unsandboxed), so you can still run the tests.
+
+* [matplotlib](http://matplotlib.org): Multi-mechanize uses this to create graphs.
+
+
+## Running the Tests
+
+This test simulates student submissions for a custom response problem.
+
+First, clear the cache:
+
+ /etc/init.d/memcached restart
+
+Then, run the test:
+
+ multimech-run custom_response
+
+You can configure the parameters in `custom_response/config.cfg`,
+and you can change the CustomResponse script and student submissions
+in `custom_response/test_scripts/v_user.py`.
+
+## Components Under Test
+
+Components under test:
+
+* Python sandbox (see `common/lib/codejail`), which uses `AppArmor`
+* Caching (see `common/lib/capa/capa/safe_exec/`), which uses `memcache` in production
+
+Components NOT under test:
+
+* Django views
+* `XModule`
+* gunicorn
+
+This allows us to avoid creating courses in mongo, logging in, using CSRF tokens,
+and other inconveniences. Instead, we create a capa problem (from the capa package),
+pass it Django's memcache backend, and feed it student submissions directly.
+
+Even though the test uses `capa.capa_problem.LoncapaProblem` directly,
+the `capa` package should not depend on Django. For this reason, we put the
+test in the `courseware` Django app.
diff --git a/lms/djangoapps/courseware/tests/load_tests/custom_response/config.cfg b/lms/djangoapps/courseware/tests/load_tests/custom_response/config.cfg
new file mode 100644
index 0000000000..c75f02a669
--- /dev/null
+++ b/lms/djangoapps/courseware/tests/load_tests/custom_response/config.cfg
@@ -0,0 +1,22 @@
+
+[global]
+run_time = 240
+rampup = 30
+results_ts_interval = 10
+progress_bar = on
+console_logging = off
+xml_report = off
+
+
+[user_group-1]
+threads = 10
+script = v_user.py
+
+[user_group-2]
+threads = 10
+script = v_user.py
+
+[user_group-3]
+threads = 10
+script = v_user.py
+
diff --git a/lms/djangoapps/courseware/tests/load_tests/custom_response/test_scripts/v_user.py b/lms/djangoapps/courseware/tests/load_tests/custom_response/test_scripts/v_user.py
new file mode 100644
index 0000000000..9bfc39e55b
--- /dev/null
+++ b/lms/djangoapps/courseware/tests/load_tests/custom_response/test_scripts/v_user.py
@@ -0,0 +1,115 @@
+""" User script for load testing CustomResponse """
+
+from capa.tests.response_xml_factory import CustomResponseXMLFactory
+import capa.capa_problem as lcp
+from xmodule.x_module import ModuleSystem
+import mock
+import fs.osfs
+import random
+import textwrap
+
+# Use memcache running locally
+CACHE_SETTINGS = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': '127.0.0.1:11211'
+ },
+}
+
+# Configure settings so Django will let us import its cache wrapper
+# Caching is the only part of Django being tested
+from django.conf import settings
+settings.configure(CACHES=CACHE_SETTINGS)
+
+from django.core.cache import cache
+
+# Script to install as the checker for the CustomResponse
+TEST_SCRIPT = textwrap.dedent("""
+ def check_func(expect, answer_given):
+ return {'ok': answer_given == expect, 'msg': 'Message text'}
+""")
+
+# Submissions submitted by the student
+TEST_SUBMISSIONS = [random.randint(-100, 100) for i in range(100)]
+
+class TestContext(object):
+ """ One-time set up for the test that is shared across transactions.
+ Uses a Singleton design pattern."""
+
+ SINGLETON = None
+ NUM_UNIQUE_SEEDS = 20
+
+ @classmethod
+ def singleton(cls):
+ """ Return the singleton, creating one if it does not already exist."""
+
+ # If we haven't created the singleton yet, create it now
+ if cls.SINGLETON is None:
+
+ # Create a mock ModuleSystem, installing our cache
+ system = mock.MagicMock(ModuleSystem)
+ system.render_template = lambda template, context: "<div>%s</div>" % template
+ system.cache = cache
+ system.filestore = mock.MagicMock(fs.osfs.OSFS)
+ system.filestore.root_path = ""
+ system.DEBUG = True
+
+ # Create a custom response problem
+ xml_factory = CustomResponseXMLFactory()
+ xml = xml_factory.build_xml(script=TEST_SCRIPT, cfn="check_func", expect="42")
+
+ # Create and store the context
+ cls.SINGLETON = cls(system, xml)
+
+ # Return the singleton
+ return cls.SINGLETON
+
+ def __init__(self, system, xml):
+ """ Store context needed for the test across transactions """
+ self.system = system
+ self.xml = xml
+
+ # Construct a small pool of unique seeds
+ # To keep our implementation in line with the one capa actually uses,
+ # construct the problems, then use the seeds they generate
+ self.seeds = [lcp.LoncapaProblem(self.xml, 'problem_id', system=self.system).seed
+ for i in range(self.NUM_UNIQUE_SEEDS)]
+
+ def random_seed(self):
+ """ Return one of a small number of unique random seeds """
+ return random.choice(self.seeds)
+
+ def student_submission(self):
+ """ Return one of a small number of student submissions """
+ return random.choice(TEST_SUBMISSIONS)
+
+class Transaction(object):
+ """ User script that submits a response to a CustomResponse problem """
+
+ def __init__(self):
+ """ Create the problem """
+
+ # Get the context (re-used across transactions)
+ self.context = TestContext.singleton()
+
+ # Create a new custom response problem
+ # using one of a small number of unique seeds
+ # We're assuming that the capa module is limiting the number
+ # of seeds (currently not the case for certain settings)
+ self.problem = lcp.LoncapaProblem(self.context.xml,
+ '1',
+ state=None,
+ seed=self.context.random_seed(),
+ system=self.context.system)
+
+ def run(self):
+ """ Submit a response to the CustomResponse problem """
+ answers = {'1_2_1': self.context.student_submission()}
+ self.problem.grade_answers(answers)
+
+if __name__ == '__main__':
+ trans = Transaction()
+ trans.run()
diff --git a/lms/djangoapps/courseware/tests/tests.py b/lms/djangoapps/courseware/tests/tests.py
index d50e0b4526..a189160a48 100644
--- a/lms/djangoapps/courseware/tests/tests.py
+++ b/lms/djangoapps/courseware/tests/tests.py
@@ -372,6 +372,7 @@ class TestCoursesLoadTestCase_XmlModulestore(PageLoaderTestCase):
'''Check that all pages in test courses load properly from XML'''
def setUp(self):
+ super(TestCoursesLoadTestCase_XmlModulestore, self).setUp()
self.setup_viewtest_user()
xmodule.modulestore.django._MODULESTORES = {}
@@ -390,6 +391,7 @@ class TestCoursesLoadTestCase_MongoModulestore(PageLoaderTestCase):
'''Check that all pages in test courses load properly from Mongo'''
def setUp(self):
+ super(TestCoursesLoadTestCase_MongoModulestore, self).setUp()
self.setup_viewtest_user()
xmodule.modulestore.django._MODULESTORES = {}
modulestore().collection.drop()
@@ -487,9 +489,6 @@ class TestDraftModuleStore(TestCase):
class TestViewAuth(LoginEnrollmentTestCase):
"""Check that view authentication works properly"""
- # NOTE: setUpClass() runs before override_settings takes effect, so
- # can't do imports there without manually hacking settings.
-
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
@@ -810,43 +809,85 @@ class TestViewAuth(LoginEnrollmentTestCase):
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
-class TestCourseGrader(LoginEnrollmentTestCase):
+class TestSubmittingProblems(LoginEnrollmentTestCase):
"""Check that a course gets graded properly"""
- # NOTE: setUpClass() runs before override_settings takes effect, so
- # can't do imports there without manually hacking settings.
+ # Subclasses should specify the course slug
+ course_slug = "UNKNOWN"
+ course_when = "UNKNOWN"
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
- courses = modulestore().get_courses()
- def find_course(course_id):
- """Assumes the course is present"""
- return [c for c in courses if c.id == course_id][0]
-
- self.graded_course = find_course("edX/graded/2012_Fall")
+ course_name = "edX/%s/%s" % (self.course_slug, self.course_when)
+ self.course = modulestore().get_course(course_name)
+ assert self.course, "Couldn't load course %r" % course_name
# create a test student
self.student = 'view@test.com'
self.password = 'foo'
self.create_account('u1', self.student, self.password)
self.activate_user(self.student)
- self.enroll(self.graded_course)
+ self.enroll(self.course)
self.student_user = get_user(self.student)
self.factory = RequestFactory()
+ def problem_location(self, problem_url_name):
+ return "i4x://edX/{}/problem/{}".format(self.course_slug, problem_url_name)
+
+ def modx_url(self, problem_location, dispatch):
+ return reverse(
+ 'modx_dispatch',
+ kwargs={
+ 'course_id': self.course.id,
+ 'location': problem_location,
+ 'dispatch': dispatch,
+ }
+ )
+
+ def submit_question_answer(self, problem_url_name, responses):
+ """
+ Submit answers to a question.
+
+ Responses is a dict mapping answer ids to answers:
+ {'2_1': 'Correct', '2_2': 'Incorrect'}
+
+ """
+ problem_location = self.problem_location(problem_url_name)
+ modx_url = self.modx_url(problem_location, 'problem_check')
+ answer_key_prefix = 'input_i4x-edX-{}-problem-{}_'.format(self.course_slug, problem_url_name)
+ resp = self.client.post(modx_url,
+ { (answer_key_prefix + k): v for k,v in responses.items() }
+ )
+ return resp
+
+ def reset_question_answer(self, problem_url_name):
+ '''resets specified problem for current user'''
+ problem_location = self.problem_location(problem_url_name)
+ modx_url = self.modx_url(problem_location, 'problem_reset')
+ resp = self.client.post(modx_url)
+ return resp
+
+
+class TestCourseGrader(TestSubmittingProblems):
+ """Check that a course gets graded properly"""
+
+ course_slug = "graded"
+ course_when = "2012_Fall"
+
def get_grade_summary(self):
'''calls grades.grade for current user and course'''
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
- self.graded_course.id, self.student_user, self.graded_course)
+ self.course.id, self.student_user, self.course)
fake_request = self.factory.get(reverse('progress',
- kwargs={'course_id': self.graded_course.id}))
+ kwargs={'course_id': self.course.id}))
return grades.grade(self.student_user, fake_request,
- self.graded_course, model_data_cache)
+ self.course, model_data_cache)
def get_homework_scores(self):
'''get scores for homeworks'''
@@ -855,14 +896,14 @@ class TestCourseGrader(LoginEnrollmentTestCase):
def get_progress_summary(self):
'''return progress summary structure for current user and course'''
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
- self.graded_course.id, self.student_user, self.graded_course)
+ self.course.id, self.student_user, self.course)
fake_request = self.factory.get(reverse('progress',
- kwargs={'course_id': self.graded_course.id}))
+ kwargs={'course_id': self.course.id}))
progress_summary = grades.progress_summary(self.student_user,
fake_request,
- self.graded_course,
+ self.course,
model_data_cache)
return progress_summary
@@ -871,46 +912,6 @@ class TestCourseGrader(LoginEnrollmentTestCase):
grade_summary = self.get_grade_summary()
self.assertEqual(grade_summary['percent'], percent)
- def submit_question_answer(self, problem_url_name, responses):
- """
- The field names of a problem are hard to determine. This method only works
- for the problems used in the edX/graded course, which has fields named in the
- following form:
- input_i4x-edX-graded-problem-H1P3_2_1
- input_i4x-edX-graded-problem-H1P3_2_2
- """
- problem_location = "i4x://edX/graded/problem/%s" % problem_url_name
-
- modx_url = reverse('modx_dispatch',
- kwargs={'course_id': self.graded_course.id,
- 'location': problem_location,
- 'dispatch': 'problem_check', })
-
- resp = self.client.post(modx_url, {
- 'input_i4x-edX-graded-problem-%s_2_1' % problem_url_name: responses[0],
- 'input_i4x-edX-graded-problem-%s_2_2' % problem_url_name: responses[1],
- })
- print "modx_url", modx_url, "responses", responses
- print "resp", resp
-
- return resp
-
- def problem_location(self, problem_url_name):
- '''Get location string for problem, assuming hardcoded course_id'''
- return "i4x://edX/graded/problem/{0}".format(problem_url_name)
-
- def reset_question_answer(self, problem_url_name):
- '''resets specified problem for current user'''
- problem_location = self.problem_location(problem_url_name)
-
- modx_url = reverse('modx_dispatch',
- kwargs={'course_id': self.graded_course.id,
- 'location': problem_location,
- 'dispatch': 'problem_reset', })
-
- resp = self.client.post(modx_url)
- return resp
-
def test_get_graded(self):
#### Check that the grader shows we have 0% in the course
self.check_grade_percent(0)
@@ -928,27 +929,27 @@ class TestCourseGrader(LoginEnrollmentTestCase):
return [s.earned for s in hw_section['scores']]
# Only get half of the first problem correct
- self.submit_question_answer('H1P1', ['Correct', 'Incorrect'])
+ self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
self.check_grade_percent(0.06)
self.assertEqual(earned_hw_scores(), [1.0, 0, 0]) # Order matters
self.assertEqual(score_for_hw('Homework1'), [1.0, 0.0])
# Get both parts of the first problem correct
self.reset_question_answer('H1P1')
- self.submit_question_answer('H1P1', ['Correct', 'Correct'])
+ self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.13)
self.assertEqual(earned_hw_scores(), [2.0, 0, 0])
self.assertEqual(score_for_hw('Homework1'), [2.0, 0.0])
# This problem is shown in an ABTest
- self.submit_question_answer('H1P2', ['Correct', 'Correct'])
+ self.submit_question_answer('H1P2', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.25)
self.assertEqual(earned_hw_scores(), [4.0, 0.0, 0])
self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])
# This problem is hidden in an ABTest.
# Getting it correct doesn't change total grade
- self.submit_question_answer('H1P3', ['Correct', 'Correct'])
+ self.submit_question_answer('H1P3', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.25)
self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])
@@ -957,19 +958,85 @@ class TestCourseGrader(LoginEnrollmentTestCase):
# This problem is also weighted to be 4 points (instead of default of 2)
# If the problem was unweighted the percent would have been 0.38 so we
# know it works.
- self.submit_question_answer('H2P1', ['Correct', 'Correct'])
+ self.submit_question_answer('H2P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.42)
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0])
# Third homework
- self.submit_question_answer('H3P1', ['Correct', 'Correct'])
+ self.submit_question_answer('H3P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.42) # Score didn't change
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0])
- self.submit_question_answer('H3P2', ['Correct', 'Correct'])
+ self.submit_question_answer('H3P2', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.5) # Now homework2 dropped. Score changes
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0])
# Now we answer the final question (worth half of the grade)
- self.submit_question_answer('FinalQuestion', ['Correct', 'Correct'])
+ self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(1.0) # Hooray! We got 100%
+
+
+@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
+class TestSchematicResponse(TestSubmittingProblems):
+ """Check that we can submit a schematic response, and it answers properly."""
+
+ course_slug = "embedded_python"
+ course_when = "2013_Spring"
+
+ def test_schematic(self):
+ resp = self.submit_question_answer('schematic_problem',
+ { '2_1': json.dumps(
+ [['transient', {'Z': [
+ [0.0000004, 2.8],
+ [0.0000009, 2.8],
+ [0.0000014, 2.8],
+ [0.0000019, 2.8],
+ [0.0000024, 2.8],
+ [0.0000029, 0.2],
+ [0.0000034, 0.2],
+ [0.0000039, 0.2]
+ ]}]]
+ )
+ })
+ respdata = json.loads(resp.content)
+ self.assertEqual(respdata['success'], 'correct')
+
+ self.reset_question_answer('schematic_problem')
+ resp = self.submit_question_answer('schematic_problem',
+ { '2_1': json.dumps(
+ [['transient', {'Z': [
+ [0.0000004, 2.8],
+ [0.0000009, 0.0], # wrong.
+ [0.0000014, 2.8],
+ [0.0000019, 2.8],
+ [0.0000024, 2.8],
+ [0.0000029, 0.2],
+ [0.0000034, 0.2],
+ [0.0000039, 0.2]
+ ]}]]
+ )
+ })
+ respdata = json.loads(resp.content)
+ self.assertEqual(respdata['success'], 'incorrect')
+
+ def test_check_function(self):
+ resp = self.submit_question_answer('cfn_problem', {'2_1': "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"})
+ respdata = json.loads(resp.content)
+ self.assertEqual(respdata['success'], 'correct')
+
+ self.reset_question_answer('cfn_problem')
+
+ resp = self.submit_question_answer('cfn_problem', {'2_1': "xyzzy!"})
+ respdata = json.loads(resp.content)
+ self.assertEqual(respdata['success'], 'incorrect')
+
+ def test_computed_answer(self):
+ resp = self.submit_question_answer('computed_answer', {'2_1': "Xyzzy"})
+ respdata = json.loads(resp.content)
+ self.assertEqual(respdata['success'], 'correct')
+
+ self.reset_question_answer('computed_answer')
+
+ resp = self.submit_question_answer('computed_answer', {'2_1': "NO!"})
+ respdata = json.loads(resp.content)
+ self.assertEqual(respdata['success'], 'incorrect')
diff --git a/lms/djangoapps/debug/__init__.py b/lms/djangoapps/debug/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lms/djangoapps/debug/models.py b/lms/djangoapps/debug/models.py
new file mode 100644
index 0000000000..71a8362390
--- /dev/null
+++ b/lms/djangoapps/debug/models.py
@@ -0,0 +1,3 @@
+from django.db import models
+
+# Create your models here.
diff --git a/lms/djangoapps/debug/views.py b/lms/djangoapps/debug/views.py
new file mode 100644
index 0000000000..c1d4155fdd
--- /dev/null
+++ b/lms/djangoapps/debug/views.py
@@ -0,0 +1,31 @@
+"""Views for debugging and diagnostics"""
+
+import pprint
+import traceback
+
+from django.http import Http404
+from django.contrib.auth.decorators import login_required
+from django_future.csrf import ensure_csrf_cookie, csrf_exempt
+from mitxmako.shortcuts import render_to_response
+
+from codejail.safe_exec import safe_exec
+
+@login_required
+@ensure_csrf_cookie
+def run_python(request):
+ """A page to allow testing the Python sandbox on a production server."""
+ if not request.user.is_staff:
+ raise Http404
+ c = {}
+ c['code'] = ''
+ c['results'] = None
+ if request.method == 'POST':
+ py_code = c['code'] = request.POST.get('code')
+ g = {}
+ try:
+ safe_exec(py_code, g)
+ except Exception as e:
+ c['results'] = traceback.format_exc()
+ else:
+ c['results'] = pprint.pformat(g)
+ return render_to_response("debug/run_python_form.html", c)
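
The view treats `g` as the globals dict for the sandboxed run: bindings created by the submitted code are visible in `g` afterwards, which is what gets pretty-printed as the results. A minimal sketch of that contract (assuming codejail is installed and configured):

    from codejail.safe_exec import safe_exec

    g = {}
    safe_exec("x = 17 * 3", g)
    assert g['x'] == 51
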
diff --git a/lms/envs/aws.py b/lms/envs/aws.py
index 83b57e7642..74540b7dec 100644
--- a/lms/envs/aws.py
+++ b/lms/envs/aws.py
@@ -92,6 +92,16 @@ CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull')
ZENDESK_URL = ENV_TOKENS.get("ZENDESK_URL")
FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL")
+for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
+ oldvalue = CODE_JAIL.get(name)
+ if isinstance(oldvalue, dict):
+ for subname, subvalue in value.items():
+ oldvalue[subname] = subvalue
+ else:
+ CODE_JAIL[name] = value
+
+COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
+
############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
with open(ENV_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
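
The merge loop above performs a one-level-deep update: nested dicts such as 'limits' are merged key-by-key, while scalar settings are replaced outright. A worked example (the ENV_TOKENS values are illustrative):

    CODE_JAIL = {'python_bin': None, 'user': 'sandbox', 'limits': {'CPU': 1}}
    ENV_TOKENS = {'CODE_JAIL': {'python_bin': '/usr/bin/python-sandbox',
                                'limits': {'VMEM': 50000000}}}

    for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
        oldvalue = CODE_JAIL.get(name)
        if isinstance(oldvalue, dict):
            for subname, subvalue in value.items():
                oldvalue[subname] = subvalue
        else:
            CODE_JAIL[name] = value

    assert CODE_JAIL == {'python_bin': '/usr/bin/python-sandbox',
                         'user': 'sandbox',
                         'limits': {'CPU': 1, 'VMEM': 50000000}}
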
diff --git a/lms/envs/common.py b/lms/envs/common.py
index c111b3c18e..4cd7dd9843 100644
--- a/lms/envs/common.py
+++ b/lms/envs/common.py
@@ -97,6 +97,10 @@ MITX_FEATURES = {
# Provide a UI to allow users to submit feedback from the LMS
'ENABLE_FEEDBACK_SUBMISSION': False,
+
+ # Turn on a page that lets staff enter Python code to be run in the
+ # sandbox, for testing whether it's enabled properly.
+ 'ENABLE_DEBUG_RUN_PYTHON': False,
}
# Used for A/B testing
@@ -246,6 +250,31 @@ MODULESTORE = {
}
CONTENTSTORE = None
+#################### Python sandbox ############################################
+
+CODE_JAIL = {
+ # Path to a sandboxed Python executable. None means don't bother.
+ 'python_bin': None,
+ # User to run as in the sandbox.
+ 'user': 'sandbox',
+
+ # Configurable limits.
+ 'limits': {
+ # How many CPU seconds can jailed code use?
+ 'CPU': 1,
+ },
+}
+
+# Some courses are allowed to run unsafe code. This is a list of regexes, one
+# of which must match the course id for that course to run unsafe code.
+#
+# For example:
+#
+# COURSES_WITH_UNSAFE_CODE = [
+# r"Harvard/XY123.1/.*"
+# ]
+COURSES_WITH_UNSAFE_CODE = []
+
############################ SIGNAL HANDLERS ################################
# This is imported to register the exception signal handling that logs exceptions
import monitoring.exceptions # noqa
@@ -398,6 +427,7 @@ MIDDLEWARE_CLASSES = (
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware',
+ 'codejail.django_integration.ConfigureCodeJailMiddleware',
)
############################### Pipeline #######################################
@@ -601,6 +631,7 @@ INSTALLED_APPS = (
# For testing
'django.contrib.admin', # only used in DEBUG mode
+ 'debug',
# Discussion forums
'django_comment_client',
diff --git a/lms/templates/debug/run_python_form.html b/lms/templates/debug/run_python_form.html
new file mode 100644
index 0000000000..daecdf2abd
--- /dev/null
+++ b/lms/templates/debug/run_python_form.html
@@ -0,0 +1,19 @@
+
+
+%if results:
+
+Results:
+
+${results|h}
+
+
+%endif
diff --git a/lms/urls.py b/lms/urls.py
index 2846e091be..99b55fdb54 100644
--- a/lms/urls.py
+++ b/lms/urls.py
@@ -363,6 +363,11 @@ urlpatterns += (
url(r'^comm/foldit_ops', 'foldit.views.foldit_ops', name="foldit_ops"),
)
+if settings.MITX_FEATURES.get('ENABLE_DEBUG_RUN_PYTHON'):
+ urlpatterns += (
+ url(r'^debug/run_python', 'debug.views.run_python'),
+ )
+
urlpatterns = patterns(*urlpatterns)
if settings.DEBUG:
diff --git a/requirements/edx-sandbox/base.txt b/requirements/edx-sandbox/base.txt
new file mode 100644
index 0000000000..d801f46c8e
--- /dev/null
+++ b/requirements/edx-sandbox/base.txt
@@ -0,0 +1 @@
+numpy==1.6.2
diff --git a/requirements/edx-sandbox/post.txt b/requirements/edx-sandbox/post.txt
new file mode 100644
index 0000000000..f99e8a8c4b
--- /dev/null
+++ b/requirements/edx-sandbox/post.txt
@@ -0,0 +1,6 @@
+# Packages to install in the Python sandbox for secured execution.
+scipy==0.11.0
+lxml==3.0.1
+-e common/lib/calc
+-e common/lib/chem
+-e common/lib/sandbox-packages
diff --git a/requirements/edx/github.txt b/requirements/edx/github.txt
index 35ad8af027..d3f90d5abc 100644
--- a/requirements/edx/github.txt
+++ b/requirements/edx/github.txt
@@ -9,3 +9,4 @@
# Our libraries:
-e git+https://github.com/edx/XBlock.git@483e0cb1#egg=XBlock
+-e git+https://github.com/edx/codejail.git@07494f1#egg=codejail
diff --git a/requirements/edx/local.txt b/requirements/edx/local.txt
index 201467d11e..a72f1f6dea 100644
--- a/requirements/edx/local.txt
+++ b/requirements/edx/local.txt
@@ -1,4 +1,6 @@
# Python libraries to install that are local to the mitx repo
+-e common/lib/calc
-e common/lib/capa
+-e common/lib/chem
-e common/lib/xmodule
-e .
diff --git a/scripts/runone.py b/scripts/runone.py
index 2227ae0adf..a644aa077b 100755
--- a/scripts/runone.py
+++ b/scripts/runone.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-from django.core import management
import argparse
import os
@@ -42,21 +41,34 @@ def main(argv):
test_py_path = find_full_path(test_py_path)
test_spec = "%s:%s.%s" % (test_py_path, test_class, test_method)
+ settings = None
if test_py_path.startswith('cms'):
settings = 'cms.envs.test'
elif test_py_path.startswith('lms'):
settings = 'lms.envs.test'
+
+ if settings:
+ # Run as a django test suite
+ from django.core import management
+
+ django_args = ["django-admin.py", "test", "--pythonpath=."]
+ django_args.append("--settings=%s" % settings)
+ if args.nocapture:
+ django_args.append("-s")
+ django_args.append(test_spec)
+
+ print " ".join(django_args)
+ management.execute_from_command_line(django_args)
else:
- raise Exception("Couldn't determine settings to use!")
+ # Run as a nose test suite
+ import nose.core
+ nose_args = ["nosetests"]
+ if args.nocapture:
+ nose_args.append("-s")
+ nose_args.append(test_spec)
+ print " ".join(nose_args)
+ nose.core.main(argv=nose_args)
- django_args = ["django-admin.py", "test", "--pythonpath=."]
- django_args.append("--settings=%s" % settings)
- if args.nocapture:
- django_args.append("-s")
- django_args.append(test_spec)
-
- print " ".join(django_args)
- management.execute_from_command_line(django_args)
if __name__ == "__main__":
main(sys.argv[1:])