diff --git a/.gitignore b/.gitignore index 76cc1efa95..9c82bb8ea9 100644 --- a/.gitignore +++ b/.gitignore @@ -9,7 +9,7 @@ :2e# .AppleDouble database.sqlite -private-requirements.txt +requirements/private.txt courseware/static/js/mathjax/* flushdb.sh build diff --git a/common/djangoapps/util/views.py b/common/djangoapps/util/views.py index 4eae1d66e5..991d6e2e75 100644 --- a/common/djangoapps/util/views.py +++ b/common/djangoapps/util/views.py @@ -16,7 +16,7 @@ from mitxmako.shortcuts import render_to_response, render_to_string from urllib import urlencode import zendesk -import capa.calc +import calc import track.views @@ -27,7 +27,7 @@ def calculate(request): ''' Calculator in footer of every page. ''' equation = request.GET['equation'] try: - result = capa.calc.evaluator({}, {}, equation) + result = calc.evaluator({}, {}, equation) except: event = {'error': map(str, sys.exc_info()), 'equation': equation} diff --git a/common/lib/capa/capa/calc.py b/common/lib/calc/calc.py similarity index 100% rename from common/lib/capa/capa/calc.py rename to common/lib/calc/calc.py diff --git a/common/lib/calc/setup.py b/common/lib/calc/setup.py new file mode 100644 index 0000000000..f7bb1708af --- /dev/null +++ b/common/lib/calc/setup.py @@ -0,0 +1,12 @@ +from setuptools import setup + +setup( + name="calc", + version="0.1", + py_modules=["calc"], + install_requires=[ + "pyparsing==1.5.6", + "numpy", + "scipy" + ], +) diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py index 6580114bcc..7ead599d67 100644 --- a/common/lib/capa/capa/capa_problem.py +++ b/common/lib/capa/capa/capa_problem.py @@ -13,33 +13,19 @@ Main module which shows problems (of "capa" type). This is used by capa_module. 
''' -from __future__ import division - from datetime import datetime import logging import math import numpy -import os -import random +import os.path import re -import scipy -import struct import sys from lxml import etree from xml.sax.saxutils import unescape from copy import deepcopy -import chem -import chem.miller -import chem.chemcalc -import chem.chemtools -import verifiers -import verifiers.draganddrop - -import calc from .correctmap import CorrectMap -import eia import inputtypes import customrender from .util import contextualize_text, convert_files_to_filenames @@ -47,6 +33,7 @@ import xqueue_interface # to be replaced with auto-registering import responsetypes +import safe_exec # dict of tagname, Response Class -- this should come from auto-registering response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__]) @@ -63,17 +50,6 @@ html_transforms = {'problem': {'tag': 'div'}, "math": {'tag': 'span'}, } -global_context = {'random': random, - 'numpy': numpy, - 'math': math, - 'scipy': scipy, - 'calc': calc, - 'eia': eia, - 'chemcalc': chem.chemcalc, - 'chemtools': chem.chemtools, - 'miller': chem.miller, - 'draganddrop': verifiers.draganddrop} - # These should be removed from HTML output, including all subelements html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric"] @@ -96,7 +72,7 @@ class LoncapaProblem(object): - problem_text (string): xml defining the problem - id (string): identifier for this problem; often a filename (no spaces) - - seed (int): random number generator seed (int) + - seed (int): random number generator seed (int) - state (dict): containing the following keys: - 'seed' - (int) random number generator seed - 'student_answers' - (dict) maps input id to the stored answer for that input @@ -115,23 +91,20 @@ class LoncapaProblem(object): if self.system is None: raise Exception() - state = state if state else {} + state = state or {} # Set seed 
according to the following priority: # 1. Contained in problem's state # 2. Passed into capa_problem via constructor - # 3. Assign from the OS's random number generator self.seed = state.get('seed', seed) - if self.seed is None: - self.seed = struct.unpack('i', os.urandom(4))[0] + assert self.seed is not None, "Seed must be provided for LoncapaProblem." + self.student_answers = state.get('student_answers', {}) if 'correct_map' in state: self.correct_map.set_dict(state['correct_map']) self.done = state.get('done', False) self.input_state = state.get('input_state', {}) - - # Convert startouttext and endouttext to proper problem_text = re.sub("startouttext\s*/", "text", problem_text) problem_text = re.sub("endouttext\s*/", "/text", problem_text) @@ -144,7 +117,7 @@ class LoncapaProblem(object): self._process_includes() # construct script processor context (eg for customresponse problems) - self.context = self._extract_context(self.tree, seed=self.seed) + self.context = self._extract_context(self.tree) # Pre-parse the XML tree: modifies it to add ID's and perform some in-place # transformations. This also creates the dict (self.responders) of Response @@ -440,18 +413,23 @@ class LoncapaProblem(object): path = [] for dir in raw_path: - if not dir: continue # path is an absolute path or a path relative to the data dir dir = os.path.join(self.system.filestore.root_path, dir) + # Check that we are within the filestore tree. + reldir = os.path.relpath(dir, self.system.filestore.root_path) + if ".." in reldir: + log.warning("Ignoring Python directory outside of course: %r" % dir) + continue + abs_dir = os.path.normpath(dir) path.append(abs_dir) return path - def _extract_context(self, tree, seed=struct.unpack('i', os.urandom(4))[0]): # private + def _extract_context(self, tree): ''' Extract content of from the problem.xml file, and exec it in the context of this problem. 
Provides ability to randomize problems, and also set @@ -459,55 +437,47 @@ class LoncapaProblem(object): Problem XML goes to Python execution context. Runs everything in script tags. ''' - random.seed(self.seed) - # save global context in here also - context = {'global_context': global_context} + context = {} + context['seed'] = self.seed + all_code = '' - # initialize context to have stuff in global_context - context.update(global_context) + python_path = [] - # put globals there also - context['__builtins__'] = globals()['__builtins__'] - - # pass instance of LoncapaProblem in - context['the_lcp'] = self - context['script_code'] = '' - - self._execute_scripts(tree.findall('.//script'), context) - - return context - - def _execute_scripts(self, scripts, context): - ''' - Executes scripts in the given context. - ''' - original_path = sys.path - - for script in scripts: - sys.path = original_path + self._extract_system_path(script) + for script in tree.findall('.//script'): stype = script.get('type') - if stype: if 'javascript' in stype: continue # skip javascript if 'perl' in stype: continue # skip perl # TODO: evaluate only python - code = script.text + + for d in self._extract_system_path(script): + if d not in python_path and os.path.exists(d): + python_path.append(d) + XMLESC = {"'": "'", """: '"'} - code = unescape(code, XMLESC) - # store code source in context - context['script_code'] += code + code = unescape(script.text, XMLESC) + all_code += code + + if all_code: try: - # use "context" for global context; thus defs in code are global within code - exec code in context, context + safe_exec.safe_exec( + all_code, + context, + random_seed=self.seed, + python_path=python_path, + cache=self.system.cache, + ) except Exception as err: - log.exception("Error while execing script code: " + code) + log.exception("Error while execing script code: " + all_code) msg = "Error while executing script code: %s" % str(err).replace('<', '<') raise 
responsetypes.LoncapaProblemError(msg) - finally: - sys.path = original_path + + # store code source in context + context['script_code'] = all_code + return context diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index e253b61948..65280d6d29 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -46,7 +46,7 @@ import sys import pyparsing from .registry import TagRegistry -from capa.chem import chemcalc +from chem import chemcalc import xqueue_interface from datetime import datetime diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index 9db91496be..c7a99f1271 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -23,6 +23,7 @@ import random import re import requests import subprocess +import textwrap import traceback import xml.sax.saxutils as saxutils @@ -30,17 +31,23 @@ from collections import namedtuple from shapely.geometry import Point, MultiPoint # specific library imports -from .calc import evaluator, UndefinedVariable -from .correctmap import CorrectMap +from calc import evaluator, UndefinedVariable +from . import correctmap from datetime import datetime from .util import * from lxml import etree from lxml.html.soupparser import fromstring as fromstring_bs # uses Beautiful Soup!!! FIXME? import capa.xqueue_interface as xqueue_interface +import safe_exec + log = logging.getLogger(__name__) +CorrectMap = correctmap.CorrectMap +CORRECTMAP_PY = None + + #----------------------------------------------------------------------------- # Exceptions @@ -252,20 +259,41 @@ class LoncapaResponse(object): # We may extend this in the future to add another argument which provides a # callback procedure to a social hint generation system. 
- if not hintfn in self.context: - msg = 'missing specified hint function %s in script context' % hintfn - msg += "\nSee XML source line %s" % getattr( - self.xml, 'sourceline', '') - raise LoncapaProblemError(msg) + + global CORRECTMAP_PY + if CORRECTMAP_PY is None: + # We need the CorrectMap code for hint functions. No, this is not great. + CORRECTMAP_PY = inspect.getsource(correctmap) + + code = ( + CORRECTMAP_PY + "\n" + + self.context['script_code'] + "\n" + + textwrap.dedent(""" + new_cmap = CorrectMap() + new_cmap.set_dict(new_cmap_dict) + old_cmap = CorrectMap() + old_cmap.set_dict(old_cmap_dict) + {hintfn}(answer_ids, student_answers, new_cmap, old_cmap) + new_cmap_dict.update(new_cmap.get_dict()) + old_cmap_dict.update(old_cmap.get_dict()) + """).format(hintfn=hintfn) + ) + globals_dict = { + 'answer_ids': self.answer_ids, + 'student_answers': student_answers, + 'new_cmap_dict': new_cmap.get_dict(), + 'old_cmap_dict': old_cmap.get_dict(), + } try: - self.context[hintfn]( - self.answer_ids, student_answers, new_cmap, old_cmap) + safe_exec.safe_exec(code, globals_dict) except Exception as err: msg = 'Error %s in evaluating hint function %s' % (err, hintfn) msg += "\nSee XML source line %s" % getattr( self.xml, 'sourceline', '') raise ResponseError(msg) + + new_cmap.set_dict(globals_dict['new_cmap_dict']) return # hint specified by conditions and text dependent on conditions (a-la Loncapa design) @@ -475,6 +503,10 @@ class JavascriptResponse(LoncapaResponse): return tmp_env def call_node(self, args): + # Node.js code is un-sandboxed. If the XModuleSystem says we aren't + # allowed to run unsafe code, then stop now. 
+ if not self.system.can_execute_unsafe_code(): + raise LoncapaProblemError("Execution of unsafe Javascript code is not allowed.") subprocess_args = ["node"] subprocess_args.extend(args) @@ -488,7 +520,7 @@ class JavascriptResponse(LoncapaResponse): output = self.call_node([generator_file, self.generator, json.dumps(self.generator_dependencies), - json.dumps(str(self.context['the_lcp'].seed)), + json.dumps(str(self.context['seed'])), json.dumps(self.params)]).strip() return json.loads(output) @@ -660,15 +692,6 @@ class ChoiceResponse(LoncapaResponse): class MultipleChoiceResponse(LoncapaResponse): # TODO: handle direction and randomize - snippets = [{'snippet': ''' - - `a+b`
- a+b^2
- a+b+c - a+b+d -
-
- '''}] response_tag = 'multiplechoiceresponse' max_inputfields = 1 @@ -754,14 +777,6 @@ class OptionResponse(LoncapaResponse): ''' TODO: handle direction and randomize ''' - snippets = [{'snippet': """ - - The location of the sky - - - The location of the earth - - """}] response_tag = 'optionresponse' hint_tag = 'optionhint' @@ -905,39 +920,6 @@ class CustomResponse(LoncapaResponse): Custom response. The python code to be run should be in ... or in a ''' - snippets = [{'snippet': r""" - -
- Suppose that \(I(t)\) rises from \(0\) to \(I_S\) at a time \(t_0 \neq 0\) - In the space provided below write an algebraic expression for \(I(t)\). -
- -
- - correct=['correct'] - try: - r = str(submission[0]) - except ValueError: - correct[0] ='incorrect' - r = '0' - if not(r=="IS*u(t-t0)"): - correct[0] ='incorrect' - -
"""}, - {'snippet': """ - - - - - """}] response_tag = 'customresponse' @@ -972,14 +954,29 @@ def sympy_check2(): cfn = xml.get('cfn') if cfn: log.debug("cfn = %s" % cfn) - if cfn in self.context: - self.code = self.context[cfn] - else: - msg = "%s: can't find cfn %s in context" % ( - unicode(self), cfn) - msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline', - '') - raise LoncapaProblemError(msg) + + # This is a bit twisty. We used to grab the cfn function from + # the context, but now that we sandbox Python execution, we + # can't get functions from previous executions. So we make an + # actual function that will re-execute the original script, + # and invoke the function with the data needed. + def make_check_function(script_code, cfn): + def check_function(expect, ans, **kwargs): + extra_args = "".join(", {0}={0}".format(k) for k in kwargs) + code = ( + script_code + "\n" + + "cfn_return = %s(expect, ans%s)\n" % (cfn, extra_args) + ) + globals_dict = { + 'expect': expect, + 'ans': ans, + } + globals_dict.update(kwargs) + safe_exec.safe_exec(code, globals_dict, cache=self.system.cache) + return globals_dict['cfn_return'] + return check_function + + self.code = make_check_function(self.context['script_code'], cfn) if not self.code: if answer is None: @@ -1036,9 +1033,6 @@ def sympy_check2(): # put these in the context of the check function evaluator # note that this doesn't help the "cfn" version - only the exec version self.context.update({ - # our subtree - 'xml': self.xml, - # my ID 'response_id': self.myid, @@ -1075,65 +1069,63 @@ def sympy_check2(): # pass self.system.debug to cfn self.context['debug'] = self.system.DEBUG + # Run the check function + self.execute_check_function(idset, submission) + + # build map giving "correct"ness of the answer(s) + correct = self.context['correct'] + messages = self.context['messages'] + overall_message = self.clean_message_html(self.context['overall_message']) + correct_map = CorrectMap() + 
correct_map.set_overall_message(overall_message) + + for k in range(len(idset)): + npoints = self.maxpoints[idset[k]] if correct[k] == 'correct' else 0 + correct_map.set(idset[k], correct[k], msg=messages[k], + npoints=npoints) + return correct_map + + def execute_check_function(self, idset, submission): # exec the check function if isinstance(self.code, basestring): try: - exec self.code in self.context['global_context'], self.context - correct = self.context['correct'] - messages = self.context['messages'] - overall_message = self.context['overall_message'] - + safe_exec.safe_exec(self.code, self.context, cache=self.system.cache) except Exception as err: self._handle_exec_exception(err) else: - # self.code is not a string; assume its a function + # self.code is not a string; it's a function we created earlier. # this is an interface to the Tutor2 check functions fn = self.code - ret = None + answer_given = submission[0] if (len(idset) == 1) else submission + kwnames = self.xml.get("cfn_extra_args", "").split() + kwargs = {n:self.context.get(n) for n in kwnames} log.debug(" submission = %s" % submission) try: - answer_given = submission[0] if ( - len(idset) == 1) else submission - # handle variable number of arguments in check function, for backwards compatibility - # with various Tutor2 check functions - args = [self.expect, answer_given, - student_answers, self.answer_ids[0]] - argspec = inspect.getargspec(fn) - nargs = len(argspec.args) - len(argspec.defaults or []) - kwargs = {} - for argname in argspec.args[nargs:]: - kwargs[argname] = self.context[ - argname] if argname in self.context else None - - log.debug('[customresponse] answer_given=%s' % answer_given) - log.debug('nargs=%d, args=%s, kwargs=%s' % ( - nargs, args, kwargs)) - - ret = fn(*args[:nargs], **kwargs) - + ret = fn(self.expect, answer_given, **kwargs) except Exception as err: self._handle_exec_exception(err) - - if type(ret) == dict: - + log.debug( + 
"[courseware.capa.responsetypes.customresponse.get_score] ret = %s", + ret + ) + if isinstance(ret, dict): # One kind of dictionary the check function can return has the # form {'ok': BOOLEAN, 'msg': STRING} # If there are multiple inputs, they all get marked # to the same correct/incorrect value if 'ok' in ret: - correct = ['correct'] * len(idset) if ret[ - 'ok'] else ['incorrect'] * len(idset) + correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset) msg = ret.get('msg', None) msg = self.clean_message_html(msg) # If there is only one input, apply the message to that input # Otherwise, apply the message to the whole problem if len(idset) > 1: - overall_message = msg + self.context['overall_message'] = msg else: - messages[0] = msg + self.context['messages'][0] = msg # Another kind of dictionary the check function can return has # the form: @@ -1155,6 +1147,8 @@ def sympy_check2(): msg = (self.clean_message_html(input_dict['msg']) if 'msg' in input_dict else None) messages.append(msg) + self.context['messages'] = messages + self.context['overall_message'] = overall_message # Otherwise, we do not recognize the dictionary # Raise an exception @@ -1163,25 +1157,10 @@ def sympy_check2(): raise ResponseError( "CustomResponse: check function returned an invalid dict") - # The check function can return a boolean value, - # indicating whether all inputs should be marked - # correct or incorrect else: - n = len(idset) - correct = ['correct'] * n if ret else ['incorrect'] * n + correct = ['correct' if ret else 'incorrect'] * len(idset) - # build map giving "correct"ness of the answer(s) - correct_map = CorrectMap() - - overall_message = self.clean_message_html(overall_message) - correct_map.set_overall_message(overall_message) - - for k in range(len(idset)): - npoints = (self.maxpoints[idset[k]] - if correct[k] == 'correct' else 0) - correct_map.set(idset[k], correct[k], msg=messages[k], - npoints=npoints) - return correct_map + self.context['correct'] = correct def 
clean_message_html(self, msg): @@ -1253,24 +1232,38 @@ class SymbolicResponse(CustomResponse): """ Symbolic math response checking, using symmath library. """ - snippets = [{'snippet': r''' - Compute \[ \exp\left(-i \frac{\theta}{2} \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) \] - and give the resulting \(2\times 2\) matrix:
- - - -
- Your input should be typed in as a list of lists, eg [[1,2],[3,4]]. -
-
'''}] response_tag = 'symbolicresponse' + max_inputfields = 1 def setup_response(self): + # Symbolic response always uses symmath_check() + # If the XML did not specify this, then set it now + # Otherwise, we get an error from the superclass self.xml.set('cfn', 'symmath_check') - code = "from symmath import *" - exec code in self.context, self.context - CustomResponse.setup_response(self) + + # Let CustomResponse do its setup + super(SymbolicResponse, self).setup_response() + + def execute_check_function(self, idset, submission): + from symmath import symmath_check + try: + # Since we have limited max_inputfields to 1, + # we can assume that there is only one submission + answer_given = submission[0] + + ret = symmath_check( + self.expect, answer_given, + dynamath=self.context.get('dynamath'), + options=self.context.get('options'), + debug=self.context.get('debug'), + ) + except Exception as err: + log.error("oops in symbolicresponse (cfn) error %s" % err) + log.error(traceback.format_exc()) + raise Exception("oops in symbolicresponse (cfn) error %s" % err) + self.context['messages'][0] = self.clean_message_html(ret['msg']) + self.context['correct'] = ['correct' if ret['ok'] else 'incorrect'] * len(idset) #----------------------------------------------------------------------------- @@ -1325,10 +1318,8 @@ class CodeResponse(LoncapaResponse): # Check if XML uses the ExternalResponse format or the generic # CodeResponse format codeparam = self.xml.find('codeparam') - if codeparam is None: - self._parse_externalresponse_xml() - else: - self._parse_coderesponse_xml(codeparam) + assert codeparam is not None, "Unsupported old format! 
without " + self._parse_coderesponse_xml(codeparam) def _parse_coderesponse_xml(self, codeparam): ''' @@ -1348,62 +1339,6 @@ class CodeResponse(LoncapaResponse): self.answer = find_with_default(codeparam, 'answer_display', 'No answer provided.') - def _parse_externalresponse_xml(self): - ''' - VS[compat]: Suppport for old ExternalResponse XML format. When successful, sets: - self.initial_display - self.answer (an answer to display to the student in the LMS) - self.payload - ''' - answer = self.xml.find('answer') - - if answer is not None: - answer_src = answer.get('src') - if answer_src is not None: - code = self.system.filesystem.open('src/' + answer_src).read() - else: - code = answer.text - else: # no stanza; get code from - - -
- Give an equation for the relativistic energy of an object with mass m. -
- - - - - - '''}] response_tag = 'formularesponse' hint_tag = 'formulahint' @@ -1927,21 +1807,18 @@ class SchematicResponse(LoncapaResponse): self.code = answer.text def get_score(self, student_answers): - from capa_problem import global_context - submission = [json.loads(student_answers[ - k]) for k in sorted(self.answer_ids)] + #from capa_problem import global_context + submission = [ + json.loads(student_answers[k]) for k in sorted(self.answer_ids) + ] self.context.update({'submission': submission}) - try: - exec self.code in global_context, self.context - + safe_exec.safe_exec(self.code, self.context, cache=self.system.cache) except Exception as err: - _, _, traceback_obj = sys.exc_info() - raise ResponseError, ResponseError(err.message), traceback_obj - + msg = 'Error %s in evaluating SchematicResponse' % err + raise ResponseError(msg) cmap = CorrectMap() - cmap.set_dict(dict(zip(sorted( - self.answer_ids), self.context['correct']))) + cmap.set_dict(dict(zip(sorted(self.answer_ids), self.context['correct']))) return cmap def get_answers(self): @@ -1977,19 +1854,6 @@ class ImageResponse(LoncapaResponse): Returns: True, if click is inside any region or rectangle. Otherwise False. """ - snippets = [{'snippet': ''' - - - - - - '''}] response_tag = 'imageresponse' allowed_inputfields = ['imageinput'] diff --git a/common/lib/capa/capa/safe_exec/README.rst b/common/lib/capa/capa/safe_exec/README.rst new file mode 100644 index 0000000000..c61100f709 --- /dev/null +++ b/common/lib/capa/capa/safe_exec/README.rst @@ -0,0 +1,51 @@ +Configuring Capa sandboxed execution +==================================== + +Capa problems can contain code authored by the course author. We need to +execute that code in a sandbox. We use CodeJail as the sandboxing facility, +but it needs to be configured specifically for Capa's use. 
+
+As a developer, you don't have to do anything to configure sandboxing if you
+don't want to, and everything will operate properly, you just won't have
+protection on that code.
+
+If you want to configure sandboxing, you're going to use the `README from
+CodeJail`__, with a few customized tweaks.
+
+__ https://github.com/edx/codejail/blob/master/README.rst
+
+
+1. At the instruction to install packages into the sandboxed code, you'll
+   need to install both `pre-sandbox-requirements.txt` and
+   `sandbox-requirements.txt`::
+
+       $ sudo pip install -r pre-sandbox-requirements.txt
+       $ sudo pip install -r sandbox-requirements.txt
+
+2. At the instruction to create the AppArmor profile, you'll need a line in
+   the profile for the sandbox packages. <EDXPLATFORM> is the full path to
+   your edx_platform repo::
+
+       <EDXPLATFORM>/common/lib/sandbox-packages/** r,
+
+3. You can configure resource limits in settings.py. A CODE_JAIL setting is
+   available, a dictionary. The "limits" key lets you adjust the limits for
+   CPU time, real time, and memory use. Setting any of them to zero disables
+   that limit::
+
+       # in settings.py...
+       CODE_JAIL = {
+           # Configurable limits.
+           'limits': {
+               # How many CPU seconds can jailed code use?
+               'CPU': 1,
+               # How many real-time seconds will a sandbox survive?
+               'REALTIME': 1,
+               # How much memory (in bytes) can a sandbox use?
+               'VMEM': 30000000,
+           },
+       }
+
+
+That's it. Once you've finished the CodeJail configuration instructions,
+your course-hosted Python code should be run securely.
diff --git a/common/lib/capa/capa/safe_exec/__init__.py b/common/lib/capa/capa/safe_exec/__init__.py
new file mode 100644
index 0000000000..ffbe8f2320
--- /dev/null
+++ b/common/lib/capa/capa/safe_exec/__init__.py
@@ -0,0 +1,3 @@
+"""Capa's specialized use of codejail.safe_exec."""
+
+from .safe_exec import safe_exec, update_hash
diff --git a/common/lib/capa/capa/safe_exec/lazymod.py b/common/lib/capa/capa/safe_exec/lazymod.py
new file mode 100644
index 0000000000..cdd8410f2c
--- /dev/null
+++ b/common/lib/capa/capa/safe_exec/lazymod.py
@@ -0,0 +1,42 @@
+"""A module proxy for delayed importing of modules.
+
+From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
+in the public domain.
+
+"""
+
+import sys
+
+class LazyModule(object):
+    """A lazy module proxy."""
+
+    def __init__(self, modname):
+        self.__dict__['__name__'] = modname
+        self._set_mod(None)
+
+    def _set_mod(self, mod):
+        if mod is not None:
+            self.__dict__ = mod.__dict__
+        self.__dict__['_lazymod_mod'] = mod
+
+    def _load_mod(self):
+        __import__(self.__name__)
+        self._set_mod(sys.modules[self.__name__])
+
+    def __getattr__(self, name):
+        if self.__dict__['_lazymod_mod'] is None:
+            self._load_mod()
+
+        mod = self.__dict__['_lazymod_mod']
+
+        if hasattr(mod, name):
+            return getattr(mod, name)
+        else:
+            try:
+                subname = '%s.%s' % (self.__name__, name)
+                __import__(subname)
+                submod = getattr(mod, name)
+            except ImportError:
+                raise AttributeError("'module' object has no attribute %r" % name)
+            self.__dict__[name] = LazyModule(subname)  # takes only the name; real module binds lazily
+            return self.__dict__[name]
diff --git a/common/lib/capa/capa/safe_exec/safe_exec.py b/common/lib/capa/capa/safe_exec/safe_exec.py
new file mode 100644
index 0000000000..b9cdf236bd
--- /dev/null
+++ b/common/lib/capa/capa/safe_exec/safe_exec.py
@@ -0,0 +1,130 @@
+"""Capa's specialized use of codejail.safe_exec."""
+
+from codejail.safe_exec import safe_exec as codejail_safe_exec
+from codejail.safe_exec import json_safe,
SafeExecException +from . import lazymod +from statsd import statsd + +import hashlib + +# Establish the Python environment for Capa. +# Capa assumes float-friendly division always. +# The name "random" is a properly-seeded stand-in for the random module. +CODE_PROLOG = """\ +from __future__ import division + +import random as random_module +import sys +random = random_module.Random(%r) +random.Random = random_module.Random +del random_module +sys.modules['random'] = random +""" + +ASSUMED_IMPORTS=[ + ("numpy", "numpy"), + ("math", "math"), + ("scipy", "scipy"), + ("calc", "calc"), + ("eia", "eia"), + ("chemcalc", "chem.chemcalc"), + ("chemtools", "chem.chemtools"), + ("miller", "chem.miller"), + ("draganddrop", "verifiers.draganddrop"), +] + +# We'll need the code from lazymod.py for use in safe_exec, so read it now. +lazymod_py_file = lazymod.__file__ +if lazymod_py_file.endswith("c"): + lazymod_py_file = lazymod_py_file[:-1] + +lazymod_py = open(lazymod_py_file).read() + +LAZY_IMPORTS = [lazymod_py] +for name, modname in ASSUMED_IMPORTS: + LAZY_IMPORTS.append("{} = LazyModule('{}')\n".format(name, modname)) + +LAZY_IMPORTS = "".join(LAZY_IMPORTS) + + +def update_hash(hasher, obj): + """ + Update a `hashlib` hasher with a nested object. + + To properly cache nested structures, we need to compute a hash from the + entire structure, canonicalizing at every level. + + `hasher`'s `.update()` method is called a number of times, touching all of + `obj` in the process. Only primitive JSON-safe types are supported. + + """ + hasher.update(str(type(obj))) + if isinstance(obj, (tuple, list)): + for e in obj: + update_hash(hasher, e) + elif isinstance(obj, dict): + for k in sorted(obj): + update_hash(hasher, k) + update_hash(hasher, obj[k]) + else: + hasher.update(repr(obj)) + + +@statsd.timed('capa.safe_exec.time') +def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None): + """ + Execute python code safely. 
+ + `code` is the Python code to execute. It has access to the globals in `globals_dict`, + and any changes it makes to those globals are visible in `globals_dict` when this + function returns. + + `random_seed` will be used to see the `random` module available to the code. + + `python_path` is a list of directories to add to the Python path before execution. + + `cache` is an object with .get(key) and .set(key, value) methods. It will be used + to cache the execution, taking into account the code, the values of the globals, + and the random seed. + + """ + # Check the cache for a previous result. + if cache: + safe_globals = json_safe(globals_dict) + md5er = hashlib.md5() + md5er.update(repr(code)) + update_hash(md5er, safe_globals) + key = "safe_exec.%r.%s" % (random_seed, md5er.hexdigest()) + cached = cache.get(key) + if cached is not None: + # We have a cached result. The result is a pair: the exception + # message, if any, else None; and the resulting globals dictionary. + emsg, cleaned_results = cached + globals_dict.update(cleaned_results) + if emsg: + raise SafeExecException(emsg) + return + + # Create the complete code we'll run. + code_prolog = CODE_PROLOG % random_seed + + # Run the code! Results are side effects in globals_dict. + try: + codejail_safe_exec( + code_prolog + LAZY_IMPORTS + code, globals_dict, + python_path=python_path, + ) + except SafeExecException as e: + emsg = e.message + else: + emsg = None + + # Put the result back in the cache. This is complicated by the fact that + # the globals dict might not be entirely serializable. + if cache: + cleaned_results = json_safe(globals_dict) + cache.set(key, (emsg, cleaned_results)) + + # If an exception happened, raise it now. 
+ if emsg: + raise e diff --git a/common/lib/capa/capa/safe_exec/tests/test_files/pylib/constant.py b/common/lib/capa/capa/safe_exec/tests/test_files/pylib/constant.py new file mode 100644 index 0000000000..0769d528ba --- /dev/null +++ b/common/lib/capa/capa/safe_exec/tests/test_files/pylib/constant.py @@ -0,0 +1 @@ +THE_CONST = 23 diff --git a/common/lib/capa/capa/safe_exec/tests/test_lazymod.py b/common/lib/capa/capa/safe_exec/tests/test_lazymod.py new file mode 100644 index 0000000000..68dcd81ea7 --- /dev/null +++ b/common/lib/capa/capa/safe_exec/tests/test_lazymod.py @@ -0,0 +1,44 @@ +"""Test lazymod.py""" + +import sys +import unittest + +from capa.safe_exec.lazymod import LazyModule + + +class ModuleIsolation(object): + """ + Manage changes to sys.modules so that we can roll back imported modules. + + Create this object, it will snapshot the currently imported modules. When + you call `clean_up()`, it will delete any module imported since its creation. + """ + def __init__(self): + # Save all the names of all the imported modules. + self.mods = set(sys.modules) + + def clean_up(self): + # Get a list of modules that didn't exist when we were created + new_mods = [m for m in sys.modules if m not in self.mods] + # and delete them all so another import will run code for real again. + for m in new_mods: + del sys.modules[m] + + +class TestLazyMod(unittest.TestCase): + + def setUp(self): + # Each test will remove modules that it imported. 
+ self.addCleanup(ModuleIsolation().clean_up) + + def test_simple(self): + # Import some stdlib module that has not been imported before + self.assertNotIn("colorsys", sys.modules) + colorsys = LazyModule("colorsys") + hsv = colorsys.rgb_to_hsv(.3, .4, .2) + self.assertEqual(hsv[0], 0.25) + + def test_dotted(self): + self.assertNotIn("email.utils", sys.modules) + email_utils = LazyModule("email.utils") + self.assertEqual(email_utils.quote('"hi"'), r'\"hi\"') diff --git a/common/lib/capa/capa/safe_exec/tests/test_safe_exec.py b/common/lib/capa/capa/safe_exec/tests/test_safe_exec.py new file mode 100644 index 0000000000..4592af8305 --- /dev/null +++ b/common/lib/capa/capa/safe_exec/tests/test_safe_exec.py @@ -0,0 +1,281 @@ +"""Test safe_exec.py""" + +import hashlib +import os.path +import random +import textwrap +import unittest + +from capa.safe_exec import safe_exec, update_hash +from codejail.safe_exec import SafeExecException + + +class TestSafeExec(unittest.TestCase): + def test_set_values(self): + g = {} + safe_exec("a = 17", g) + self.assertEqual(g['a'], 17) + + def test_division(self): + g = {} + # Future division: 1/2 is 0.5. + safe_exec("a = 1/2", g) + self.assertEqual(g['a'], 0.5) + + def test_assumed_imports(self): + g = {} + # Math is always available. 
+ safe_exec("a = int(math.pi)", g) + self.assertEqual(g['a'], 3) + + def test_random_seeding(self): + g = {} + r = random.Random(17) + rnums = [r.randint(0, 999) for _ in xrange(100)] + + # Without a seed, the results are unpredictable + safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", g) + self.assertNotEqual(g['rnums'], rnums) + + # With a seed, the results are predictable + safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", g, random_seed=17) + self.assertEqual(g['rnums'], rnums) + + def test_random_is_still_importable(self): + g = {} + r = random.Random(17) + rnums = [r.randint(0, 999) for _ in xrange(100)] + + # With a seed, the results are predictable even from the random module + safe_exec( + "import random\n" + "rnums = [random.randint(0, 999) for _ in xrange(100)]\n", + g, random_seed=17) + self.assertEqual(g['rnums'], rnums) + + def test_python_lib(self): + pylib = os.path.dirname(__file__) + "/test_files/pylib" + g = {} + safe_exec( + "import constant; a = constant.THE_CONST", + g, python_path=[pylib] + ) + + def test_raising_exceptions(self): + g = {} + with self.assertRaises(SafeExecException) as cm: + safe_exec("1/0", g) + self.assertIn("ZeroDivisionError", cm.exception.message) + + +class DictCache(object): + """A cache implementation over a simple dict, for testing.""" + + def __init__(self, d): + self.cache = d + + def get(self, key): + # Actual cache implementations have limits on key length + assert len(key) <= 250 + return self.cache.get(key) + + def set(self, key, value): + # Actual cache implementations have limits on key length + assert len(key) <= 250 + self.cache[key] = value + + +class TestSafeExecCaching(unittest.TestCase): + """Test that caching works on safe_exec.""" + + def test_cache_miss_then_hit(self): + g = {} + cache = {} + + # Cache miss + safe_exec("a = int(math.pi)", g, cache=DictCache(cache)) + self.assertEqual(g['a'], 3) + # A result has been cached + self.assertEqual(cache.values()[0], 
(None, {'a': 3})) + + # Fiddle with the cache, then try it again. + cache[cache.keys()[0]] = (None, {'a': 17}) + + g = {} + safe_exec("a = int(math.pi)", g, cache=DictCache(cache)) + self.assertEqual(g['a'], 17) + + def test_cache_large_code_chunk(self): + # Caching used to die on memcache with more than 250 bytes of code. + # Check that it doesn't any more. + code = "a = 0\n" + ("a += 1\n" * 12345) + + g = {} + cache = {} + safe_exec(code, g, cache=DictCache(cache)) + self.assertEqual(g['a'], 12345) + + def test_cache_exceptions(self): + # Used to be that running code that raised an exception didn't cache + # the result. Check that now it does. + code = "1/0" + g = {} + cache = {} + with self.assertRaises(SafeExecException): + safe_exec(code, g, cache=DictCache(cache)) + + # The exception should be in the cache now. + self.assertEqual(len(cache), 1) + cache_exc_msg, cache_globals = cache.values()[0] + self.assertIn("ZeroDivisionError", cache_exc_msg) + + # Change the value stored in the cache, the result should change. + cache[cache.keys()[0]] = ("Hey there!", {}) + + with self.assertRaises(SafeExecException): + safe_exec(code, g, cache=DictCache(cache)) + + self.assertEqual(len(cache), 1) + cache_exc_msg, cache_globals = cache.values()[0] + self.assertEqual("Hey there!", cache_exc_msg) + + # Change it again, now no exception! + cache[cache.keys()[0]] = (None, {'a': 17}) + safe_exec(code, g, cache=DictCache(cache)) + self.assertEqual(g['a'], 17) + + def test_unicode_submission(self): + # Check that using non-ASCII unicode does not raise an encoding error. 
+ # Try several non-ASCII unicode characters + for code in [129, 500, 2**8 - 1, 2**16 - 1]: + code_with_unichr = unicode("# ") + unichr(code) + try: + safe_exec(code_with_unichr, {}, cache=DictCache({})) + except UnicodeEncodeError: + self.fail("Tried executing code with non-ASCII unicode: {0}".format(code)) + + +class TestUpdateHash(unittest.TestCase): + """Test the safe_exec.update_hash function to be sure it canonicalizes properly.""" + + def hash_obj(self, obj): + """Return the md5 hash that `update_hash` makes us.""" + md5er = hashlib.md5() + update_hash(md5er, obj) + return md5er.hexdigest() + + def equal_but_different_dicts(self): + """ + Make two equal dicts with different key order. + + Simple literals won't do it. Filling one and then shrinking it will + make them different. + + """ + d1 = {k:1 for k in "abcdefghijklmnopqrstuvwxyz"} + d2 = dict(d1) + for i in xrange(10000): + d2[i] = 1 + for i in xrange(10000): + del d2[i] + + # Check that our dicts are equal, but with different key order. 
+ self.assertEqual(d1, d2) + self.assertNotEqual(d1.keys(), d2.keys()) + + return d1, d2 + + def test_simple_cases(self): + h1 = self.hash_obj(1) + h10 = self.hash_obj(10) + hs1 = self.hash_obj("1") + + self.assertNotEqual(h1, h10) + self.assertNotEqual(h1, hs1) + + def test_list_ordering(self): + h1 = self.hash_obj({'a': [1,2,3]}) + h2 = self.hash_obj({'a': [3,2,1]}) + self.assertNotEqual(h1, h2) + + def test_dict_ordering(self): + d1, d2 = self.equal_but_different_dicts() + h1 = self.hash_obj(d1) + h2 = self.hash_obj(d2) + self.assertEqual(h1, h2) + + def test_deep_ordering(self): + d1, d2 = self.equal_but_different_dicts() + o1 = {'a':[1, 2, [d1], 3, 4]} + o2 = {'a':[1, 2, [d2], 3, 4]} + h1 = self.hash_obj(o1) + h2 = self.hash_obj(o2) + self.assertEqual(h1, h2) + + +class TestRealProblems(unittest.TestCase): + def test_802x(self): + code = textwrap.dedent("""\ + import math + import random + import numpy + e=1.602e-19 #C + me=9.1e-31 #kg + mp=1.672e-27 #kg + eps0=8.854e-12 #SI units + mu0=4e-7*math.pi #SI units + + Rd1=random.randrange(1,30,1) + Rd2=random.randrange(30,50,1) + Rd3=random.randrange(50,70,1) + Rd4=random.randrange(70,100,1) + Rd5=random.randrange(100,120,1) + + Vd1=random.randrange(1,20,1) + Vd2=random.randrange(20,40,1) + Vd3=random.randrange(40,60,1) + + #R=[0,10,30,50,70,100] #Ohm + #V=[0,12,24,36] # Volt + + R=[0,Rd1,Rd2,Rd3,Rd4,Rd5] #Ohms + V=[0,Vd1,Vd2,Vd3] #Volts + #here the currents IL and IR are defined as in figure ps3_p3_fig2 + a=numpy.array([ [ R[1]+R[4]+R[5],R[4] ],[R[4], R[2]+R[3]+R[4] ] ]) + b=numpy.array([V[1]-V[2],-V[3]-V[2]]) + x=numpy.linalg.solve(a,b) + IL='%.2e' % x[0] + IR='%.2e' % x[1] + ILR='%.2e' % (x[0]+x[1]) + def sign(x): + return abs(x)/x + + RW="Rightwards" + LW="Leftwards" + UW="Upwards" + DW="Downwards" + I1='%.2e' % abs(x[0]) + I1d=LW if sign(x[0])==1 else RW + I1not=LW if I1d==RW else RW + I2='%.2e' % abs(x[1]) + I2d=RW if sign(x[1])==1 else LW + I2not=LW if I2d==RW else RW + I3='%.2e' % abs(x[1]) + I3d=DW if 
sign(x[1])==1 else UW + I3not=DW if I3d==UW else UW + I4='%.2e' % abs(x[0]+x[1]) + I4d=UW if sign(x[1]+x[0])==1 else DW + I4not=DW if I4d==UW else UW + I5='%.2e' % abs(x[0]) + I5d=RW if sign(x[0])==1 else LW + I5not=LW if I5d==RW else RW + VAP=-x[0]*R[1]-(x[0]+x[1])*R[4] + VPN=-V[2] + VGD=+V[1]-x[0]*R[1]+V[3]+x[1]*R[2] + aVAP='%.2e' % VAP + aVPN='%.2e' % VPN + aVGD='%.2e' % VGD + """) + g = {} + safe_exec(code, g) + self.assertIn("aVAP", g) diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py index 72d82c683b..ac81ff66c4 100644 --- a/common/lib/capa/capa/tests/__init__.py +++ b/common/lib/capa/capa/tests/__init__.py @@ -1,7 +1,7 @@ -import fs import fs.osfs -import os +import os, os.path +from capa.capa_problem import LoncapaProblem from mock import Mock, MagicMock import xml.sax.saxutils as saxutils @@ -22,16 +22,28 @@ def calledback_url(dispatch = 'score_update'): xqueue_interface = MagicMock() xqueue_interface.send_to_queue.return_value = (0, 'Success!') -test_system = Mock( - ajax_url='courses/course_id/modx/a_location', - track_function=Mock(), - get_module=Mock(), - render_template=tst_render_template, - replace_urls=Mock(), - user=Mock(), - filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")), - debug=True, - xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10}, - node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), - anonymous_student_id='student' -) +def test_system(): + """ + Construct a mock ModuleSystem instance. 
+ + """ + the_system = Mock( + ajax_url='courses/course_id/modx/a_location', + track_function=Mock(), + get_module=Mock(), + render_template=tst_render_template, + replace_urls=Mock(), + user=Mock(), + filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")), + debug=True, + xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10}, + node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), + anonymous_student_id='student', + cache=None, + can_execute_unsafe_code=lambda: False, + ) + return the_system + +def new_loncapa_problem(xml, system=None): + """Construct a `LoncapaProblem` suitable for unit tests.""" + return LoncapaProblem(xml, id='1', seed=723, system=system or test_system()) diff --git a/common/lib/capa/capa/tests/response_xml_factory.py b/common/lib/capa/capa/tests/response_xml_factory.py index aa401b70cd..35c12800ae 100644 --- a/common/lib/capa/capa/tests/response_xml_factory.py +++ b/common/lib/capa/capa/tests/response_xml_factory.py @@ -221,6 +221,8 @@ class CustomResponseXMLFactory(ResponseXMLFactory): cfn = kwargs.get('cfn', None) expect = kwargs.get('expect', None) answer = kwargs.get('answer', None) + options = kwargs.get('options', None) + cfn_extra_args = kwargs.get('cfn_extra_args', None) # Create the response element response_element = etree.Element("customresponse") @@ -235,6 +237,33 @@ class CustomResponseXMLFactory(ResponseXMLFactory): answer_element = etree.SubElement(response_element, "answer") answer_element.text = str(answer) + if options: + response_element.set('options', str(options)) + + if cfn_extra_args: + response_element.set('cfn_extra_args', str(cfn_extra_args)) + + return response_element + + def create_input_element(self, **kwargs): + return ResponseXMLFactory.textline_input_xml(**kwargs) + + +class SymbolicResponseXMLFactory(ResponseXMLFactory): + """ Factory for creating XML trees """ + + def create_response_element(self, **kwargs): + cfn = 
kwargs.get('cfn', None) + answer = kwargs.get('answer', None) + options = kwargs.get('options', None) + + response_element = etree.Element("symbolicresponse") + if cfn: + response_element.set('cfn', str(cfn)) + if answer: + response_element.set('answer', str(answer)) + if options: + response_element.set('options', str(options)) return response_element def create_input_element(self, **kwargs): @@ -638,12 +667,16 @@ class StringResponseXMLFactory(ResponseXMLFactory): Where *hint_prompt* is the string for which we show the hint, *hint_name* is an internal identifier for the hint, and *hint_text* is the text we show for the hint. + + *hintfn*: The name of a function in the script to use for hints. + """ # Retrieve the **kwargs answer = kwargs.get("answer", None) case_sensitive = kwargs.get("case_sensitive", True) hint_list = kwargs.get('hints', None) - assert(answer) + hint_fn = kwargs.get('hintfn', None) + assert answer # Create the element response_element = etree.Element("stringresponse") @@ -655,18 +688,24 @@ class StringResponseXMLFactory(ResponseXMLFactory): response_element.set("type", "cs" if case_sensitive else "ci") # Add the hints if specified - if hint_list: + if hint_list or hint_fn: hintgroup_element = etree.SubElement(response_element, "hintgroup") - for (hint_prompt, hint_name, hint_text) in hint_list: - stringhint_element = etree.SubElement(hintgroup_element, "stringhint") - stringhint_element.set("answer", str(hint_prompt)) - stringhint_element.set("name", str(hint_name)) + if hint_list: + assert not hint_fn + for (hint_prompt, hint_name, hint_text) in hint_list: + stringhint_element = etree.SubElement(hintgroup_element, "stringhint") + stringhint_element.set("answer", str(hint_prompt)) + stringhint_element.set("name", str(hint_name)) - hintpart_element = etree.SubElement(hintgroup_element, "hintpart") - hintpart_element.set("on", str(hint_name)) + hintpart_element = etree.SubElement(hintgroup_element, "hintpart") + hintpart_element.set("on", 
str(hint_name)) - hint_text_element = etree.SubElement(hintpart_element, "text") - hint_text_element.text = str(hint_text) + hint_text_element = etree.SubElement(hintpart_element, "text") + hint_text_element.text = str(hint_text) + + if hint_fn: + assert not hint_list + hintgroup_element.set("hintfn", hint_fn) return response_element @@ -705,3 +744,38 @@ class AnnotationResponseXMLFactory(ResponseXMLFactory): option_element.text = description return input_element + + +class SymbolicResponseXMLFactory(ResponseXMLFactory): + """ Factory for producing xml """ + + def create_response_element(self, **kwargs): + """ Build the XML element. + + Uses **kwargs: + + *expect*: The correct answer (a sympy string) + + *options*: list of option strings to pass to symmath_check + (e.g. 'matrix', 'qbit', 'imaginary', 'numerical')""" + + # Retrieve **kwargs + expect = kwargs.get('expect', '') + options = kwargs.get('options', []) + + # Symmath check expects a string of options + options_str = ",".join(options) + + # Construct the element + response_element = etree.Element('symbolicresponse') + + if expect: + response_element.set('expect', str(expect)) + + if options_str: + response_element.set('options', str(options_str)) + + return response_element + + def create_input_element(self, **kwargs): + return ResponseXMLFactory.textline_input_xml(**kwargs) diff --git a/common/lib/capa/capa/tests/test_customrender.py b/common/lib/capa/capa/tests/test_customrender.py index eece275b05..8012804a40 100644 --- a/common/lib/capa/capa/tests/test_customrender.py +++ b/common/lib/capa/capa/tests/test_customrender.py @@ -26,7 +26,7 @@ class HelperTest(unittest.TestCase): Make sure that our helper function works! 
''' def check(self, d): - xml = etree.XML(test_system.render_template('blah', d)) + xml = etree.XML(test_system().render_template('blah', d)) self.assertEqual(d, extract_context(xml)) def test_extract_context(self): @@ -46,11 +46,11 @@ class SolutionRenderTest(unittest.TestCase): xml_str = """{s}""".format(s=solution) element = etree.fromstring(xml_str) - renderer = lookup_tag('solution')(test_system, element) + renderer = lookup_tag('solution')(test_system(), element) self.assertEqual(renderer.id, 'solution_12') - # our test_system "renders" templates to a div with the repr of the context + # Our test_system "renders" templates to a div with the repr of the context. xml = renderer.get_html() context = extract_context(xml) self.assertEqual(context, {'id': 'solution_12'}) @@ -65,7 +65,7 @@ class MathRenderTest(unittest.TestCase): xml_str = """{tex}""".format(tex=latex_in) element = etree.fromstring(xml_str) - renderer = lookup_tag('math')(test_system, element) + renderer = lookup_tag('math')(test_system(), element) self.assertEqual(renderer.mathstr, mathjax_out) diff --git a/common/lib/capa/capa/tests/test_files/snuggletex_correct.html b/common/lib/capa/capa/tests/test_files/snuggletex_correct.html new file mode 100644 index 0000000000..0d10f7f56d --- /dev/null +++ b/common/lib/capa/capa/tests/test_files/snuggletex_correct.html @@ -0,0 +1,480 @@ + + + + + + + + + + + + SnuggleTeX - ASCIIMathML Enrichment Demo + + + + + + + +

SnuggleTeX (1.2.2)

+
+ + +
+ +
+

ASCIIMathML Enrichment Demo

+

Input

+

+ This demo is similar to the + MathML Semantic Enrichnment Demo + but uses + ASCIIMathML as + an alternative input format, which provides real-time feedback as you + type but can often generate MathML with odd semantics in it. + SnuggleTeX includes some functionality that can to convert this raw MathML into + something equivalent to its own MathML output, thereby allowing you to + semantically enrich it in + certain simple cases, making ASCIIMathML a possibly viable input format + for simple semantic maths. + +

+

+ To try the demo, simply enter some some ASCIIMathML into the box below. + You should see a real time preview of this while you type. + Then hit Go! to use SnuggleTeX to semantically enrich your + input. + +

+
+
+ ASCIIMath Input: +
+
+

Live Preview

+

+ This is a MathML rendering of your input, generated by ASCIIMathML as you type. + +

+
+
+
+

+ This is the underlying MathML source generated by ASCIIMathML, again updated in real time. + +

+
 
+

Enhanced Presentation MathML

+

+ This shows the result of attempting to enrich the raw Presentation MathML + generated by ASCIIMathML: + +

<math xmlns="http://www.w3.org/1998/Math/MathML">
+   <mrow>
+      <mrow>
+         <mrow>
+            <mi>cos</mi>
+            <mo>&ApplyFunction;</mo>
+            <mfenced close=")" open="(">
+               <mi>theta</mi>
+            </mfenced>
+         </mrow>
+         <mo>&sdot;</mo>
+         <mfenced close="]" open="[">
+            <mtable>
+               <mtr>
+                  <mtd>
+                     <mn>1</mn>
+                  </mtd>
+                  <mtd>
+                     <mn>0</mn>
+                  </mtd>
+               </mtr>
+               <mtr>
+                  <mtd>
+                     <mn>0</mn>
+                  </mtd>
+                  <mtd>
+                     <mn>1</mn>
+                  </mtd>
+               </mtr>
+            </mtable>
+         </mfenced>
+      </mrow>
+      <mo>+</mo>
+      <mrow>
+         <mi>i</mi>
+         <mo>&sdot;</mo>
+         <mrow>
+            <mi>sin</mi>
+            <mo>&ApplyFunction;</mo>
+            <mfenced close=")" open="(">
+               <mi>theta</mi>
+            </mfenced>
+         </mrow>
+         <mo>&sdot;</mo>
+         <mfenced close="]" open="[">
+            <mtable>
+               <mtr>
+                  <mtd>
+                     <mn>0</mn>
+                  </mtd>
+                  <mtd>
+                     <mn>1</mn>
+                  </mtd>
+               </mtr>
+               <mtr>
+                  <mtd>
+                     <mn>1</mn>
+                  </mtd>
+                  <mtd>
+                     <mn>0</mn>
+                  </mtd>
+               </mtr>
+            </mtable>
+         </mfenced>
+      </mrow>
+   </mrow>
+</math>

Content MathML

+

+ This shows the result of an attempted + conversion to Content MathML: + +

<math xmlns="http://www.w3.org/1998/Math/MathML">
+   <apply>
+      <plus/>
+      <apply>
+         <times/>
+         <apply>
+            <cos/>
+            <ci>theta</ci>
+         </apply>
+         <list>
+            <matrix>
+               <vector>
+                  <cn>1</cn>
+                  <cn>0</cn>
+               </vector>
+               <vector>
+                  <cn>0</cn>
+                  <cn>1</cn>
+               </vector>
+            </matrix>
+         </list>
+      </apply>
+      <apply>
+         <times/>
+         <ci>i</ci>
+         <apply>
+            <sin/>
+            <ci>theta</ci>
+         </apply>
+         <list>
+            <matrix>
+               <vector>
+                  <cn>0</cn>
+                  <cn>1</cn>
+               </vector>
+               <vector>
+                  <cn>1</cn>
+                  <cn>0</cn>
+               </vector>
+            </matrix>
+         </list>
+      </apply>
+   </apply>
+</math>

Maxima Input Form

+

+ This shows the result of an attempted + conversion to Maxima Input syntax: + +

+

+ The conversion from Content MathML to Maxima Input was not successful for + this input. + +

+ + + + + + + + + + + + + + + + + + + + + + + +
Failure CodeMessageXPathContext
UMFG00Content MathML element matrix not supportedapply[1]/apply[1]/list[1]/matrix[1]
<matrix>
+   <vector>
+      <cn>1</cn>
+      <cn>0</cn>
+   </vector>
+   <vector>
+      <cn>0</cn>
+      <cn>1</cn>
+   </vector>
+</matrix>
UMFG00Content MathML element matrix not supportedapply[1]/apply[2]/list[1]/matrix[1]
<matrix>
+   <vector>
+      <cn>0</cn>
+      <cn>1</cn>
+   </vector>
+   <vector>
+      <cn>1</cn>
+      <cn>0</cn>
+   </vector>
+</matrix>
+

MathML Parallel Markup

+

+ This shows the enhanced Presentation MathML with other forms encapsulated + as annotations: + +

<math xmlns="http://www.w3.org/1998/Math/MathML">
+   <semantics>
+      <mrow>
+         <mrow>
+            <mrow>
+               <mi>cos</mi>
+               <mo>&ApplyFunction;</mo>
+               <mfenced close=")" open="(">
+                  <mi>theta</mi>
+               </mfenced>
+            </mrow>
+            <mo>&sdot;</mo>
+            <mfenced close="]" open="[">
+               <mtable>
+                  <mtr>
+                     <mtd>
+                        <mn>1</mn>
+                     </mtd>
+                     <mtd>
+                        <mn>0</mn>
+                     </mtd>
+                  </mtr>
+                  <mtr>
+                     <mtd>
+                        <mn>0</mn>
+                     </mtd>
+                     <mtd>
+                        <mn>1</mn>
+                     </mtd>
+                  </mtr>
+               </mtable>
+            </mfenced>
+         </mrow>
+         <mo>+</mo>
+         <mrow>
+            <mi>i</mi>
+            <mo>&sdot;</mo>
+            <mrow>
+               <mi>sin</mi>
+               <mo>&ApplyFunction;</mo>
+               <mfenced close=")" open="(">
+                  <mi>theta</mi>
+               </mfenced>
+            </mrow>
+            <mo>&sdot;</mo>
+            <mfenced close="]" open="[">
+               <mtable>
+                  <mtr>
+                     <mtd>
+                        <mn>0</mn>
+                     </mtd>
+                     <mtd>
+                        <mn>1</mn>
+                     </mtd>
+                  </mtr>
+                  <mtr>
+                     <mtd>
+                        <mn>1</mn>
+                     </mtd>
+                     <mtd>
+                        <mn>0</mn>
+                     </mtd>
+                  </mtr>
+               </mtable>
+            </mfenced>
+         </mrow>
+      </mrow>
+      <annotation-xml encoding="MathML-Content">
+         <apply>
+            <plus/>
+            <apply>
+               <times/>
+               <apply>
+                  <cos/>
+                  <ci>theta</ci>
+               </apply>
+               <list>
+                  <matrix>
+                     <vector>
+                        <cn>1</cn>
+                        <cn>0</cn>
+                     </vector>
+                     <vector>
+                        <cn>0</cn>
+                        <cn>1</cn>
+                     </vector>
+                  </matrix>
+               </list>
+            </apply>
+            <apply>
+               <times/>
+               <ci>i</ci>
+               <apply>
+                  <sin/>
+                  <ci>theta</ci>
+               </apply>
+               <list>
+                  <matrix>
+                     <vector>
+                        <cn>0</cn>
+                        <cn>1</cn>
+                     </vector>
+                     <vector>
+                        <cn>1</cn>
+                        <cn>0</cn>
+                     </vector>
+                  </matrix>
+               </list>
+            </apply>
+         </apply>
+      </annotation-xml>
+      <annotation encoding="ASCIIMathInput"/>
+      <annotation-xml encoding="Maxima-upconversion-failures">
+         <s:fail xmlns:s="http://www.ph.ed.ac.uk/snuggletex" code="UMFG00"
+                 message="Content MathML element matrix not supported">
+            <s:arg>matrix</s:arg>
+            <s:xpath>apply[1]/apply[1]/list[1]/matrix[1]</s:xpath>
+            <s:context>
+               <matrix>
+                  <vector>
+                     <cn>1</cn>
+                     <cn>0</cn>
+                  </vector>
+                  <vector>
+                     <cn>0</cn>
+                     <cn>1</cn>
+                  </vector>
+               </matrix>
+            </s:context>
+         </s:fail>
+         <s:fail xmlns:s="http://www.ph.ed.ac.uk/snuggletex" code="UMFG00"
+                 message="Content MathML element matrix not supported">
+            <s:arg>matrix</s:arg>
+            <s:xpath>apply[1]/apply[2]/list[1]/matrix[1]</s:xpath>
+            <s:context>
+               <matrix>
+                  <vector>
+                     <cn>0</cn>
+                     <cn>1</cn>
+                  </vector>
+                  <vector>
+                     <cn>1</cn>
+                     <cn>0</cn>
+                  </vector>
+               </matrix>
+            </s:context>
+         </s:fail>
+      </annotation-xml>
+   </semantics>
+</math>
+
+
+
+ + + \ No newline at end of file diff --git a/common/lib/capa/capa/tests/test_files/snuggletex_wrong.html b/common/lib/capa/capa/tests/test_files/snuggletex_wrong.html new file mode 100644 index 0000000000..abd62ca4d2 --- /dev/null +++ b/common/lib/capa/capa/tests/test_files/snuggletex_wrong.html @@ -0,0 +1,187 @@ + + + + + + + + + + + + SnuggleTeX - ASCIIMathML Enrichment Demo + + + + + + + +

SnuggleTeX (1.2.2)

+
+ + +
+ +
+

ASCIIMathML Enrichment Demo

+

Input

+

+ This demo is similar to the + MathML Semantic Enrichnment Demo + but uses + ASCIIMathML as + an alternative input format, which provides real-time feedback as you + type but can often generate MathML with odd semantics in it. + SnuggleTeX includes some functionality that can to convert this raw MathML into + something equivalent to its own MathML output, thereby allowing you to + semantically enrich it in + certain simple cases, making ASCIIMathML a possibly viable input format + for simple semantic maths. + +

+

+ To try the demo, simply enter some some ASCIIMathML into the box below. + You should see a real time preview of this while you type. + Then hit Go! to use SnuggleTeX to semantically enrich your + input. + +

+
+
+ ASCIIMath Input: +
+
+

Live Preview

+

+ This is a MathML rendering of your input, generated by ASCIIMathML as you type. + +

+
+
+
+

+ This is the underlying MathML source generated by ASCIIMathML, again updated in real time. + +

+
 
+

Enhanced Presentation MathML

+

+ This shows the result of attempting to enrich the raw Presentation MathML + generated by ASCIIMathML: + +

<math xmlns="http://www.w3.org/1998/Math/MathML">
+   <mn>2</mn>
+</math>

Content MathML

+

+ This shows the result of an attempted + conversion to Content MathML: + +

<math xmlns="http://www.w3.org/1998/Math/MathML">
+   <cn>2</cn>
+</math>

Maxima Input Form

+

+ This shows the result of an attempted + conversion to Maxima Input syntax: + +

2

MathML Parallel Markup

+

+ This shows the enhanced Presentation MathML with other forms encapsulated + as annotations: + +

<math xmlns="http://www.w3.org/1998/Math/MathML">
+   <semantics>
+      <mn>2</mn>
+      <annotation-xml encoding="MathML-Content">
+         <cn>2</cn>
+      </annotation-xml>
+      <annotation encoding="ASCIIMathInput"/>
+      <annotation encoding="Maxima">2</annotation>
+   </semantics>
+</math>
+
+
+
+ + + \ No newline at end of file diff --git a/common/lib/capa/capa/tests/test_html_render.py b/common/lib/capa/capa/tests/test_html_render.py index 492fcb2743..62605b48f5 100644 --- a/common/lib/capa/capa/tests/test_html_render.py +++ b/common/lib/capa/capa/tests/test_html_render.py @@ -6,12 +6,15 @@ import json import mock -from capa.capa_problem import LoncapaProblem from .response_xml_factory import StringResponseXMLFactory, CustomResponseXMLFactory -from . import test_system +from . import test_system, new_loncapa_problem class CapaHtmlRenderTest(unittest.TestCase): + def setUp(self): + super(CapaHtmlRenderTest, self).setUp() + self.system = test_system() + def test_blank_problem(self): """ It's important that blank problems don't break, since that's @@ -20,7 +23,7 @@ class CapaHtmlRenderTest(unittest.TestCase): xml_str = " " # Create the problem - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = new_loncapa_problem(xml_str) # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -39,7 +42,7 @@ class CapaHtmlRenderTest(unittest.TestCase): """) # Create the problem - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = new_loncapa_problem(xml_str, system=self.system) # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -49,9 +52,6 @@ class CapaHtmlRenderTest(unittest.TestCase): self.assertEqual(test_element.tag, "test") self.assertEqual(test_element.text, "Test include") - - - def test_process_outtext(self): # Generate some XML with and xml_str = textwrap.dedent(""" @@ -61,7 +61,7 @@ class CapaHtmlRenderTest(unittest.TestCase): """) # Create the problem - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = new_loncapa_problem(xml_str) # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -80,7 +80,7 @@ class CapaHtmlRenderTest(unittest.TestCase): """) # Create the problem - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = 
new_loncapa_problem(xml_str) # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -98,7 +98,7 @@ class CapaHtmlRenderTest(unittest.TestCase): """) # Create the problem - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = new_loncapa_problem(xml_str) # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -117,11 +117,12 @@ class CapaHtmlRenderTest(unittest.TestCase): xml_str = StringResponseXMLFactory().build_xml(**kwargs) # Mock out the template renderer - test_system.render_template = mock.Mock() - test_system.render_template.return_value = "
Input Template Render
" + the_system = test_system() + the_system.render_template = mock.Mock() + the_system.render_template.return_value = "
Input Template Render
" # Create the problem and render the HTML - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = new_loncapa_problem(xml_str, system=the_system) rendered_html = etree.XML(problem.get_html()) # Expect problem has been turned into a
@@ -166,7 +167,7 @@ class CapaHtmlRenderTest(unittest.TestCase): mock.call('textline.html', expected_textline_context), mock.call('solutionspan.html', expected_solution_context)] - self.assertEqual(test_system.render_template.call_args_list, + self.assertEqual(the_system.render_template.call_args_list, expected_calls) @@ -184,7 +185,7 @@ class CapaHtmlRenderTest(unittest.TestCase): xml_str = CustomResponseXMLFactory().build_xml(**kwargs) # Create the problem and render the html - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = new_loncapa_problem(xml_str) # Grade the problem correctmap = problem.grade_answers({'1_2_1': 'test'}) @@ -219,7 +220,7 @@ class CapaHtmlRenderTest(unittest.TestCase): """) # Create the problem and render the HTML - problem = LoncapaProblem(xml_str, '1', system=test_system) + problem = new_loncapa_problem(xml_str) rendered_html = etree.XML(problem.get_html()) # Expect that the variable $test has been replaced with its value @@ -227,7 +228,7 @@ class CapaHtmlRenderTest(unittest.TestCase): self.assertEqual(span_element.get('attr'), "TEST") def _create_test_file(self, path, content_str): - test_fp = test_system.filestore.open(path, "w") + test_fp = self.system.filestore.open(path, "w") test_fp.write(content_str) test_fp.close() diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 54edb5bf9f..313eb28249 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -45,7 +45,7 @@ class OptionInputTest(unittest.TestCase): state = {'value': 'Down', 'id': 'sky_input', 'status': 'answered'} - option_input = lookup_tag('optioninput')(test_system, element, state) + option_input = lookup_tag('optioninput')(test_system(), element, state) context = option_input._get_render_context() @@ -92,7 +92,7 @@ class ChoiceGroupTest(unittest.TestCase): 'id': 'sky_input', 'status': 'answered'} - the_input = lookup_tag(tag)(test_system, 
element, state) + the_input = lookup_tag(tag)(test_system(), element, state) context = the_input._get_render_context() @@ -142,7 +142,7 @@ class JavascriptInputTest(unittest.TestCase): element = etree.fromstring(xml_str) state = {'value': '3', } - the_input = lookup_tag('javascriptinput')(test_system, element, state) + the_input = lookup_tag('javascriptinput')(test_system(), element, state) context = the_input._get_render_context() @@ -170,7 +170,7 @@ class TextLineTest(unittest.TestCase): element = etree.fromstring(xml_str) state = {'value': 'BumbleBee', } - the_input = lookup_tag('textline')(test_system, element, state) + the_input = lookup_tag('textline')(test_system(), element, state) context = the_input._get_render_context() @@ -198,7 +198,7 @@ class TextLineTest(unittest.TestCase): element = etree.fromstring(xml_str) state = {'value': 'BumbleBee', } - the_input = lookup_tag('textline')(test_system, element, state) + the_input = lookup_tag('textline')(test_system(), element, state) context = the_input._get_render_context() @@ -236,7 +236,7 @@ class TextLineTest(unittest.TestCase): element = etree.fromstring(xml_str) state = {'value': 'BumbleBee', } - the_input = lookup_tag('textline')(test_system, element, state) + the_input = lookup_tag('textline')(test_system(), element, state) context = the_input._get_render_context() @@ -274,7 +274,7 @@ class FileSubmissionTest(unittest.TestCase): 'status': 'incomplete', 'feedback': {'message': '3'}, } input_class = lookup_tag('filesubmission') - the_input = input_class(test_system, element, state) + the_input = input_class(test_system(), element, state) context = the_input._get_render_context() @@ -319,7 +319,7 @@ class CodeInputTest(unittest.TestCase): 'feedback': {'message': '3'}, } input_class = lookup_tag('codeinput') - the_input = input_class(test_system, element, state) + the_input = input_class(test_system(), element, state) context = the_input._get_render_context() @@ -368,7 +368,7 @@ class 
MatlabTest(unittest.TestCase): 'feedback': {'message': '3'}, } self.input_class = lookup_tag('matlabinput') - self.the_input = self.input_class(test_system, elt, state) + self.the_input = self.input_class(test_system(), elt, state) def test_rendering(self): context = self.the_input._get_render_context() @@ -396,7 +396,7 @@ class MatlabTest(unittest.TestCase): 'feedback': {'message': '3'}, } elt = etree.fromstring(self.xml) - the_input = self.input_class(test_system, elt, state) + the_input = self.input_class(test_system(), elt, state) context = the_input._get_render_context() expected = {'id': 'prob_1_2', @@ -423,7 +423,7 @@ class MatlabTest(unittest.TestCase): } elt = etree.fromstring(self.xml) - the_input = self.input_class(test_system, elt, state) + the_input = self.input_class(test_system(), elt, state) context = the_input._get_render_context() expected = {'id': 'prob_1_2', 'value': 'print "good evening"', @@ -448,7 +448,7 @@ class MatlabTest(unittest.TestCase): } elt = etree.fromstring(self.xml) - the_input = self.input_class(test_system, elt, state) + the_input = self.input_class(test_system(), elt, state) context = the_input._get_render_context() expected = {'id': 'prob_1_2', 'value': 'print "good evening"', @@ -470,7 +470,7 @@ class MatlabTest(unittest.TestCase): get = {'submission': 'x = 1234;'} response = self.the_input.handle_ajax("plot", get) - test_system.xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY) + test_system().xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY) self.assertTrue(response['success']) self.assertTrue(self.the_input.input_state['queuekey'] is not None) @@ -479,13 +479,12 @@ class MatlabTest(unittest.TestCase): def test_plot_data_failure(self): get = {'submission': 'x = 1234;'} error_message = 'Error message!' 
- test_system.xqueue['interface'].send_to_queue.return_value = (1, error_message) + test_system().xqueue['interface'].send_to_queue.return_value = (1, error_message) response = self.the_input.handle_ajax("plot", get) self.assertFalse(response['success']) self.assertEqual(response['message'], error_message) self.assertTrue('queuekey' not in self.the_input.input_state) self.assertTrue('queuestate' not in self.the_input.input_state) - test_system.xqueue['interface'].send_to_queue.return_value = (0, 'Success!') def test_ungraded_response_success(self): queuekey = 'abcd' @@ -496,7 +495,7 @@ class MatlabTest(unittest.TestCase): 'feedback': {'message': '3'}, } elt = etree.fromstring(self.xml) - the_input = self.input_class(test_system, elt, state) + the_input = self.input_class(test_system(), elt, state) inner_msg = 'hello!' queue_msg = json.dumps({'msg': inner_msg}) @@ -514,7 +513,7 @@ class MatlabTest(unittest.TestCase): 'feedback': {'message': '3'}, } elt = etree.fromstring(self.xml) - the_input = self.input_class(test_system, elt, state) + the_input = self.input_class(test_system(), elt, state) inner_msg = 'hello!' 
queue_msg = json.dumps({'msg': inner_msg}) @@ -553,7 +552,7 @@ class SchematicTest(unittest.TestCase): state = {'value': value, 'status': 'unsubmitted'} - the_input = lookup_tag('schematic')(test_system, element, state) + the_input = lookup_tag('schematic')(test_system(), element, state) context = the_input._get_render_context() @@ -592,7 +591,7 @@ class ImageInputTest(unittest.TestCase): state = {'value': value, 'status': 'unsubmitted'} - the_input = lookup_tag('imageinput')(test_system, element, state) + the_input = lookup_tag('imageinput')(test_system(), element, state) context = the_input._get_render_context() @@ -643,7 +642,7 @@ class CrystallographyTest(unittest.TestCase): state = {'value': value, 'status': 'unsubmitted'} - the_input = lookup_tag('crystallography')(test_system, element, state) + the_input = lookup_tag('crystallography')(test_system(), element, state) context = the_input._get_render_context() @@ -681,7 +680,7 @@ class VseprTest(unittest.TestCase): state = {'value': value, 'status': 'unsubmitted'} - the_input = lookup_tag('vsepr_input')(test_system, element, state) + the_input = lookup_tag('vsepr_input')(test_system(), element, state) context = the_input._get_render_context() @@ -708,7 +707,7 @@ class ChemicalEquationTest(unittest.TestCase): element = etree.fromstring(xml_str) state = {'value': 'H2OYeah', } - self.the_input = lookup_tag('chemicalequationinput')(test_system, element, state) + self.the_input = lookup_tag('chemicalequationinput')(test_system(), element, state) def test_rendering(self): ''' Verify that the render context matches the expected render context''' @@ -783,7 +782,7 @@ class DragAndDropTest(unittest.TestCase): ] } - the_input = lookup_tag('drag_and_drop_input')(test_system, element, state) + the_input = lookup_tag('drag_and_drop_input')(test_system(), element, state) context = the_input._get_render_context() expected = {'id': 'prob_1_2', @@ -832,7 +831,7 @@ class AnnotationInputTest(unittest.TestCase): tag = 
'annotationinput' - the_input = lookup_tag(tag)(test_system, element, state) + the_input = lookup_tag(tag)(test_system(), element, state) context = the_input._get_render_context() diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py index 5fbc7f8c87..8bf6954139 100644 --- a/common/lib/capa/capa/tests/test_responsetypes.py +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -2,7 +2,6 @@ Tests of responsetypes """ - from datetime import datetime import json from nose.plugins.skip import SkipTest @@ -10,10 +9,11 @@ import os import random import unittest import textwrap +import mock +import textwrap -from . import test_system +from . import new_loncapa_problem, test_system -import capa.capa_problem as lcp from capa.responsetypes import LoncapaProblemError, \ StudentInputError, ResponseError from capa.correctmap import CorrectMap @@ -30,9 +30,9 @@ class ResponseTest(unittest.TestCase): if self.xml_factory_class: self.xml_factory = self.xml_factory_class() - def build_problem(self, **kwargs): + def build_problem(self, system=None, **kwargs): xml = self.xml_factory.build_xml(**kwargs) - return lcp.LoncapaProblem(xml, '1', system=test_system) + return new_loncapa_problem(xml, system=system) def assert_grade(self, problem, submission, expected_correctness, msg=None): input_dict = {'1_2_1': submission} @@ -184,94 +184,151 @@ class ImageResponseTest(ResponseTest): self.assert_answer_format(problem) -class SymbolicResponseTest(unittest.TestCase): - def test_sr_grade(self): - raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. 
Until we have figured that out, we'll just skip this test - symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml" - test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', - '1_2_1_dynamath': ''' - - - - cos - - ( - θ - ) - - - - - [ - - - - 1 - - - 0 - - - - - 0 - - - 1 - - - - ] - - + - i - - - sin - - ( - θ - ) - - - - - [ - - - - 0 - - - 1 - - - - - 1 - - - 0 - - - - ] - - - - ''', - } - wrong_answers = {'1_2_1': '2', - '1_2_1_dynamath': ''' - - - 2 - - ''', - } - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') +class SymbolicResponseTest(ResponseTest): + from response_xml_factory import SymbolicResponseXMLFactory + xml_factory_class = SymbolicResponseXMLFactory + + def test_grade_single_input(self): + problem = self.build_problem(math_display=True, + expect="2*x+3*y") + + # Correct answers + correct_inputs = [ + ('2x+3y', textwrap.dedent(""" + + + 2*x+3*y + """)), + + ('x+x+3y', textwrap.dedent(""" + + + x+x+3*y + """)), + ] + + for (input_str, input_mathml) in correct_inputs: + self._assert_symbolic_grade(problem, input_str, input_mathml, 'correct') + + # Incorrect answers + incorrect_inputs = [ + ('0', ''), + ('4x+3y', textwrap.dedent(""" + + + 4*x+3*y + """)), + ] + + for (input_str, input_mathml) in incorrect_inputs: + self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect') + + + def test_complex_number_grade(self): + problem = self.build_problem(math_display=True, + expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]", + options=["matrix", "imaginary"]) + + # For LaTeX-style inputs, symmath_check() will try to contact + # a server to convert the input to MathML. 
+ # We mock out the server, simulating the response that it would give + # for this input. + import requests + dirpath = os.path.dirname(__file__) + correct_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_correct.html")).read().decode('utf8') + wrong_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_wrong.html")).read().decode('utf8') + + # Correct answer + with mock.patch.object(requests, 'post') as mock_post: + + # Simulate what the LaTeX-to-MathML server would + # send for the correct response input + mock_post.return_value.text = correct_snuggletex_response + + self._assert_symbolic_grade(problem, + "cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]", + textwrap.dedent(""" + + + + cos + (θ) + + + + [ + + + 10 + + + 01 + + + ] + + + + i + + + sin + + (θ) + + + + + [ + + + 01 + + + 10 + + + ] + + + + """), + 'correct') + + # Incorrect answer + with mock.patch.object(requests, 'post') as mock_post: + + # Simulate what the LaTeX-to-MathML server would + # send for the incorrect response input + mock_post.return_value.text = wrong_snuggletex_response + + self._assert_symbolic_grade(problem, "2", + textwrap.dedent(""" + + 2 + + """), + 'incorrect') + + def test_multiple_inputs_exception(self): + + # Should not allow multiple inputs, since we specify + # only one "expect" value + with self.assertRaises(Exception): + problem = self.build_problem(math_display=True, + expect="2*x+3*y", + num_inputs=3) + + def _assert_symbolic_grade(self, problem, + student_input, + dynamath_input, + expected_correctness): + input_dict = {'1_2_1': str(student_input), + '1_2_1_dynamath': str(dynamath_input) } + + correct_map = problem.grade_answers(input_dict) + + self.assertEqual(correct_map.get_correctness('1_2_1'), + expected_correctness) class OptionResponseTest(ResponseTest): @@ -531,6 +588,22 @@ class StringResponseTest(ResponseTest): correct_map = problem.grade_answers(input_dict) self.assertEquals(correct_map.get_hint('1_2_1'), "") 
+ def test_computed_hints(self): + problem = self.build_problem( + answer="Michigan", + hintfn="gimme_a_hint", + script = textwrap.dedent(""" + def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap): + aid = answer_ids[0] + answer = student_answers[aid] + new_cmap.set_hint_and_mode(aid, answer+"??", "always") + """) + ) + + input_dict = {'1_2_1': 'Hello'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), "Hello??") + class CodeResponseTest(ResponseTest): from response_xml_factory import CodeResponseXMLFactory @@ -710,16 +783,37 @@ class JavascriptResponseTest(ResponseTest): coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" os.system("node_modules/.bin/coffee -c %s" % (coffee_file_path)) - problem = self.build_problem(generator_src="test_problem_generator.js", - grader_src="test_problem_grader.js", - display_class="TestProblemDisplay", - display_src="test_problem_display.js", - param_dict={'value': '4'}) + system = test_system() + system.can_execute_unsafe_code = lambda: True + problem = self.build_problem( + system=system, + generator_src="test_problem_generator.js", + grader_src="test_problem_grader.js", + display_class="TestProblemDisplay", + display_src="test_problem_display.js", + param_dict={'value': '4'}, + ) # Test that we get graded correctly self.assert_grade(problem, json.dumps({0: 4}), "correct") self.assert_grade(problem, json.dumps({0: 5}), "incorrect") + def test_cant_execute_javascript(self): + # If the system says to disallow unsafe code execution, then making + # this problem will raise an exception. 
+ system = test_system() + system.can_execute_unsafe_code = lambda: False + + with self.assertRaises(LoncapaProblemError): + problem = self.build_problem( + system=system, + generator_src="test_problem_generator.js", + grader_src="test_problem_grader.js", + display_class="TestProblemDisplay", + display_src="test_problem_display.js", + param_dict={'value': '4'}, + ) + class NumericalResponseTest(ResponseTest): from response_xml_factory import NumericalResponseXMLFactory @@ -853,9 +947,8 @@ class CustomResponseTest(ResponseTest): # # 'answer_given' is the answer the student gave (if there is just one input) # or an ordered list of answers (if there are multiple inputs) - # - # - # The function should return a dict of the form + # + # The function should return a dict of the form # { 'ok': BOOL, 'msg': STRING } # script = textwrap.dedent(""" @@ -964,6 +1057,35 @@ class CustomResponseTest(ResponseTest): self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2') self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3') + def test_function_code_with_extra_args(self): + script = textwrap.dedent("""\ + def check_func(expect, answer_given, options, dynamath): + assert options == "xyzzy", "Options was %r" % options + return {'ok': answer_given == expect, 'msg': 'Message text'} + """) + + problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath") + + # Correct answer + input_dict = {'1_2_1': '42'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + msg = correct_map.get_msg('1_2_1') + + self.assertEqual(correctness, 'correct') + self.assertEqual(msg, "Message text") + + # Incorrect answer + input_dict = {'1_2_1': '0'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + msg = correct_map.get_msg('1_2_1') + + self.assertEqual(correctness, 'incorrect') + self.assertEqual(msg, "Message text") + def 
test_multiple_inputs_return_one_status(self): # When given multiple inputs, the 'answer_given' argument # to the check_func() is a list of inputs diff --git a/common/lib/capa/capa/util.py b/common/lib/capa/capa/util.py index 8b05ea717e..ec43da6093 100644 --- a/common/lib/capa/capa/util.py +++ b/common/lib/capa/capa/util.py @@ -1,4 +1,4 @@ -from .calc import evaluator, UndefinedVariable +from calc import evaluator, UndefinedVariable from cmath import isinf #----------------------------------------------------------------------------- diff --git a/common/lib/capa/setup.py b/common/lib/capa/setup.py index 7719626c3e..2e73701060 100644 --- a/common/lib/capa/setup.py +++ b/common/lib/capa/setup.py @@ -4,5 +4,5 @@ setup( name="capa", version="0.1", packages=find_packages(exclude=["tests"]), - install_requires=['distribute==0.6.28', 'pyparsing==1.5.6'], + install_requires=["distribute==0.6.28"], ) diff --git a/lms/lib/symmath/README.md b/common/lib/capa/symmath/README.md similarity index 100% rename from lms/lib/symmath/README.md rename to common/lib/capa/symmath/README.md diff --git a/lms/lib/symmath/__init__.py b/common/lib/capa/symmath/__init__.py similarity index 100% rename from lms/lib/symmath/__init__.py rename to common/lib/capa/symmath/__init__.py diff --git a/lms/lib/symmath/formula.py b/common/lib/capa/symmath/formula.py similarity index 99% rename from lms/lib/symmath/formula.py rename to common/lib/capa/symmath/formula.py index 604941ffdd..8369baa27c 100644 --- a/lms/lib/symmath/formula.py +++ b/common/lib/capa/symmath/formula.py @@ -736,4 +736,4 @@ def test6(): # imaginary numbers ''' - return formula(xmlstr, options='imaginaryi') + return formula(xmlstr, options='imaginary') diff --git a/lms/lib/symmath/symmath_check.py b/common/lib/capa/symmath/symmath_check.py similarity index 99% rename from lms/lib/symmath/symmath_check.py rename to common/lib/capa/symmath/symmath_check.py index 151debee71..65a17883f5 100644 --- a/lms/lib/symmath/symmath_check.py +++ 
b/common/lib/capa/symmath/symmath_check.py @@ -324,4 +324,5 @@ def symmath_check(expect, ans, dynamath=None, options=None, debug=None, xml=None msg += "

Difference: %s

" % to_latex(diff) msg += '
' - return {'ok': False, 'msg': msg, 'ex': fexpect, 'got': fsym} + # Used to return more keys: 'ex': fexpect, 'got': fsym + return {'ok': False, 'msg': msg} diff --git a/common/lib/capa/capa/chem/__init__.py b/common/lib/chem/chem/__init__.py similarity index 100% rename from common/lib/capa/capa/chem/__init__.py rename to common/lib/chem/chem/__init__.py diff --git a/common/lib/capa/capa/chem/chemcalc.py b/common/lib/chem/chem/chemcalc.py similarity index 100% rename from common/lib/capa/capa/chem/chemcalc.py rename to common/lib/chem/chem/chemcalc.py diff --git a/common/lib/capa/capa/chem/chemtools.py b/common/lib/chem/chem/chemtools.py similarity index 100% rename from common/lib/capa/capa/chem/chemtools.py rename to common/lib/chem/chem/chemtools.py diff --git a/common/lib/capa/capa/chem/miller.py b/common/lib/chem/chem/miller.py similarity index 100% rename from common/lib/capa/capa/chem/miller.py rename to common/lib/chem/chem/miller.py diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/chem/chem/tests.py similarity index 100% rename from common/lib/capa/capa/chem/tests.py rename to common/lib/chem/chem/tests.py diff --git a/common/lib/chem/setup.py b/common/lib/chem/setup.py new file mode 100644 index 0000000000..4f2b24ddee --- /dev/null +++ b/common/lib/chem/setup.py @@ -0,0 +1,13 @@ +from setuptools import setup + +setup( + name="chem", + version="0.1", + packages=["chem"], + install_requires=[ + "pyparsing==1.5.6", + "numpy", + "scipy", + "nltk==2.0.4", + ], +) diff --git a/common/lib/sandbox-packages/README b/common/lib/sandbox-packages/README new file mode 100644 index 0000000000..706998b08e --- /dev/null +++ b/common/lib/sandbox-packages/README @@ -0,0 +1 @@ +This directory is in the Python path for sandboxed Python execution. 
diff --git a/common/lib/capa/capa/eia.py b/common/lib/sandbox-packages/eia.py similarity index 100% rename from common/lib/capa/capa/eia.py rename to common/lib/sandbox-packages/eia.py diff --git a/common/lib/sandbox-packages/setup.py b/common/lib/sandbox-packages/setup.py new file mode 100644 index 0000000000..1b99118aca --- /dev/null +++ b/common/lib/sandbox-packages/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup + +setup( + name="sandbox-packages", + version="0.1", + packages=[ + "verifiers", + ], + py_modules=[ + "eia", + ], + install_requires=[ + ], +) diff --git a/common/lib/capa/capa/verifiers/__init__.py b/common/lib/sandbox-packages/verifiers/__init__.py similarity index 100% rename from common/lib/capa/capa/verifiers/__init__.py rename to common/lib/sandbox-packages/verifiers/__init__.py diff --git a/common/lib/capa/capa/verifiers/draganddrop.py b/common/lib/sandbox-packages/verifiers/draganddrop.py similarity index 100% rename from common/lib/capa/capa/verifiers/draganddrop.py rename to common/lib/sandbox-packages/verifiers/draganddrop.py diff --git a/common/lib/capa/capa/verifiers/tests_draganddrop.py b/common/lib/sandbox-packages/verifiers/tests_draganddrop.py similarity index 100% rename from common/lib/capa/capa/verifiers/tests_draganddrop.py rename to common/lib/sandbox-packages/verifiers/tests_draganddrop.py diff --git a/common/lib/xmodule/test_files/symbolicresponse.xml b/common/lib/xmodule/test_files/symbolicresponse.xml index 4dc2bc9d7b..8443366ffe 100644 --- a/common/lib/xmodule/test_files/symbolicresponse.xml +++ b/common/lib/xmodule/test_files/symbolicresponse.xml @@ -13,13 +13,10 @@ real time, next to the input box.

This is a correct answer which may be entered below:

cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]

- Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax] and give the resulting \(2 \times 2\) matrix.
Your input should be typed in as a list of lists, eg [[1,2],[3,4]].
- [mathjax]U=[/mathjax] + [mathjax]U=[/mathjax]
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index 479cd5a759..eb6bdc18c9 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -3,7 +3,9 @@ import datetime import hashlib import json import logging +import os import traceback +import struct import sys from pkg_resources import resource_string @@ -23,8 +25,10 @@ from xmodule.util.date_utils import time_to_datetime log = logging.getLogger("mitx.courseware") -# Generated this many different variants of problems with rerandomize=per_student +# Generate this many different variants of problems with rerandomize=per_student NUM_RANDOMIZATION_BINS = 20 +# Never produce more than this many different seeds, no matter what. +MAX_RANDOMIZATION_BINS = 1000 def randomization_bin(seed, problem_id): @@ -109,11 +113,7 @@ class CapaModule(CapaFields, XModule): self.close_date = due_date if self.seed is None: - if self.rerandomize == 'never': - self.seed = 1 - elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'): - # see comment on randomization_bin - self.seed = randomization_bin(system.seed, self.location.url) + self.choose_new_seed() # Need the problem location in openendedresponse to send out. Adding # it to the system here seems like the least clunky way to get it @@ -157,6 +157,22 @@ class CapaModule(CapaFields, XModule): self.set_state_from_lcp() + assert self.seed is not None + + def choose_new_seed(self): + """Choose a new seed.""" + if self.rerandomize == 'never': + self.seed = 1 + elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'): + # see comment on randomization_bin + self.seed = randomization_bin(self.system.seed, self.location.url) + else: + self.seed = struct.unpack('i', os.urandom(4))[0] + + # So that sandboxed code execution can be cached, but still have an interesting + # number of possibilities, cap the number of different random seeds. 
+ self.seed %= MAX_RANDOMIZATION_BINS + def new_lcp(self, state, text=None): if text is None: text = self.data @@ -165,6 +181,7 @@ class CapaModule(CapaFields, XModule): problem_text=text, id=self.location.html_id(), state=state, + seed=self.seed, system=self.system, ) @@ -832,14 +849,11 @@ class CapaModule(CapaFields, XModule): 'error': "Refresh the page and make an attempt before resetting."} if self.rerandomize in ["always", "onreset"]: - # reset random number generator seed (note the self.lcp.get_state() - # in next line) - seed = None - else: - seed = self.lcp.seed + # Reset random number generator seed. + self.choose_new_seed() # Generate a new problem with either the previous seed or a new seed - self.lcp = self.new_lcp({'seed': seed}) + self.lcp = self.new_lcp(None) # Pull in the new problem seed self.set_state_from_lcp() diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index 0a2f22aa68..6af11a3ac8 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -14,7 +14,7 @@ import fs.osfs import numpy -import capa.calc as calc +import calc import xmodule from xmodule.x_module import ModuleSystem from mock import Mock @@ -33,15 +33,14 @@ def test_system(): """ Construct a test ModuleSystem instance. - By default, the render_template() method simply returns - the context it is passed as a string. - You can override this behavior by monkey patching: + By default, the render_template() method simply returns the context it is + passed as a string. You can override this behavior by monkey patching:: - system = test_system() - system.render_template = my_render_func + system = test_system() + system.render_template = my_render_func + + where `my_render_func` is a function of the form my_render_func(template, context). 
- where my_render_func is a function of the form - my_render_func(template, context) """ return ModuleSystem( ajax_url='courses/course_id/modx/a_location', @@ -86,10 +85,12 @@ class ModelsTest(unittest.TestCase): self.assertTrue(abs(calc.evaluator(variables, functions, "e^(j*pi)") + 1) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "j||1") - 0.5 - 0.5j) < 0.00001) variables['t'] = 1.0 + # Use self.assertAlmostEqual here... self.assertTrue(abs(calc.evaluator(variables, functions, "t") - 1.0) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "T") - 1.0) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "t", cs=True) - 1.0) < 0.00001) self.assertTrue(abs(calc.evaluator(variables, functions, "T", cs=True) - 298) < 0.2) + # Use self.assertRaises here... exception_happened = False try: calc.evaluator({}, {}, "5+7 QWSEKO") diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py index f948f5bdfe..61de21b129 100644 --- a/common/lib/xmodule/xmodule/tests/test_capa_module.py +++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py @@ -550,6 +550,7 @@ class CapaModuleTest(unittest.TestCase): def test_reset_problem(self): module = CapaFactory.create(done=True) module.new_lcp = Mock(wraps=module.new_lcp) + module.choose_new_seed = Mock(wraps=module.choose_new_seed) # Stub out HTML rendering with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html: @@ -567,7 +568,8 @@ class CapaModuleTest(unittest.TestCase): self.assertEqual(result['html'], "
Test HTML
") # Expect that the problem was reset - module.new_lcp.assert_called_once_with({'seed': None}) + module.new_lcp.assert_called_once_with(None) + module.choose_new_seed.assert_called_once_with() def test_reset_problem_closed(self): module = CapaFactory.create() @@ -1033,3 +1035,13 @@ class CapaModuleTest(unittest.TestCase): self.assertTrue(module.seed is not None) msg = 'Could not get a new seed from reset after 5 tries' self.assertTrue(success, msg) + + def test_random_seed_bins(self): + # Assert that we are limiting the number of possible seeds. + + # Check the conditions that generate random seeds + for rerandomize in ['always', 'per_student', 'true', 'onreset']: + # Get a bunch of seeds, they should all be in 0-999. + for i in range(200): + module = CapaFactory.create(rerandomize=rerandomize) + assert 0 <= module.seed < 1000 diff --git a/common/lib/xmodule/xmodule/tests/test_progress.py b/common/lib/xmodule/xmodule/tests/test_progress.py index 0114ba4ad3..4bb663ad85 100644 --- a/common/lib/xmodule/xmodule/tests/test_progress.py +++ b/common/lib/xmodule/xmodule/tests/test_progress.py @@ -134,6 +134,6 @@ class ModuleProgressTest(unittest.TestCase): ''' def test_xmodule_default(self): '''Make sure default get_progress exists, returns None''' - xm = x_module.XModule(test_system, 'a://b/c/d/e', None, {}) + xm = x_module.XModule(test_system(), 'a://b/c/d/e', None, {}) p = xm.get_progress() self.assertEqual(p, None) diff --git a/common/lib/xmodule/xmodule/tests/test_randomize_module.py b/common/lib/xmodule/xmodule/tests/test_randomize_module.py index 59cf5a59f3..81935c4013 100644 --- a/common/lib/xmodule/xmodule/tests/test_randomize_module.py +++ b/common/lib/xmodule/xmodule/tests/test_randomize_module.py @@ -14,7 +14,6 @@ START = '2013-01-01T01:00:00' from .test_course_module import DummySystem as DummyImportSystem -from . 
import test_system class RandomizeModuleTestCase(unittest.TestCase): diff --git a/common/lib/xmodule/xmodule/x_module.py b/common/lib/xmodule/xmodule/x_module.py index 7c24d593e3..76ac6a1ff6 100644 --- a/common/lib/xmodule/xmodule/x_module.py +++ b/common/lib/xmodule/xmodule/x_module.py @@ -737,7 +737,10 @@ class ModuleSystem(object): anonymous_student_id='', course_id=None, open_ended_grading_interface=None, - s3_interface=None): + s3_interface=None, + cache=None, + can_execute_unsafe_code=None, + ): ''' Create a closure around the system environment. @@ -779,6 +782,14 @@ class ModuleSystem(object): xblock_model_data - A dict-like object containing the all data available to this xblock + + cache - A cache object with two methods: + .get(key) returns an object from the cache or None. + .set(key, value, timeout_secs=None) stores a value in the cache with a timeout. + + can_execute_unsafe_code - A function returning a boolean, whether or + not to allow the execution of unsafe, unsandboxed code. 
+ ''' self.ajax_url = ajax_url self.xqueue = xqueue @@ -803,6 +814,9 @@ class ModuleSystem(object): self.open_ended_grading_interface = open_ended_grading_interface self.s3_interface = s3_interface + self.cache = cache or DoNothingCache() + self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False) + def get(self, attr): ''' provide uniform access to attributes (like etree).''' return self.__dict__.get(attr) @@ -816,3 +830,12 @@ class ModuleSystem(object): def __str__(self): return str(self.__dict__) + + +class DoNothingCache(object): + """A duck-compatible object to use in ModuleSystem when there's no cache.""" + def get(self, key): + return None + + def set(self, key, value, timeout=None): + pass diff --git a/common/test/data/embedded_python/course.xml b/common/test/data/embedded_python/course.xml new file mode 100644 index 0000000000..1662543b4d --- /dev/null +++ b/common/test/data/embedded_python/course.xml @@ -0,0 +1 @@ + diff --git a/common/test/data/embedded_python/course/2013_Spring.xml b/common/test/data/embedded_python/course/2013_Spring.xml new file mode 100644 index 0000000000..fa6881c37b --- /dev/null +++ b/common/test/data/embedded_python/course/2013_Spring.xml @@ -0,0 +1,111 @@ + + + + + + +
+ +
+ +# for a schematic response, submission[i] is the json representation +# of the diagram and analysis results for the i-th schematic tag + +def get_tran(json,signal): + for element in json: + if element[0] == 'transient': + return element[1].get(signal,[]) + return [] + +def get_value(at,output): + for (t,v) in output: + if at == t: return v + return None + +output = get_tran(submission[0],'Z') +okay = True + +# output should be 1, 1, 1, 1, 1, 0, 0, 0 +if get_value(0.0000004,output) < 2.7: okay = False; +if get_value(0.0000009,output) < 2.7: okay = False; +if get_value(0.0000014,output) < 2.7: okay = False; +if get_value(0.0000019,output) < 2.7: okay = False; +if get_value(0.0000024,output) < 2.7: okay = False; +if get_value(0.0000029,output) > 0.25: okay = False; +if get_value(0.0000034,output) > 0.25: okay = False; +if get_value(0.0000039,output) > 0.25: okay = False; + +correct = ['correct' if okay else 'incorrect'] + +
+ + + + +
+ + + + + + +
    +
  1. +
    +num = 0
    +while num <= 5:
    +    print(num)
    +    num += 1
    +
    +print("Outside of loop")
    +print(num)
    + 
    +

    + + + +

    +
  2. +
+
+
+ + + + + + +if submission[0] == "Xyzzy": + correct = ['correct'] +else: + correct = ['incorrect'] + + + + + +
+
+
diff --git a/common/test/data/embedded_python/roots/2013_Spring.xml b/common/test/data/embedded_python/roots/2013_Spring.xml new file mode 100644 index 0000000000..1662543b4d --- /dev/null +++ b/common/test/data/embedded_python/roots/2013_Spring.xml @@ -0,0 +1 @@ + diff --git a/common/test/data/full/problem/test_files/symbolicresponse.xml b/common/test/data/full/problem/test_files/symbolicresponse.xml index 4dc2bc9d7b..85945b1d8c 100644 --- a/common/test/data/full/problem/test_files/symbolicresponse.xml +++ b/common/test/data/full/problem/test_files/symbolicresponse.xml @@ -19,7 +19,7 @@ from symmath import * Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax] and give the resulting \(2 \times 2\) matrix.
Your input should be typed in as a list of lists, eg [[1,2],[3,4]].
- [mathjax]U=[/mathjax] + [mathjax]U=[/mathjax]
diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py index 6f05b32778..d6c104a83c 100644 --- a/lms/djangoapps/courseware/module_render.py +++ b/lms/djangoapps/courseware/module_render.py @@ -1,6 +1,7 @@ import json import logging import pyparsing +import re import sys import static_replace @@ -8,6 +9,7 @@ from functools import partial from django.conf import settings from django.contrib.auth.models import User +from django.core.cache import cache from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse from django.http import Http404 @@ -273,6 +275,14 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours statsd.increment("lms.courseware.question_answered", tags=tags) + def can_execute_unsafe_code(): + # To decide if we can run unsafe code, we check the course id against + # a list of regexes configured on the server. + for regex in settings.COURSES_WITH_UNSAFE_CODE: + if re.match(regex, course_id): + return True + return False + # TODO (cpennington): When modules are shared between courses, the static # prefix is going to have to be specific to the module, not the directory # that the xml was loaded from @@ -299,6 +309,8 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours course_id=course_id, open_ended_grading_interface=open_ended_grading_interface, s3_interface=s3_interface, + cache=cache, + can_execute_unsafe_code=can_execute_unsafe_code, ) # pass position specified in URL to module through ModuleSystem system.set('position', position) diff --git a/lms/djangoapps/courseware/tests/load_tests/README.md b/lms/djangoapps/courseware/tests/load_tests/README.md new file mode 100644 index 0000000000..09d8797947 --- /dev/null +++ b/lms/djangoapps/courseware/tests/load_tests/README.md @@ -0,0 +1,4 @@ +# Load Testing + +Scripts for load testing the courseware app, +mostly using 
[multimechanize](http://testutils.org/multi-mechanize/) diff --git a/lms/djangoapps/courseware/tests/load_tests/custom_response/README.md b/lms/djangoapps/courseware/tests/load_tests/custom_response/README.md new file mode 100644 index 0000000000..e3fae8c817 --- /dev/null +++ b/lms/djangoapps/courseware/tests/load_tests/custom_response/README.md @@ -0,0 +1,51 @@ +# Custom Response Load Test + +## Optional Installations + +* [memcached](http://pypi.python.org/pypi/python-memcached/): Install this +and make sure it is running, or the Capa problem will not cache results. + +* [AppArmor](http://wiki.apparmor.net): Follow the instructions in +`common/lib/codejail/README` to set up the Python sandbox environment. +If you do not set up the sandbox, the tests will still execute code in the CustomResponse, +so you can still run the tests. + +* [matplotlib](http://matplotlib.org): Multi-mechanize uses this to create graphs. + + +## Running the Tests + +This test simulates student submissions for a custom response problem. + +First, clear the cache: + + /etc/init.d/memcached restart + +Then, run the test: + + multimech-run custom_response + +You can configure the parameters in `customresponse/config.cfg`, +and you can change the CustomResponse script and student submissions +in `customresponse/test_scripts/v_user.py`. + +## Components Under Test + +Components under test: + +* Python sandbox (see `common/lib/codejail`), which uses `AppArmor` +* Caching (see `common/lib/capa/capa/safe_exec/`), which uses `memcache` in production + +Components NOT under test: + +* Django views +* `XModule` +* gunicorn + +This allows us to avoid creating courses in mongo, logging in, using CSRF tokens, +and other inconveniences. Instead, we create a capa problem (from the capa package), +pass it Django's memcache backend, and pass the problem student submissions. + +Even though the test uses `capa.capa_problem.LoncapaProblem` directly, +the `capa` should not depend on Django. 
For this reason, we put the +test in the `courseware` Django app. diff --git a/lms/djangoapps/courseware/tests/load_tests/custom_response/config.cfg b/lms/djangoapps/courseware/tests/load_tests/custom_response/config.cfg new file mode 100644 index 0000000000..c75f02a669 --- /dev/null +++ b/lms/djangoapps/courseware/tests/load_tests/custom_response/config.cfg @@ -0,0 +1,22 @@ + +[global] +run_time = 240 +rampup = 30 +results_ts_interval = 10 +progress_bar = on +console_logging = off +xml_report = off + + +[user_group-1] +threads = 10 +script = v_user.py + +[user_group-2] +threads = 10 +script = v_user.py + +[user_group-3] +threads = 10 +script = v_user.py + diff --git a/lms/djangoapps/courseware/tests/load_tests/custom_response/test_scripts/v_user.py b/lms/djangoapps/courseware/tests/load_tests/custom_response/test_scripts/v_user.py new file mode 100644 index 0000000000..9bfc39e55b --- /dev/null +++ b/lms/djangoapps/courseware/tests/load_tests/custom_response/test_scripts/v_user.py @@ -0,0 +1,115 @@ +""" User script for load testing CustomResponse """ + +from capa.tests.response_xml_factory import CustomResponseXMLFactory +import capa.capa_problem as lcp +from xmodule.x_module import ModuleSystem +import mock +import fs.osfs +import random +import textwrap + +# Use memcache running locally +CACHE_SETTINGS = { + 'default': { + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': '127.0.0.1:11211' + }, +} + +# Configure settings so Django will let us import its cache wrapper +# Caching is the only part of Django being tested +from django.conf import settings +settings.configure(CACHES=CACHE_SETTINGS) + +from django.core.cache import cache + +# Script to install as the checker for the CustomResponse +TEST_SCRIPT = textwrap.dedent(""" + def check_func(expect, answer_given): + return {'ok': answer_given == expect, 'msg': 'Message text'} +""") + +# Submissions submitted by the student +TEST_SUBMISSIONS = [random.randint(-100, 100) for i in 
range(100)] + +class TestContext(object): + """ One-time set up for the test that is shared across transactions. + Uses a Singleton design pattern.""" + + SINGLETON = None + NUM_UNIQUE_SEEDS = 20 + + @classmethod + def singleton(cls): + """ Return the singleton, creating one if it does not already exist.""" + + # If we haven't created the singleton yet, create it now + if cls.SINGLETON is None: + + # Create a mock ModuleSystem, installing our cache + system = mock.MagicMock(ModuleSystem) + system.render_template = lambda template, context: "
%s
" % template + system.cache = cache + system.filestore = mock.MagicMock(fs.osfs.OSFS) + system.filestore.root_path = "" + system.DEBUG = True + + # Create a custom response problem + xml_factory = CustomResponseXMLFactory() + xml = xml_factory.build_xml(script=TEST_SCRIPT, cfn="check_func", expect="42") + + # Create and store the context + cls.SINGLETON = cls(system, xml) + + else: + pass + + # Return the singleton + return cls.SINGLETON + + def __init__(self, system, xml): + """ Store context needed for the test across transactions """ + self.system = system + self.xml = xml + + # Construct a small pool of unique seeds + # To keep our implementation in line with the one capa actually uses, + # construct the problems, then use the seeds they generate + self.seeds = [lcp.LoncapaProblem(self.xml, 'problem_id', system=self.system).seed + for i in range(self.NUM_UNIQUE_SEEDS)] + + def random_seed(self): + """ Return one of a small number of unique random seeds """ + return random.choice(self.seeds) + + def student_submission(self): + """ Return one of a small number of student submissions """ + return random.choice(TEST_SUBMISSIONS) + +class Transaction(object): + """ User script that submits a response to a CustomResponse problem """ + + def __init__(self): + """ Create the problem """ + + # Get the context (re-used across transactions) + self.context = TestContext.singleton() + + # Create a new custom response problem + # using one of a small number of unique seeds + # We're assuming that the capa module is limiting the number + # of seeds (currently not the case for certain settings) + self.problem = lcp.LoncapaProblem(self.context.xml, + '1', + state=None, + seed=self.context.random_seed(), + system=self.context.system) + + def run(self): + """ Submit a response to the CustomResponse problem """ + answers = {'1_2_1': self.context.student_submission()} + self.problem.grade_answers(answers) + +if __name__ == '__main__': + trans = Transaction() + trans.run() diff 
--git a/lms/djangoapps/courseware/tests/tests.py b/lms/djangoapps/courseware/tests/tests.py index d50e0b4526..a189160a48 100644 --- a/lms/djangoapps/courseware/tests/tests.py +++ b/lms/djangoapps/courseware/tests/tests.py @@ -372,6 +372,7 @@ class TestCoursesLoadTestCase_XmlModulestore(PageLoaderTestCase): '''Check that all pages in test courses load properly from XML''' def setUp(self): + super(TestCoursesLoadTestCase_XmlModulestore, self).setUp() self.setup_viewtest_user() xmodule.modulestore.django._MODULESTORES = {} @@ -390,6 +391,7 @@ class TestCoursesLoadTestCase_MongoModulestore(PageLoaderTestCase): '''Check that all pages in test courses load properly from Mongo''' def setUp(self): + super(TestCoursesLoadTestCase_MongoModulestore, self).setUp() self.setup_viewtest_user() xmodule.modulestore.django._MODULESTORES = {} modulestore().collection.drop() @@ -487,9 +489,6 @@ class TestDraftModuleStore(TestCase): class TestViewAuth(LoginEnrollmentTestCase): """Check that view authentication works properly""" - # NOTE: setUpClass() runs before override_settings takes effect, so - # can't do imports there without manually hacking settings. - def setUp(self): xmodule.modulestore.django._MODULESTORES = {} @@ -810,43 +809,85 @@ class TestViewAuth(LoginEnrollmentTestCase): @override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE) -class TestCourseGrader(LoginEnrollmentTestCase): +class TestSubmittingProblems(LoginEnrollmentTestCase): """Check that a course gets graded properly""" - # NOTE: setUpClass() runs before override_settings takes effect, so - # can't do imports there without manually hacking settings. 
+ # Subclasses should specify the course slug + course_slug = "UNKNOWN" + course_when = "UNKNOWN" def setUp(self): xmodule.modulestore.django._MODULESTORES = {} - courses = modulestore().get_courses() - def find_course(course_id): - """Assumes the course is present""" - return [c for c in courses if c.id == course_id][0] - - self.graded_course = find_course("edX/graded/2012_Fall") + course_name = "edX/%s/%s" % (self.course_slug, self.course_when) + self.course = modulestore().get_course(course_name) + assert self.course, "Couldn't load course %r" % course_name # create a test student self.student = 'view@test.com' self.password = 'foo' self.create_account('u1', self.student, self.password) self.activate_user(self.student) - self.enroll(self.graded_course) + self.enroll(self.course) self.student_user = get_user(self.student) self.factory = RequestFactory() + def problem_location(self, problem_url_name): + return "i4x://edX/{}/problem/{}".format(self.course_slug, problem_url_name) + + def modx_url(self, problem_location, dispatch): + return reverse( + 'modx_dispatch', + kwargs={ + 'course_id': self.course.id, + 'location': problem_location, + 'dispatch': dispatch, + } + ) + + def submit_question_answer(self, problem_url_name, responses): + """ + Submit answers to a question. 
+ + Responses is a dict mapping problem ids (not sure of the right term) + to answers: + {'2_1': 'Correct', '2_2': 'Incorrect'} + + """ + problem_location = self.problem_location(problem_url_name) + modx_url = self.modx_url(problem_location, 'problem_check') + answer_key_prefix = 'input_i4x-edX-{}-problem-{}_'.format(self.course_slug, problem_url_name) + resp = self.client.post(modx_url, + { (answer_key_prefix + k): v for k,v in responses.items() } + ) + return resp + + def reset_question_answer(self, problem_url_name): + '''resets specified problem for current user''' + problem_location = self.problem_location(problem_url_name) + modx_url = self.modx_url(problem_location, 'problem_reset') + resp = self.client.post(modx_url) + return resp + + +class TestCourseGrader(TestSubmittingProblems): + """Check that a course gets graded properly""" + + course_slug = "graded" + course_when = "2012_Fall" + def get_grade_summary(self): '''calls grades.grade for current user and course''' model_data_cache = ModelDataCache.cache_for_descriptor_descendents( - self.graded_course.id, self.student_user, self.graded_course) + self.course.id, self.student_user, self.course) fake_request = self.factory.get(reverse('progress', - kwargs={'course_id': self.graded_course.id})) + kwargs={'course_id': self.course.id})) return grades.grade(self.student_user, fake_request, - self.graded_course, model_data_cache) + self.course, model_data_cache) def get_homework_scores(self): '''get scores for homeworks''' @@ -855,14 +896,14 @@ class TestCourseGrader(LoginEnrollmentTestCase): def get_progress_summary(self): '''return progress summary structure for current user and course''' model_data_cache = ModelDataCache.cache_for_descriptor_descendents( - self.graded_course.id, self.student_user, self.graded_course) + self.course.id, self.student_user, self.course) fake_request = self.factory.get(reverse('progress', - kwargs={'course_id': self.graded_course.id})) + kwargs={'course_id': self.course.id})) 
progress_summary = grades.progress_summary(self.student_user, fake_request, - self.graded_course, + self.course, model_data_cache) return progress_summary @@ -871,46 +912,6 @@ class TestCourseGrader(LoginEnrollmentTestCase): grade_summary = self.get_grade_summary() self.assertEqual(grade_summary['percent'], percent) - def submit_question_answer(self, problem_url_name, responses): - """ - The field names of a problem are hard to determine. This method only works - for the problems used in the edX/graded course, which has fields named in the - following form: - input_i4x-edX-graded-problem-H1P3_2_1 - input_i4x-edX-graded-problem-H1P3_2_2 - """ - problem_location = "i4x://edX/graded/problem/%s" % problem_url_name - - modx_url = reverse('modx_dispatch', - kwargs={'course_id': self.graded_course.id, - 'location': problem_location, - 'dispatch': 'problem_check', }) - - resp = self.client.post(modx_url, { - 'input_i4x-edX-graded-problem-%s_2_1' % problem_url_name: responses[0], - 'input_i4x-edX-graded-problem-%s_2_2' % problem_url_name: responses[1], - }) - print "modx_url", modx_url, "responses", responses - print "resp", resp - - return resp - - def problem_location(self, problem_url_name): - '''Get location string for problem, assuming hardcoded course_id''' - return "i4x://edX/graded/problem/{0}".format(problem_url_name) - - def reset_question_answer(self, problem_url_name): - '''resets specified problem for current user''' - problem_location = self.problem_location(problem_url_name) - - modx_url = reverse('modx_dispatch', - kwargs={'course_id': self.graded_course.id, - 'location': problem_location, - 'dispatch': 'problem_reset', }) - - resp = self.client.post(modx_url) - return resp - def test_get_graded(self): #### Check that the grader shows we have 0% in the course self.check_grade_percent(0) @@ -928,27 +929,27 @@ class TestCourseGrader(LoginEnrollmentTestCase): return [s.earned for s in hw_section['scores']] # Only get half of the first problem correct - 
self.submit_question_answer('H1P1', ['Correct', 'Incorrect']) + self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'}) self.check_grade_percent(0.06) self.assertEqual(earned_hw_scores(), [1.0, 0, 0]) # Order matters self.assertEqual(score_for_hw('Homework1'), [1.0, 0.0]) # Get both parts of the first problem correct self.reset_question_answer('H1P1') - self.submit_question_answer('H1P1', ['Correct', 'Correct']) + self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'}) self.check_grade_percent(0.13) self.assertEqual(earned_hw_scores(), [2.0, 0, 0]) self.assertEqual(score_for_hw('Homework1'), [2.0, 0.0]) # This problem is shown in an ABTest - self.submit_question_answer('H1P2', ['Correct', 'Correct']) + self.submit_question_answer('H1P2', {'2_1': 'Correct', '2_2': 'Correct'}) self.check_grade_percent(0.25) self.assertEqual(earned_hw_scores(), [4.0, 0.0, 0]) self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0]) # This problem is hidden in an ABTest. # Getting it correct doesn't change total grade - self.submit_question_answer('H1P3', ['Correct', 'Correct']) + self.submit_question_answer('H1P3', {'2_1': 'Correct', '2_2': 'Correct'}) self.check_grade_percent(0.25) self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0]) @@ -957,19 +958,85 @@ class TestCourseGrader(LoginEnrollmentTestCase): # This problem is also weighted to be 4 points (instead of default of 2) # If the problem was unweighted the percent would have been 0.38 so we # know it works. 
- self.submit_question_answer('H2P1', ['Correct', 'Correct']) + self.submit_question_answer('H2P1', {'2_1': 'Correct', '2_2': 'Correct'}) self.check_grade_percent(0.42) self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0]) # Third homework - self.submit_question_answer('H3P1', ['Correct', 'Correct']) + self.submit_question_answer('H3P1', {'2_1': 'Correct', '2_2': 'Correct'}) self.check_grade_percent(0.42) # Score didn't change self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0]) - self.submit_question_answer('H3P2', ['Correct', 'Correct']) + self.submit_question_answer('H3P2', {'2_1': 'Correct', '2_2': 'Correct'}) self.check_grade_percent(0.5) # Now homework2 dropped. Score changes self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0]) # Now we answer the final question (worth half of the grade) - self.submit_question_answer('FinalQuestion', ['Correct', 'Correct']) + self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'}) self.check_grade_percent(1.0) # Hooray! We got 100% + + +@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE) +class TestSchematicResponse(TestSubmittingProblems): + """Check that we can submit a schematic response, and it answers properly.""" + + course_slug = "embedded_python" + course_when = "2013_Spring" + + def test_schematic(self): + resp = self.submit_question_answer('schematic_problem', + { '2_1': json.dumps( + [['transient', {'Z': [ + [0.0000004, 2.8], + [0.0000009, 2.8], + [0.0000014, 2.8], + [0.0000019, 2.8], + [0.0000024, 2.8], + [0.0000029, 0.2], + [0.0000034, 0.2], + [0.0000039, 0.2] + ]}]] + ) + }) + respdata = json.loads(resp.content) + self.assertEqual(respdata['success'], 'correct') + + self.reset_question_answer('schematic_problem') + resp = self.submit_question_answer('schematic_problem', + { '2_1': json.dumps( + [['transient', {'Z': [ + [0.0000004, 2.8], + [0.0000009, 0.0], # wrong. 
+ [0.0000014, 2.8], + [0.0000019, 2.8], + [0.0000024, 2.8], + [0.0000029, 0.2], + [0.0000034, 0.2], + [0.0000039, 0.2] + ]}]] + ) + }) + respdata = json.loads(resp.content) + self.assertEqual(respdata['success'], 'incorrect') + + def test_check_function(self): + resp = self.submit_question_answer('cfn_problem', {'2_1': "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"}) + respdata = json.loads(resp.content) + self.assertEqual(respdata['success'], 'correct') + + self.reset_question_answer('cfn_problem') + + resp = self.submit_question_answer('cfn_problem', {'2_1': "xyzzy!"}) + respdata = json.loads(resp.content) + self.assertEqual(respdata['success'], 'incorrect') + + def test_computed_answer(self): + resp = self.submit_question_answer('computed_answer', {'2_1': "Xyzzy"}) + respdata = json.loads(resp.content) + self.assertEqual(respdata['success'], 'correct') + + self.reset_question_answer('computed_answer') + + resp = self.submit_question_answer('computed_answer', {'2_1': "NO!"}) + respdata = json.loads(resp.content) + self.assertEqual(respdata['success'], 'incorrect') diff --git a/lms/djangoapps/debug/__init__.py b/lms/djangoapps/debug/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/debug/models.py b/lms/djangoapps/debug/models.py new file mode 100644 index 0000000000..71a8362390 --- /dev/null +++ b/lms/djangoapps/debug/models.py @@ -0,0 +1,3 @@ +from django.db import models + +# Create your models here. 
diff --git a/lms/djangoapps/debug/views.py b/lms/djangoapps/debug/views.py new file mode 100644 index 0000000000..c1d4155fdd --- /dev/null +++ b/lms/djangoapps/debug/views.py @@ -0,0 +1,31 @@ +"""Views for debugging and diagnostics""" + +import pprint +import traceback + +from django.http import Http404 +from django.contrib.auth.decorators import login_required +from django_future.csrf import ensure_csrf_cookie, csrf_exempt +from mitxmako.shortcuts import render_to_response + +from codejail.safe_exec import safe_exec + +@login_required +@ensure_csrf_cookie +def run_python(request): + """A page to allow testing the Python sandbox on a production server.""" + if not request.user.is_staff: + raise Http404 + c = {} + c['code'] = '' + c['results'] = None + if request.method == 'POST': + py_code = c['code'] = request.POST.get('code') + g = {} + try: + safe_exec(py_code, g) + except Exception as e: + c['results'] = traceback.format_exc() + else: + c['results'] = pprint.pformat(g) + return render_to_response("debug/run_python_form.html", c) diff --git a/lms/envs/aws.py b/lms/envs/aws.py index 83b57e7642..74540b7dec 100644 --- a/lms/envs/aws.py +++ b/lms/envs/aws.py @@ -92,6 +92,16 @@ CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull') ZENDESK_URL = ENV_TOKENS.get("ZENDESK_URL") FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL") +for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items(): + oldvalue = CODE_JAIL.get(name) + if isinstance(oldvalue, dict): + for subname, subvalue in value.items(): + oldvalue[subname] = subvalue + else: + CODE_JAIL[name] = value + +COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", []) + ############################## SECURE AUTH ITEMS ############### # Secret things: passwords, access keys, etc. 
with open(ENV_ROOT / CONFIG_PREFIX + "auth.json") as auth_file: diff --git a/lms/envs/common.py b/lms/envs/common.py index c111b3c18e..4cd7dd9843 100644 --- a/lms/envs/common.py +++ b/lms/envs/common.py @@ -97,6 +97,10 @@ MITX_FEATURES = { # Provide a UI to allow users to submit feedback from the LMS 'ENABLE_FEEDBACK_SUBMISSION': False, + + # Turn on a page that lets staff enter Python code to be run in the + # sandbox, for testing whether it's enabled properly. + 'ENABLE_DEBUG_RUN_PYTHON': False, } # Used for A/B testing @@ -246,6 +250,31 @@ MODULESTORE = { } CONTENTSTORE = None +#################### Python sandbox ############################################ + +CODE_JAIL = { + # Path to a sandboxed Python executable. None means don't bother. + 'python_bin': None, + # User to run as in the sandbox. + 'user': 'sandbox', + + # Configurable limits. + 'limits': { + # How many CPU seconds can jailed code use? + 'CPU': 1, + }, +} + +# Some courses are allowed to run unsafe code. This is a list of regexes, one +# of them must match the course id for that course to run unsafe code. 
+# +# For example: +# +# COURSES_WITH_UNSAFE_CODE = [ +# r"Harvard/XY123.1/.*" +# ] +COURSES_WITH_UNSAFE_CODE = [] + ############################ SIGNAL HANDLERS ################################ # This is imported to register the exception signal handling that logs exceptions import monitoring.exceptions # noqa @@ -398,6 +427,7 @@ MIDDLEWARE_CLASSES = ( # 'debug_toolbar.middleware.DebugToolbarMiddleware', 'django_comment_client.utils.ViewNameMiddleware', + 'codejail.django_integration.ConfigureCodeJailMiddleware', ) ############################### Pipeline ####################################### @@ -601,6 +631,7 @@ INSTALLED_APPS = ( # For testing 'django.contrib.admin', # only used in DEBUG mode + 'debug', # Discussion forums 'django_comment_client', diff --git a/lms/templates/debug/run_python_form.html b/lms/templates/debug/run_python_form.html new file mode 100644 index 0000000000..daecdf2abd --- /dev/null +++ b/lms/templates/debug/run_python_form.html @@ -0,0 +1,19 @@ + +
+

Python:

+
+ +
+ +
+ +
+
+%if results: +
+

Results:

+
+${results|h}
+
+
+%endif diff --git a/lms/urls.py b/lms/urls.py index 2846e091be..99b55fdb54 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -363,6 +363,11 @@ urlpatterns += ( url(r'^comm/foldit_ops', 'foldit.views.foldit_ops', name="foldit_ops"), ) +if settings.MITX_FEATURES.get('ENABLE_DEBUG_RUN_PYTHON'): + urlpatterns += ( + url(r'^debug/run_python', 'debug.views.run_python'), + ) + urlpatterns = patterns(*urlpatterns) if settings.DEBUG: diff --git a/requirements/edx-sandbox/base.txt b/requirements/edx-sandbox/base.txt new file mode 100644 index 0000000000..d801f46c8e --- /dev/null +++ b/requirements/edx-sandbox/base.txt @@ -0,0 +1 @@ +numpy==1.6.2 diff --git a/requirements/edx-sandbox/post.txt b/requirements/edx-sandbox/post.txt new file mode 100644 index 0000000000..f99e8a8c4b --- /dev/null +++ b/requirements/edx-sandbox/post.txt @@ -0,0 +1,6 @@ +# Packages to install in the Python sandbox for secured execution. +scipy==0.11.0 +lxml==3.0.1 +-e common/lib/calc +-e common/lib/chem +-e common/lib/sandbox-packages diff --git a/requirements/edx/github.txt b/requirements/edx/github.txt index 35ad8af027..d3f90d5abc 100644 --- a/requirements/edx/github.txt +++ b/requirements/edx/github.txt @@ -9,3 +9,4 @@ # Our libraries: -e git+https://github.com/edx/XBlock.git@483e0cb1#egg=XBlock +-e git+https://github.com/edx/codejail.git@07494f1#egg=codejail diff --git a/requirements/edx/local.txt b/requirements/edx/local.txt index 201467d11e..a72f1f6dea 100644 --- a/requirements/edx/local.txt +++ b/requirements/edx/local.txt @@ -1,4 +1,6 @@ # Python libraries to install that are local to the mitx repo +-e common/lib/calc -e common/lib/capa +-e common/lib/chem -e common/lib/xmodule -e . 
diff --git a/scripts/runone.py b/scripts/runone.py index 2227ae0adf..a644aa077b 100755 --- a/scripts/runone.py +++ b/scripts/runone.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -from django.core import management import argparse import os @@ -42,21 +41,34 @@ def main(argv): test_py_path = find_full_path(test_py_path) test_spec = "%s:%s.%s" % (test_py_path, test_class, test_method) + settings = None if test_py_path.startswith('cms'): settings = 'cms.envs.test' elif test_py_path.startswith('lms'): settings = 'lms.envs.test' + + if settings: + # Run as a django test suite + from django.core import management + + django_args = ["django-admin.py", "test", "--pythonpath=."] + django_args.append("--settings=%s" % settings) + if args.nocapture: + django_args.append("-s") + django_args.append(test_spec) + + print " ".join(django_args) + management.execute_from_command_line(django_args) else: - raise Exception("Couldn't determine settings to use!") + # Run as a nose test suite + import nose.core + nose_args = ["nosetests"] + if args.nocapture: + nose_args.append("-s") + nose_args.append(test_spec) + print " ".join(nose_args) + nose.core.main(argv=nose_args) - django_args = ["django-admin.py", "test", "--pythonpath=."] - django_args.append("--settings=%s" % settings) - if args.nocapture: - django_args.append("-s") - django_args.append(test_spec) - - print " ".join(django_args) - management.execute_from_command_line(django_args) if __name__ == "__main__": main(sys.argv[1:])