Merge branch 'feature/ned/sandboxed-python'
This commit is contained in:
2
.gitignore
vendored
2
.gitignore
vendored
@@ -9,7 +9,7 @@
|
||||
:2e#
|
||||
.AppleDouble
|
||||
database.sqlite
|
||||
private-requirements.txt
|
||||
requirements/private.txt
|
||||
courseware/static/js/mathjax/*
|
||||
flushdb.sh
|
||||
build
|
||||
|
||||
@@ -16,7 +16,7 @@ from mitxmako.shortcuts import render_to_response, render_to_string
|
||||
from urllib import urlencode
|
||||
import zendesk
|
||||
|
||||
import capa.calc
|
||||
import calc
|
||||
import track.views
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ def calculate(request):
|
||||
''' Calculator in footer of every page. '''
|
||||
equation = request.GET['equation']
|
||||
try:
|
||||
result = capa.calc.evaluator({}, {}, equation)
|
||||
result = calc.evaluator({}, {}, equation)
|
||||
except:
|
||||
event = {'error': map(str, sys.exc_info()),
|
||||
'equation': equation}
|
||||
|
||||
12
common/lib/calc/setup.py
Normal file
12
common/lib/calc/setup.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from setuptools import setup
|
||||
|
||||
setup(
|
||||
name="calc",
|
||||
version="0.1",
|
||||
py_modules=["calc"],
|
||||
install_requires=[
|
||||
"pyparsing==1.5.6",
|
||||
"numpy",
|
||||
"scipy"
|
||||
],
|
||||
)
|
||||
@@ -13,33 +13,19 @@ Main module which shows problems (of "capa" type).
|
||||
This is used by capa_module.
|
||||
'''
|
||||
|
||||
from __future__ import division
|
||||
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import math
|
||||
import numpy
|
||||
import os
|
||||
import random
|
||||
import os.path
|
||||
import re
|
||||
import scipy
|
||||
import struct
|
||||
import sys
|
||||
|
||||
from lxml import etree
|
||||
from xml.sax.saxutils import unescape
|
||||
from copy import deepcopy
|
||||
|
||||
import chem
|
||||
import chem.miller
|
||||
import chem.chemcalc
|
||||
import chem.chemtools
|
||||
import verifiers
|
||||
import verifiers.draganddrop
|
||||
|
||||
import calc
|
||||
from .correctmap import CorrectMap
|
||||
import eia
|
||||
import inputtypes
|
||||
import customrender
|
||||
from .util import contextualize_text, convert_files_to_filenames
|
||||
@@ -47,6 +33,7 @@ import xqueue_interface
|
||||
|
||||
# to be replaced with auto-registering
|
||||
import responsetypes
|
||||
import safe_exec
|
||||
|
||||
# dict of tagname, Response Class -- this should come from auto-registering
|
||||
response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
|
||||
@@ -63,17 +50,6 @@ html_transforms = {'problem': {'tag': 'div'},
|
||||
"math": {'tag': 'span'},
|
||||
}
|
||||
|
||||
global_context = {'random': random,
|
||||
'numpy': numpy,
|
||||
'math': math,
|
||||
'scipy': scipy,
|
||||
'calc': calc,
|
||||
'eia': eia,
|
||||
'chemcalc': chem.chemcalc,
|
||||
'chemtools': chem.chemtools,
|
||||
'miller': chem.miller,
|
||||
'draganddrop': verifiers.draganddrop}
|
||||
|
||||
# These should be removed from HTML output, including all subelements
|
||||
html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric"]
|
||||
|
||||
@@ -96,7 +72,7 @@ class LoncapaProblem(object):
|
||||
|
||||
- problem_text (string): xml defining the problem
|
||||
- id (string): identifier for this problem; often a filename (no spaces)
|
||||
- seed (int): random number generator seed (int)
|
||||
- seed (int): random number generator seed (int)
|
||||
- state (dict): containing the following keys:
|
||||
- 'seed' - (int) random number generator seed
|
||||
- 'student_answers' - (dict) maps input id to the stored answer for that input
|
||||
@@ -115,23 +91,20 @@ class LoncapaProblem(object):
|
||||
if self.system is None:
|
||||
raise Exception()
|
||||
|
||||
state = state if state else {}
|
||||
state = state or {}
|
||||
|
||||
# Set seed according to the following priority:
|
||||
# 1. Contained in problem's state
|
||||
# 2. Passed into capa_problem via constructor
|
||||
# 3. Assign from the OS's random number generator
|
||||
self.seed = state.get('seed', seed)
|
||||
if self.seed is None:
|
||||
self.seed = struct.unpack('i', os.urandom(4))[0]
|
||||
assert self.seed is not None, "Seed must be provided for LoncapaProblem."
|
||||
|
||||
self.student_answers = state.get('student_answers', {})
|
||||
if 'correct_map' in state:
|
||||
self.correct_map.set_dict(state['correct_map'])
|
||||
self.done = state.get('done', False)
|
||||
self.input_state = state.get('input_state', {})
|
||||
|
||||
|
||||
|
||||
# Convert startouttext and endouttext to proper <text></text>
|
||||
problem_text = re.sub("startouttext\s*/", "text", problem_text)
|
||||
problem_text = re.sub("endouttext\s*/", "/text", problem_text)
|
||||
@@ -144,7 +117,7 @@ class LoncapaProblem(object):
|
||||
self._process_includes()
|
||||
|
||||
# construct script processor context (eg for customresponse problems)
|
||||
self.context = self._extract_context(self.tree, seed=self.seed)
|
||||
self.context = self._extract_context(self.tree)
|
||||
|
||||
# Pre-parse the XML tree: modifies it to add ID's and perform some in-place
|
||||
# transformations. This also creates the dict (self.responders) of Response
|
||||
@@ -440,18 +413,23 @@ class LoncapaProblem(object):
|
||||
path = []
|
||||
|
||||
for dir in raw_path:
|
||||
|
||||
if not dir:
|
||||
continue
|
||||
|
||||
# path is an absolute path or a path relative to the data dir
|
||||
dir = os.path.join(self.system.filestore.root_path, dir)
|
||||
# Check that we are within the filestore tree.
|
||||
reldir = os.path.relpath(dir, self.system.filestore.root_path)
|
||||
if ".." in reldir:
|
||||
log.warning("Ignoring Python directory outside of course: %r" % dir)
|
||||
continue
|
||||
|
||||
abs_dir = os.path.normpath(dir)
|
||||
path.append(abs_dir)
|
||||
|
||||
return path
|
||||
|
||||
def _extract_context(self, tree, seed=struct.unpack('i', os.urandom(4))[0]): # private
|
||||
def _extract_context(self, tree):
|
||||
'''
|
||||
Extract content of <script>...</script> from the problem.xml file, and exec it in the
|
||||
context of this problem. Provides ability to randomize problems, and also set
|
||||
@@ -459,55 +437,47 @@ class LoncapaProblem(object):
|
||||
|
||||
Problem XML goes to Python execution context. Runs everything in script tags.
|
||||
'''
|
||||
random.seed(self.seed)
|
||||
# save global context in here also
|
||||
context = {'global_context': global_context}
|
||||
context = {}
|
||||
context['seed'] = self.seed
|
||||
all_code = ''
|
||||
|
||||
# initialize context to have stuff in global_context
|
||||
context.update(global_context)
|
||||
python_path = []
|
||||
|
||||
# put globals there also
|
||||
context['__builtins__'] = globals()['__builtins__']
|
||||
|
||||
# pass instance of LoncapaProblem in
|
||||
context['the_lcp'] = self
|
||||
context['script_code'] = ''
|
||||
|
||||
self._execute_scripts(tree.findall('.//script'), context)
|
||||
|
||||
return context
|
||||
|
||||
def _execute_scripts(self, scripts, context):
|
||||
'''
|
||||
Executes scripts in the given context.
|
||||
'''
|
||||
original_path = sys.path
|
||||
|
||||
for script in scripts:
|
||||
sys.path = original_path + self._extract_system_path(script)
|
||||
for script in tree.findall('.//script'):
|
||||
|
||||
stype = script.get('type')
|
||||
|
||||
if stype:
|
||||
if 'javascript' in stype:
|
||||
continue # skip javascript
|
||||
if 'perl' in stype:
|
||||
continue # skip perl
|
||||
# TODO: evaluate only python
|
||||
code = script.text
|
||||
|
||||
for d in self._extract_system_path(script):
|
||||
if d not in python_path and os.path.exists(d):
|
||||
python_path.append(d)
|
||||
|
||||
XMLESC = {"'": "'", """: '"'}
|
||||
code = unescape(code, XMLESC)
|
||||
# store code source in context
|
||||
context['script_code'] += code
|
||||
code = unescape(script.text, XMLESC)
|
||||
all_code += code
|
||||
|
||||
if all_code:
|
||||
try:
|
||||
# use "context" for global context; thus defs in code are global within code
|
||||
exec code in context, context
|
||||
safe_exec.safe_exec(
|
||||
all_code,
|
||||
context,
|
||||
random_seed=self.seed,
|
||||
python_path=python_path,
|
||||
cache=self.system.cache,
|
||||
)
|
||||
except Exception as err:
|
||||
log.exception("Error while execing script code: " + code)
|
||||
log.exception("Error while execing script code: " + all_code)
|
||||
msg = "Error while executing script code: %s" % str(err).replace('<', '<')
|
||||
raise responsetypes.LoncapaProblemError(msg)
|
||||
finally:
|
||||
sys.path = original_path
|
||||
|
||||
# store code source in context
|
||||
context['script_code'] = all_code
|
||||
return context
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -46,7 +46,7 @@ import sys
|
||||
import pyparsing
|
||||
|
||||
from .registry import TagRegistry
|
||||
from capa.chem import chemcalc
|
||||
from chem import chemcalc
|
||||
import xqueue_interface
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ import random
|
||||
import re
|
||||
import requests
|
||||
import subprocess
|
||||
import textwrap
|
||||
import traceback
|
||||
import xml.sax.saxutils as saxutils
|
||||
|
||||
@@ -30,17 +31,23 @@ from collections import namedtuple
|
||||
from shapely.geometry import Point, MultiPoint
|
||||
|
||||
# specific library imports
|
||||
from .calc import evaluator, UndefinedVariable
|
||||
from .correctmap import CorrectMap
|
||||
from calc import evaluator, UndefinedVariable
|
||||
from . import correctmap
|
||||
from datetime import datetime
|
||||
from .util import *
|
||||
from lxml import etree
|
||||
from lxml.html.soupparser import fromstring as fromstring_bs # uses Beautiful Soup!!! FIXME?
|
||||
import capa.xqueue_interface as xqueue_interface
|
||||
|
||||
import safe_exec
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
CorrectMap = correctmap.CorrectMap
|
||||
CORRECTMAP_PY = None
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Exceptions
|
||||
|
||||
@@ -252,20 +259,41 @@ class LoncapaResponse(object):
|
||||
|
||||
# We may extend this in the future to add another argument which provides a
|
||||
# callback procedure to a social hint generation system.
|
||||
if not hintfn in self.context:
|
||||
msg = 'missing specified hint function %s in script context' % hintfn
|
||||
msg += "\nSee XML source line %s" % getattr(
|
||||
self.xml, 'sourceline', '<unavailable>')
|
||||
raise LoncapaProblemError(msg)
|
||||
|
||||
global CORRECTMAP_PY
|
||||
if CORRECTMAP_PY is None:
|
||||
# We need the CorrectMap code for hint functions. No, this is not great.
|
||||
CORRECTMAP_PY = inspect.getsource(correctmap)
|
||||
|
||||
code = (
|
||||
CORRECTMAP_PY + "\n" +
|
||||
self.context['script_code'] + "\n" +
|
||||
textwrap.dedent("""
|
||||
new_cmap = CorrectMap()
|
||||
new_cmap.set_dict(new_cmap_dict)
|
||||
old_cmap = CorrectMap()
|
||||
old_cmap.set_dict(old_cmap_dict)
|
||||
{hintfn}(answer_ids, student_answers, new_cmap, old_cmap)
|
||||
new_cmap_dict.update(new_cmap.get_dict())
|
||||
old_cmap_dict.update(old_cmap.get_dict())
|
||||
""").format(hintfn=hintfn)
|
||||
)
|
||||
globals_dict = {
|
||||
'answer_ids': self.answer_ids,
|
||||
'student_answers': student_answers,
|
||||
'new_cmap_dict': new_cmap.get_dict(),
|
||||
'old_cmap_dict': old_cmap.get_dict(),
|
||||
}
|
||||
|
||||
try:
|
||||
self.context[hintfn](
|
||||
self.answer_ids, student_answers, new_cmap, old_cmap)
|
||||
safe_exec.safe_exec(code, globals_dict)
|
||||
except Exception as err:
|
||||
msg = 'Error %s in evaluating hint function %s' % (err, hintfn)
|
||||
msg += "\nSee XML source line %s" % getattr(
|
||||
self.xml, 'sourceline', '<unavailable>')
|
||||
raise ResponseError(msg)
|
||||
|
||||
new_cmap.set_dict(globals_dict['new_cmap_dict'])
|
||||
return
|
||||
|
||||
# hint specified by conditions and text dependent on conditions (a-la Loncapa design)
|
||||
@@ -475,6 +503,10 @@ class JavascriptResponse(LoncapaResponse):
|
||||
return tmp_env
|
||||
|
||||
def call_node(self, args):
|
||||
# Node.js code is un-sandboxed. If the XModuleSystem says we aren't
|
||||
# allowed to run unsafe code, then stop now.
|
||||
if not self.system.can_execute_unsafe_code():
|
||||
raise LoncapaProblemError("Execution of unsafe Javascript code is not allowed.")
|
||||
|
||||
subprocess_args = ["node"]
|
||||
subprocess_args.extend(args)
|
||||
@@ -488,7 +520,7 @@ class JavascriptResponse(LoncapaResponse):
|
||||
output = self.call_node([generator_file,
|
||||
self.generator,
|
||||
json.dumps(self.generator_dependencies),
|
||||
json.dumps(str(self.context['the_lcp'].seed)),
|
||||
json.dumps(str(self.context['seed'])),
|
||||
json.dumps(self.params)]).strip()
|
||||
|
||||
return json.loads(output)
|
||||
@@ -660,15 +692,6 @@ class ChoiceResponse(LoncapaResponse):
|
||||
|
||||
class MultipleChoiceResponse(LoncapaResponse):
|
||||
# TODO: handle direction and randomize
|
||||
snippets = [{'snippet': '''<multiplechoiceresponse direction="vertical" randomize="yes">
|
||||
<choicegroup type="MultipleChoice">
|
||||
<choice location="random" correct="false"><span>`a+b`<br/></span></choice>
|
||||
<choice location="random" correct="true"><span><math>a+b^2</math><br/></span></choice>
|
||||
<choice location="random" correct="false"><math>a+b+c</math></choice>
|
||||
<choice location="bottom" correct="false"><math>a+b+d</math></choice>
|
||||
</choicegroup>
|
||||
</multiplechoiceresponse>
|
||||
'''}]
|
||||
|
||||
response_tag = 'multiplechoiceresponse'
|
||||
max_inputfields = 1
|
||||
@@ -754,14 +777,6 @@ class OptionResponse(LoncapaResponse):
|
||||
'''
|
||||
TODO: handle direction and randomize
|
||||
'''
|
||||
snippets = [{'snippet': """<optionresponse direction="vertical" randomize="yes">
|
||||
<optioninput options="('Up','Down')" correct="Up">
|
||||
<text>The location of the sky</text>
|
||||
</optioninput>
|
||||
<optioninput options="('Up','Down')" correct="Down">
|
||||
<text>The location of the earth</text>
|
||||
</optioninput>
|
||||
</optionresponse>"""}]
|
||||
|
||||
response_tag = 'optionresponse'
|
||||
hint_tag = 'optionhint'
|
||||
@@ -905,39 +920,6 @@ class CustomResponse(LoncapaResponse):
|
||||
Custom response. The python code to be run should be in <answer>...</answer>
|
||||
or in a <script>...</script>
|
||||
'''
|
||||
snippets = [{'snippet': r"""<customresponse>
|
||||
<text>
|
||||
<br/>
|
||||
Suppose that \(I(t)\) rises from \(0\) to \(I_S\) at a time \(t_0 \neq 0\)
|
||||
In the space provided below write an algebraic expression for \(I(t)\).
|
||||
<br/>
|
||||
<textline size="5" correct_answer="IS*u(t-t0)" />
|
||||
</text>
|
||||
<answer type="loncapa/python">
|
||||
correct=['correct']
|
||||
try:
|
||||
r = str(submission[0])
|
||||
except ValueError:
|
||||
correct[0] ='incorrect'
|
||||
r = '0'
|
||||
if not(r=="IS*u(t-t0)"):
|
||||
correct[0] ='incorrect'
|
||||
</answer>
|
||||
</customresponse>"""},
|
||||
{'snippet': """<script type="loncapa/python"><![CDATA[
|
||||
|
||||
def sympy_check2():
|
||||
messages[0] = '%s:%s' % (submission[0],fromjs[0].replace('<','<'))
|
||||
#messages[0] = str(answers)
|
||||
correct[0] = 'correct'
|
||||
|
||||
]]>
|
||||
</script>
|
||||
|
||||
<customresponse cfn="sympy_check2" type="cs" expect="2.27E-39" dojs="math" size="30" answer="2.27E-39">
|
||||
<textline size="40" dojs="math" />
|
||||
<responseparam description="Numerical Tolerance" type="tolerance" default="0.00001" name="tol"/>
|
||||
</customresponse>"""}]
|
||||
|
||||
response_tag = 'customresponse'
|
||||
|
||||
@@ -972,14 +954,29 @@ def sympy_check2():
|
||||
cfn = xml.get('cfn')
|
||||
if cfn:
|
||||
log.debug("cfn = %s" % cfn)
|
||||
if cfn in self.context:
|
||||
self.code = self.context[cfn]
|
||||
else:
|
||||
msg = "%s: can't find cfn %s in context" % (
|
||||
unicode(self), cfn)
|
||||
msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline',
|
||||
'<unavailable>')
|
||||
raise LoncapaProblemError(msg)
|
||||
|
||||
# This is a bit twisty. We used to grab the cfn function from
|
||||
# the context, but now that we sandbox Python execution, we
|
||||
# can't get functions from previous executions. So we make an
|
||||
# actual function that will re-execute the original script,
|
||||
# and invoke the function with the data needed.
|
||||
def make_check_function(script_code, cfn):
|
||||
def check_function(expect, ans, **kwargs):
|
||||
extra_args = "".join(", {0}={0}".format(k) for k in kwargs)
|
||||
code = (
|
||||
script_code + "\n" +
|
||||
"cfn_return = %s(expect, ans%s)\n" % (cfn, extra_args)
|
||||
)
|
||||
globals_dict = {
|
||||
'expect': expect,
|
||||
'ans': ans,
|
||||
}
|
||||
globals_dict.update(kwargs)
|
||||
safe_exec.safe_exec(code, globals_dict, cache=self.system.cache)
|
||||
return globals_dict['cfn_return']
|
||||
return check_function
|
||||
|
||||
self.code = make_check_function(self.context['script_code'], cfn)
|
||||
|
||||
if not self.code:
|
||||
if answer is None:
|
||||
@@ -1036,9 +1033,6 @@ def sympy_check2():
|
||||
# put these in the context of the check function evaluator
|
||||
# note that this doesn't help the "cfn" version - only the exec version
|
||||
self.context.update({
|
||||
# our subtree
|
||||
'xml': self.xml,
|
||||
|
||||
# my ID
|
||||
'response_id': self.myid,
|
||||
|
||||
@@ -1075,65 +1069,63 @@ def sympy_check2():
|
||||
# pass self.system.debug to cfn
|
||||
self.context['debug'] = self.system.DEBUG
|
||||
|
||||
# Run the check function
|
||||
self.execute_check_function(idset, submission)
|
||||
|
||||
# build map giving "correct"ness of the answer(s)
|
||||
correct = self.context['correct']
|
||||
messages = self.context['messages']
|
||||
overall_message = self.clean_message_html(self.context['overall_message'])
|
||||
correct_map = CorrectMap()
|
||||
correct_map.set_overall_message(overall_message)
|
||||
|
||||
for k in range(len(idset)):
|
||||
npoints = self.maxpoints[idset[k]] if correct[k] == 'correct' else 0
|
||||
correct_map.set(idset[k], correct[k], msg=messages[k],
|
||||
npoints=npoints)
|
||||
return correct_map
|
||||
|
||||
def execute_check_function(self, idset, submission):
|
||||
# exec the check function
|
||||
if isinstance(self.code, basestring):
|
||||
try:
|
||||
exec self.code in self.context['global_context'], self.context
|
||||
correct = self.context['correct']
|
||||
messages = self.context['messages']
|
||||
overall_message = self.context['overall_message']
|
||||
|
||||
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
|
||||
except Exception as err:
|
||||
self._handle_exec_exception(err)
|
||||
|
||||
else:
|
||||
# self.code is not a string; assume its a function
|
||||
# self.code is not a string; it's a function we created earlier.
|
||||
|
||||
# this is an interface to the Tutor2 check functions
|
||||
fn = self.code
|
||||
ret = None
|
||||
answer_given = submission[0] if (len(idset) == 1) else submission
|
||||
kwnames = self.xml.get("cfn_extra_args", "").split()
|
||||
kwargs = {n:self.context.get(n) for n in kwnames}
|
||||
log.debug(" submission = %s" % submission)
|
||||
try:
|
||||
answer_given = submission[0] if (
|
||||
len(idset) == 1) else submission
|
||||
# handle variable number of arguments in check function, for backwards compatibility
|
||||
# with various Tutor2 check functions
|
||||
args = [self.expect, answer_given,
|
||||
student_answers, self.answer_ids[0]]
|
||||
argspec = inspect.getargspec(fn)
|
||||
nargs = len(argspec.args) - len(argspec.defaults or [])
|
||||
kwargs = {}
|
||||
for argname in argspec.args[nargs:]:
|
||||
kwargs[argname] = self.context[
|
||||
argname] if argname in self.context else None
|
||||
|
||||
log.debug('[customresponse] answer_given=%s' % answer_given)
|
||||
log.debug('nargs=%d, args=%s, kwargs=%s' % (
|
||||
nargs, args, kwargs))
|
||||
|
||||
ret = fn(*args[:nargs], **kwargs)
|
||||
|
||||
ret = fn(self.expect, answer_given, **kwargs)
|
||||
except Exception as err:
|
||||
self._handle_exec_exception(err)
|
||||
|
||||
if type(ret) == dict:
|
||||
|
||||
log.debug(
|
||||
"[courseware.capa.responsetypes.customresponse.get_score] ret = %s",
|
||||
ret
|
||||
)
|
||||
if isinstance(ret, dict):
|
||||
# One kind of dictionary the check function can return has the
|
||||
# form {'ok': BOOLEAN, 'msg': STRING}
|
||||
# If there are multiple inputs, they all get marked
|
||||
# to the same correct/incorrect value
|
||||
if 'ok' in ret:
|
||||
correct = ['correct'] * len(idset) if ret[
|
||||
'ok'] else ['incorrect'] * len(idset)
|
||||
correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
|
||||
msg = ret.get('msg', None)
|
||||
msg = self.clean_message_html(msg)
|
||||
|
||||
# If there is only one input, apply the message to that input
|
||||
# Otherwise, apply the message to the whole problem
|
||||
if len(idset) > 1:
|
||||
overall_message = msg
|
||||
self.context['overall_message'] = msg
|
||||
else:
|
||||
messages[0] = msg
|
||||
self.context['messages'][0] = msg
|
||||
|
||||
# Another kind of dictionary the check function can return has
|
||||
# the form:
|
||||
@@ -1155,6 +1147,8 @@ def sympy_check2():
|
||||
msg = (self.clean_message_html(input_dict['msg'])
|
||||
if 'msg' in input_dict else None)
|
||||
messages.append(msg)
|
||||
self.context['messages'] = messages
|
||||
self.context['overall_message'] = overall_message
|
||||
|
||||
# Otherwise, we do not recognize the dictionary
|
||||
# Raise an exception
|
||||
@@ -1163,25 +1157,10 @@ def sympy_check2():
|
||||
raise ResponseError(
|
||||
"CustomResponse: check function returned an invalid dict")
|
||||
|
||||
# The check function can return a boolean value,
|
||||
# indicating whether all inputs should be marked
|
||||
# correct or incorrect
|
||||
else:
|
||||
n = len(idset)
|
||||
correct = ['correct'] * n if ret else ['incorrect'] * n
|
||||
correct = ['correct' if ret else 'incorrect'] * len(idset)
|
||||
|
||||
# build map giving "correct"ness of the answer(s)
|
||||
correct_map = CorrectMap()
|
||||
|
||||
overall_message = self.clean_message_html(overall_message)
|
||||
correct_map.set_overall_message(overall_message)
|
||||
|
||||
for k in range(len(idset)):
|
||||
npoints = (self.maxpoints[idset[k]]
|
||||
if correct[k] == 'correct' else 0)
|
||||
correct_map.set(idset[k], correct[k], msg=messages[k],
|
||||
npoints=npoints)
|
||||
return correct_map
|
||||
self.context['correct'] = correct
|
||||
|
||||
def clean_message_html(self, msg):
|
||||
|
||||
@@ -1253,24 +1232,38 @@ class SymbolicResponse(CustomResponse):
|
||||
"""
|
||||
Symbolic math response checking, using symmath library.
|
||||
"""
|
||||
snippets = [{'snippet': r'''<problem>
|
||||
<text>Compute \[ \exp\left(-i \frac{\theta}{2} \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) \]
|
||||
and give the resulting \(2\times 2\) matrix: <br/>
|
||||
<symbolicresponse answer="">
|
||||
<textline size="40" math="1" />
|
||||
</symbolicresponse>
|
||||
<br/>
|
||||
Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>.
|
||||
</text>
|
||||
</problem>'''}]
|
||||
|
||||
response_tag = 'symbolicresponse'
|
||||
max_inputfields = 1
|
||||
|
||||
def setup_response(self):
|
||||
# Symbolic response always uses symmath_check()
|
||||
# If the XML did not specify this, then set it now
|
||||
# Otherwise, we get an error from the superclass
|
||||
self.xml.set('cfn', 'symmath_check')
|
||||
code = "from symmath import *"
|
||||
exec code in self.context, self.context
|
||||
CustomResponse.setup_response(self)
|
||||
|
||||
# Let CustomResponse do its setup
|
||||
super(SymbolicResponse, self).setup_response()
|
||||
|
||||
def execute_check_function(self, idset, submission):
|
||||
from symmath import symmath_check
|
||||
try:
|
||||
# Since we have limited max_inputfields to 1,
|
||||
# we can assume that there is only one submission
|
||||
answer_given = submission[0]
|
||||
|
||||
ret = symmath_check(
|
||||
self.expect, answer_given,
|
||||
dynamath=self.context.get('dynamath'),
|
||||
options=self.context.get('options'),
|
||||
debug=self.context.get('debug'),
|
||||
)
|
||||
except Exception as err:
|
||||
log.error("oops in symbolicresponse (cfn) error %s" % err)
|
||||
log.error(traceback.format_exc())
|
||||
raise Exception("oops in symbolicresponse (cfn) error %s" % err)
|
||||
self.context['messages'][0] = self.clean_message_html(ret['msg'])
|
||||
self.context['correct'] = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
@@ -1325,10 +1318,8 @@ class CodeResponse(LoncapaResponse):
|
||||
# Check if XML uses the ExternalResponse format or the generic
|
||||
# CodeResponse format
|
||||
codeparam = self.xml.find('codeparam')
|
||||
if codeparam is None:
|
||||
self._parse_externalresponse_xml()
|
||||
else:
|
||||
self._parse_coderesponse_xml(codeparam)
|
||||
assert codeparam is not None, "Unsupported old format! <coderesponse> without <codeparam>"
|
||||
self._parse_coderesponse_xml(codeparam)
|
||||
|
||||
def _parse_coderesponse_xml(self, codeparam):
|
||||
'''
|
||||
@@ -1348,62 +1339,6 @@ class CodeResponse(LoncapaResponse):
|
||||
self.answer = find_with_default(codeparam, 'answer_display',
|
||||
'No answer provided.')
|
||||
|
||||
def _parse_externalresponse_xml(self):
|
||||
'''
|
||||
VS[compat]: Suppport for old ExternalResponse XML format. When successful, sets:
|
||||
self.initial_display
|
||||
self.answer (an answer to display to the student in the LMS)
|
||||
self.payload
|
||||
'''
|
||||
answer = self.xml.find('answer')
|
||||
|
||||
if answer is not None:
|
||||
answer_src = answer.get('src')
|
||||
if answer_src is not None:
|
||||
code = self.system.filesystem.open('src/' + answer_src).read()
|
||||
else:
|
||||
code = answer.text
|
||||
else: # no <answer> stanza; get code from <script>
|
||||
code = self.context['script_code']
|
||||
if not code:
|
||||
msg = '%s: Missing answer script code for coderesponse' % unicode(
|
||||
self)
|
||||
msg += "\nSee XML source line %s" % getattr(
|
||||
self.xml, 'sourceline', '<unavailable>')
|
||||
raise LoncapaProblemError(msg)
|
||||
|
||||
tests = self.xml.get('tests')
|
||||
|
||||
# Extract 'answer' and 'initial_display' from XML. Note that the code to be exec'ed here is:
|
||||
# (1) Internal edX code, i.e. NOT student submissions, and
|
||||
# (2) The code should only define the strings 'initial_display', 'answer',
|
||||
# 'preamble', 'test_program'
|
||||
# following the ExternalResponse XML format
|
||||
penv = {}
|
||||
penv['__builtins__'] = globals()['__builtins__']
|
||||
try:
|
||||
exec(code, penv, penv)
|
||||
except Exception as err:
|
||||
log.error(
|
||||
'Error in CodeResponse %s: Error in problem reference code' % err)
|
||||
raise Exception(err)
|
||||
try:
|
||||
self.answer = penv['answer']
|
||||
self.initial_display = penv['initial_display']
|
||||
except Exception as err:
|
||||
log.error("Error in CodeResponse %s: Problem reference code does not define"
|
||||
" 'answer' and/or 'initial_display' in <answer>...</answer>" % err)
|
||||
raise Exception(err)
|
||||
|
||||
# Finally, make the ExternalResponse input XML format conform to the generic
|
||||
# exteral grader interface
|
||||
# The XML tagging of grader_payload is pyxserver-specific
|
||||
grader_payload = '<pyxserver>'
|
||||
grader_payload += '<tests>' + tests + '</tests>\n'
|
||||
grader_payload += '<processor>' + code + '</processor>'
|
||||
grader_payload += '</pyxserver>'
|
||||
self.payload = {'grader_payload': grader_payload}
|
||||
|
||||
def get_score(self, student_answers):
|
||||
try:
|
||||
# Note that submission can be a file
|
||||
@@ -1583,44 +1518,6 @@ class ExternalResponse(LoncapaResponse):
|
||||
Typically used by coding problems.
|
||||
|
||||
'''
|
||||
snippets = [{'snippet': '''<externalresponse tests="repeat:10,generate">
|
||||
<textbox rows="10" cols="70" mode="python"/>
|
||||
<answer><![CDATA[
|
||||
initial_display = """
|
||||
def inc(x):
|
||||
"""
|
||||
|
||||
answer = """
|
||||
def inc(n):
|
||||
return n+1
|
||||
"""
|
||||
preamble = """
|
||||
import sympy
|
||||
"""
|
||||
test_program = """
|
||||
import random
|
||||
|
||||
def testInc(n = None):
|
||||
if n is None:
|
||||
n = random.randint(2, 20)
|
||||
print 'Test is: inc(%d)'%n
|
||||
return str(inc(n))
|
||||
|
||||
def main():
|
||||
f = os.fdopen(3,'w')
|
||||
test = int(sys.argv[1])
|
||||
rndlist = map(int,os.getenv('rndlist').split(','))
|
||||
random.seed(rndlist[0])
|
||||
if test == 1: f.write(testInc(0))
|
||||
elif test == 2: f.write(testInc(1))
|
||||
else: f.write(testInc())
|
||||
f.close()
|
||||
|
||||
main()
|
||||
"""
|
||||
]]>
|
||||
</answer>
|
||||
</externalresponse>'''}]
|
||||
|
||||
response_tag = 'externalresponse'
|
||||
allowed_inputfields = ['textline', 'textbox']
|
||||
@@ -1766,23 +1663,6 @@ class FormulaResponse(LoncapaResponse):
|
||||
'''
|
||||
Checking of symbolic math response using numerical sampling.
|
||||
'''
|
||||
snippets = [{'snippet': '''<problem>
|
||||
|
||||
<script type="loncapa/python">
|
||||
I = "m*c^2"
|
||||
</script>
|
||||
|
||||
<text>
|
||||
<br/>
|
||||
Give an equation for the relativistic energy of an object with mass m.
|
||||
</text>
|
||||
<formularesponse type="cs" samples="m,c@1,2:3,4#10" answer="$I">
|
||||
<responseparam description="Numerical Tolerance" type="tolerance"
|
||||
default="0.00001" name="tol" />
|
||||
<textline size="40" math="1" />
|
||||
</formularesponse>
|
||||
|
||||
</problem>'''}]
|
||||
|
||||
response_tag = 'formularesponse'
|
||||
hint_tag = 'formulahint'
|
||||
@@ -1927,21 +1807,18 @@ class SchematicResponse(LoncapaResponse):
|
||||
self.code = answer.text
|
||||
|
||||
def get_score(self, student_answers):
|
||||
from capa_problem import global_context
|
||||
submission = [json.loads(student_answers[
|
||||
k]) for k in sorted(self.answer_ids)]
|
||||
#from capa_problem import global_context
|
||||
submission = [
|
||||
json.loads(student_answers[k]) for k in sorted(self.answer_ids)
|
||||
]
|
||||
self.context.update({'submission': submission})
|
||||
|
||||
try:
|
||||
exec self.code in global_context, self.context
|
||||
|
||||
safe_exec.safe_exec(self.code, self.context, cache=self.system.cache)
|
||||
except Exception as err:
|
||||
_, _, traceback_obj = sys.exc_info()
|
||||
raise ResponseError, ResponseError(err.message), traceback_obj
|
||||
|
||||
msg = 'Error %s in evaluating SchematicResponse' % err
|
||||
raise ResponseError(msg)
|
||||
cmap = CorrectMap()
|
||||
cmap.set_dict(dict(zip(sorted(
|
||||
self.answer_ids), self.context['correct'])))
|
||||
cmap.set_dict(dict(zip(sorted(self.answer_ids), self.context['correct'])))
|
||||
return cmap
|
||||
|
||||
def get_answers(self):
|
||||
@@ -1977,19 +1854,6 @@ class ImageResponse(LoncapaResponse):
|
||||
Returns:
|
||||
True, if click is inside any region or rectangle. Otherwise False.
|
||||
"""
|
||||
snippets = [{'snippet': '''<imageresponse>
|
||||
<imageinput src="image1.jpg" width="200" height="100"
|
||||
rectangle="(10,10)-(20,30)" />
|
||||
<imageinput src="image2.jpg" width="210" height="130"
|
||||
rectangle="(12,12)-(40,60)" />
|
||||
<imageinput src="image3.jpg" width="210" height="130"
|
||||
rectangle="(10,10)-(20,30);(12,12)-(40,60)" />
|
||||
<imageinput src="image4.jpg" width="811" height="610"
|
||||
rectangle="(10,10)-(20,30);(12,12)-(40,60)"
|
||||
regions="[[[10,10], [20,30], [40, 10]], [[100,100], [120,130], [110,150]]]"/>
|
||||
<imageinput src="image5.jpg" width="200" height="200"
|
||||
regions="[[[10,10], [20,30], [40, 10]], [[100,100], [120,130], [110,150]]]"/>
|
||||
</imageresponse>'''}]
|
||||
|
||||
response_tag = 'imageresponse'
|
||||
allowed_inputfields = ['imageinput']
|
||||
|
||||
51
common/lib/capa/capa/safe_exec/README.rst
Normal file
51
common/lib/capa/capa/safe_exec/README.rst
Normal file
@@ -0,0 +1,51 @@
|
||||
Configuring Capa sandboxed execution
|
||||
====================================
|
||||
|
||||
Capa problems can contain code authored by the course author. We need to
|
||||
execute that code in a sandbox. We use CodeJail as the sandboxing facility,
|
||||
but it needs to be configured specifically for Capa's use.
|
||||
|
||||
As a developer, you don't have to do anything to configure sandboxing if you
don't want to; everything will operate properly, but the course-authored code
will run without sandbox protection.
|
||||
|
||||
If you want to configure sandboxing, you're going to use the `README from
|
||||
CodeJail`__, with a few customized tweaks.
|
||||
|
||||
__ https://github.com/edx/codejail/blob/master/README.rst
|
||||
|
||||
|
||||
1. At the instruction to install packages into the sandboxed code, you'll
|
||||
need to install both `pre-sandbox-requirements.txt` and
|
||||
`sandbox-requirements.txt`::
|
||||
|
||||
$ sudo pip install -r pre-sandbox-requirements.txt
|
||||
$ sudo pip install -r sandbox-requirements.txt
|
||||
|
||||
2. At the instruction to create the AppArmor profile, you'll need a line in
|
||||
the profile for the sandbox packages. <EDXPLATFORM> is the full path to
|
||||
your edx_platform repo::
|
||||
|
||||
<EDXPLATFORM>/common/lib/sandbox-packages/** r,
|
||||
|
||||
3. You can configure resource limits in settings.py. A CODE_JAIL setting is
|
||||
available, a dictionary. The "limits" key lets you adjust the limits for
|
||||
CPU time, real time, and memory use. Setting any of them to zero disables
|
||||
that limit::
|
||||
|
||||
# in settings.py...
|
||||
CODE_JAIL = {
|
||||
# Configurable limits.
|
||||
'limits': {
|
||||
# How many CPU seconds can jailed code use?
|
||||
'CPU': 1,
|
||||
# How many real-time seconds will a sandbox survive?
|
||||
'REALTIME': 1,
|
||||
# How much memory (in bytes) can a sandbox use?
|
||||
'VMEM': 30000000,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
That's it. Once you've finished the CodeJail configuration instructions,
|
||||
your course-hosted Python code should be run securely.
|
||||
3
common/lib/capa/capa/safe_exec/__init__.py
Normal file
3
common/lib/capa/capa/safe_exec/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""Capa's specialized use of codejail.safe_exec."""
|
||||
|
||||
from .safe_exec import safe_exec, update_hash
|
||||
42
common/lib/capa/capa/safe_exec/lazymod.py
Normal file
42
common/lib/capa/capa/safe_exec/lazymod.py
Normal file
@@ -0,0 +1,42 @@
|
||||
"""A module proxy for delayed importing of modules.
|
||||
|
||||
From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
|
||||
in the public domain.
|
||||
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
class LazyModule(object):
    """A lazy module proxy.

    Stands in for a module and defers the real ``__import__`` until the
    first attribute access.  Dotted submodules (e.g. ``chem.chemcalc``)
    are imported on demand when first accessed.
    """

    def __init__(self, modname):
        # Write through __dict__ to avoid triggering __getattr__.
        self.__dict__['__name__'] = modname
        self._set_mod(None)

    def _set_mod(self, mod):
        """Record `mod` as the real module; None means not loaded yet."""
        if mod is not None:
            # Adopt the real module's namespace so later attribute
            # lookups hit it directly without going through __getattr__.
            self.__dict__ = mod.__dict__
        self.__dict__['_lazymod_mod'] = mod

    def _load_mod(self):
        """Actually import the module we are standing in for."""
        __import__(self.__name__)
        self._set_mod(sys.modules[self.__name__])

    def __getattr__(self, name):
        # First access: perform the deferred import.
        if self.__dict__['_lazymod_mod'] is None:
            self._load_mod()

        mod = self.__dict__['_lazymod_mod']

        if hasattr(mod, name):
            return getattr(mod, name)
        else:
            # `name` may be a submodule that hasn't been imported yet.
            try:
                subname = '%s.%s' % (self.__name__, name)
                __import__(subname)
                submod = getattr(mod, name)
            except ImportError:
                raise AttributeError("'module' object has no attribute %r" % name)
            # BUG FIX: the original did `LazyModule(subname, submod)`, but
            # __init__ takes only `modname`, so every dotted access raised
            # TypeError.  The submodule is already imported here; cache the
            # real module object directly.
            self.__dict__[name] = submod
            return submod
||||
130
common/lib/capa/capa/safe_exec/safe_exec.py
Normal file
130
common/lib/capa/capa/safe_exec/safe_exec.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""Capa's specialized use of codejail.safe_exec."""
|
||||
|
||||
from codejail.safe_exec import safe_exec as codejail_safe_exec
|
||||
from codejail.safe_exec import json_safe, SafeExecException
|
||||
from . import lazymod
|
||||
from statsd import statsd
|
||||
|
||||
import hashlib
|
||||
|
||||
# Establish the Python environment for Capa.
|
||||
# Capa assumes float-friendly division always.
|
||||
# The name "random" is a properly-seeded stand-in for the random module.
|
||||
CODE_PROLOG = """\
|
||||
from __future__ import division
|
||||
|
||||
import random as random_module
|
||||
import sys
|
||||
random = random_module.Random(%r)
|
||||
random.Random = random_module.Random
|
||||
del random_module
|
||||
sys.modules['random'] = random
|
||||
"""
|
||||
|
||||
ASSUMED_IMPORTS=[
|
||||
("numpy", "numpy"),
|
||||
("math", "math"),
|
||||
("scipy", "scipy"),
|
||||
("calc", "calc"),
|
||||
("eia", "eia"),
|
||||
("chemcalc", "chem.chemcalc"),
|
||||
("chemtools", "chem.chemtools"),
|
||||
("miller", "chem.miller"),
|
||||
("draganddrop", "verifiers.draganddrop"),
|
||||
]
|
||||
|
||||
# We'll need the code from lazymod.py for use in safe_exec, so read it now.
|
||||
lazymod_py_file = lazymod.__file__
|
||||
if lazymod_py_file.endswith("c"):
|
||||
lazymod_py_file = lazymod_py_file[:-1]
|
||||
|
||||
lazymod_py = open(lazymod_py_file).read()
|
||||
|
||||
LAZY_IMPORTS = [lazymod_py]
|
||||
for name, modname in ASSUMED_IMPORTS:
|
||||
LAZY_IMPORTS.append("{} = LazyModule('{}')\n".format(name, modname))
|
||||
|
||||
LAZY_IMPORTS = "".join(LAZY_IMPORTS)
|
||||
|
||||
|
||||
def update_hash(hasher, obj):
    """
    Update a `hashlib` hasher with a nested object.

    To properly cache nested structures, we need to compute a hash from the
    entire structure, canonicalizing at every level.

    `hasher`'s `.update()` method is called a number of times, touching all of
    `obj` in the process.  Only primitive JSON-safe types are supported.

    Dict keys are visited in sorted order so two equal dicts with different
    internal key order produce the same hash.
    """
    # hashlib hashers require bytes; encode explicitly so this also works
    # on Python 3 (on Python 2, encoding an ASCII str is a no-op).
    hasher.update(str(type(obj)).encode("utf-8"))
    if isinstance(obj, (tuple, list)):
        for element in obj:
            update_hash(hasher, element)
    elif isinstance(obj, dict):
        for key in sorted(obj):
            update_hash(hasher, key)
            update_hash(hasher, obj[key])
    else:
        hasher.update(repr(obj).encode("utf-8"))
||||
|
||||
|
||||
@statsd.timed('capa.safe_exec.time')
def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None):
    """
    Execute python code safely.

    `code` is the Python code to execute.  It has access to the globals in
    `globals_dict`, and any changes it makes to those globals are visible in
    `globals_dict` when this function returns.

    `random_seed` will be used to seed the `random` module available to the
    code.

    `python_path` is a list of directories to add to the Python path before
    execution.

    `cache` is an object with .get(key) and .set(key, value) methods.  It will
    be used to cache the execution, taking into account the code, the values
    of the globals, and the random seed.

    Raises `SafeExecException` if the executed code raises, whether run now
    or replayed from the cache.
    """
    # Check the cache for a previous result.
    if cache:
        safe_globals = json_safe(globals_dict)
        md5er = hashlib.md5()
        # Hash the code plus the canonicalized globals; the digest (not the
        # raw code) goes into the key, so large problems don't blow the
        # backend's key-length limit.
        md5er.update(repr(code))
        update_hash(md5er, safe_globals)
        key = "safe_exec.%r.%s" % (random_seed, md5er.hexdigest())
        cached = cache.get(key)
        if cached is not None:
            # We have a cached result. The result is a pair: the exception
            # message, if any, else None; and the resulting globals dictionary.
            emsg, cleaned_results = cached
            globals_dict.update(cleaned_results)
            if emsg:
                raise SafeExecException(emsg)
            return

    # Create the complete code we'll run.
    code_prolog = CODE_PROLOG % random_seed

    # Run the code!  Results are side effects in globals_dict.
    try:
        codejail_safe_exec(
            code_prolog + LAZY_IMPORTS + code, globals_dict,
            python_path=python_path,
        )
    except SafeExecException as e:
        emsg = e.message
    else:
        emsg = None

    # Put the result back in the cache.  This is complicated by the fact that
    # the globals dict might not be entirely serializable.
    if cache:
        cleaned_results = json_safe(globals_dict)
        cache.set(key, (emsg, cleaned_results))

    # If an exception happened, raise it now.
    # BUG FIX: the original `raise e` relied on the except-clause binding
    # leaking out of the handler (a Python-2-only behavior); re-raise from
    # the message instead, matching what the cache-replay path raises.
    if emsg:
        raise SafeExecException(emsg)
||||
@@ -0,0 +1 @@
|
||||
THE_CONST = 23
|
||||
44
common/lib/capa/capa/safe_exec/tests/test_lazymod.py
Normal file
44
common/lib/capa/capa/safe_exec/tests/test_lazymod.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""Test lazymod.py"""
|
||||
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
from capa.safe_exec.lazymod import LazyModule
|
||||
|
||||
|
||||
class ModuleIsolation(object):
    """
    Manage changes to sys.modules so that we can roll back imported modules.

    On construction, a snapshot of the currently imported module names is
    taken.  Calling `clean_up()` deletes every module imported after that
    point, so a later import executes the module's code for real again.
    """
    def __init__(self):
        # Snapshot: the names of every module imported so far.
        self.mods = set(sys.modules)

    def clean_up(self):
        # Anything absent from the snapshot arrived after we were created...
        added = [modname for modname in sys.modules if modname not in self.mods]
        # ...so evict it from sys.modules.
        for modname in added:
            del sys.modules[modname]
||||
|
||||
|
||||
class TestLazyMod(unittest.TestCase):
    """Exercise LazyModule against real stdlib modules."""

    def setUp(self):
        # Roll back anything each test imports.
        self.addCleanup(ModuleIsolation().clean_up)

    def test_simple(self):
        # Pick a stdlib module that nothing has imported yet.
        self.assertNotIn("colorsys", sys.modules)
        lazy_colorsys = LazyModule("colorsys")
        hue = lazy_colorsys.rgb_to_hsv(.3, .4, .2)[0]
        self.assertEqual(hue, 0.25)

    def test_dotted(self):
        self.assertNotIn("email.utils", sys.modules)
        lazy_email_utils = LazyModule("email.utils")
        self.assertEqual(lazy_email_utils.quote('"hi"'), r'\"hi\"')
||||
281
common/lib/capa/capa/safe_exec/tests/test_safe_exec.py
Normal file
281
common/lib/capa/capa/safe_exec/tests/test_safe_exec.py
Normal file
@@ -0,0 +1,281 @@
|
||||
"""Test safe_exec.py"""
|
||||
|
||||
import hashlib
|
||||
import os.path
|
||||
import random
|
||||
import textwrap
|
||||
import unittest
|
||||
|
||||
from capa.safe_exec import safe_exec, update_hash
|
||||
from codejail.safe_exec import SafeExecException
|
||||
|
||||
|
||||
class TestSafeExec(unittest.TestCase):
    """Basic behavior of capa's safe_exec."""

    def test_set_values(self):
        globs = {}
        safe_exec("a = 17", globs)
        self.assertEqual(globs['a'], 17)

    def test_division(self):
        globs = {}
        # Future division is in force inside the sandbox: 1/2 is 0.5, not 0.
        safe_exec("a = 1/2", globs)
        self.assertEqual(globs['a'], 0.5)

    def test_assumed_imports(self):
        globs = {}
        # math is usable without an explicit import.
        safe_exec("a = int(math.pi)", globs)
        self.assertEqual(globs['a'], 3)

    def test_random_seeding(self):
        globs = {}
        rng = random.Random(17)
        expected = [rng.randint(0, 999) for _ in xrange(100)]

        # Without a seed, the sandboxed results won't match ours.
        safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", globs)
        self.assertNotEqual(globs['rnums'], expected)

        # With a seed, they will.
        safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", globs, random_seed=17)
        self.assertEqual(globs['rnums'], expected)

    def test_random_is_still_importable(self):
        globs = {}
        rng = random.Random(17)
        expected = [rng.randint(0, 999) for _ in xrange(100)]

        # Even an explicit `import random` in the sandbox yields the
        # seeded stand-in.
        safe_exec(
            "import random\n"
            "rnums = [random.randint(0, 999) for _ in xrange(100)]\n",
            globs, random_seed=17)
        self.assertEqual(globs['rnums'], expected)

    def test_python_lib(self):
        pylib = os.path.dirname(__file__) + "/test_files/pylib"
        globs = {}
        safe_exec(
            "import constant; a = constant.THE_CONST",
            globs, python_path=[pylib]
        )

    def test_raising_exceptions(self):
        globs = {}
        with self.assertRaises(SafeExecException) as caught:
            safe_exec("1/0", globs)
        self.assertIn("ZeroDivisionError", caught.exception.message)
||||
|
||||
|
||||
class DictCache(object):
    """A cache implementation over a simple dict, for testing."""

    def __init__(self, d):
        self.cache = d

    def _check_key(self, key):
        # Real cache backends (e.g. memcached) limit key length; mimic
        # that so tests catch over-long keys.
        assert len(key) <= 250

    def get(self, key):
        self._check_key(key)
        return self.cache.get(key)

    def set(self, key, value):
        self._check_key(key)
        self.cache[key] = value
||||
class TestSafeExecCaching(unittest.TestCase):
    """Test that caching works on safe_exec."""

    def test_cache_miss_then_hit(self):
        globs = {}
        cache = {}

        # First run misses the cache and really executes.
        safe_exec("a = int(math.pi)", globs, cache=DictCache(cache))
        self.assertEqual(globs['a'], 3)
        # The result landed in the cache as (error-message, globals).
        self.assertEqual(cache.values()[0], (None, {'a': 3}))

        # Tamper with the cached value; a second run must replay it.
        cache[cache.keys()[0]] = (None, {'a': 17})

        globs = {}
        safe_exec("a = int(math.pi)", globs, cache=DictCache(cache))
        self.assertEqual(globs['a'], 17)

    def test_cache_large_code_chunk(self):
        # Caching used to die on memcache with more than 250 bytes of code.
        # Check that it doesn't any more.
        code = "a = 0\n" + ("a += 1\n" * 12345)

        globs = {}
        safe_exec(code, globs, cache=DictCache({}))
        self.assertEqual(globs['a'], 12345)

    def test_cache_exceptions(self):
        # Used to be that running code that raised an exception didn't cache
        # the result. Check that now it does.
        code = "1/0"
        globs = {}
        cache = {}
        with self.assertRaises(SafeExecException):
            safe_exec(code, globs, cache=DictCache(cache))

        # The exception should be in the cache now.
        self.assertEqual(len(cache), 1)
        cached_emsg, _cached_globals = cache.values()[0]
        self.assertIn("ZeroDivisionError", cached_emsg)

        # Change the value stored in the cache; the raised message follows.
        cache[cache.keys()[0]] = ("Hey there!", {})

        with self.assertRaises(SafeExecException):
            safe_exec(code, globs, cache=DictCache(cache))

        self.assertEqual(len(cache), 1)
        cached_emsg, _cached_globals = cache.values()[0]
        self.assertEqual("Hey there!", cached_emsg)

        # Change it again, now no exception!
        cache[cache.keys()[0]] = (None, {'a': 17})
        safe_exec(code, globs, cache=DictCache(cache))
        self.assertEqual(globs['a'], 17)

    def test_unicode_submission(self):
        # Check that using non-ASCII unicode does not raise an encoding error.
        # Try several non-ASCII code points.
        for code_point in [129, 500, 2 ** 8 - 1, 2 ** 16 - 1]:
            source = unicode("# ") + unichr(code_point)
            try:
                safe_exec(source, {}, cache=DictCache({}))
            except UnicodeEncodeError:
                self.fail("Tried executing code with non-ASCII unicode: {0}".format(code_point))
||||
|
||||
|
||||
class TestUpdateHash(unittest.TestCase):
    """Test the safe_exec.update_hash function to be sure it canonicalizes properly."""

    def hash_obj(self, obj):
        """Return the md5 hex digest `update_hash` produces for `obj`."""
        hasher = hashlib.md5()
        update_hash(hasher, obj)
        return hasher.hexdigest()

    def equal_but_different_dicts(self):
        """
        Make two equal dicts with different key order.

        Simple literals won't do it. Filling one and then shrinking it will
        make them different.
        """
        d1 = {letter: 1 for letter in "abcdefghijklmnopqrstuvwxyz"}
        d2 = dict(d1)
        # Grow d2 with many extra keys, then delete them all again: d2 ends
        # equal to d1, but with a different internal key ordering.
        for i in xrange(10000):
            d2[i] = 1
        for i in xrange(10000):
            del d2[i]

        # Check that our dicts are equal, but with different key order.
        self.assertEqual(d1, d2)
        self.assertNotEqual(d1.keys(), d2.keys())

        return d1, d2

    def test_simple_cases(self):
        self.assertNotEqual(self.hash_obj(1), self.hash_obj(10))
        self.assertNotEqual(self.hash_obj(1), self.hash_obj("1"))

    def test_list_ordering(self):
        self.assertNotEqual(
            self.hash_obj({'a': [1, 2, 3]}),
            self.hash_obj({'a': [3, 2, 1]}),
        )

    def test_dict_ordering(self):
        d1, d2 = self.equal_but_different_dicts()
        self.assertEqual(self.hash_obj(d1), self.hash_obj(d2))

    def test_deep_ordering(self):
        d1, d2 = self.equal_but_different_dicts()
        nested1 = {'a': [1, 2, [d1], 3, 4]}
        nested2 = {'a': [1, 2, [d2], 3, 4]}
        self.assertEqual(self.hash_obj(nested1), self.hash_obj(nested2))
|
||||
class TestRealProblems(unittest.TestCase):
    """Run real course-authored problem code through safe_exec end-to-end."""

    def test_802x(self):
        # An actual 8.02x circuit problem: randomized component values,
        # a numpy linear solve for the two loop currents, then formatted
        # answer strings.  We only check that it runs and produces answers.
        code = textwrap.dedent("""\
            import math
            import random
            import numpy
            e=1.602e-19 #C
            me=9.1e-31 #kg
            mp=1.672e-27 #kg
            eps0=8.854e-12 #SI units
            mu0=4e-7*math.pi #SI units

            Rd1=random.randrange(1,30,1)
            Rd2=random.randrange(30,50,1)
            Rd3=random.randrange(50,70,1)
            Rd4=random.randrange(70,100,1)
            Rd5=random.randrange(100,120,1)

            Vd1=random.randrange(1,20,1)
            Vd2=random.randrange(20,40,1)
            Vd3=random.randrange(40,60,1)

            #R=[0,10,30,50,70,100] #Ohm
            #V=[0,12,24,36] # Volt

            R=[0,Rd1,Rd2,Rd3,Rd4,Rd5] #Ohms
            V=[0,Vd1,Vd2,Vd3] #Volts
            #here the currents IL and IR are defined as in figure ps3_p3_fig2
            a=numpy.array([ [ R[1]+R[4]+R[5],R[4] ],[R[4], R[2]+R[3]+R[4] ] ])
            b=numpy.array([V[1]-V[2],-V[3]-V[2]])
            x=numpy.linalg.solve(a,b)
            IL='%.2e' % x[0]
            IR='%.2e' % x[1]
            ILR='%.2e' % (x[0]+x[1])
            def sign(x):
                return abs(x)/x

            RW="Rightwards"
            LW="Leftwards"
            UW="Upwards"
            DW="Downwards"
            I1='%.2e' % abs(x[0])
            I1d=LW if sign(x[0])==1 else RW
            I1not=LW if I1d==RW else RW
            I2='%.2e' % abs(x[1])
            I2d=RW if sign(x[1])==1 else LW
            I2not=LW if I2d==RW else RW
            I3='%.2e' % abs(x[1])
            I3d=DW if sign(x[1])==1 else UW
            I3not=DW if I3d==UW else UW
            I4='%.2e' % abs(x[0]+x[1])
            I4d=UW if sign(x[1]+x[0])==1 else DW
            I4not=DW if I4d==UW else UW
            I5='%.2e' % abs(x[0])
            I5d=RW if sign(x[0])==1 else LW
            I5not=LW if I5d==RW else RW
            VAP=-x[0]*R[1]-(x[0]+x[1])*R[4]
            VPN=-V[2]
            VGD=+V[1]-x[0]*R[1]+V[3]+x[1]*R[2]
            aVAP='%.2e' % VAP
            aVPN='%.2e' % VPN
            aVGD='%.2e' % VGD
            """)
        g = {}
        safe_exec(code, g)
        # If execution succeeded, the final answer variable is present.
        self.assertIn("aVAP", g)
|
||||
@@ -1,7 +1,7 @@
|
||||
import fs
|
||||
import fs.osfs
|
||||
import os
|
||||
import os, os.path
|
||||
|
||||
from capa.capa_problem import LoncapaProblem
|
||||
from mock import Mock, MagicMock
|
||||
|
||||
import xml.sax.saxutils as saxutils
|
||||
@@ -22,16 +22,28 @@ def calledback_url(dispatch = 'score_update'):
|
||||
xqueue_interface = MagicMock()
|
||||
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
|
||||
|
||||
test_system = Mock(
|
||||
ajax_url='courses/course_id/modx/a_location',
|
||||
track_function=Mock(),
|
||||
get_module=Mock(),
|
||||
render_template=tst_render_template,
|
||||
replace_urls=Mock(),
|
||||
user=Mock(),
|
||||
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
|
||||
debug=True,
|
||||
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
|
||||
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
|
||||
anonymous_student_id='student'
|
||||
)
|
||||
def test_system():
|
||||
"""
|
||||
Construct a mock ModuleSystem instance.
|
||||
|
||||
"""
|
||||
the_system = Mock(
|
||||
ajax_url='courses/course_id/modx/a_location',
|
||||
track_function=Mock(),
|
||||
get_module=Mock(),
|
||||
render_template=tst_render_template,
|
||||
replace_urls=Mock(),
|
||||
user=Mock(),
|
||||
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
|
||||
debug=True,
|
||||
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
|
||||
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
|
||||
anonymous_student_id='student',
|
||||
cache=None,
|
||||
can_execute_unsafe_code=lambda: False,
|
||||
)
|
||||
return the_system
|
||||
|
||||
def new_loncapa_problem(xml, system=None):
|
||||
"""Construct a `LoncapaProblem` suitable for unit tests."""
|
||||
return LoncapaProblem(xml, id='1', seed=723, system=system or test_system())
|
||||
|
||||
@@ -221,6 +221,8 @@ class CustomResponseXMLFactory(ResponseXMLFactory):
|
||||
cfn = kwargs.get('cfn', None)
|
||||
expect = kwargs.get('expect', None)
|
||||
answer = kwargs.get('answer', None)
|
||||
options = kwargs.get('options', None)
|
||||
cfn_extra_args = kwargs.get('cfn_extra_args', None)
|
||||
|
||||
# Create the response element
|
||||
response_element = etree.Element("customresponse")
|
||||
@@ -235,6 +237,33 @@ class CustomResponseXMLFactory(ResponseXMLFactory):
|
||||
answer_element = etree.SubElement(response_element, "answer")
|
||||
answer_element.text = str(answer)
|
||||
|
||||
if options:
|
||||
response_element.set('options', str(options))
|
||||
|
||||
if cfn_extra_args:
|
||||
response_element.set('cfn_extra_args', str(cfn_extra_args))
|
||||
|
||||
return response_element
|
||||
|
||||
def create_input_element(self, **kwargs):
|
||||
return ResponseXMLFactory.textline_input_xml(**kwargs)
|
||||
|
||||
|
||||
class SymbolicResponseXMLFactory(ResponseXMLFactory):
|
||||
""" Factory for creating <symbolicresponse> XML trees """
|
||||
|
||||
def create_response_element(self, **kwargs):
|
||||
cfn = kwargs.get('cfn', None)
|
||||
answer = kwargs.get('answer', None)
|
||||
options = kwargs.get('options', None)
|
||||
|
||||
response_element = etree.Element("symbolicresponse")
|
||||
if cfn:
|
||||
response_element.set('cfn', str(cfn))
|
||||
if answer:
|
||||
response_element.set('answer', str(answer))
|
||||
if options:
|
||||
response_element.set('options', str(options))
|
||||
return response_element
|
||||
|
||||
def create_input_element(self, **kwargs):
|
||||
@@ -638,12 +667,16 @@ class StringResponseXMLFactory(ResponseXMLFactory):
|
||||
Where *hint_prompt* is the string for which we show the hint,
|
||||
*hint_name* is an internal identifier for the hint,
|
||||
and *hint_text* is the text we show for the hint.
|
||||
|
||||
*hintfn*: The name of a function in the script to use for hints.
|
||||
|
||||
"""
|
||||
# Retrieve the **kwargs
|
||||
answer = kwargs.get("answer", None)
|
||||
case_sensitive = kwargs.get("case_sensitive", True)
|
||||
hint_list = kwargs.get('hints', None)
|
||||
assert(answer)
|
||||
hint_fn = kwargs.get('hintfn', None)
|
||||
assert answer
|
||||
|
||||
# Create the <stringresponse> element
|
||||
response_element = etree.Element("stringresponse")
|
||||
@@ -655,18 +688,24 @@ class StringResponseXMLFactory(ResponseXMLFactory):
|
||||
response_element.set("type", "cs" if case_sensitive else "ci")
|
||||
|
||||
# Add the hints if specified
|
||||
if hint_list:
|
||||
if hint_list or hint_fn:
|
||||
hintgroup_element = etree.SubElement(response_element, "hintgroup")
|
||||
for (hint_prompt, hint_name, hint_text) in hint_list:
|
||||
stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
|
||||
stringhint_element.set("answer", str(hint_prompt))
|
||||
stringhint_element.set("name", str(hint_name))
|
||||
if hint_list:
|
||||
assert not hint_fn
|
||||
for (hint_prompt, hint_name, hint_text) in hint_list:
|
||||
stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
|
||||
stringhint_element.set("answer", str(hint_prompt))
|
||||
stringhint_element.set("name", str(hint_name))
|
||||
|
||||
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
|
||||
hintpart_element.set("on", str(hint_name))
|
||||
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
|
||||
hintpart_element.set("on", str(hint_name))
|
||||
|
||||
hint_text_element = etree.SubElement(hintpart_element, "text")
|
||||
hint_text_element.text = str(hint_text)
|
||||
hint_text_element = etree.SubElement(hintpart_element, "text")
|
||||
hint_text_element.text = str(hint_text)
|
||||
|
||||
if hint_fn:
|
||||
assert not hint_list
|
||||
hintgroup_element.set("hintfn", hint_fn)
|
||||
|
||||
return response_element
|
||||
|
||||
@@ -705,3 +744,38 @@ class AnnotationResponseXMLFactory(ResponseXMLFactory):
|
||||
option_element.text = description
|
||||
|
||||
return input_element
|
||||
|
||||
|
||||
class SymbolicResponseXMLFactory(ResponseXMLFactory):
|
||||
""" Factory for producing <symbolicresponse> xml """
|
||||
|
||||
def create_response_element(self, **kwargs):
|
||||
""" Build the <symbolicresponse> XML element.
|
||||
|
||||
Uses **kwargs:
|
||||
|
||||
*expect*: The correct answer (a sympy string)
|
||||
|
||||
*options*: list of option strings to pass to symmath_check
|
||||
(e.g. 'matrix', 'qbit', 'imaginary', 'numerical')"""
|
||||
|
||||
# Retrieve **kwargs
|
||||
expect = kwargs.get('expect', '')
|
||||
options = kwargs.get('options', [])
|
||||
|
||||
# Symmath check expects a string of options
|
||||
options_str = ",".join(options)
|
||||
|
||||
# Construct the <symbolicresponse> element
|
||||
response_element = etree.Element('symbolicresponse')
|
||||
|
||||
if expect:
|
||||
response_element.set('expect', str(expect))
|
||||
|
||||
if options_str:
|
||||
response_element.set('options', str(options_str))
|
||||
|
||||
return response_element
|
||||
|
||||
def create_input_element(self, **kwargs):
|
||||
return ResponseXMLFactory.textline_input_xml(**kwargs)
|
||||
|
||||
@@ -26,7 +26,7 @@ class HelperTest(unittest.TestCase):
|
||||
Make sure that our helper function works!
|
||||
'''
|
||||
def check(self, d):
|
||||
xml = etree.XML(test_system.render_template('blah', d))
|
||||
xml = etree.XML(test_system().render_template('blah', d))
|
||||
self.assertEqual(d, extract_context(xml))
|
||||
|
||||
def test_extract_context(self):
|
||||
@@ -46,11 +46,11 @@ class SolutionRenderTest(unittest.TestCase):
|
||||
xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution)
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
renderer = lookup_tag('solution')(test_system, element)
|
||||
renderer = lookup_tag('solution')(test_system(), element)
|
||||
|
||||
self.assertEqual(renderer.id, 'solution_12')
|
||||
|
||||
# our test_system "renders" templates to a div with the repr of the context
|
||||
# Our test_system "renders" templates to a div with the repr of the context.
|
||||
xml = renderer.get_html()
|
||||
context = extract_context(xml)
|
||||
self.assertEqual(context, {'id': 'solution_12'})
|
||||
@@ -65,7 +65,7 @@ class MathRenderTest(unittest.TestCase):
|
||||
xml_str = """<math>{tex}</math>""".format(tex=latex_in)
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
renderer = lookup_tag('math')(test_system, element)
|
||||
renderer = lookup_tag('math')(test_system(), element)
|
||||
|
||||
self.assertEqual(renderer.mathstr, mathjax_out)
|
||||
|
||||
|
||||
480
common/lib/capa/capa/tests/test_files/snuggletex_correct.html
Normal file
480
common/lib/capa/capa/tests/test_files/snuggletex_correct.html
Normal file
@@ -0,0 +1,480 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html
|
||||
PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
|
||||
<head>
|
||||
<meta content="application/xhtml+xml; charset=UTF-8" http-equiv="Content-Type" />
|
||||
<meta content="SnuggleTeX" name="Generator" />
|
||||
<meta content="SnuggleTeX Documentation" name="description" />
|
||||
<meta content="David McKain" name="author" />
|
||||
<meta content="The University of Edinburgh" name="publisher" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/core.css" rel="stylesheet" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/webapp.css" rel="stylesheet" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/snuggletex.css" rel="stylesheet" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.css"
|
||||
rel="stylesheet" /><script src="/snuggletex-webapp-1.2.2/includes/jquery.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.js"
|
||||
type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/webapp.js" type="text/javascript"></script><title>SnuggleTeX - ASCIIMathML Enrichment Demo</title><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathML.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathMLwidget.js"
|
||||
type="text/javascript"></script></head>
|
||||
<body id="asciiMathMLUpConversionDemo">
|
||||
<table border="0" cellpadding="0" cellspacing="0" id="header" width="100%">
|
||||
<tr>
|
||||
<td align="left" id="logo" valign="top"><a class="headertext" href="http://www.ed.ac.uk"><img alt="The University of Edinburgh" height="84"
|
||||
src="/snuggletex-webapp-1.2.2/includes/uoe_logo.jpg"
|
||||
width="84" /></a></td>
|
||||
<td align="left">
|
||||
<h3>THE UNIVERSITY of EDINBURGH</h3>
|
||||
<h1>SCHOOL OF PHYSICS AND ASTRONOMY</h1>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
<h1 id="location"><a href="/snuggletex-webapp-1.2.2">SnuggleTeX (1.2.2)</a></h1>
|
||||
<div id="content">
|
||||
<div id="skipnavigation"><a href="#maincontent">Skip Navigation</a></div>
|
||||
<div id="navigation">
|
||||
<div id="navinner">
|
||||
<h2>About SnuggleTeX</h2>
|
||||
<ul>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/overview-and-features.html">Overview & Features</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/use-cases.html">Why Use SnuggleTeX?</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/license.html">License</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a></li>
|
||||
</ul>
|
||||
<h2>Demos & Samples</h2>
|
||||
<ul>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/MathInputDemo">Simple Math Input Demo</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/FullLaTeXInputDemo">Full LaTeX Input Demo</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichment Demo</a></li>
|
||||
<li><a class="selected" href="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo">ASCIIMathML Enrichment Demo</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output-samples.html">Web Output Samples</a></li>
|
||||
</ul>
|
||||
<h2>User Guide</h2>
|
||||
<ul>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/getting-snuggletex.html">Getting SnuggleTeX</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/software-requirements.html">Software Requirements</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/your-classpath.html">Setting up Your ClassPath</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/examples.html">Examples</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/basic-usage.html">Basic Usage</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/inputs.html">Parsing LaTeX Inputs</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/xml-or-dom-output.html">Creating XML String or DOM Outputs</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output.html">Creating Web Pages</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/error-reporting.html">Error Reporting</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/supported-latex.html">Supported LaTeX</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/advanced-usage.html">Advanced Usage</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">Semantic Enrichment</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/migrating-from-older-versions.html">Migrating from older versions</a></li>
|
||||
<li><a href="http://snuggletex.sourceforge.net/maven/apidocs/index.html">API Documentation<span class="extlink"> </span></a></li>
|
||||
<li><a href="http://snuggletex.sourceforge.net/maven/xref/index.html">Source Code Cross-Reference<span class="extlink"> </span></a></li>
|
||||
</ul>
|
||||
<h2>SnuggleTeX Project Links</h2>
|
||||
<ul>
|
||||
<li><a href="http://sourceforge.net/project/showfiles.php?group_id=221375">Download from SourceForge.net<span class="extlink"> </span></a></li>
|
||||
<li><a href="http://sourceforge.net/projects/snuggletex/">SnuggleTeX on SourceForge.net<span class="extlink"> </span></a></li>
|
||||
<li><a href="http://snuggletex.sourceforge.net/maven/">SnuggleTeX Maven Developer Reports<span class="extlink"> </span></a></li>
|
||||
<li><a href="https://www.wiki.ed.ac.uk/display/Physics/SnuggleTeX">SnuggleTeX Wiki<span class="extlink"> </span></a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<div id="maincontent">
|
||||
<div id="popup"></div>
|
||||
<div id="maininner">
|
||||
<h2>ASCIIMathML Enrichment Demo</h2>
|
||||
<h3>Input</h3>
|
||||
<p>
|
||||
This demo is similar to the
|
||||
<a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichnment Demo</a>
|
||||
but uses
|
||||
<a href="http://www1.chapman.edu/~jipsen/asciimath.html">ASCIIMathML</a> as
|
||||
an alternative input format, which provides real-time feedback as you
|
||||
type but can often generate MathML with odd semantics in it.
|
||||
SnuggleTeX includes some functionality that can to convert this raw MathML into
|
||||
something equivalent to its own MathML output, thereby allowing you to
|
||||
<a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">semantically enrich</a> it in
|
||||
certain simple cases, making ASCIIMathML a possibly viable input format
|
||||
for simple semantic maths.
|
||||
|
||||
</p>
|
||||
<p>
|
||||
To try the demo, simply enter some some ASCIIMathML into the box below.
|
||||
You should see a real time preview of this while you type.
|
||||
Then hit <tt>Go!</tt> to use SnuggleTeX to semantically enrich your
|
||||
input.
|
||||
|
||||
</p>
|
||||
<form action="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo" class="input"
|
||||
method="post">
|
||||
<div class="inputBox">
|
||||
ASCIIMath Input:
|
||||
<input id="asciiMathInput" name="asciiMathInput" type="text" value="" /><input id="asciiMathML" name="asciiMathML" type="hidden" /><input type="submit" value="Go!" /></div>
|
||||
</form>
|
||||
<h3>Live Preview</h3>
|
||||
<p>
|
||||
This is a MathML rendering of your input, generated by ASCIIMathML as you type.
|
||||
|
||||
</p>
|
||||
<div class="result">
|
||||
<div id="preview"> </div>
|
||||
</div>
|
||||
<p>
|
||||
This is the underlying MathML source generated by ASCIIMathML, again updated in real time.
|
||||
|
||||
</p>
|
||||
<div class="result"><pre id="previewSource"> </pre></div><script type="text/javascript">
|
||||
registerASCIIMathMLInputWidget('asciiMathInput', 'preview', 'asciiMathML', 'previewSource');
|
||||
var inputChanged = false;
|
||||
// Hide any existing output stuff in page on first change, as it will no longer be in sync
|
||||
jQuery(document).ready(function() {
|
||||
jQuery('#asciiMathInput').bind('keydown', function() {
|
||||
if (!inputChanged) jQuery('.outputContainer').css('visibility', 'hidden');
|
||||
inputChanged = true;
|
||||
});
|
||||
});
|
||||
</script><div class="outputContainer">
|
||||
<h3>Enhanced Presentation MathML</h3>
|
||||
<p>
|
||||
This shows the result of attempting to enrich the raw Presentation MathML
|
||||
generated by ASCIIMathML:
|
||||
|
||||
</p><pre class="result"><math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mrow>
|
||||
<mrow>
|
||||
<mrow>
|
||||
<mi>cos</mi>
|
||||
<mo>&ApplyFunction;</mo>
|
||||
<mfenced close=")" open="(">
|
||||
<mi>theta</mi>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
<mo>&sdot;</mo>
|
||||
<mfenced close="]" open="[">
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
<mo>+</mo>
|
||||
<mrow>
|
||||
<mi>i</mi>
|
||||
<mo>&sdot;</mo>
|
||||
<mrow>
|
||||
<mi>sin</mi>
|
||||
<mo>&ApplyFunction;</mo>
|
||||
<mfenced close=")" open="(">
|
||||
<mi>theta</mi>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
<mo>&sdot;</mo>
|
||||
<mfenced close="]" open="[">
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
</mrow>
|
||||
</math></pre><h3>Content MathML</h3>
|
||||
<p>
|
||||
This shows the result of an attempted
|
||||
<a href="documentation/content-mathml.html">conversion to Content MathML</a>:
|
||||
|
||||
</p><pre class="result"><math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<apply>
|
||||
<plus/>
|
||||
<apply>
|
||||
<times/>
|
||||
<apply>
|
||||
<cos/>
|
||||
<ci>theta</ci>
|
||||
</apply>
|
||||
<list>
|
||||
<matrix>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
</matrix>
|
||||
</list>
|
||||
</apply>
|
||||
<apply>
|
||||
<times/>
|
||||
<ci>i</ci>
|
||||
<apply>
|
||||
<sin/>
|
||||
<ci>theta</ci>
|
||||
</apply>
|
||||
<list>
|
||||
<matrix>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
</matrix>
|
||||
</list>
|
||||
</apply>
|
||||
</apply>
|
||||
</math></pre><h3>Maxima Input Form</h3>
|
||||
<p>
|
||||
This shows the result of an attempted
|
||||
<a href="documentation/maxima-input.html">conversion to Maxima Input syntax</a>:
|
||||
|
||||
</p>
|
||||
<p>
|
||||
The conversion from Content MathML to Maxima Input was not successful for
|
||||
this input.
|
||||
|
||||
</p>
|
||||
<table class="failures">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Failure Code</th>
|
||||
<th>Message</th>
|
||||
<th>XPath</th>
|
||||
<th>Context</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><a href="/snuggletex-webapp-1.2.2/documentation/error-codes.html#UMFG00">UMFG00</a></td>
|
||||
<td>Content MathML element matrix not supported</td>
|
||||
<td>apply[1]/apply[1]/list[1]/matrix[1]</td>
|
||||
<td><pre><matrix>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
</matrix></pre></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><a href="/snuggletex-webapp-1.2.2/documentation/error-codes.html#UMFG00">UMFG00</a></td>
|
||||
<td>Content MathML element matrix not supported</td>
|
||||
<td>apply[1]/apply[2]/list[1]/matrix[1]</td>
|
||||
<td><pre><matrix>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
</matrix></pre></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<h3>MathML Parallel Markup</h3>
|
||||
<p>
|
||||
This shows the enhanced Presentation MathML with other forms encapsulated
|
||||
as annotations:
|
||||
|
||||
</p><pre class="result"><math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<semantics>
|
||||
<mrow>
|
||||
<mrow>
|
||||
<mrow>
|
||||
<mi>cos</mi>
|
||||
<mo>&ApplyFunction;</mo>
|
||||
<mfenced close=")" open="(">
|
||||
<mi>theta</mi>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
<mo>&sdot;</mo>
|
||||
<mfenced close="]" open="[">
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
<mo>+</mo>
|
||||
<mrow>
|
||||
<mi>i</mi>
|
||||
<mo>&sdot;</mo>
|
||||
<mrow>
|
||||
<mi>sin</mi>
|
||||
<mo>&ApplyFunction;</mo>
|
||||
<mfenced close=")" open="(">
|
||||
<mi>theta</mi>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
<mo>&sdot;</mo>
|
||||
<mfenced close="]" open="[">
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
</mfenced>
|
||||
</mrow>
|
||||
</mrow>
|
||||
<annotation-xml encoding="MathML-Content">
|
||||
<apply>
|
||||
<plus/>
|
||||
<apply>
|
||||
<times/>
|
||||
<apply>
|
||||
<cos/>
|
||||
<ci>theta</ci>
|
||||
</apply>
|
||||
<list>
|
||||
<matrix>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
</matrix>
|
||||
</list>
|
||||
</apply>
|
||||
<apply>
|
||||
<times/>
|
||||
<ci>i</ci>
|
||||
<apply>
|
||||
<sin/>
|
||||
<ci>theta</ci>
|
||||
</apply>
|
||||
<list>
|
||||
<matrix>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
</matrix>
|
||||
</list>
|
||||
</apply>
|
||||
</apply>
|
||||
</annotation-xml>
|
||||
<annotation encoding="ASCIIMathInput"/>
|
||||
<annotation-xml encoding="Maxima-upconversion-failures">
|
||||
<s:fail xmlns:s="http://www.ph.ed.ac.uk/snuggletex" code="UMFG00"
|
||||
message="Content MathML element matrix not supported">
|
||||
<s:arg>matrix</s:arg>
|
||||
<s:xpath>apply[1]/apply[1]/list[1]/matrix[1]</s:xpath>
|
||||
<s:context>
|
||||
<matrix>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
</matrix>
|
||||
</s:context>
|
||||
</s:fail>
|
||||
<s:fail xmlns:s="http://www.ph.ed.ac.uk/snuggletex" code="UMFG00"
|
||||
message="Content MathML element matrix not supported">
|
||||
<s:arg>matrix</s:arg>
|
||||
<s:xpath>apply[1]/apply[2]/list[1]/matrix[1]</s:xpath>
|
||||
<s:context>
|
||||
<matrix>
|
||||
<vector>
|
||||
<cn>0</cn>
|
||||
<cn>1</cn>
|
||||
</vector>
|
||||
<vector>
|
||||
<cn>1</cn>
|
||||
<cn>0</cn>
|
||||
</vector>
|
||||
</matrix>
|
||||
</s:context>
|
||||
</s:fail>
|
||||
</annotation-xml>
|
||||
</semantics>
|
||||
</math></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="copyright">
|
||||
<p>
|
||||
SnuggleTeX Release 1.2.2 —
|
||||
<a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a><br />
|
||||
Copyright © 2009
|
||||
<a href="http://www.ph.ed.ac.uk">The School of Physics and Astronomy</a>,
|
||||
<a href="http://www.ed.ac.uk">The University of Edinburgh</a>.
|
||||
<br />
|
||||
For more information, contact
|
||||
<a href="http://www.ph.ed.ac.uk/elearning/contacts/#dmckain">David McKain</a>.
|
||||
|
||||
</p>
|
||||
<p>
|
||||
The University of Edinburgh is a charitable body, registered in Scotland,
|
||||
with registration number SC005336.
|
||||
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
187
common/lib/capa/capa/tests/test_files/snuggletex_wrong.html
Normal file
187
common/lib/capa/capa/tests/test_files/snuggletex_wrong.html
Normal file
@@ -0,0 +1,187 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html
|
||||
PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
|
||||
<head>
|
||||
<meta content="application/xhtml+xml; charset=UTF-8" http-equiv="Content-Type" />
|
||||
<meta content="SnuggleTeX" name="Generator" />
|
||||
<meta content="SnuggleTeX Documentation" name="description" />
|
||||
<meta content="David McKain" name="author" />
|
||||
<meta content="The University of Edinburgh" name="publisher" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/core.css" rel="stylesheet" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/webapp.css" rel="stylesheet" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/snuggletex.css" rel="stylesheet" />
|
||||
<link href="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.css"
|
||||
rel="stylesheet" /><script src="/snuggletex-webapp-1.2.2/includes/jquery.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/jquery-ui-1.7.2.custom.js"
|
||||
type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/webapp.js" type="text/javascript"></script><title>SnuggleTeX - ASCIIMathML Enrichment Demo</title><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathML.js" type="text/javascript"></script><script src="/snuggletex-webapp-1.2.2/includes/ASCIIMathMLwidget.js"
|
||||
type="text/javascript"></script></head>
|
||||
<body id="asciiMathMLUpConversionDemo">
|
||||
<table border="0" cellpadding="0" cellspacing="0" id="header" width="100%">
|
||||
<tr>
|
||||
<td align="left" id="logo" valign="top"><a class="headertext" href="http://www.ed.ac.uk"><img alt="The University of Edinburgh" height="84"
|
||||
src="/snuggletex-webapp-1.2.2/includes/uoe_logo.jpg"
|
||||
width="84" /></a></td>
|
||||
<td align="left">
|
||||
<h3>THE UNIVERSITY of EDINBURGH</h3>
|
||||
<h1>SCHOOL OF PHYSICS AND ASTRONOMY</h1>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
<h1 id="location"><a href="/snuggletex-webapp-1.2.2">SnuggleTeX (1.2.2)</a></h1>
|
||||
<div id="content">
|
||||
<div id="skipnavigation"><a href="#maincontent">Skip Navigation</a></div>
|
||||
<div id="navigation">
|
||||
<div id="navinner">
|
||||
<h2>About SnuggleTeX</h2>
|
||||
<ul>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/overview-and-features.html">Overview & Features</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/use-cases.html">Why Use SnuggleTeX?</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/license.html">License</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a></li>
|
||||
</ul>
|
||||
<h2>Demos & Samples</h2>
|
||||
<ul>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/MathInputDemo">Simple Math Input Demo</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/FullLaTeXInputDemo">Full LaTeX Input Demo</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichment Demo</a></li>
|
||||
<li><a class="selected" href="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo">ASCIIMathML Enrichment Demo</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output-samples.html">Web Output Samples</a></li>
|
||||
</ul>
|
||||
<h2>User Guide</h2>
|
||||
<ul>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/getting-snuggletex.html">Getting SnuggleTeX</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/software-requirements.html">Software Requirements</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/your-classpath.html">Setting up Your ClassPath</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/examples.html">Examples</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/basic-usage.html">Basic Usage</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/inputs.html">Parsing LaTeX Inputs</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/xml-or-dom-output.html">Creating XML String or DOM Outputs</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/web-output.html">Creating Web Pages</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/error-reporting.html">Error Reporting</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/supported-latex.html">Supported LaTeX</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/advanced-usage.html">Advanced Usage</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">Semantic Enrichment</a></li>
|
||||
<li><a href="/snuggletex-webapp-1.2.2/documentation/migrating-from-older-versions.html">Migrating from older versions</a></li>
|
||||
<li><a href="http://snuggletex.sourceforge.net/maven/apidocs/index.html">API Documentation<span class="extlink"> </span></a></li>
|
||||
<li><a href="http://snuggletex.sourceforge.net/maven/xref/index.html">Source Code Cross-Reference<span class="extlink"> </span></a></li>
|
||||
</ul>
|
||||
<h2>SnuggleTeX Project Links</h2>
|
||||
<ul>
|
||||
<li><a href="http://sourceforge.net/project/showfiles.php?group_id=221375">Download from SourceForge.net<span class="extlink"> </span></a></li>
|
||||
<li><a href="http://sourceforge.net/projects/snuggletex/">SnuggleTeX on SourceForge.net<span class="extlink"> </span></a></li>
|
||||
<li><a href="http://snuggletex.sourceforge.net/maven/">SnuggleTeX Maven Developer Reports<span class="extlink"> </span></a></li>
|
||||
<li><a href="https://www.wiki.ed.ac.uk/display/Physics/SnuggleTeX">SnuggleTeX Wiki<span class="extlink"> </span></a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<div id="maincontent">
|
||||
<div id="popup"></div>
|
||||
<div id="maininner">
|
||||
<h2>ASCIIMathML Enrichment Demo</h2>
|
||||
<h3>Input</h3>
|
||||
<p>
|
||||
This demo is similar to the
|
||||
<a href="/snuggletex-webapp-1.2.2/UpConversionDemo">MathML Semantic Enrichnment Demo</a>
|
||||
but uses
|
||||
<a href="http://www1.chapman.edu/~jipsen/asciimath.html">ASCIIMathML</a> as
|
||||
an alternative input format, which provides real-time feedback as you
|
||||
type but can often generate MathML with odd semantics in it.
|
||||
SnuggleTeX includes some functionality that can to convert this raw MathML into
|
||||
something equivalent to its own MathML output, thereby allowing you to
|
||||
<a href="/snuggletex-webapp-1.2.2/documentation/semantic-enrichment.html">semantically enrich</a> it in
|
||||
certain simple cases, making ASCIIMathML a possibly viable input format
|
||||
for simple semantic maths.
|
||||
|
||||
</p>
|
||||
<p>
|
||||
To try the demo, simply enter some some ASCIIMathML into the box below.
|
||||
You should see a real time preview of this while you type.
|
||||
Then hit <tt>Go!</tt> to use SnuggleTeX to semantically enrich your
|
||||
input.
|
||||
|
||||
</p>
|
||||
<form action="/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo" class="input"
|
||||
method="post">
|
||||
<div class="inputBox">
|
||||
ASCIIMath Input:
|
||||
<input id="asciiMathInput" name="asciiMathInput" type="text" value="" /><input id="asciiMathML" name="asciiMathML" type="hidden" /><input type="submit" value="Go!" /></div>
|
||||
</form>
|
||||
<h3>Live Preview</h3>
|
||||
<p>
|
||||
This is a MathML rendering of your input, generated by ASCIIMathML as you type.
|
||||
|
||||
</p>
|
||||
<div class="result">
|
||||
<div id="preview"> </div>
|
||||
</div>
|
||||
<p>
|
||||
This is the underlying MathML source generated by ASCIIMathML, again updated in real time.
|
||||
|
||||
</p>
|
||||
<div class="result"><pre id="previewSource"> </pre></div><script type="text/javascript">
|
||||
registerASCIIMathMLInputWidget('asciiMathInput', 'preview', 'asciiMathML', 'previewSource');
|
||||
var inputChanged = false;
|
||||
// Hide any existing output stuff in page on first change, as it will no longer be in sync
|
||||
jQuery(document).ready(function() {
|
||||
jQuery('#asciiMathInput').bind('keydown', function() {
|
||||
if (!inputChanged) jQuery('.outputContainer').css('visibility', 'hidden');
|
||||
inputChanged = true;
|
||||
});
|
||||
});
|
||||
</script><div class="outputContainer">
|
||||
<h3>Enhanced Presentation MathML</h3>
|
||||
<p>
|
||||
This shows the result of attempting to enrich the raw Presentation MathML
|
||||
generated by ASCIIMathML:
|
||||
|
||||
</p><pre class="result"><math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mn>2</mn>
|
||||
</math></pre><h3>Content MathML</h3>
|
||||
<p>
|
||||
This shows the result of an attempted
|
||||
<a href="documentation/content-mathml.html">conversion to Content MathML</a>:
|
||||
|
||||
</p><pre class="result"><math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<cn>2</cn>
|
||||
</math></pre><h3>Maxima Input Form</h3>
|
||||
<p>
|
||||
This shows the result of an attempted
|
||||
<a href="documentation/maxima-input.html">conversion to Maxima Input syntax</a>:
|
||||
|
||||
</p><pre class="result">2</pre><h3>MathML Parallel Markup</h3>
|
||||
<p>
|
||||
This shows the enhanced Presentation MathML with other forms encapsulated
|
||||
as annotations:
|
||||
|
||||
</p><pre class="result"><math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<semantics>
|
||||
<mn>2</mn>
|
||||
<annotation-xml encoding="MathML-Content">
|
||||
<cn>2</cn>
|
||||
</annotation-xml>
|
||||
<annotation encoding="ASCIIMathInput"/>
|
||||
<annotation encoding="Maxima">2</annotation>
|
||||
</semantics>
|
||||
</math></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="copyright">
|
||||
<p>
|
||||
SnuggleTeX Release 1.2.2 —
|
||||
<a href="/snuggletex-webapp-1.2.2/documentation/release-notes.html">Release Notes</a><br />
|
||||
Copyright © 2009
|
||||
<a href="http://www.ph.ed.ac.uk">The School of Physics and Astronomy</a>,
|
||||
<a href="http://www.ed.ac.uk">The University of Edinburgh</a>.
|
||||
<br />
|
||||
For more information, contact
|
||||
<a href="http://www.ph.ed.ac.uk/elearning/contacts/#dmckain">David McKain</a>.
|
||||
|
||||
</p>
|
||||
<p>
|
||||
The University of Edinburgh is a charitable body, registered in Scotland,
|
||||
with registration number SC005336.
|
||||
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -6,12 +6,15 @@ import json
|
||||
|
||||
import mock
|
||||
|
||||
from capa.capa_problem import LoncapaProblem
|
||||
from .response_xml_factory import StringResponseXMLFactory, CustomResponseXMLFactory
|
||||
from . import test_system
|
||||
from . import test_system, new_loncapa_problem
|
||||
|
||||
class CapaHtmlRenderTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(CapaHtmlRenderTest, self).setUp()
|
||||
self.system = test_system()
|
||||
|
||||
def test_blank_problem(self):
|
||||
"""
|
||||
It's important that blank problems don't break, since that's
|
||||
@@ -20,7 +23,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
xml_str = "<problem> </problem>"
|
||||
|
||||
# Create the problem
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str)
|
||||
|
||||
# Render the HTML
|
||||
rendered_html = etree.XML(problem.get_html())
|
||||
@@ -39,7 +42,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
""")
|
||||
|
||||
# Create the problem
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str, system=self.system)
|
||||
|
||||
# Render the HTML
|
||||
rendered_html = etree.XML(problem.get_html())
|
||||
@@ -49,9 +52,6 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
self.assertEqual(test_element.tag, "test")
|
||||
self.assertEqual(test_element.text, "Test include")
|
||||
|
||||
|
||||
|
||||
|
||||
def test_process_outtext(self):
|
||||
# Generate some XML with <startouttext /> and <endouttext />
|
||||
xml_str = textwrap.dedent("""
|
||||
@@ -61,7 +61,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
""")
|
||||
|
||||
# Create the problem
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str)
|
||||
|
||||
# Render the HTML
|
||||
rendered_html = etree.XML(problem.get_html())
|
||||
@@ -80,7 +80,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
""")
|
||||
|
||||
# Create the problem
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str)
|
||||
|
||||
# Render the HTML
|
||||
rendered_html = etree.XML(problem.get_html())
|
||||
@@ -98,7 +98,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
""")
|
||||
|
||||
# Create the problem
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str)
|
||||
|
||||
# Render the HTML
|
||||
rendered_html = etree.XML(problem.get_html())
|
||||
@@ -117,11 +117,12 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
xml_str = StringResponseXMLFactory().build_xml(**kwargs)
|
||||
|
||||
# Mock out the template renderer
|
||||
test_system.render_template = mock.Mock()
|
||||
test_system.render_template.return_value = "<div>Input Template Render</div>"
|
||||
the_system = test_system()
|
||||
the_system.render_template = mock.Mock()
|
||||
the_system.render_template.return_value = "<div>Input Template Render</div>"
|
||||
|
||||
# Create the problem and render the HTML
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str, system=the_system)
|
||||
rendered_html = etree.XML(problem.get_html())
|
||||
|
||||
# Expect problem has been turned into a <div>
|
||||
@@ -166,7 +167,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
mock.call('textline.html', expected_textline_context),
|
||||
mock.call('solutionspan.html', expected_solution_context)]
|
||||
|
||||
self.assertEqual(test_system.render_template.call_args_list,
|
||||
self.assertEqual(the_system.render_template.call_args_list,
|
||||
expected_calls)
|
||||
|
||||
|
||||
@@ -184,7 +185,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
xml_str = CustomResponseXMLFactory().build_xml(**kwargs)
|
||||
|
||||
# Create the problem and render the html
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str)
|
||||
|
||||
# Grade the problem
|
||||
correctmap = problem.grade_answers({'1_2_1': 'test'})
|
||||
@@ -219,7 +220,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
""")
|
||||
|
||||
# Create the problem and render the HTML
|
||||
problem = LoncapaProblem(xml_str, '1', system=test_system)
|
||||
problem = new_loncapa_problem(xml_str)
|
||||
rendered_html = etree.XML(problem.get_html())
|
||||
|
||||
# Expect that the variable $test has been replaced with its value
|
||||
@@ -227,7 +228,7 @@ class CapaHtmlRenderTest(unittest.TestCase):
|
||||
self.assertEqual(span_element.get('attr'), "TEST")
|
||||
|
||||
def _create_test_file(self, path, content_str):
|
||||
test_fp = test_system.filestore.open(path, "w")
|
||||
test_fp = self.system.filestore.open(path, "w")
|
||||
test_fp.write(content_str)
|
||||
test_fp.close()
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ class OptionInputTest(unittest.TestCase):
|
||||
state = {'value': 'Down',
|
||||
'id': 'sky_input',
|
||||
'status': 'answered'}
|
||||
option_input = lookup_tag('optioninput')(test_system, element, state)
|
||||
option_input = lookup_tag('optioninput')(test_system(), element, state)
|
||||
|
||||
context = option_input._get_render_context()
|
||||
|
||||
@@ -92,7 +92,7 @@ class ChoiceGroupTest(unittest.TestCase):
|
||||
'id': 'sky_input',
|
||||
'status': 'answered'}
|
||||
|
||||
the_input = lookup_tag(tag)(test_system, element, state)
|
||||
the_input = lookup_tag(tag)(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -142,7 +142,7 @@ class JavascriptInputTest(unittest.TestCase):
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
state = {'value': '3', }
|
||||
the_input = lookup_tag('javascriptinput')(test_system, element, state)
|
||||
the_input = lookup_tag('javascriptinput')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -170,7 +170,7 @@ class TextLineTest(unittest.TestCase):
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
state = {'value': 'BumbleBee', }
|
||||
the_input = lookup_tag('textline')(test_system, element, state)
|
||||
the_input = lookup_tag('textline')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -198,7 +198,7 @@ class TextLineTest(unittest.TestCase):
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
state = {'value': 'BumbleBee', }
|
||||
the_input = lookup_tag('textline')(test_system, element, state)
|
||||
the_input = lookup_tag('textline')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -236,7 +236,7 @@ class TextLineTest(unittest.TestCase):
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
state = {'value': 'BumbleBee', }
|
||||
the_input = lookup_tag('textline')(test_system, element, state)
|
||||
the_input = lookup_tag('textline')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -274,7 +274,7 @@ class FileSubmissionTest(unittest.TestCase):
|
||||
'status': 'incomplete',
|
||||
'feedback': {'message': '3'}, }
|
||||
input_class = lookup_tag('filesubmission')
|
||||
the_input = input_class(test_system, element, state)
|
||||
the_input = input_class(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -319,7 +319,7 @@ class CodeInputTest(unittest.TestCase):
|
||||
'feedback': {'message': '3'}, }
|
||||
|
||||
input_class = lookup_tag('codeinput')
|
||||
the_input = input_class(test_system, element, state)
|
||||
the_input = input_class(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -368,7 +368,7 @@ class MatlabTest(unittest.TestCase):
|
||||
'feedback': {'message': '3'}, }
|
||||
|
||||
self.input_class = lookup_tag('matlabinput')
|
||||
self.the_input = self.input_class(test_system, elt, state)
|
||||
self.the_input = self.input_class(test_system(), elt, state)
|
||||
|
||||
def test_rendering(self):
|
||||
context = self.the_input._get_render_context()
|
||||
@@ -396,7 +396,7 @@ class MatlabTest(unittest.TestCase):
|
||||
'feedback': {'message': '3'}, }
|
||||
elt = etree.fromstring(self.xml)
|
||||
|
||||
the_input = self.input_class(test_system, elt, state)
|
||||
the_input = self.input_class(test_system(), elt, state)
|
||||
context = the_input._get_render_context()
|
||||
|
||||
expected = {'id': 'prob_1_2',
|
||||
@@ -423,7 +423,7 @@ class MatlabTest(unittest.TestCase):
|
||||
}
|
||||
elt = etree.fromstring(self.xml)
|
||||
|
||||
the_input = self.input_class(test_system, elt, state)
|
||||
the_input = self.input_class(test_system(), elt, state)
|
||||
context = the_input._get_render_context()
|
||||
expected = {'id': 'prob_1_2',
|
||||
'value': 'print "good evening"',
|
||||
@@ -448,7 +448,7 @@ class MatlabTest(unittest.TestCase):
|
||||
}
|
||||
elt = etree.fromstring(self.xml)
|
||||
|
||||
the_input = self.input_class(test_system, elt, state)
|
||||
the_input = self.input_class(test_system(), elt, state)
|
||||
context = the_input._get_render_context()
|
||||
expected = {'id': 'prob_1_2',
|
||||
'value': 'print "good evening"',
|
||||
@@ -470,7 +470,7 @@ class MatlabTest(unittest.TestCase):
|
||||
get = {'submission': 'x = 1234;'}
|
||||
response = self.the_input.handle_ajax("plot", get)
|
||||
|
||||
test_system.xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY)
|
||||
test_system().xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY)
|
||||
|
||||
self.assertTrue(response['success'])
|
||||
self.assertTrue(self.the_input.input_state['queuekey'] is not None)
|
||||
@@ -479,13 +479,12 @@ class MatlabTest(unittest.TestCase):
|
||||
def test_plot_data_failure(self):
|
||||
get = {'submission': 'x = 1234;'}
|
||||
error_message = 'Error message!'
|
||||
test_system.xqueue['interface'].send_to_queue.return_value = (1, error_message)
|
||||
test_system().xqueue['interface'].send_to_queue.return_value = (1, error_message)
|
||||
response = self.the_input.handle_ajax("plot", get)
|
||||
self.assertFalse(response['success'])
|
||||
self.assertEqual(response['message'], error_message)
|
||||
self.assertTrue('queuekey' not in self.the_input.input_state)
|
||||
self.assertTrue('queuestate' not in self.the_input.input_state)
|
||||
test_system.xqueue['interface'].send_to_queue.return_value = (0, 'Success!')
|
||||
|
||||
def test_ungraded_response_success(self):
|
||||
queuekey = 'abcd'
|
||||
@@ -496,7 +495,7 @@ class MatlabTest(unittest.TestCase):
|
||||
'feedback': {'message': '3'}, }
|
||||
elt = etree.fromstring(self.xml)
|
||||
|
||||
the_input = self.input_class(test_system, elt, state)
|
||||
the_input = self.input_class(test_system(), elt, state)
|
||||
inner_msg = 'hello!'
|
||||
queue_msg = json.dumps({'msg': inner_msg})
|
||||
|
||||
@@ -514,7 +513,7 @@ class MatlabTest(unittest.TestCase):
|
||||
'feedback': {'message': '3'}, }
|
||||
elt = etree.fromstring(self.xml)
|
||||
|
||||
the_input = self.input_class(test_system, elt, state)
|
||||
the_input = self.input_class(test_system(), elt, state)
|
||||
inner_msg = 'hello!'
|
||||
queue_msg = json.dumps({'msg': inner_msg})
|
||||
|
||||
@@ -553,7 +552,7 @@ class SchematicTest(unittest.TestCase):
|
||||
state = {'value': value,
|
||||
'status': 'unsubmitted'}
|
||||
|
||||
the_input = lookup_tag('schematic')(test_system, element, state)
|
||||
the_input = lookup_tag('schematic')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -592,7 +591,7 @@ class ImageInputTest(unittest.TestCase):
|
||||
state = {'value': value,
|
||||
'status': 'unsubmitted'}
|
||||
|
||||
the_input = lookup_tag('imageinput')(test_system, element, state)
|
||||
the_input = lookup_tag('imageinput')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -643,7 +642,7 @@ class CrystallographyTest(unittest.TestCase):
|
||||
state = {'value': value,
|
||||
'status': 'unsubmitted'}
|
||||
|
||||
the_input = lookup_tag('crystallography')(test_system, element, state)
|
||||
the_input = lookup_tag('crystallography')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -681,7 +680,7 @@ class VseprTest(unittest.TestCase):
|
||||
state = {'value': value,
|
||||
'status': 'unsubmitted'}
|
||||
|
||||
the_input = lookup_tag('vsepr_input')(test_system, element, state)
|
||||
the_input = lookup_tag('vsepr_input')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
@@ -708,7 +707,7 @@ class ChemicalEquationTest(unittest.TestCase):
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
state = {'value': 'H2OYeah', }
|
||||
self.the_input = lookup_tag('chemicalequationinput')(test_system, element, state)
|
||||
self.the_input = lookup_tag('chemicalequationinput')(test_system(), element, state)
|
||||
|
||||
def test_rendering(self):
|
||||
''' Verify that the render context matches the expected render context'''
|
||||
@@ -783,7 +782,7 @@ class DragAndDropTest(unittest.TestCase):
|
||||
]
|
||||
}
|
||||
|
||||
the_input = lookup_tag('drag_and_drop_input')(test_system, element, state)
|
||||
the_input = lookup_tag('drag_and_drop_input')(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
expected = {'id': 'prob_1_2',
|
||||
@@ -832,7 +831,7 @@ class AnnotationInputTest(unittest.TestCase):
|
||||
|
||||
tag = 'annotationinput'
|
||||
|
||||
the_input = lookup_tag(tag)(test_system, element, state)
|
||||
the_input = lookup_tag(tag)(test_system(), element, state)
|
||||
|
||||
context = the_input._get_render_context()
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
Tests of responsetypes
|
||||
"""
|
||||
|
||||
|
||||
from datetime import datetime
|
||||
import json
|
||||
from nose.plugins.skip import SkipTest
|
||||
@@ -10,10 +9,11 @@ import os
|
||||
import random
|
||||
import unittest
|
||||
import textwrap
|
||||
import mock
|
||||
import textwrap
|
||||
|
||||
from . import test_system
|
||||
from . import new_loncapa_problem, test_system
|
||||
|
||||
import capa.capa_problem as lcp
|
||||
from capa.responsetypes import LoncapaProblemError, \
|
||||
StudentInputError, ResponseError
|
||||
from capa.correctmap import CorrectMap
|
||||
@@ -30,9 +30,9 @@ class ResponseTest(unittest.TestCase):
|
||||
if self.xml_factory_class:
|
||||
self.xml_factory = self.xml_factory_class()
|
||||
|
||||
def build_problem(self, **kwargs):
|
||||
def build_problem(self, system=None, **kwargs):
|
||||
xml = self.xml_factory.build_xml(**kwargs)
|
||||
return lcp.LoncapaProblem(xml, '1', system=test_system)
|
||||
return new_loncapa_problem(xml, system=system)
|
||||
|
||||
def assert_grade(self, problem, submission, expected_correctness, msg=None):
|
||||
input_dict = {'1_2_1': submission}
|
||||
@@ -184,94 +184,151 @@ class ImageResponseTest(ResponseTest):
|
||||
self.assert_answer_format(problem)
|
||||
|
||||
|
||||
class SymbolicResponseTest(unittest.TestCase):
|
||||
def test_sr_grade(self):
|
||||
raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test
|
||||
symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
|
||||
'1_2_1_dynamath': '''
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mrow>
|
||||
<mi>cos</mi>
|
||||
<mrow>
|
||||
<mo>(</mo>
|
||||
<mi>θ</mi>
|
||||
<mo>)</mo>
|
||||
</mrow>
|
||||
</mrow>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mo>[</mo>
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
<mo>]</mo>
|
||||
</mrow>
|
||||
<mo>+</mo>
|
||||
<mi>i</mi>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mi>sin</mi>
|
||||
<mrow>
|
||||
<mo>(</mo>
|
||||
<mi>θ</mi>
|
||||
<mo>)</mo>
|
||||
</mrow>
|
||||
</mrow>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mo>[</mo>
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
<mo>]</mo>
|
||||
</mrow>
|
||||
</mstyle>
|
||||
</math>
|
||||
''',
|
||||
}
|
||||
wrong_answers = {'1_2_1': '2',
|
||||
'1_2_1_dynamath': '''
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mn>2</mn>
|
||||
</mstyle>
|
||||
</math>''',
|
||||
}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
class SymbolicResponseTest(ResponseTest):
|
||||
from response_xml_factory import SymbolicResponseXMLFactory
|
||||
xml_factory_class = SymbolicResponseXMLFactory
|
||||
|
||||
def test_grade_single_input(self):
|
||||
problem = self.build_problem(math_display=True,
|
||||
expect="2*x+3*y")
|
||||
|
||||
# Correct answers
|
||||
correct_inputs = [
|
||||
('2x+3y', textwrap.dedent("""
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mn>2</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
|
||||
</mstyle></math>""")),
|
||||
|
||||
('x+x+3y', textwrap.dedent("""
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mi>x</mi><mo>+</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
|
||||
</mstyle></math>""")),
|
||||
]
|
||||
|
||||
for (input_str, input_mathml) in correct_inputs:
|
||||
self._assert_symbolic_grade(problem, input_str, input_mathml, 'correct')
|
||||
|
||||
# Incorrect answers
|
||||
incorrect_inputs = [
|
||||
('0', ''),
|
||||
('4x+3y', textwrap.dedent("""
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mn>4</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
|
||||
</mstyle></math>""")),
|
||||
]
|
||||
|
||||
for (input_str, input_mathml) in incorrect_inputs:
|
||||
self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
|
||||
|
||||
|
||||
def test_complex_number_grade(self):
|
||||
problem = self.build_problem(math_display=True,
|
||||
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
|
||||
options=["matrix", "imaginary"])
|
||||
|
||||
# For LaTeX-style inputs, symmath_check() will try to contact
|
||||
# a server to convert the input to MathML.
|
||||
# We mock out the server, simulating the response that it would give
|
||||
# for this input.
|
||||
import requests
|
||||
dirpath = os.path.dirname(__file__)
|
||||
correct_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_correct.html")).read().decode('utf8')
|
||||
wrong_snuggletex_response = open(os.path.join(dirpath, "test_files/snuggletex_wrong.html")).read().decode('utf8')
|
||||
|
||||
# Correct answer
|
||||
with mock.patch.object(requests, 'post') as mock_post:
|
||||
|
||||
# Simulate what the LaTeX-to-MathML server would
|
||||
# send for the correct response input
|
||||
mock_post.return_value.text = correct_snuggletex_response
|
||||
|
||||
self._assert_symbolic_grade(problem,
|
||||
"cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]",
|
||||
textwrap.dedent("""
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mrow>
|
||||
<mi>cos</mi>
|
||||
<mrow><mo>(</mo><mi>θ</mi><mo>)</mo></mrow>
|
||||
</mrow>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mo>[</mo>
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd><mn>1</mn></mtd><mtd><mn>0</mn></mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd><mn>0</mn></mtd><mtd><mn>1</mn></mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
<mo>]</mo>
|
||||
</mrow>
|
||||
<mo>+</mo>
|
||||
<mi>i</mi>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mi>sin</mi>
|
||||
<mrow>
|
||||
<mo>(</mo><mi>θ</mi><mo>)</mo>
|
||||
</mrow>
|
||||
</mrow>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mo>[</mo>
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd><mn>0</mn></mtd><mtd><mn>1</mn></mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd><mn>1</mn></mtd><mtd><mn>0</mn></mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
<mo>]</mo>
|
||||
</mrow>
|
||||
</mstyle>
|
||||
</math>
|
||||
"""),
|
||||
'correct')
|
||||
|
||||
# Incorrect answer
|
||||
with mock.patch.object(requests, 'post') as mock_post:
|
||||
|
||||
# Simulate what the LaTeX-to-MathML server would
|
||||
# send for the incorrect response input
|
||||
mock_post.return_value.text = wrong_snuggletex_response
|
||||
|
||||
self._assert_symbolic_grade(problem, "2",
|
||||
textwrap.dedent("""
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true"><mn>2</mn></mstyle>
|
||||
</math>
|
||||
"""),
|
||||
'incorrect')
|
||||
|
||||
def test_multiple_inputs_exception(self):
|
||||
|
||||
# Should not allow multiple inputs, since we specify
|
||||
# only one "expect" value
|
||||
with self.assertRaises(Exception):
|
||||
problem = self.build_problem(math_display=True,
|
||||
expect="2*x+3*y",
|
||||
num_inputs=3)
|
||||
|
||||
def _assert_symbolic_grade(self, problem,
|
||||
student_input,
|
||||
dynamath_input,
|
||||
expected_correctness):
|
||||
input_dict = {'1_2_1': str(student_input),
|
||||
'1_2_1_dynamath': str(dynamath_input) }
|
||||
|
||||
correct_map = problem.grade_answers(input_dict)
|
||||
|
||||
self.assertEqual(correct_map.get_correctness('1_2_1'),
|
||||
expected_correctness)
|
||||
|
||||
|
||||
class OptionResponseTest(ResponseTest):
|
||||
@@ -531,6 +588,22 @@ class StringResponseTest(ResponseTest):
|
||||
correct_map = problem.grade_answers(input_dict)
|
||||
self.assertEquals(correct_map.get_hint('1_2_1'), "")
|
||||
|
||||
def test_computed_hints(self):
|
||||
problem = self.build_problem(
|
||||
answer="Michigan",
|
||||
hintfn="gimme_a_hint",
|
||||
script = textwrap.dedent("""
|
||||
def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
|
||||
aid = answer_ids[0]
|
||||
answer = student_answers[aid]
|
||||
new_cmap.set_hint_and_mode(aid, answer+"??", "always")
|
||||
""")
|
||||
)
|
||||
|
||||
input_dict = {'1_2_1': 'Hello'}
|
||||
correct_map = problem.grade_answers(input_dict)
|
||||
self.assertEquals(correct_map.get_hint('1_2_1'), "Hello??")
|
||||
|
||||
|
||||
class CodeResponseTest(ResponseTest):
|
||||
from response_xml_factory import CodeResponseXMLFactory
|
||||
@@ -710,16 +783,37 @@ class JavascriptResponseTest(ResponseTest):
|
||||
coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
|
||||
os.system("node_modules/.bin/coffee -c %s" % (coffee_file_path))
|
||||
|
||||
problem = self.build_problem(generator_src="test_problem_generator.js",
|
||||
grader_src="test_problem_grader.js",
|
||||
display_class="TestProblemDisplay",
|
||||
display_src="test_problem_display.js",
|
||||
param_dict={'value': '4'})
|
||||
system = test_system()
|
||||
system.can_execute_unsafe_code = lambda: True
|
||||
problem = self.build_problem(
|
||||
system=system,
|
||||
generator_src="test_problem_generator.js",
|
||||
grader_src="test_problem_grader.js",
|
||||
display_class="TestProblemDisplay",
|
||||
display_src="test_problem_display.js",
|
||||
param_dict={'value': '4'},
|
||||
)
|
||||
|
||||
# Test that we get graded correctly
|
||||
self.assert_grade(problem, json.dumps({0: 4}), "correct")
|
||||
self.assert_grade(problem, json.dumps({0: 5}), "incorrect")
|
||||
|
||||
def test_cant_execute_javascript(self):
|
||||
# If the system says to disallow unsafe code execution, then making
|
||||
# this problem will raise an exception.
|
||||
system = test_system()
|
||||
system.can_execute_unsafe_code = lambda: False
|
||||
|
||||
with self.assertRaises(LoncapaProblemError):
|
||||
problem = self.build_problem(
|
||||
system=system,
|
||||
generator_src="test_problem_generator.js",
|
||||
grader_src="test_problem_grader.js",
|
||||
display_class="TestProblemDisplay",
|
||||
display_src="test_problem_display.js",
|
||||
param_dict={'value': '4'},
|
||||
)
|
||||
|
||||
|
||||
class NumericalResponseTest(ResponseTest):
|
||||
from response_xml_factory import NumericalResponseXMLFactory
|
||||
@@ -853,9 +947,8 @@ class CustomResponseTest(ResponseTest):
|
||||
#
|
||||
# 'answer_given' is the answer the student gave (if there is just one input)
|
||||
# or an ordered list of answers (if there are multiple inputs)
|
||||
#
|
||||
#
|
||||
# The function should return a dict of the form
|
||||
#
|
||||
# The function should return a dict of the form
|
||||
# { 'ok': BOOL, 'msg': STRING }
|
||||
#
|
||||
script = textwrap.dedent("""
|
||||
@@ -964,6 +1057,35 @@ class CustomResponseTest(ResponseTest):
|
||||
self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
|
||||
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
|
||||
|
||||
def test_function_code_with_extra_args(self):
|
||||
script = textwrap.dedent("""\
|
||||
def check_func(expect, answer_given, options, dynamath):
|
||||
assert options == "xyzzy", "Options was %r" % options
|
||||
return {'ok': answer_given == expect, 'msg': 'Message text'}
|
||||
""")
|
||||
|
||||
problem = self.build_problem(script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath")
|
||||
|
||||
# Correct answer
|
||||
input_dict = {'1_2_1': '42'}
|
||||
correct_map = problem.grade_answers(input_dict)
|
||||
|
||||
correctness = correct_map.get_correctness('1_2_1')
|
||||
msg = correct_map.get_msg('1_2_1')
|
||||
|
||||
self.assertEqual(correctness, 'correct')
|
||||
self.assertEqual(msg, "Message text")
|
||||
|
||||
# Incorrect answer
|
||||
input_dict = {'1_2_1': '0'}
|
||||
correct_map = problem.grade_answers(input_dict)
|
||||
|
||||
correctness = correct_map.get_correctness('1_2_1')
|
||||
msg = correct_map.get_msg('1_2_1')
|
||||
|
||||
self.assertEqual(correctness, 'incorrect')
|
||||
self.assertEqual(msg, "Message text")
|
||||
|
||||
def test_multiple_inputs_return_one_status(self):
|
||||
# When given multiple inputs, the 'answer_given' argument
|
||||
# to the check_func() is a list of inputs
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from .calc import evaluator, UndefinedVariable
|
||||
from calc import evaluator, UndefinedVariable
|
||||
from cmath import isinf
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
@@ -4,5 +4,5 @@ setup(
|
||||
name="capa",
|
||||
version="0.1",
|
||||
packages=find_packages(exclude=["tests"]),
|
||||
install_requires=['distribute==0.6.28', 'pyparsing==1.5.6'],
|
||||
install_requires=["distribute==0.6.28"],
|
||||
)
|
||||
|
||||
@@ -736,4 +736,4 @@ def test6(): # imaginary numbers
|
||||
</mstyle>
|
||||
</math>
|
||||
'''
|
||||
return formula(xmlstr, options='imaginaryi')
|
||||
return formula(xmlstr, options='imaginary')
|
||||
@@ -324,4 +324,5 @@ def symmath_check(expect, ans, dynamath=None, options=None, debug=None, xml=None
|
||||
msg += "<p>Difference: %s</p>" % to_latex(diff)
|
||||
msg += '<hr>'
|
||||
|
||||
return {'ok': False, 'msg': msg, 'ex': fexpect, 'got': fsym}
|
||||
# Used to return more keys: 'ex': fexpect, 'got': fsym
|
||||
return {'ok': False, 'msg': msg}
|
||||
13
common/lib/chem/setup.py
Normal file
13
common/lib/chem/setup.py
Normal file
@@ -0,0 +1,13 @@
|
||||
from setuptools import setup
|
||||
|
||||
setup(
|
||||
name="chem",
|
||||
version="0.1",
|
||||
packages=["chem"],
|
||||
install_requires=[
|
||||
"pyparsing==1.5.6",
|
||||
"numpy",
|
||||
"scipy",
|
||||
"nltk==2.0.4",
|
||||
],
|
||||
)
|
||||
1
common/lib/sandbox-packages/README
Normal file
1
common/lib/sandbox-packages/README
Normal file
@@ -0,0 +1 @@
|
||||
This directory is in the Python path for sandboxed Python execution.
|
||||
14
common/lib/sandbox-packages/setup.py
Normal file
14
common/lib/sandbox-packages/setup.py
Normal file
@@ -0,0 +1,14 @@
|
||||
from setuptools import setup
|
||||
|
||||
setup(
|
||||
name="sandbox-packages",
|
||||
version="0.1",
|
||||
packages=[
|
||||
"verifiers",
|
||||
],
|
||||
py_modules=[
|
||||
"eia",
|
||||
],
|
||||
install_requires=[
|
||||
],
|
||||
)
|
||||
@@ -13,13 +13,10 @@ real time, next to the input box.
|
||||
<p>This is a correct answer which may be entered below: </p>
|
||||
<p><tt>cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]</tt></p>
|
||||
|
||||
<script>
|
||||
from symmath import *
|
||||
</script>
|
||||
<text>Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax]
|
||||
and give the resulting \(2 \times 2\) matrix. <br/>
|
||||
Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>. <br/>
|
||||
[mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),I*sin(theta)],[I*sin(theta),cos(theta)]]" options="matrix,imaginaryi" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted">
|
||||
[mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]" options="matrix,imaginary" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted">
|
||||
<textline size="80" math="1" response_id="2" answer_id="1" id="filenamedogi0VpEBOWedxsymmathresponse_2_1"/>
|
||||
</symbolicresponse>
|
||||
<br/>
|
||||
|
||||
@@ -3,7 +3,9 @@ import datetime
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import traceback
|
||||
import struct
|
||||
import sys
|
||||
|
||||
from pkg_resources import resource_string
|
||||
@@ -23,8 +25,10 @@ from xmodule.util.date_utils import time_to_datetime
|
||||
log = logging.getLogger("mitx.courseware")
|
||||
|
||||
|
||||
# Generated this many different variants of problems with rerandomize=per_student
|
||||
# Generate this many different variants of problems with rerandomize=per_student
|
||||
NUM_RANDOMIZATION_BINS = 20
|
||||
# Never produce more than this many different seeds, no matter what.
|
||||
MAX_RANDOMIZATION_BINS = 1000
|
||||
|
||||
|
||||
def randomization_bin(seed, problem_id):
|
||||
@@ -109,11 +113,7 @@ class CapaModule(CapaFields, XModule):
|
||||
self.close_date = due_date
|
||||
|
||||
if self.seed is None:
|
||||
if self.rerandomize == 'never':
|
||||
self.seed = 1
|
||||
elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'):
|
||||
# see comment on randomization_bin
|
||||
self.seed = randomization_bin(system.seed, self.location.url)
|
||||
self.choose_new_seed()
|
||||
|
||||
# Need the problem location in openendedresponse to send out. Adding
|
||||
# it to the system here seems like the least clunky way to get it
|
||||
@@ -157,6 +157,22 @@ class CapaModule(CapaFields, XModule):
|
||||
|
||||
self.set_state_from_lcp()
|
||||
|
||||
assert self.seed is not None
|
||||
|
||||
def choose_new_seed(self):
|
||||
"""Choose a new seed."""
|
||||
if self.rerandomize == 'never':
|
||||
self.seed = 1
|
||||
elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'):
|
||||
# see comment on randomization_bin
|
||||
self.seed = randomization_bin(self.system.seed, self.location.url)
|
||||
else:
|
||||
self.seed = struct.unpack('i', os.urandom(4))[0]
|
||||
|
||||
# So that sandboxed code execution can be cached, but still have an interesting
|
||||
# number of possibilities, cap the number of different random seeds.
|
||||
self.seed %= MAX_RANDOMIZATION_BINS
|
||||
|
||||
def new_lcp(self, state, text=None):
|
||||
if text is None:
|
||||
text = self.data
|
||||
@@ -165,6 +181,7 @@ class CapaModule(CapaFields, XModule):
|
||||
problem_text=text,
|
||||
id=self.location.html_id(),
|
||||
state=state,
|
||||
seed=self.seed,
|
||||
system=self.system,
|
||||
)
|
||||
|
||||
@@ -832,14 +849,11 @@ class CapaModule(CapaFields, XModule):
|
||||
'error': "Refresh the page and make an attempt before resetting."}
|
||||
|
||||
if self.rerandomize in ["always", "onreset"]:
|
||||
# reset random number generator seed (note the self.lcp.get_state()
|
||||
# in next line)
|
||||
seed = None
|
||||
else:
|
||||
seed = self.lcp.seed
|
||||
# Reset random number generator seed.
|
||||
self.choose_new_seed()
|
||||
|
||||
# Generate a new problem with either the previous seed or a new seed
|
||||
self.lcp = self.new_lcp({'seed': seed})
|
||||
self.lcp = self.new_lcp(None)
|
||||
|
||||
# Pull in the new problem seed
|
||||
self.set_state_from_lcp()
|
||||
|
||||
@@ -14,7 +14,7 @@ import fs.osfs
|
||||
|
||||
import numpy
|
||||
|
||||
import capa.calc as calc
|
||||
import calc
|
||||
import xmodule
|
||||
from xmodule.x_module import ModuleSystem
|
||||
from mock import Mock
|
||||
@@ -33,15 +33,14 @@ def test_system():
|
||||
"""
|
||||
Construct a test ModuleSystem instance.
|
||||
|
||||
By default, the render_template() method simply returns
|
||||
the context it is passed as a string.
|
||||
You can override this behavior by monkey patching:
|
||||
By default, the render_template() method simply returns the context it is
|
||||
passed as a string. You can override this behavior by monkey patching::
|
||||
|
||||
system = test_system()
|
||||
system.render_template = my_render_func
|
||||
system = test_system()
|
||||
system.render_template = my_render_func
|
||||
|
||||
where `my_render_func` is a function of the form my_render_func(template, context).
|
||||
|
||||
where my_render_func is a function of the form
|
||||
my_render_func(template, context)
|
||||
"""
|
||||
return ModuleSystem(
|
||||
ajax_url='courses/course_id/modx/a_location',
|
||||
@@ -86,10 +85,12 @@ class ModelsTest(unittest.TestCase):
|
||||
self.assertTrue(abs(calc.evaluator(variables, functions, "e^(j*pi)") + 1) < 0.00001)
|
||||
self.assertTrue(abs(calc.evaluator(variables, functions, "j||1") - 0.5 - 0.5j) < 0.00001)
|
||||
variables['t'] = 1.0
|
||||
# Use self.assertAlmostEqual here...
|
||||
self.assertTrue(abs(calc.evaluator(variables, functions, "t") - 1.0) < 0.00001)
|
||||
self.assertTrue(abs(calc.evaluator(variables, functions, "T") - 1.0) < 0.00001)
|
||||
self.assertTrue(abs(calc.evaluator(variables, functions, "t", cs=True) - 1.0) < 0.00001)
|
||||
self.assertTrue(abs(calc.evaluator(variables, functions, "T", cs=True) - 298) < 0.2)
|
||||
# Use self.assertRaises here...
|
||||
exception_happened = False
|
||||
try:
|
||||
calc.evaluator({}, {}, "5+7 QWSEKO")
|
||||
|
||||
@@ -550,6 +550,7 @@ class CapaModuleTest(unittest.TestCase):
|
||||
def test_reset_problem(self):
|
||||
module = CapaFactory.create(done=True)
|
||||
module.new_lcp = Mock(wraps=module.new_lcp)
|
||||
module.choose_new_seed = Mock(wraps=module.choose_new_seed)
|
||||
|
||||
# Stub out HTML rendering
|
||||
with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
|
||||
@@ -567,7 +568,8 @@ class CapaModuleTest(unittest.TestCase):
|
||||
self.assertEqual(result['html'], "<div>Test HTML</div>")
|
||||
|
||||
# Expect that the problem was reset
|
||||
module.new_lcp.assert_called_once_with({'seed': None})
|
||||
module.new_lcp.assert_called_once_with(None)
|
||||
module.choose_new_seed.assert_called_once_with()
|
||||
|
||||
def test_reset_problem_closed(self):
|
||||
module = CapaFactory.create()
|
||||
@@ -1033,3 +1035,13 @@ class CapaModuleTest(unittest.TestCase):
|
||||
self.assertTrue(module.seed is not None)
|
||||
msg = 'Could not get a new seed from reset after 5 tries'
|
||||
self.assertTrue(success, msg)
|
||||
|
||||
def test_random_seed_bins(self):
|
||||
# Assert that we are limiting the number of possible seeds.
|
||||
|
||||
# Check the conditions that generate random seeds
|
||||
for rerandomize in ['always', 'per_student', 'true', 'onreset']:
|
||||
# Get a bunch of seeds, they should all be in 0-999.
|
||||
for i in range(200):
|
||||
module = CapaFactory.create(rerandomize=rerandomize)
|
||||
assert 0 <= module.seed < 1000
|
||||
|
||||
@@ -134,6 +134,6 @@ class ModuleProgressTest(unittest.TestCase):
|
||||
'''
|
||||
def test_xmodule_default(self):
|
||||
'''Make sure default get_progress exists, returns None'''
|
||||
xm = x_module.XModule(test_system, 'a://b/c/d/e', None, {})
|
||||
xm = x_module.XModule(test_system(), 'a://b/c/d/e', None, {})
|
||||
p = xm.get_progress()
|
||||
self.assertEqual(p, None)
|
||||
|
||||
@@ -14,7 +14,6 @@ START = '2013-01-01T01:00:00'
|
||||
|
||||
|
||||
from .test_course_module import DummySystem as DummyImportSystem
|
||||
from . import test_system
|
||||
|
||||
|
||||
class RandomizeModuleTestCase(unittest.TestCase):
|
||||
|
||||
@@ -737,7 +737,10 @@ class ModuleSystem(object):
|
||||
anonymous_student_id='',
|
||||
course_id=None,
|
||||
open_ended_grading_interface=None,
|
||||
s3_interface=None):
|
||||
s3_interface=None,
|
||||
cache=None,
|
||||
can_execute_unsafe_code=None,
|
||||
):
|
||||
'''
|
||||
Create a closure around the system environment.
|
||||
|
||||
@@ -779,6 +782,14 @@ class ModuleSystem(object):
|
||||
|
||||
xblock_model_data - A dict-like object containing the all data available to this
|
||||
xblock
|
||||
|
||||
cache - A cache object with two methods:
|
||||
.get(key) returns an object from the cache or None.
|
||||
.set(key, value, timeout_secs=None) stores a value in the cache with a timeout.
|
||||
|
||||
can_execute_unsafe_code - A function returning a boolean, whether or
|
||||
not to allow the execution of unsafe, unsandboxed code.
|
||||
|
||||
'''
|
||||
self.ajax_url = ajax_url
|
||||
self.xqueue = xqueue
|
||||
@@ -803,6 +814,9 @@ class ModuleSystem(object):
|
||||
self.open_ended_grading_interface = open_ended_grading_interface
|
||||
self.s3_interface = s3_interface
|
||||
|
||||
self.cache = cache or DoNothingCache()
|
||||
self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
|
||||
|
||||
def get(self, attr):
|
||||
''' provide uniform access to attributes (like etree).'''
|
||||
return self.__dict__.get(attr)
|
||||
@@ -816,3 +830,12 @@ class ModuleSystem(object):
|
||||
|
||||
def __str__(self):
|
||||
return str(self.__dict__)
|
||||
|
||||
|
||||
class DoNothingCache(object):
|
||||
"""A duck-compatible object to use in ModuleSystem when there's no cache."""
|
||||
def get(self, key):
|
||||
return None
|
||||
|
||||
def set(self, key, value, timeout=None):
|
||||
pass
|
||||
|
||||
1
common/test/data/embedded_python/course.xml
Normal file
1
common/test/data/embedded_python/course.xml
Normal file
@@ -0,0 +1 @@
|
||||
<course org="edX" course="embedded_python" url_name="2013_Spring"/>
|
||||
111
common/test/data/embedded_python/course/2013_Spring.xml
Normal file
111
common/test/data/embedded_python/course/2013_Spring.xml
Normal file
@@ -0,0 +1,111 @@
|
||||
<course>
|
||||
<chapter url_name="EmbeddedPythonChapter">
|
||||
|
||||
<vertical url_name="Homework1">
|
||||
<problem url_name="schematic_problem">
|
||||
<schematicresponse>
|
||||
<center>
|
||||
<schematic height="500" width="600" parts="g,n,s" analyses="dc,tran"
|
||||
submit_analyses="{"tran":[["Z",0.0000004,0.0000009,0.0000014,0.0000019,0.0000024,0.0000029,0.0000034,0.000039]]}"
|
||||
initial_value="[["w",[112,96,128,96]],["w",[256,96,240,96]],["w",[192,96,240,96]],["s",[240,96,0],{"color":"cyan","offset":"","plot offset":"0","_json_":3},["Z"]],["w",[32,224,192,224]],["w",[96,48,192,48]],["L",[256,96,3],{"label":"Z","_json_":6},["Z"]],["r",[192,48,0],{"name":"Rpullup","r":"10K","_json_":7},["1","Z"]],["w",[32,144,32,192]],["w",[32,224,32,192]],["w",[48,192,32,192]],["w",[32,96,32,144]],["w",[48,144,32,144]],["w",[32,48,32,96]],["w",[48,96,32,96]],["w",[32,48,48,48]],["g",[32,224,0],{"_json_":16},["0"]],["v",[96,192,1],{"name":"VC","value":"square(3,0,250K)","_json_":17},["C","0"]],["v",[96,144,1],{"name":"VB","value":"square(3,0,500K)","_json_":18},["B","0"]],["v",[96,96,1],{"name":"VA","value":"square(3,0,1000K)","_json_":19},["A","0"]],["v",[96,48,1],{"name":"Vpwr","value":"dc(3)","_json_":20},["1","0"]],["L",[96,96,2],{"label":"A","_json_":21},["A"]],["w",[96,96,104,96]],["L",[96,144,2],{"label":"B","_json_":23},["B"]],["w",[96,144,104,144]],["L",[96,192,2],{"label":"C","_json_":25},["C"]],["w",[96,192,104,192]],["w",[192,96,192,112]],["s",[112,96,0],{"color":"red","offset":"15","plot offset":"0","_json_":28},["A"]],["w",[104,96,112,96]],["s",[112,144,0],{"color":"green","offset":"10","plot offset":"0","_json_":30},["B"]],["w",[104,144,112,144]],["w",[128,144,112,144]],["s",[112,192,0],{"color":"blue","offset":"5","plot offset":"0","_json_":33},["C"]],["w",[104,192,112,192]],["w",[128,192,112,192]],["view",0,0,2,"5","10","10MEG",null,"100","4us"]]"
|
||||
/>
|
||||
</center>
|
||||
<answer type="loncapa/python">
|
||||
# for a schematic response, submission[i] is the json representation
|
||||
# of the diagram and analysis results for the i-th schematic tag
|
||||
|
||||
def get_tran(json,signal):
|
||||
for element in json:
|
||||
if element[0] == 'transient':
|
||||
return element[1].get(signal,[])
|
||||
return []
|
||||
|
||||
def get_value(at,output):
|
||||
for (t,v) in output:
|
||||
if at == t: return v
|
||||
return None
|
||||
|
||||
output = get_tran(submission[0],'Z')
|
||||
okay = True
|
||||
|
||||
# output should be 1, 1, 1, 1, 1, 0, 0, 0
|
||||
if get_value(0.0000004,output) < 2.7: okay = False;
|
||||
if get_value(0.0000009,output) < 2.7: okay = False;
|
||||
if get_value(0.0000014,output) < 2.7: okay = False;
|
||||
if get_value(0.0000019,output) < 2.7: okay = False;
|
||||
if get_value(0.0000024,output) < 2.7: okay = False;
|
||||
if get_value(0.0000029,output) > 0.25: okay = False;
|
||||
if get_value(0.0000034,output) > 0.25: okay = False;
|
||||
if get_value(0.0000039,output) > 0.25: okay = False;
|
||||
|
||||
correct = ['correct' if okay else 'incorrect']
|
||||
|
||||
</answer></schematicresponse>
|
||||
|
||||
|
||||
|
||||
|
||||
</problem>
|
||||
|
||||
|
||||
|
||||
<problem url_name="cfn_problem">
|
||||
<text>
|
||||
<script type="text/python" system_path="python_lib">
|
||||
def test_csv(expect, ans):
|
||||
# Take out all spaces in expected answer
|
||||
expect = [i.strip(' ') for i in str(expect).split(',')]
|
||||
# Take out all spaces in student solution
|
||||
ans = [i.strip(' ') for i in str(ans).split(',')]
|
||||
|
||||
def strip_q(x):
|
||||
# Strip quotes around strings if students have entered them
|
||||
stripped_ans = []
|
||||
for item in x:
|
||||
if item[0] == "'" and item[-1]=="'":
|
||||
item = item.strip("'")
|
||||
elif item[0] == '"' and item[-1] == '"':
|
||||
item = item.strip('"')
|
||||
stripped_ans.append(item)
|
||||
return stripped_ans
|
||||
|
||||
return strip_q(expect) == strip_q(ans)
|
||||
</script>
|
||||
<ol class="enumerate">
|
||||
<li>
|
||||
<pre>
|
||||
num = 0
|
||||
while num <= 5:
|
||||
print(num)
|
||||
num += 1
|
||||
|
||||
print("Outside of loop")
|
||||
print(num)
|
||||
</pre>
|
||||
<p>
|
||||
<customresponse cfn="test_csv" expect="0, 1, 2, 3, 4, 5, 'Outside of loop', 6">
|
||||
<textline size="50" correct_answer="0, 1, 2, 3, 4, 5, 'Outside of loop', 6"/>
|
||||
</customresponse>
|
||||
</p>
|
||||
</li>
|
||||
</ol>
|
||||
</text>
|
||||
</problem>
|
||||
|
||||
<problem url_name="computed_answer">
|
||||
|
||||
<customresponse>
|
||||
<textline size="5" correct_answer="Xyzzy"/>
|
||||
<answer type="loncapa/python">
|
||||
if submission[0] == "Xyzzy":
|
||||
correct = ['correct']
|
||||
else:
|
||||
correct = ['incorrect']
|
||||
</answer>
|
||||
</customresponse>
|
||||
|
||||
</problem>
|
||||
|
||||
</vertical>
|
||||
</chapter>
|
||||
</course>
|
||||
1
common/test/data/embedded_python/roots/2013_Spring.xml
Normal file
1
common/test/data/embedded_python/roots/2013_Spring.xml
Normal file
@@ -0,0 +1 @@
|
||||
<course org="edX" course="embedded_python" url_name="2013_Spring"/>
|
||||
@@ -19,7 +19,7 @@ from symmath import *
|
||||
<text>Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax]
|
||||
and give the resulting \(2 \times 2\) matrix. <br/>
|
||||
Your input should be typed in as a list of lists, eg <tt>[[1,2],[3,4]]</tt>. <br/>
|
||||
[mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),I*sin(theta)],[I*sin(theta),cos(theta)]]" options="matrix,imaginaryi" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted">
|
||||
[mathjax]U=[/mathjax] <symbolicresponse cfn="symmath_check" answer="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]" options="matrix,imaginary" id="filenamedogi0VpEBOWedxsymmathresponse_1" state="unsubmitted">
|
||||
<textline size="80" math="1" response_id="2" answer_id="1" id="filenamedogi0VpEBOWedxsymmathresponse_2_1"/>
|
||||
</symbolicresponse>
|
||||
<br/>
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import json
|
||||
import logging
|
||||
import pyparsing
|
||||
import re
|
||||
import sys
|
||||
import static_replace
|
||||
|
||||
@@ -8,6 +9,7 @@ from functools import partial
|
||||
|
||||
from django.conf import settings
|
||||
from django.contrib.auth.models import User
|
||||
from django.core.cache import cache
|
||||
from django.core.exceptions import PermissionDenied
|
||||
from django.core.urlresolvers import reverse
|
||||
from django.http import Http404
|
||||
@@ -273,6 +275,14 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
|
||||
|
||||
statsd.increment("lms.courseware.question_answered", tags=tags)
|
||||
|
||||
def can_execute_unsafe_code():
|
||||
# To decide if we can run unsafe code, we check the course id against
|
||||
# a list of regexes configured on the server.
|
||||
for regex in settings.COURSES_WITH_UNSAFE_CODE:
|
||||
if re.match(regex, course_id):
|
||||
return True
|
||||
return False
|
||||
|
||||
# TODO (cpennington): When modules are shared between courses, the static
|
||||
# prefix is going to have to be specific to the module, not the directory
|
||||
# that the xml was loaded from
|
||||
@@ -299,6 +309,8 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
|
||||
course_id=course_id,
|
||||
open_ended_grading_interface=open_ended_grading_interface,
|
||||
s3_interface=s3_interface,
|
||||
cache=cache,
|
||||
can_execute_unsafe_code=can_execute_unsafe_code,
|
||||
)
|
||||
# pass position specified in URL to module through ModuleSystem
|
||||
system.set('position', position)
|
||||
|
||||
4
lms/djangoapps/courseware/tests/load_tests/README.md
Normal file
4
lms/djangoapps/courseware/tests/load_tests/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Load Testing
|
||||
|
||||
Scripts for load testing the courseware app,
|
||||
mostly using [multimechanize](http://testutils.org/multi-mechanize/)
|
||||
@@ -0,0 +1,51 @@
|
||||
# Custom Response Load Test
|
||||
|
||||
## Optional Installations
|
||||
|
||||
* [memcached](http://pypi.python.org/pypi/python-memcached/): Install this
|
||||
and make sure it is running, or the Capa problem will not cache results.
|
||||
|
||||
* [AppArmor](http://wiki.apparmor.net): Follow the instructions in
|
||||
`common/lib/codejail/README` to set up the Python sandbox environment.
|
||||
If you do not set up the sandbox, the tests will still execute code in the CustomResponse,
|
||||
so you can still run the tests.
|
||||
|
||||
* [matplotlib](http://matplotlib.org): Multi-mechanize uses this to create graphs.
|
||||
|
||||
|
||||
## Running the Tests
|
||||
|
||||
This test simulates student submissions for a custom response problem.
|
||||
|
||||
First, clear the cache:
|
||||
|
||||
/etc/init.d/memcached restart
|
||||
|
||||
Then, run the test:
|
||||
|
||||
multimech-run custom_response
|
||||
|
||||
You can configure the parameters in `customresponse/config.cfg`,
|
||||
and you can change the CustomResponse script and student submissions
|
||||
in `customresponse/test_scripts/v_user.py`.
|
||||
|
||||
## Components Under Test
|
||||
|
||||
Components under test:
|
||||
|
||||
* Python sandbox (see `common/lib/codejail`), which uses `AppArmor`
|
||||
* Caching (see `common/lib/capa/capa/safe_exec/`), which uses `memcache` in production
|
||||
|
||||
Components NOT under test:
|
||||
|
||||
* Django views
|
||||
* `XModule`
|
||||
* gunicorn
|
||||
|
||||
This allows us to avoid creating courses in mongo, logging in, using CSRF tokens,
|
||||
and other inconveniences. Instead, we create a capa problem (from the capa package),
|
||||
pass it Django's memcache backend, and pass the problem student submissions.
|
||||
|
||||
Even though the test uses `capa.capa_problem.LoncapaProblem` directly,
|
||||
the `capa` should not depend on Django. For this reason, we put the
|
||||
test in the `courseware` Django app.
|
||||
@@ -0,0 +1,22 @@
|
||||
|
||||
[global]
|
||||
run_time = 240
|
||||
rampup = 30
|
||||
results_ts_interval = 10
|
||||
progress_bar = on
|
||||
console_logging = off
|
||||
xml_report = off
|
||||
|
||||
|
||||
[user_group-1]
|
||||
threads = 10
|
||||
script = v_user.py
|
||||
|
||||
[user_group-2]
|
||||
threads = 10
|
||||
script = v_user.py
|
||||
|
||||
[user_group-3]
|
||||
threads = 10
|
||||
script = v_user.py
|
||||
|
||||
@@ -0,0 +1,115 @@
|
||||
""" User script for load testing CustomResponse """
|
||||
|
||||
from capa.tests.response_xml_factory import CustomResponseXMLFactory
|
||||
import capa.capa_problem as lcp
|
||||
from xmodule.x_module import ModuleSystem
|
||||
import mock
|
||||
import fs.osfs
|
||||
import random
|
||||
import textwrap
|
||||
|
||||
# Use memcache running locally
|
||||
CACHE_SETTINGS = {
|
||||
'default': {
|
||||
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
|
||||
'LOCATION': '127.0.0.1:11211'
|
||||
},
|
||||
}
|
||||
|
||||
# Configure settings so Django will let us import its cache wrapper
|
||||
# Caching is the only part of Django being tested
|
||||
from django.conf import settings
|
||||
settings.configure(CACHES=CACHE_SETTINGS)
|
||||
|
||||
from django.core.cache import cache
|
||||
|
||||
# Script to install as the checker for the CustomResponse
|
||||
TEST_SCRIPT = textwrap.dedent("""
|
||||
def check_func(expect, answer_given):
|
||||
return {'ok': answer_given == expect, 'msg': 'Message text'}
|
||||
""")
|
||||
|
||||
# Submissions submitted by the student
|
||||
TEST_SUBMISSIONS = [random.randint(-100, 100) for i in range(100)]
|
||||
|
||||
class TestContext(object):
|
||||
""" One-time set up for the test that is shared across transactions.
|
||||
Uses a Singleton design pattern."""
|
||||
|
||||
SINGLETON = None
|
||||
NUM_UNIQUE_SEEDS = 20
|
||||
|
||||
@classmethod
|
||||
def singleton(cls):
|
||||
""" Return the singleton, creating one if it does not already exist."""
|
||||
|
||||
# If we haven't created the singleton yet, create it now
|
||||
if cls.SINGLETON is None:
|
||||
|
||||
# Create a mock ModuleSystem, installing our cache
|
||||
system = mock.MagicMock(ModuleSystem)
|
||||
system.render_template = lambda template, context: "<div>%s</div>" % template
|
||||
system.cache = cache
|
||||
system.filestore = mock.MagicMock(fs.osfs.OSFS)
|
||||
system.filestore.root_path = ""
|
||||
system.DEBUG = True
|
||||
|
||||
# Create a custom response problem
|
||||
xml_factory = CustomResponseXMLFactory()
|
||||
xml = xml_factory.build_xml(script=TEST_SCRIPT, cfn="check_func", expect="42")
|
||||
|
||||
# Create and store the context
|
||||
cls.SINGLETON = cls(system, xml)
|
||||
|
||||
else:
|
||||
pass
|
||||
|
||||
# Return the singleton
|
||||
return cls.SINGLETON
|
||||
|
||||
def __init__(self, system, xml):
|
||||
""" Store context needed for the test across transactions """
|
||||
self.system = system
|
||||
self.xml = xml
|
||||
|
||||
# Construct a small pool of unique seeds
|
||||
# To keep our implementation in line with the one capa actually uses,
|
||||
# construct the problems, then use the seeds they generate
|
||||
self.seeds = [lcp.LoncapaProblem(self.xml, 'problem_id', system=self.system).seed
|
||||
for i in range(self.NUM_UNIQUE_SEEDS)]
|
||||
|
||||
def random_seed(self):
|
||||
""" Return one of a small number of unique random seeds """
|
||||
return random.choice(self.seeds)
|
||||
|
||||
def student_submission(self):
|
||||
""" Return one of a small number of student submissions """
|
||||
return random.choice(TEST_SUBMISSIONS)
|
||||
|
||||
class Transaction(object):
|
||||
""" User script that submits a response to a CustomResponse problem """
|
||||
|
||||
def __init__(self):
|
||||
""" Create the problem """
|
||||
|
||||
# Get the context (re-used across transactions)
|
||||
self.context = TestContext.singleton()
|
||||
|
||||
# Create a new custom response problem
|
||||
# using one of a small number of unique seeds
|
||||
# We're assuming that the capa module is limiting the number
|
||||
# of seeds (currently not the case for certain settings)
|
||||
self.problem = lcp.LoncapaProblem(self.context.xml,
|
||||
'1',
|
||||
state=None,
|
||||
seed=self.context.random_seed(),
|
||||
system=self.context.system)
|
||||
|
||||
def run(self):
|
||||
""" Submit a response to the CustomResponse problem """
|
||||
answers = {'1_2_1': self.context.student_submission()}
|
||||
self.problem.grade_answers(answers)
|
||||
|
||||
if __name__ == '__main__':
|
||||
trans = Transaction()
|
||||
trans.run()
|
||||
@@ -372,6 +372,7 @@ class TestCoursesLoadTestCase_XmlModulestore(PageLoaderTestCase):
|
||||
'''Check that all pages in test courses load properly from XML'''
|
||||
|
||||
def setUp(self):
|
||||
super(TestCoursesLoadTestCase_XmlModulestore, self).setUp()
|
||||
self.setup_viewtest_user()
|
||||
xmodule.modulestore.django._MODULESTORES = {}
|
||||
|
||||
@@ -390,6 +391,7 @@ class TestCoursesLoadTestCase_MongoModulestore(PageLoaderTestCase):
|
||||
'''Check that all pages in test courses load properly from Mongo'''
|
||||
|
||||
def setUp(self):
|
||||
super(TestCoursesLoadTestCase_MongoModulestore, self).setUp()
|
||||
self.setup_viewtest_user()
|
||||
xmodule.modulestore.django._MODULESTORES = {}
|
||||
modulestore().collection.drop()
|
||||
@@ -487,9 +489,6 @@ class TestDraftModuleStore(TestCase):
|
||||
class TestViewAuth(LoginEnrollmentTestCase):
|
||||
"""Check that view authentication works properly"""
|
||||
|
||||
# NOTE: setUpClass() runs before override_settings takes effect, so
|
||||
# can't do imports there without manually hacking settings.
|
||||
|
||||
def setUp(self):
|
||||
xmodule.modulestore.django._MODULESTORES = {}
|
||||
|
||||
@@ -810,43 +809,85 @@ class TestViewAuth(LoginEnrollmentTestCase):
|
||||
|
||||
|
||||
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
|
||||
class TestCourseGrader(LoginEnrollmentTestCase):
|
||||
class TestSubmittingProblems(LoginEnrollmentTestCase):
|
||||
"""Check that a course gets graded properly"""
|
||||
|
||||
# NOTE: setUpClass() runs before override_settings takes effect, so
|
||||
# can't do imports there without manually hacking settings.
|
||||
# Subclasses should specify the course slug
|
||||
course_slug = "UNKNOWN"
|
||||
course_when = "UNKNOWN"
|
||||
|
||||
def setUp(self):
|
||||
xmodule.modulestore.django._MODULESTORES = {}
|
||||
courses = modulestore().get_courses()
|
||||
|
||||
def find_course(course_id):
|
||||
"""Assumes the course is present"""
|
||||
return [c for c in courses if c.id == course_id][0]
|
||||
|
||||
self.graded_course = find_course("edX/graded/2012_Fall")
|
||||
course_name = "edX/%s/%s" % (self.course_slug, self.course_when)
|
||||
self.course = modulestore().get_course(course_name)
|
||||
assert self.course, "Couldn't load course %r" % course_name
|
||||
|
||||
# create a test student
|
||||
self.student = 'view@test.com'
|
||||
self.password = 'foo'
|
||||
self.create_account('u1', self.student, self.password)
|
||||
self.activate_user(self.student)
|
||||
self.enroll(self.graded_course)
|
||||
self.enroll(self.course)
|
||||
|
||||
self.student_user = get_user(self.student)
|
||||
|
||||
self.factory = RequestFactory()
|
||||
|
||||
def problem_location(self, problem_url_name):
|
||||
return "i4x://edX/{}/problem/{}".format(self.course_slug, problem_url_name)
|
||||
|
||||
def modx_url(self, problem_location, dispatch):
|
||||
return reverse(
|
||||
'modx_dispatch',
|
||||
kwargs={
|
||||
'course_id': self.course.id,
|
||||
'location': problem_location,
|
||||
'dispatch': dispatch,
|
||||
}
|
||||
)
|
||||
|
||||
def submit_question_answer(self, problem_url_name, responses):
|
||||
"""
|
||||
Submit answers to a question.
|
||||
|
||||
Responses is a dict mapping problem ids (not sure of the right term)
|
||||
to answers:
|
||||
{'2_1': 'Correct', '2_2': 'Incorrect'}
|
||||
|
||||
"""
|
||||
problem_location = self.problem_location(problem_url_name)
|
||||
modx_url = self.modx_url(problem_location, 'problem_check')
|
||||
answer_key_prefix = 'input_i4x-edX-{}-problem-{}_'.format(self.course_slug, problem_url_name)
|
||||
resp = self.client.post(modx_url,
|
||||
{ (answer_key_prefix + k): v for k,v in responses.items() }
|
||||
)
|
||||
return resp
|
||||
|
||||
def reset_question_answer(self, problem_url_name):
|
||||
'''resets specified problem for current user'''
|
||||
problem_location = self.problem_location(problem_url_name)
|
||||
modx_url = self.modx_url(problem_location, 'problem_reset')
|
||||
resp = self.client.post(modx_url)
|
||||
return resp
|
||||
|
||||
|
||||
class TestCourseGrader(TestSubmittingProblems):
|
||||
"""Check that a course gets graded properly"""
|
||||
|
||||
course_slug = "graded"
|
||||
course_when = "2012_Fall"
|
||||
|
||||
def get_grade_summary(self):
|
||||
'''calls grades.grade for current user and course'''
|
||||
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
|
||||
self.graded_course.id, self.student_user, self.graded_course)
|
||||
self.course.id, self.student_user, self.course)
|
||||
|
||||
fake_request = self.factory.get(reverse('progress',
|
||||
kwargs={'course_id': self.graded_course.id}))
|
||||
kwargs={'course_id': self.course.id}))
|
||||
|
||||
return grades.grade(self.student_user, fake_request,
|
||||
self.graded_course, model_data_cache)
|
||||
self.course, model_data_cache)
|
||||
|
||||
def get_homework_scores(self):
|
||||
'''get scores for homeworks'''
|
||||
@@ -855,14 +896,14 @@ class TestCourseGrader(LoginEnrollmentTestCase):
|
||||
def get_progress_summary(self):
|
||||
'''return progress summary structure for current user and course'''
|
||||
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
|
||||
self.graded_course.id, self.student_user, self.graded_course)
|
||||
self.course.id, self.student_user, self.course)
|
||||
|
||||
fake_request = self.factory.get(reverse('progress',
|
||||
kwargs={'course_id': self.graded_course.id}))
|
||||
kwargs={'course_id': self.course.id}))
|
||||
|
||||
progress_summary = grades.progress_summary(self.student_user,
|
||||
fake_request,
|
||||
self.graded_course,
|
||||
self.course,
|
||||
model_data_cache)
|
||||
return progress_summary
|
||||
|
||||
@@ -871,46 +912,6 @@ class TestCourseGrader(LoginEnrollmentTestCase):
|
||||
grade_summary = self.get_grade_summary()
|
||||
self.assertEqual(grade_summary['percent'], percent)
|
||||
|
||||
def submit_question_answer(self, problem_url_name, responses):
|
||||
"""
|
||||
The field names of a problem are hard to determine. This method only works
|
||||
for the problems used in the edX/graded course, which has fields named in the
|
||||
following form:
|
||||
input_i4x-edX-graded-problem-H1P3_2_1
|
||||
input_i4x-edX-graded-problem-H1P3_2_2
|
||||
"""
|
||||
problem_location = "i4x://edX/graded/problem/%s" % problem_url_name
|
||||
|
||||
modx_url = reverse('modx_dispatch',
|
||||
kwargs={'course_id': self.graded_course.id,
|
||||
'location': problem_location,
|
||||
'dispatch': 'problem_check', })
|
||||
|
||||
resp = self.client.post(modx_url, {
|
||||
'input_i4x-edX-graded-problem-%s_2_1' % problem_url_name: responses[0],
|
||||
'input_i4x-edX-graded-problem-%s_2_2' % problem_url_name: responses[1],
|
||||
})
|
||||
print "modx_url", modx_url, "responses", responses
|
||||
print "resp", resp
|
||||
|
||||
return resp
|
||||
|
||||
def problem_location(self, problem_url_name):
|
||||
'''Get location string for problem, assuming hardcoded course_id'''
|
||||
return "i4x://edX/graded/problem/{0}".format(problem_url_name)
|
||||
|
||||
def reset_question_answer(self, problem_url_name):
|
||||
'''resets specified problem for current user'''
|
||||
problem_location = self.problem_location(problem_url_name)
|
||||
|
||||
modx_url = reverse('modx_dispatch',
|
||||
kwargs={'course_id': self.graded_course.id,
|
||||
'location': problem_location,
|
||||
'dispatch': 'problem_reset', })
|
||||
|
||||
resp = self.client.post(modx_url)
|
||||
return resp
|
||||
|
||||
def test_get_graded(self):
|
||||
#### Check that the grader shows we have 0% in the course
|
||||
self.check_grade_percent(0)
|
||||
@@ -928,27 +929,27 @@ class TestCourseGrader(LoginEnrollmentTestCase):
|
||||
return [s.earned for s in hw_section['scores']]
|
||||
|
||||
# Only get half of the first problem correct
|
||||
self.submit_question_answer('H1P1', ['Correct', 'Incorrect'])
|
||||
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
|
||||
self.check_grade_percent(0.06)
|
||||
self.assertEqual(earned_hw_scores(), [1.0, 0, 0]) # Order matters
|
||||
self.assertEqual(score_for_hw('Homework1'), [1.0, 0.0])
|
||||
|
||||
# Get both parts of the first problem correct
|
||||
self.reset_question_answer('H1P1')
|
||||
self.submit_question_answer('H1P1', ['Correct', 'Correct'])
|
||||
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
|
||||
self.check_grade_percent(0.13)
|
||||
self.assertEqual(earned_hw_scores(), [2.0, 0, 0])
|
||||
self.assertEqual(score_for_hw('Homework1'), [2.0, 0.0])
|
||||
|
||||
# This problem is shown in an ABTest
|
||||
self.submit_question_answer('H1P2', ['Correct', 'Correct'])
|
||||
self.submit_question_answer('H1P2', {'2_1': 'Correct', '2_2': 'Correct'})
|
||||
self.check_grade_percent(0.25)
|
||||
self.assertEqual(earned_hw_scores(), [4.0, 0.0, 0])
|
||||
self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])
|
||||
|
||||
# This problem is hidden in an ABTest.
|
||||
# Getting it correct doesn't change total grade
|
||||
self.submit_question_answer('H1P3', ['Correct', 'Correct'])
|
||||
self.submit_question_answer('H1P3', {'2_1': 'Correct', '2_2': 'Correct'})
|
||||
self.check_grade_percent(0.25)
|
||||
self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])
|
||||
|
||||
@@ -957,19 +958,85 @@ class TestCourseGrader(LoginEnrollmentTestCase):
|
||||
# This problem is also weighted to be 4 points (instead of default of 2)
|
||||
# If the problem was unweighted the percent would have been 0.38 so we
|
||||
# know it works.
|
||||
self.submit_question_answer('H2P1', ['Correct', 'Correct'])
|
||||
self.submit_question_answer('H2P1', {'2_1': 'Correct', '2_2': 'Correct'})
|
||||
self.check_grade_percent(0.42)
|
||||
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0])
|
||||
|
||||
# Third homework
|
||||
self.submit_question_answer('H3P1', ['Correct', 'Correct'])
|
||||
self.submit_question_answer('H3P1', {'2_1': 'Correct', '2_2': 'Correct'})
|
||||
self.check_grade_percent(0.42) # Score didn't change
|
||||
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0])
|
||||
|
||||
self.submit_question_answer('H3P2', ['Correct', 'Correct'])
|
||||
self.submit_question_answer('H3P2', {'2_1': 'Correct', '2_2': 'Correct'})
|
||||
self.check_grade_percent(0.5) # Now homework2 dropped. Score changes
|
||||
self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0])
|
||||
|
||||
# Now we answer the final question (worth half of the grade)
|
||||
self.submit_question_answer('FinalQuestion', ['Correct', 'Correct'])
|
||||
self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
|
||||
self.check_grade_percent(1.0) # Hooray! We got 100%
|
||||
|
||||
|
||||
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
|
||||
class TestSchematicResponse(TestSubmittingProblems):
|
||||
"""Check that we can submit a schematic response, and it answers properly."""
|
||||
|
||||
course_slug = "embedded_python"
|
||||
course_when = "2013_Spring"
|
||||
|
||||
def test_schematic(self):
|
||||
resp = self.submit_question_answer('schematic_problem',
|
||||
{ '2_1': json.dumps(
|
||||
[['transient', {'Z': [
|
||||
[0.0000004, 2.8],
|
||||
[0.0000009, 2.8],
|
||||
[0.0000014, 2.8],
|
||||
[0.0000019, 2.8],
|
||||
[0.0000024, 2.8],
|
||||
[0.0000029, 0.2],
|
||||
[0.0000034, 0.2],
|
||||
[0.0000039, 0.2]
|
||||
]}]]
|
||||
)
|
||||
})
|
||||
respdata = json.loads(resp.content)
|
||||
self.assertEqual(respdata['success'], 'correct')
|
||||
|
||||
self.reset_question_answer('schematic_problem')
|
||||
resp = self.submit_question_answer('schematic_problem',
|
||||
{ '2_1': json.dumps(
|
||||
[['transient', {'Z': [
|
||||
[0.0000004, 2.8],
|
||||
[0.0000009, 0.0], # wrong.
|
||||
[0.0000014, 2.8],
|
||||
[0.0000019, 2.8],
|
||||
[0.0000024, 2.8],
|
||||
[0.0000029, 0.2],
|
||||
[0.0000034, 0.2],
|
||||
[0.0000039, 0.2]
|
||||
]}]]
|
||||
)
|
||||
})
|
||||
respdata = json.loads(resp.content)
|
||||
self.assertEqual(respdata['success'], 'incorrect')
|
||||
|
||||
def test_check_function(self):
|
||||
resp = self.submit_question_answer('cfn_problem', {'2_1': "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"})
|
||||
respdata = json.loads(resp.content)
|
||||
self.assertEqual(respdata['success'], 'correct')
|
||||
|
||||
self.reset_question_answer('cfn_problem')
|
||||
|
||||
resp = self.submit_question_answer('cfn_problem', {'2_1': "xyzzy!"})
|
||||
respdata = json.loads(resp.content)
|
||||
self.assertEqual(respdata['success'], 'incorrect')
|
||||
|
||||
def test_computed_answer(self):
|
||||
resp = self.submit_question_answer('computed_answer', {'2_1': "Xyzzy"})
|
||||
respdata = json.loads(resp.content)
|
||||
self.assertEqual(respdata['success'], 'correct')
|
||||
|
||||
self.reset_question_answer('computed_answer')
|
||||
|
||||
resp = self.submit_question_answer('computed_answer', {'2_1': "NO!"})
|
||||
respdata = json.loads(resp.content)
|
||||
self.assertEqual(respdata['success'], 'incorrect')
|
||||
|
||||
0
lms/djangoapps/debug/__init__.py
Normal file
0
lms/djangoapps/debug/__init__.py
Normal file
3
lms/djangoapps/debug/models.py
Normal file
3
lms/djangoapps/debug/models.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from django.db import models
|
||||
|
||||
# Create your models here.
|
||||
31
lms/djangoapps/debug/views.py
Normal file
31
lms/djangoapps/debug/views.py
Normal file
@@ -0,0 +1,31 @@
|
||||
"""Views for debugging and diagnostics"""
|
||||
|
||||
import pprint
|
||||
import traceback
|
||||
|
||||
from django.http import Http404
|
||||
from django.contrib.auth.decorators import login_required
|
||||
from django_future.csrf import ensure_csrf_cookie, csrf_exempt
|
||||
from mitxmako.shortcuts import render_to_response
|
||||
|
||||
from codejail.safe_exec import safe_exec
|
||||
|
||||
@login_required
|
||||
@ensure_csrf_cookie
|
||||
def run_python(request):
|
||||
"""A page to allow testing the Python sandbox on a production server."""
|
||||
if not request.user.is_staff:
|
||||
raise Http404
|
||||
c = {}
|
||||
c['code'] = ''
|
||||
c['results'] = None
|
||||
if request.method == 'POST':
|
||||
py_code = c['code'] = request.POST.get('code')
|
||||
g = {}
|
||||
try:
|
||||
safe_exec(py_code, g)
|
||||
except Exception as e:
|
||||
c['results'] = traceback.format_exc()
|
||||
else:
|
||||
c['results'] = pprint.pformat(g)
|
||||
return render_to_response("debug/run_python_form.html", c)
|
||||
@@ -92,6 +92,16 @@ CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull')
|
||||
ZENDESK_URL = ENV_TOKENS.get("ZENDESK_URL")
|
||||
FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL")
|
||||
|
||||
for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
|
||||
oldvalue = CODE_JAIL.get(name)
|
||||
if isinstance(oldvalue, dict):
|
||||
for subname, subvalue in value.items():
|
||||
oldvalue[subname] = subvalue
|
||||
else:
|
||||
CODE_JAIL[name] = value
|
||||
|
||||
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
|
||||
|
||||
############################## SECURE AUTH ITEMS ###############
|
||||
# Secret things: passwords, access keys, etc.
|
||||
with open(ENV_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
|
||||
|
||||
@@ -97,6 +97,10 @@ MITX_FEATURES = {
|
||||
|
||||
# Provide a UI to allow users to submit feedback from the LMS
|
||||
'ENABLE_FEEDBACK_SUBMISSION': False,
|
||||
|
||||
# Turn on a page that lets staff enter Python code to be run in the
|
||||
# sandbox, for testing whether it's enabled properly.
|
||||
'ENABLE_DEBUG_RUN_PYTHON': False,
|
||||
}
|
||||
|
||||
# Used for A/B testing
|
||||
@@ -246,6 +250,31 @@ MODULESTORE = {
|
||||
}
|
||||
CONTENTSTORE = None
|
||||
|
||||
#################### Python sandbox ############################################
|
||||
|
||||
CODE_JAIL = {
|
||||
# Path to a sandboxed Python executable. None means don't bother.
|
||||
'python_bin': None,
|
||||
# User to run as in the sandbox.
|
||||
'user': 'sandbox',
|
||||
|
||||
# Configurable limits.
|
||||
'limits': {
|
||||
# How many CPU seconds can jailed code use?
|
||||
'CPU': 1,
|
||||
},
|
||||
}
|
||||
|
||||
# Some courses are allowed to run unsafe code. This is a list of regexes, one
|
||||
# of them must match the course id for that course to run unsafe code.
|
||||
#
|
||||
# For example:
|
||||
#
|
||||
# COURSES_WITH_UNSAFE_CODE = [
|
||||
# r"Harvard/XY123.1/.*"
|
||||
# ]
|
||||
COURSES_WITH_UNSAFE_CODE = []
|
||||
|
||||
############################ SIGNAL HANDLERS ################################
|
||||
# This is imported to register the exception signal handling that logs exceptions
|
||||
import monitoring.exceptions # noqa
|
||||
@@ -398,6 +427,7 @@ MIDDLEWARE_CLASSES = (
|
||||
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
|
||||
|
||||
'django_comment_client.utils.ViewNameMiddleware',
|
||||
'codejail.django_integration.ConfigureCodeJailMiddleware',
|
||||
)
|
||||
|
||||
############################### Pipeline #######################################
|
||||
@@ -601,6 +631,7 @@ INSTALLED_APPS = (
|
||||
|
||||
# For testing
|
||||
'django.contrib.admin', # only used in DEBUG mode
|
||||
'debug',
|
||||
|
||||
# Discussion forums
|
||||
'django_comment_client',
|
||||
|
||||
19
lms/templates/debug/run_python_form.html
Normal file
19
lms/templates/debug/run_python_form.html
Normal file
@@ -0,0 +1,19 @@
|
||||
<html><body>
|
||||
<div>
|
||||
<p>Python:</p>
|
||||
<form method='post'>
|
||||
<input type="hidden" name="csrfmiddlewaretoken" value="${ csrf_token }">
|
||||
<div>
|
||||
<textarea name='code' rows='20' cols='80'>${code|h}</textarea>
|
||||
</div>
|
||||
<input type='submit' value='Run it!'/>
|
||||
</form>
|
||||
</div>
|
||||
%if results:
|
||||
<div>
|
||||
<p>Results:</p>
|
||||
<pre>
|
||||
${results|h}
|
||||
</pre>
|
||||
</div>
|
||||
%endif
|
||||
@@ -363,6 +363,11 @@ urlpatterns += (
|
||||
url(r'^comm/foldit_ops', 'foldit.views.foldit_ops', name="foldit_ops"),
|
||||
)
|
||||
|
||||
if settings.MITX_FEATURES.get('ENABLE_DEBUG_RUN_PYTHON'):
|
||||
urlpatterns += (
|
||||
url(r'^debug/run_python', 'debug.views.run_python'),
|
||||
)
|
||||
|
||||
urlpatterns = patterns(*urlpatterns)
|
||||
|
||||
if settings.DEBUG:
|
||||
|
||||
1
requirements/edx-sandbox/base.txt
Normal file
1
requirements/edx-sandbox/base.txt
Normal file
@@ -0,0 +1 @@
|
||||
numpy==1.6.2
|
||||
6
requirements/edx-sandbox/post.txt
Normal file
6
requirements/edx-sandbox/post.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
# Packages to install in the Python sandbox for secured execution.
|
||||
scipy==0.11.0
|
||||
lxml==3.0.1
|
||||
-e common/lib/calc
|
||||
-e common/lib/chem
|
||||
-e common/lib/sandbox-packages
|
||||
@@ -9,3 +9,4 @@
|
||||
|
||||
# Our libraries:
|
||||
-e git+https://github.com/edx/XBlock.git@483e0cb1#egg=XBlock
|
||||
-e git+https://github.com/edx/codejail.git@07494f1#egg=codejail
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
# Python libraries to install that are local to the mitx repo
|
||||
-e common/lib/calc
|
||||
-e common/lib/capa
|
||||
-e common/lib/chem
|
||||
-e common/lib/xmodule
|
||||
-e .
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
from django.core import management
|
||||
|
||||
import argparse
|
||||
import os
|
||||
@@ -42,21 +41,34 @@ def main(argv):
|
||||
test_py_path = find_full_path(test_py_path)
|
||||
test_spec = "%s:%s.%s" % (test_py_path, test_class, test_method)
|
||||
|
||||
settings = None
|
||||
if test_py_path.startswith('cms'):
|
||||
settings = 'cms.envs.test'
|
||||
elif test_py_path.startswith('lms'):
|
||||
settings = 'lms.envs.test'
|
||||
|
||||
if settings:
|
||||
# Run as a django test suite
|
||||
from django.core import management
|
||||
|
||||
django_args = ["django-admin.py", "test", "--pythonpath=."]
|
||||
django_args.append("--settings=%s" % settings)
|
||||
if args.nocapture:
|
||||
django_args.append("-s")
|
||||
django_args.append(test_spec)
|
||||
|
||||
print " ".join(django_args)
|
||||
management.execute_from_command_line(django_args)
|
||||
else:
|
||||
raise Exception("Couldn't determine settings to use!")
|
||||
# Run as a nose test suite
|
||||
import nose.core
|
||||
nose_args = ["nosetests"]
|
||||
if args.nocapture:
|
||||
nose_args.append("-s")
|
||||
nose_args.append(test_spec)
|
||||
print " ".join(nose_args)
|
||||
nose.core.main(argv=nose_args)
|
||||
|
||||
django_args = ["django-admin.py", "test", "--pythonpath=."]
|
||||
django_args.append("--settings=%s" % settings)
|
||||
if args.nocapture:
|
||||
django_args.append("-s")
|
||||
django_args.append(test_spec)
|
||||
|
||||
print " ".join(django_args)
|
||||
management.execute_from_command_line(django_args)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
||||
|
||||
Reference in New Issue
Block a user