diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py
index 71c9ca41c7..3d68d2d604 100644
--- a/common/lib/capa/capa/capa_problem.py
+++ b/common/lib/capa/capa/capa_problem.py
@@ -39,7 +39,7 @@ import responsetypes
 
 # dict of tagname, Response Class -- this should come from auto-registering
 response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
-entry_types = ['textline', 'schematic', 'textbox', 'imageinput', 'optioninput', 'choicegroup', 'radiogroup', 'checkboxgroup', 'filesubmission']
+entry_types = ['textline', 'schematic', 'textbox', 'imageinput', 'optioninput', 'choicegroup', 'radiogroup', 'checkboxgroup', 'filesubmission', 'javascriptinput']
 solution_types = ['solution']  # extra things displayed after "show answers" is pressed
 response_properties = ["responseparam", "answer"]  # these get captured as student responses
@@ -154,21 +154,10 @@ class LoncapaProblem(object):
     def get_max_score(self):
         '''
         Return maximum score for this problem.
-        We do this by counting the number of answers available for each question
-        in the problem. If the Response for a question has a get_max_score() method
-        then we call that and add its return value to the count. That can be
-        used to give complex problems (eg programming questions) multiple points.
         '''
         maxscore = 0
         for response, responder in self.responders.iteritems():
-            if hasattr(responder, 'get_max_score'):
-                try:
-                    maxscore += responder.get_max_score()
-                except Exception:
-                    log.debug('responder %s failed to properly return from get_max_score()' % responder)  # FIXME
-                    raise
-            else:
-                maxscore += len(self.responder_answers[response])
+            maxscore += responder.get_max_score()
         return maxscore
 
     def get_score(self):
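[Reviewer note, not part of the patch: after this change,
LoncapaProblem.get_max_score() assumes every responder implements
get_max_score(). The base-class implementation added in responsetypes.py
below makes that safe by summing the per-inputfield 'points' attributes.
A sketch of the effective contract, with illustrative input ids, for a
response whose two input fields carry points="2" and points="3":

    # self.maxpoints == {'input_2_1': 2, 'input_2_2': 3}
    # responder.get_max_score() -> sum(self.maxpoints.values()) == 5
    # an input field with no 'points' attribute contributes the default of 1
]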
+ if value == "": + value = 'null' + + escapedict = {'"': '"'} + value = saxutils.escape(value, escapedict) + msg = saxutils.escape(msg, escapedict) + context = {'id': eid, 'params': params, 'display_file': display_file, + 'display_class': display_class, 'problem_state': problem_state, + 'value': value, 'evaluation': msg, + } + html = render_template("javascriptinput.html", context) + return etree.XML(html) + + @register_render_function def textline(element, value, status, render_template, msg=""): diff --git a/common/lib/capa/capa/javascript_problem_generator.js b/common/lib/capa/capa/javascript_problem_generator.js new file mode 100644 index 0000000000..8c8d39b19f --- /dev/null +++ b/common/lib/capa/capa/javascript_problem_generator.js @@ -0,0 +1,30 @@ +require('coffee-script'); +var importAll = function (modulePath) { + module = require(modulePath); + for(key in module){ + global[key] = module[key]; + } +} + +importAll("mersenne-twister-min"); +importAll("xproblem"); + +generatorModulePath = process.argv[2]; +dependencies = JSON.parse(process.argv[3]); +seed = process.argv[4]; +params = JSON.parse(process.argv[5]); + +if(seed==null){ + seed = 4; +}else{ + seed = parseInt(seed); +} + +for(var i = 0; i < dependencies.length; i++){ + importAll(dependencies[i]); +} + +generatorModule = require(generatorModulePath); +generatorClass = generatorModule.generatorClass; +generator = new generatorClass(seed, params); +console.log(JSON.stringify(generator.generate())); diff --git a/common/lib/capa/capa/javascript_problem_grader.js b/common/lib/capa/capa/javascript_problem_grader.js new file mode 100644 index 0000000000..4f42466167 --- /dev/null +++ b/common/lib/capa/capa/javascript_problem_grader.js @@ -0,0 +1,26 @@ +require('coffee-script'); +var importAll = function (modulePath) { + module = require(modulePath); + for(key in module){ + global[key] = module[key]; + } +} + +importAll("xproblem"); + +graderModulePath = process.argv[2]; +dependencies = JSON.parse(process.argv[3]); +submission = JSON.parse(process.argv[4]); +problemState = JSON.parse(process.argv[5]); +params = JSON.parse(process.argv[6]); + +for(var i = 0; i < dependencies.length; i++){ + importAll(dependencies[i]); +} + +graderModule = require(graderModulePath); +graderClass = graderModule.graderClass; +grader = new graderClass(submission, problemState, params); +console.log(JSON.stringify(grader.grade())); +console.log(JSON.stringify(grader.evaluation)); +console.log(JSON.stringify(grader.solution)); diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index 25b99fc00a..4c6627b352 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -17,7 +17,11 @@ import random import re import requests import traceback +import hashlib import abc +import os +import subprocess +import xml.sax.saxutils as saxutils # specific library imports from calc import evaluator, UndefinedVariable @@ -71,7 +75,6 @@ class LoncapaResponse(object): In addition, these methods are optional: - - get_max_score : if defined, this is called to obtain the maximum score possible for this question - setup_response : find and note the answer input field IDs for the response; called by __init__ - check_hint_condition : check to see if the student's answers satisfy a particular condition for a hint to be displayed - render_html : render this Response as HTML (must return XHTML compliant string) @@ -130,6 +133,11 @@ class LoncapaResponse(object): if self.max_inputfields == 1: self.answer_id = 
diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index 25b99fc00a..4c6627b352 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -17,7 +17,11 @@ import random
 import re
 import requests
 import traceback
+import hashlib
 import abc
+import os
+import subprocess
+import xml.sax.saxutils as saxutils
 
 # specific library imports
 from calc import evaluator, UndefinedVariable
@@ -71,7 +75,6 @@ class LoncapaResponse(object):
 
     In addition, these methods are optional:
 
-      - get_max_score : if defined, this is called to obtain the maximum score possible for this question
       - setup_response : find and note the answer input field IDs for the response; called by __init__
       - check_hint_condition : check to see if the student's answers satisfy a particular condition for a hint to be displayed
       - render_html : render this Response as HTML (must return XHTML compliant string)
@@ -130,6 +133,11 @@ class LoncapaResponse(object):
         if self.max_inputfields == 1:
             self.answer_id = self.answer_ids[0]  # for convenience
 
+        self.maxpoints = dict()
+        for inputfield in self.inputfields:
+            maxpoints = inputfield.get('points', '1')  # by default, each answer field is worth 1 point
+            self.maxpoints.update({inputfield.get('id'): int(maxpoints)})
+
         self.default_answer_map = {}  # dict for default answer map (provided in input elements)
         for entry in self.inputfields:
             answer = entry.get('correct_answer')
@@ -139,6 +147,12 @@ class LoncapaResponse(object):
 
         if hasattr(self, 'setup_response'):
             self.setup_response()
 
+    def get_max_score(self):
+        '''
+        Return the total maximum points of all answer fields under this Response.
+        '''
+        return sum(self.maxpoints.values())
+
     def render_html(self, renderer):
         '''
         Return XHTML Element tree representation of this Response.
@@ -272,9 +286,190 @@ class LoncapaResponse(object):
 
 #-----------------------------------------------------------------------------
 
+
+class JavascriptResponse(LoncapaResponse):
+    '''
+    This response type is used when the student's answer is graded via
+    Javascript using Node.js.
+    '''
+
+    response_tag = 'javascriptresponse'
+    max_inputfields = 1
+    allowed_inputfields = ['javascriptinput']
+
+    def setup_response(self):
+
+        # Sets up generator, grader, display, and their dependencies.
+        self.parse_xml()
+
+        self.compile_display_javascript()
+
+        self.params = self.extract_params()
+
+        if self.generator:
+            self.problem_state = self.generate_problem_state()
+        else:
+            self.problem_state = None
+
+        self.solution = None
+
+        self.prepare_inputfield()
+
+    def compile_display_javascript(self):
+
+        latestTimestamp = 0
+        basepath = self.system.filestore.root_path + '/js/'
+        for filename in (self.display_dependencies + [self.display]):
+            filepath = basepath + filename
+            timestamp = os.stat(filepath).st_mtime
+            if timestamp > latestTimestamp:
+                latestTimestamp = timestamp
+
+        h = hashlib.md5()
+        h.update(self.answer_id + str(self.display_dependencies))
+        compiled_filename = 'compiled/' + h.hexdigest() + '.js'
+        compiled_filepath = basepath + compiled_filename
+
+        if not os.path.exists(compiled_filepath) or os.stat(compiled_filepath).st_mtime < latestTimestamp:
+            outfile = open(compiled_filepath, 'w')
+            for filename in (self.display_dependencies + [self.display]):
+                filepath = basepath + filename
+                infile = open(filepath, 'r')
+                outfile.write(infile.read())
+                outfile.write(';\n')
+                infile.close()
+            outfile.close()
+
+        self.display_filename = compiled_filename
+
+    def parse_xml(self):
+        self.generator_xml = self.xml.xpath('//*[@id=$id]//generator',
+                                            id=self.xml.get('id'))[0]
+
+        self.grader_xml = self.xml.xpath('//*[@id=$id]//grader',
+                                         id=self.xml.get('id'))[0]
+
+        self.display_xml = self.xml.xpath('//*[@id=$id]//display',
+                                          id=self.xml.get('id'))[0]
+
+        self.xml.remove(self.generator_xml)
+        self.xml.remove(self.grader_xml)
+        self.xml.remove(self.display_xml)
+
+        self.generator = self.generator_xml.get("src")
+        self.grader = self.grader_xml.get("src")
+        self.display = self.display_xml.get("src")
+
+        if self.generator_xml.get("dependencies"):
+            self.generator_dependencies = self.generator_xml.get("dependencies").split()
+        else:
+            self.generator_dependencies = []
+
+        if self.grader_xml.get("dependencies"):
+            self.grader_dependencies = self.grader_xml.get("dependencies").split()
+        else:
+            self.grader_dependencies = []
+
+        if self.display_xml.get("dependencies"):
+            self.display_dependencies = self.display_xml.get("dependencies").split()
+        else:
+            self.display_dependencies = []
+
+        self.display_class = self.display_xml.get("class")
+
+    def get_node_env(self):
+
+        js_dir = os.path.join(self.system.filestore.root_path, 'js')
+        tmp_env = os.environ.copy()
+        node_path = self.system.node_path + ":" + os.path.normpath(js_dir)
+        tmp_env["NODE_PATH"] = node_path
+        return tmp_env
+
+    def generate_problem_state(self):
+
+        generator_file = os.path.dirname(os.path.normpath(__file__)) + '/javascript_problem_generator.js'
+        output = subprocess.check_output(["node",
+                                          generator_file,
+                                          self.generator,
+                                          json.dumps(self.generator_dependencies),
+                                          json.dumps(str(self.system.seed)),
+                                          json.dumps(self.params)
+                                          ],
+                                         env=self.get_node_env()).strip()
+
+        return json.loads(output)
+
+    def extract_params(self):
+
+        params = {}
+
+        for param in self.xml.xpath('//*[@id=$id]//responseparam',
+                                    id=self.xml.get('id')):
+
+            params[param.get("name")] = json.loads(param.get("value"))
+
+        return params
+
+    def prepare_inputfield(self):
+
+        for inputfield in self.xml.xpath('//*[@id=$id]//javascriptinput',
+                                         id=self.xml.get('id')):
+
+            escapedict = {'"': '&quot;'}
+
+            encoded_params = json.dumps(self.params)
+            encoded_params = saxutils.escape(encoded_params, escapedict)
+            inputfield.set("params", encoded_params)
+
+            encoded_problem_state = json.dumps(self.problem_state)
+            encoded_problem_state = saxutils.escape(encoded_problem_state,
+                                                    escapedict)
+            inputfield.set("problem_state", encoded_problem_state)
+
+            inputfield.set("display_file", self.display_filename)
+            inputfield.set("display_class", self.display_class)
+
+    def get_score(self, student_answers):
+        json_submission = student_answers[self.answer_id]
+        (all_correct, evaluation, solution) = self.run_grader(json_submission)
+        self.solution = solution
+        correctness = 'correct' if all_correct else 'incorrect'
+        return CorrectMap(self.answer_id, correctness, msg=evaluation)
+
+    def run_grader(self, submission):
+        if submission is None or submission == '':
+            submission = json.dumps(None)
+
+        grader_file = os.path.dirname(os.path.normpath(__file__)) + '/javascript_problem_grader.js'
+        outputs = subprocess.check_output(["node",
+                                           grader_file,
+                                           self.grader,
+                                           json.dumps(self.grader_dependencies),
+                                           submission,
+                                           json.dumps(self.problem_state),
+                                           json.dumps(self.params)
+                                           ],
+                                          env=self.get_node_env()).split('\n')
+
+        all_correct = json.loads(outputs[0].strip())
+        evaluation = outputs[1].strip()
+        solution = outputs[2].strip()
+        return (all_correct, evaluation, solution)
+
+    def get_answers(self):
+        if self.solution is None:
+            (_, _, self.solution) = self.run_grader(None)
+
+        return {self.answer_id: self.solution}
+
+
+#-----------------------------------------------------------------------------
+
 class ChoiceResponse(LoncapaResponse):
     '''
-    This Response type is used when the student chooses from a discrete set of
+    This response type is used when the student chooses from a discrete set of
     choices. Currently, to be marked correct, all "correct" choices must be
     supplied by the student, and no extraneous choices may be included.
@@ -313,6 +508,11 @@ class ChoiceResponse(LoncapaResponse):
     In the above example, radiogroup can be replaced with checkboxgroup to allow
     the student to select more than one choice.
 
+    TODO: In order for the inputtypes to render properly, this response type
+    must run setup_response prior to the input type rendering. Specifically, the
+    choices must be given names. This behavior seems like a leaky abstraction,
+    and it'd be nice to change this at some point.
+
     '''
 
     response_tag = 'choiceresponse'
@@ -877,7 +1077,10 @@ class CodeResponse(LoncapaResponse):
                    (err, self.answer_id, convert_files_to_filenames(student_answers)))
             raise Exception(err)
 
-        self.context.update({'submission': unicode(submission)})
+        if is_file(submission):
+            self.context.update({'submission': submission.name})
+        else:
+            self.context.update({'submission': submission})
 
         # Prepare xqueue request
         #------------------------------------------------------------
@@ -924,21 +1127,24 @@ class CodeResponse(LoncapaResponse):
 
     def update_score(self, score_msg, oldcmap, queuekey):
 
-        (valid_score_msg, correct, score, msg) = self._parse_score_msg(score_msg)
+        (valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)
         if not valid_score_msg:
             oldcmap.set(self.answer_id, msg='Error: Invalid grader reply.')
             return oldcmap
 
-        correctness = 'incorrect'
-        if correct:
-            correctness = 'correct'
+        correctness = 'correct' if correct else 'incorrect'
 
         self.context['correct'] = correctness  # TODO: Find out how this is used elsewhere, if any
 
         # Replace 'oldcmap' with new grading results if queuekey matches.
         # If queuekey does not match, we keep waiting for the score_msg whose key actually matches
         if oldcmap.is_right_queuekey(self.answer_id, queuekey):
-            oldcmap.set(self.answer_id, correctness=correctness, msg=msg.replace('&nbsp;', '&#160;'), queuekey=None)  # Queuekey is consumed
+            # Sanity check on returned points
+            if points < 0:
+                points = 0
+            elif points > self.maxpoints[self.answer_id]:
+                points = self.maxpoints[self.answer_id]
+            oldcmap.set(self.answer_id, npoints=points, correctness=correctness, msg=msg.replace('&nbsp;', '&#160;'), queuekey=None)  # Queuekey is consumed
         else:
             log.debug('CodeResponse: queuekey %s does not match for answer_id=%s.' % (queuekey, self.answer_id))
@@ -1361,4 +1567,4 @@ class ImageResponse(LoncapaResponse):
 
 # TEMPORARY: List of all response subclasses
 # FIXME: To be replaced by auto-registration
-__all__ = [CodeResponse, NumericalResponse, FormulaResponse, CustomResponse, SchematicResponse, ExternalResponse, ImageResponse, OptionResponse, SymbolicResponse, StringResponse, ChoiceResponse, MultipleChoiceResponse, TrueFalseResponse]
+__all__ = [CodeResponse, NumericalResponse, FormulaResponse, CustomResponse, SchematicResponse, ExternalResponse, ImageResponse, OptionResponse, SymbolicResponse, StringResponse, ChoiceResponse, MultipleChoiceResponse, TrueFalseResponse, JavascriptResponse]
diff --git a/common/lib/capa/capa/templates/javascriptinput.html b/common/lib/capa/capa/templates/javascriptinput.html
new file mode 100644
index 0000000000..bafcb23cc7
--- /dev/null
+++ b/common/lib/capa/capa/templates/javascriptinput.html
@@ -0,0 +1,10 @@
+[10 lines of Mako markup lost in extraction; judging from the render context
+ built in inputtypes.py, it renders a hidden input carrying ${value} plus
+ data attributes for ${params}, ${problem_state}, ${display_file},
+ ${display_class}, and ${evaluation}]
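[Reviewer note, not part of the patch: pieced together from parse_xml(),
extract_params(), and the maxpoints handling above, problem XML for this
response type would look roughly like the following; file and class names
are hypothetical:

    <javascriptresponse>
      <generator src="my_generator.js" dependencies="my_helpers.js"/>
      <grader src="my_grader.js" dependencies="my_helpers.js"/>
      <display class="MyProblemDisplay" src="my_display.js"/>
      <responseparam name="difficulty" value="3"/>
      <javascriptinput points="2"/>
    </javascriptresponse>

Generator and grader modules are resolved via NODE_PATH, which includes the
course filestore's js/ directory, and responseparam values must be valid
JSON since extract_params() runs them through json.loads().]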
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index f12b1c2be4..46e02542c8 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -464,7 +464,7 @@ class CapaModule(XModule):
                 return {'success': msg}
             log.exception("Error in capa_module problem checking")
             raise Exception("error in capa_module")
-        
+
         self.attempts = self.attempts + 1
         self.lcp.done = True
diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
index ae589b8b04..6bdd8e1c36 100644
--- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
@@ -1,4 +1,5 @@
 class @Problem
+
   constructor: (element) ->
     @el = $(element).find('.problems-wrapper')
     @id = @el.data('problem-id')
@@ -52,24 +53,71 @@ class @Problem
   render: (content) ->
     if content
       @el.html(content)
-      @bind()
-      @queueing()
+      @executeProblemScripts () =>
+        @setupInputTypes()
+        @bind()
+        @queueing()
     else
       $.postWithPrefix "#{@url}/problem_get", (response) =>
         @el.html(response.html)
-        @executeProblemScripts()
-        @bind()
-        @queueing()
+        @executeProblemScripts () =>
+          @setupInputTypes()
+          @bind()
+          @queueing()
 
-  executeProblemScripts: ->
-    @el.find(".script_placeholder").each (index, placeholder) ->
-      s = $("