diff --git a/common/lib/xmodule/open_ended_module.py b/common/lib/xmodule/open_ended_module.py
index 62310f1e18..0ce2007c21 100644
--- a/common/lib/xmodule/open_ended_module.py
+++ b/common/lib/xmodule/open_ended_module.py
@@ -15,6 +15,8 @@ from lxml.html import rewrite_links
from path import path
import os
import sys
+import hashlib
+import capa.xqueue_interface as xqueue_interface
from pkg_resources import resource_string
@@ -143,6 +145,375 @@ class OpenEndedModule():
self._parse(oeparam, prompt, rubric)
+ def _parse(self, oeparam, prompt, rubric):
+ '''
+ Parse OpenEndedResponse XML:
+ self.initial_display
+ self.payload - dict containing keys --
+ 'grader' : path to grader settings file, 'problem_id' : id of the problem
+
+ self.answer - What to display when show answer is clicked
+ '''
+ # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
+ prompt_string = self.stringify_children(prompt)
+ rubric_string = self.stringify_children(rubric)
+
+ grader_payload = oeparam.find('grader_payload')
+ grader_payload = grader_payload.text if grader_payload is not None else ''
+
+ #Update grader payload with student id. If grader payload not json, error.
+ try:
+ parsed_grader_payload = json.loads(grader_payload)
+ # NOTE: self.system.location is valid because the capa_module
+ # __init__ adds it (easiest way to get problem location into
+ # response types)
+ except TypeError, ValueError:
+ log.exception("Grader payload %r is not a json object!", grader_payload)
+
+ self.initial_display = find_with_default(oeparam, 'initial_display', '')
+ self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
+
+ parsed_grader_payload.update({
+ 'location' : self.system.location,
+ 'course_id' : self.system.course_id,
+ 'prompt' : prompt_string,
+ 'rubric' : rubric_string,
+ 'initial_display' : self.initial_display,
+ 'answer' : self.answer,
+ })
+ updated_grader_payload = json.dumps(parsed_grader_payload)
+
+ self.payload = {'grader_payload': updated_grader_payload}
+
+ try:
+ self.max_score = int(find_with_default(oeparam, 'max_score', 1))
+ except ValueError:
+ self.max_score = 1
+
+ def handle_message_post(self,get):
+ """
+ Handles a student message post (a reaction to the grade they received from an open ended grader type)
+ Returns a boolean success/fail and an error message
+ """
+
+ event_info = dict()
+ event_info['problem_id'] = self.location.url()
+ event_info['student_id'] = self.system.anonymous_student_id
+ event_info['survey_responses']= get
+
+ survey_responses=event_info['survey_responses']
+ for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
+ if tag not in survey_responses:
+ return False, "Could not find needed tag {0}".format(tag)
+ try:
+ submission_id=int(survey_responses['submission_id'])
+ grader_id = int(survey_responses['grader_id'])
+ feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
+ score = int(survey_responses['score'])
+ except:
+ error_message=("Could not parse submission id, grader id, "
+ "or feedback from message_post ajax call. Here is the message data: {0}".format(survey_responses))
+ log.exception(error_message)
+ return False, "There was an error saving your feedback. Please contact course staff."
+
+ qinterface = self.system.xqueue['interface']
+ qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
+ anonymous_student_id = self.system.anonymous_student_id
+ queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
+ anonymous_student_id +
+ self.answer_id)
+
+ xheader = xqueue_interface.make_xheader(
+ lms_callback_url=self.system.xqueue['callback_url'],
+ lms_key=queuekey,
+ queue_name=self.message_queue_name
+ )
+
+ student_info = {'anonymous_student_id': anonymous_student_id,
+ 'submission_time': qtime,
+ }
+ contents= {
+ 'feedback' : feedback,
+ 'submission_id' : submission_id,
+ 'grader_id' : grader_id,
+ 'score': score,
+ 'student_info' : json.dumps(student_info),
+ }
+
+ (error, msg) = qinterface.send_to_queue(header=xheader,
+ body=json.dumps(contents))
+
+ #Convert error to a success value
+ success=True
+ if error:
+ success=False
+
+ return success, "Successfully submitted your feedback."
+
+ def get_score(self, student_answers):
+
+ try:
+ submission = student_answers[self.answer_id]
+ except KeyError:
+ msg = ('Cannot get student answer for answer_id: {0}. student_answers {1}'
+ .format(self.answer_id, student_answers))
+ log.exception(msg)
+ raise LoncapaProblemError(msg)
+
+ # Prepare xqueue request
+ #------------------------------------------------------------
+
+ qinterface = self.system.xqueue['interface']
+ qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
+
+ anonymous_student_id = self.system.anonymous_student_id
+
+ # Generate header
+ queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
+ anonymous_student_id +
+ self.answer_id)
+
+ xheader = xqueue_interface.make_xheader(lms_callback_url=self.system.xqueue['callback_url'],
+ lms_key=queuekey,
+ queue_name=self.queue_name)
+
+ self.context.update({'submission': submission})
+
+ contents = self.payload.copy()
+
+ # Metadata related to the student submission revealed to the external grader
+ student_info = {'anonymous_student_id': anonymous_student_id,
+ 'submission_time': qtime,
+ }
+
+ #Update contents with student response and student info
+ contents.update({
+ 'student_info': json.dumps(student_info),
+ 'student_response': submission,
+ 'max_score' : self.max_score,
+ })
+
+ # Submit request. When successful, 'msg' is the prior length of the queue
+ (error, msg) = qinterface.send_to_queue(header=xheader,
+ body=json.dumps(contents))
+
+ # State associated with the queueing request
+ queuestate = {'key': queuekey,
+ 'time': qtime,}
+
+ cmap = CorrectMap()
+ if error:
+ cmap.set(self.answer_id, queuestate=None,
+ msg='Unable to deliver your submission to grader. (Reason: {0}.)'
+ ' Please try again later.'.format(msg))
+ else:
+ # Queueing mechanism flags:
+ # 1) Backend: Non-null CorrectMap['queuestate'] indicates that
+ # the problem has been queued
+ # 2) Frontend: correctness='incomplete' eventually trickles down
+ # through inputtypes.textbox and .filesubmission to inform the
+ # browser that the submission is queued (and it could e.g. poll)
+ cmap.set(self.answer_id, queuestate=queuestate,
+ correctness='incomplete', msg=msg)
+
+ return cmap
+
    def update_score(self, score_msg, oldcmap, queuekey):
        """
        Absorb a grader reply into the CorrectMap for this problem.

        Args:
            score_msg: raw message from the grader; parsed via
                self._parse_score_msg.
            oldcmap: the CorrectMap currently holding the queued state.
            queuekey: key identifying which queued submission this reply
                answers.

        Returns:
            The (possibly updated) CorrectMap.
        """
        log.debug(score_msg)
        score_msg = self._parse_score_msg(score_msg)
        # A reply that fails parsing is surfaced to the student as an error.
        if not score_msg.valid:
            oldcmap.set(self.answer_id,
                    msg = 'Invalid grader reply. Please contact the course staff.')
            return oldcmap

        correctness = 'correct' if score_msg.correct else 'incorrect'

        # TODO: Find out how this is used elsewhere, if any
        self.context['correct'] = correctness

        # Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
        # does not match, we keep waiting for the score_msg whose key actually matches
        if oldcmap.is_right_queuekey(self.answer_id, queuekey):
            # Sanity check on returned points: clamp negatives to zero.
            points = score_msg.points
            if points < 0:
                points = 0

            # Queuestate is consumed, so reset it to None
            # NOTE(review): .replace(' ', ' ') is a no-op as written --
            # possibly an '&nbsp;' literal was lost in transit; confirm the
            # intended whitespace substitution against the original source.
            oldcmap.set(self.answer_id, npoints=points, correctness=correctness,
                        msg = score_msg.msg.replace(' ', ' '), queuestate=None)
        else:
            log.debug('OpenEndedResponse: queuekey {0} does not match for answer_id={1}.'.format(
                queuekey, self.answer_id))

        return oldcmap
+
+
+ def get_answers(self):
+ anshtml = '
'.format(self.answer)
+ return {self.answer_id: anshtml}
+
+ def get_initial_display(self):
+ return {self.answer_id: self.initial_display}
+
+ def _convert_longform_feedback_to_html(self, response_items):
+ """
+ Take in a dictionary, and return html strings for display to student.
+ Input:
+ response_items: Dictionary with keys success, feedback.
+ if success is True, feedback should be a dictionary, with keys for
+ types of feedback, and the corresponding feedback values.
+ if success is False, feedback is actually an error string.
+
+ NOTE: this will need to change when we integrate peer grading, because
+ that will have more complex feedback.
+
+ Output:
+ String -- html that can be displayed to the student.
+ """
+
+ # We want to display available feedback in a particular order.
+ # This dictionary specifies which goes first--lower first.
+ priorities = {# These go at the start of the feedback
+ 'spelling': 0,
+ 'grammar': 1,
+ # needs to be after all the other feedback
+ 'markup_text': 3}
+
+ default_priority = 2
+
+ def get_priority(elt):
+ """
+ Args:
+ elt: a tuple of feedback-type, feedback
+ Returns:
+ the priority for this feedback type
+ """
+ return priorities.get(elt[0], default_priority)
+
+ def encode_values(feedback_type,value):
+ feedback_type=str(feedback_type).encode('ascii', 'ignore')
+ if not isinstance(value,basestring):
+ value=str(value)
+ value=value.encode('ascii', 'ignore')
+ return feedback_type,value
+
+ def format_feedback(feedback_type, value):
+ feedback_type,value=encode_values(feedback_type,value)
+ feedback= """
+ {0}