diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index d8d1f6cfe1..aa4b1f18ad 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -21,7 +21,7 @@ from .xml_module import XmlDescriptor from xmodule.modulestore import Location import self_assessment_module import open_ended_module -from combined_open_ended_rubric import CombinedOpenEndedRubric +from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError from .stringify import stringify_children log = logging.getLogger("mitx.courseware") @@ -141,11 +141,11 @@ class CombinedOpenEndedModule(XModule): self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) rubric_renderer = CombinedOpenEndedRubric(system, True) - success, rubric_feedback = rubric_renderer.render_rubric(stringify_children(definition['rubric'])) - if not success: - error_message="Could not parse rubric : {0}".format(definition['rubric']) - log.exception(error_message) - raise Exception + try: + rubric_feedback = rubric_renderer.render_rubric(stringify_children(definition['rubric'])) + except RubricParsingError: + log.error("Failed to parse rubric in location: {0}".format(location)) + raise #Static data is passed to the child modules to render self.static_data = { 'max_score': self._max_score, diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py index bf6836bacc..4380e32d5b 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py @@ -3,6 +3,9 @@ from lxml import etree log=logging.getLogger(__name__) +class RubricParsingError(Exception): + pass + class CombinedOpenEndedRubric(object): def __init__ (self, system, view_only = False): @@ -10,35 +13,25 @@ class CombinedOpenEndedRubric(object): self.view_only = view_only 
self.system = system - ''' - render_rubric: takes in an xml string and outputs the corresponding - html for that xml, given the type of rubric we're generating - Input: - rubric_xml: an string that has not been parsed into xml that - represents this particular rubric - Output: - html: the html that corresponds to the xml given - ''' def render_rubric(self, rubric_xml): - success = False + ''' + render_rubric: takes in an xml string and outputs the corresponding + html for that xml, given the type of rubric we're generating + Input: + rubric_xml: an string that has not been parsed into xml that + represents this particular rubric + Output: + html: the html that corresponds to the xml given + ''' try: rubric_categories = self.extract_categories(rubric_xml) html = self.system.render_template('open_ended_rubric.html', {'categories' : rubric_categories, 'has_score': self.has_score, 'view_only': self.view_only}) - success = True except: - log.exception("Could not parse the rubric.") - try: - html = etree.tostring(rubric_xml, pretty_print=True) - except: - log.exception("Rubric XML is a string, not an XML object : {0}".format(rubric_xml)) - if isinstance(rubric_xml, basestring): - html = rubric_xml - else: - html = "Invalid rubric. Please contact course staff." 
- return success, html + raise RubricParsingError("[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml)) + return html def extract_categories(self, element): ''' @@ -57,7 +50,7 @@ class CombinedOpenEndedRubric(object): categories = [] for category in element: if category.tag != 'category': - raise Exception("[extract_categories] Expected a tag: got {0} instead".format(category.tag)) + raise RubricParsingError("[extract_categories] Expected a tag: got {0} instead".format(category.tag)) else: categories.append(self.extract_category(category)) return categories @@ -83,12 +76,12 @@ class CombinedOpenEndedRubric(object): self.has_score = True # if we are missing the score tag and we are expecting one elif self.has_score: - raise Exception("[extract_category] Category {0} is missing a score".format(descriptionxml.text)) + raise RubricParsingError("[extract_category] Category {0} is missing a score".format(descriptionxml.text)) # parse description if descriptionxml.tag != 'description': - raise Exception("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag)) + raise RubricParsingError("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag)) description = descriptionxml.text @@ -98,7 +91,7 @@ class CombinedOpenEndedRubric(object): # parse options for option in optionsxml: if option.tag != 'option': - raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag)) + raise RubricParsingError("[extract_category]: expected option tag, got {0} instead".format(option.tag)) else: pointstr = option.get("points") if pointstr: @@ -107,7 +100,7 @@ class CombinedOpenEndedRubric(object): try: points = int(pointstr) except ValueError: - raise Exception("[extract_category]: expected points to have int, got {0} instead".format(pointstr)) + raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead".format(pointstr)) elif 
autonumbering: # use the generated one if we're in the right mode points = cur_points @@ -132,12 +125,12 @@ class CombinedOpenEndedRubric(object): Validates a set of options. This can and should be extended to filter out other bad edge cases ''' if len(options) == 0: - raise Exception("[extract_category]: no options associated with this category") + raise RubricParsingError("[extract_category]: no options associated with this category") if len(options) == 1: return prev = options[0]['points'] for option in options[1:]: if prev == option['points']: - raise Exception("[extract_category]: found duplicate point values between two different options") + raise RubricParsingError("[extract_category]: found duplicate point values between two different options") else: prev = option['points'] diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py index f32e0b26ff..90b9e32cce 100644 --- a/common/lib/xmodule/xmodule/open_ended_module.py +++ b/common/lib/xmodule/xmodule/open_ended_module.py @@ -383,7 +383,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): feedback = self._convert_longform_feedback_to_html(response_items) if response_items['rubric_scores_complete']==True: rubric_renderer = CombinedOpenEndedRubric(system, True) - success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml']) + rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml']) if not response_items['success']: return system.render_template("open_ended_error.html", diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py index 73f580c0f5..fb1d306708 100644 --- a/common/lib/xmodule/xmodule/self_assessment_module.py +++ b/common/lib/xmodule/xmodule/self_assessment_module.py @@ -123,7 +123,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): return '' rubric_renderer = CombinedOpenEndedRubric(system, True) - success, rubric_html = 
rubric_renderer.render_rubric(self.rubric) + rubric_html = rubric_renderer.render_rubric(self.rubric) # we'll render it context = {'rubric': rubric_html, diff --git a/lms/djangoapps/open_ended_grading/grading_service.py b/lms/djangoapps/open_ended_grading/grading_service.py index e8af5f09f6..f65554a9d6 100644 --- a/lms/djangoapps/open_ended_grading/grading_service.py +++ b/lms/djangoapps/open_ended_grading/grading_service.py @@ -11,7 +11,7 @@ from django.http import HttpResponse, Http404 from courseware.access import has_access from util.json_request import expect_json from xmodule.course_module import CourseDescriptor -from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric +from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError from lxml import etree from mitxmako.shortcuts import render_to_string from xmodule.x_module import ModuleSystem @@ -106,26 +106,30 @@ class GradingService(object): def _render_rubric(self, response, view_only=False): """ Given an HTTP Response with the key 'rubric', render out the html - required to display the rubric + required to display the rubric and put it back into the response + + returns the updated response as a dictionary that can be serialized later + """ try: response_json = json.loads(response) - if response_json.has_key('rubric'): + if 'rubric' in response_json: rubric = response_json['rubric'] rubric_renderer = CombinedOpenEndedRubric(self.system, False) - success, rubric_html = rubric_renderer.render_rubric(rubric) - if not success: - error_message = "Could not render rubric: {0}".format(rubric) - log.exception(error_message) - return json.dumps({'success': False, - 'error': error_message}) + rubric_html = rubric_renderer.render_rubric(rubric) response_json['rubric'] = rubric_html - return json.dumps(response_json) + return response_json # if we can't parse the rubric into HTML, - except etree.XMLSyntaxError: + except (etree.XMLSyntaxError, RubricParsingError): 
log.exception("Cannot parse rubric string. Raw string: {0}" .format(rubric)) - return json.dumps({'success': False, - 'error': 'Error displaying submission'}) + return {'success': False, + 'error': 'Error displaying submission'} + except ValueError: + log.exception("Error parsing response: {0}".format(response)) + return {'success': False, + 'error': "Error displaying submission"} + + diff --git a/lms/djangoapps/open_ended_grading/peer_grading_service.py b/lms/djangoapps/open_ended_grading/peer_grading_service.py index b4f918397f..2e31dd02e0 100644 --- a/lms/djangoapps/open_ended_grading/peer_grading_service.py +++ b/lms/djangoapps/open_ended_grading/peer_grading_service.py @@ -95,7 +95,7 @@ class PeerGradingService(GradingService): def get_next_submission(self, problem_location, grader_id): response = self.get(self.get_next_submission_url, {'location': problem_location, 'grader_id': grader_id}) - return self._render_rubric(response) + return json.dumps(self._render_rubric(response)) def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores): data = {'grader_id' : grader_id, @@ -115,7 +115,7 @@ class PeerGradingService(GradingService): def show_calibration_essay(self, problem_location, grader_id): params = {'problem_id' : problem_location, 'student_id': grader_id} response = self.get(self.show_calibration_essay_url, params) - return self._render_rubric(response) + return json.dumps(self._render_rubric(response)) def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key, score, feedback, rubric_scores): diff --git a/lms/djangoapps/open_ended_grading/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py index 226bdbc389..4e776b688b 100644 --- a/lms/djangoapps/open_ended_grading/staff_grading_service.py +++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py @@ -112,7 +112,7 @@ class StaffGradingService(GradingService): response = 
self.get(self.get_next_url, params={'location': location, 'grader_id': grader_id}) - return self._render_rubric(response) + return json.dumps(self._render_rubric(response)) def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores): @@ -148,7 +148,6 @@ class StaffGradingService(GradingService): # importing this file doesn't create objects that may not have the right config _service = None -module_system = ModuleSystem("", None, None, render_to_string, None) def staff_grading_service(): """