diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index a88acc6ffd..5c8a88d9f7 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -22,8 +22,6 @@ from xmodule.modulestore import Location import self_assessment_module import open_ended_module -from mitxmako.shortcuts import render_to_string - log = logging.getLogger("mitx.courseware") # Set the default number of max attempts. Should be 1 for production @@ -319,7 +317,7 @@ class CombinedOpenEndedModule(XModule): Output: HTML rendered directly via Mako """ context = self.get_context() - html = render_to_string('combined_open_ended.html', context) + html = self.system.render_template('combined_open_ended.html', context) return html def get_html_base(self): @@ -369,17 +367,17 @@ class CombinedOpenEndedModule(XModule): self.static_data, instance_state=task_state) last_response = task.latest_answer() last_score = task.latest_score() - last_post_assessment = task.latest_post_assessment() + last_post_assessment = task.latest_post_assessment(self.system) last_post_feedback = "" if task_type == "openended": - last_post_assessment = task.latest_post_assessment(short_feedback=False, join_feedback=False) + last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False) if isinstance(last_post_assessment, list): eval_list = [] for i in xrange(0, len(last_post_assessment)): - eval_list.append(task.format_feedback_with_evaluation(last_post_assessment[i])) + eval_list.append(task.format_feedback_with_evaluation(self.system, last_post_assessment[i])) last_post_evaluation = "".join(eval_list) else: - last_post_evaluation = task.format_feedback_with_evaluation(last_post_assessment) + last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment) last_post_assessment = last_post_evaluation 
last_correctness = task.is_last_response_correct() max_score = task.max_score() @@ -442,7 +440,7 @@ class CombinedOpenEndedModule(XModule): self.update_task_states() response_dict = self.get_last_response(task_number) context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1} - html = render_to_string('combined_open_ended_results.html', context) + html = self.system.render_template('combined_open_ended_results.html', context) return {'html': html, 'success': True} def handle_ajax(self, dispatch, get): diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py index 0b2ca1ca2c..e4daf11f1d 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py @@ -1,16 +1,15 @@ -from mitxmako.shortcuts import render_to_string import logging from lxml import etree log=logging.getLogger(__name__) -class CombinedOpenEndedRubric: +class CombinedOpenEndedRubric(object): @staticmethod - def render_rubric(rubric_xml): + def render_rubric(rubric_xml, system): try: rubric_categories = CombinedOpenEndedRubric.extract_rubric_categories(rubric_xml) - html = render_to_string('open_ended_rubric.html', {'rubric_categories' : rubric_categories}) + html = system.render_template('open_ended_rubric.html', {'rubric_categories' : rubric_categories}) except: log.exception("Could not parse the rubric.") html = rubric_xml @@ -64,7 +63,7 @@ class CombinedOpenEndedRubric: if has_score: if scorexml.tag != 'score': - raise Exception("[extract_category]: expected score tag, got {0} instead".format(scorexml.tag)) + raise Exception("[extract_category]: expected score tag, got {0} instead".format(scorexml.tag)) for option in optionsxml: if option.tag != "option": diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py index 11f96c9848..0eaad34bad 100644 --- 
a/common/lib/xmodule/xmodule/open_ended_module.py +++ b/common/lib/xmodule/xmodule/open_ended_module.py @@ -30,7 +30,6 @@ from xmodule.modulestore import Location from capa.util import * import openendedchild -from mitxmako.shortcuts import render_to_string from numpy import median from datetime import datetime @@ -256,7 +255,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @param system: Modulesystem @return: Boolean True (not useful currently) """ - new_score_msg = self._parse_score_msg(score_msg) + new_score_msg = self._parse_score_msg(score_msg, system) if not new_score_msg['valid']: score_msg['feedback'] = 'Invalid grader reply. Please contact the course staff.' @@ -370,7 +369,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): return u"\n".join([feedback_list_part1, feedback_list_part2]) - def _format_feedback(self, response_items): + def _format_feedback(self, response_items, system): """ Input: Dictionary called feedback. Must contain keys seen below. @@ -382,13 +381,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): rubric_feedback="" feedback = self._convert_longform_feedback_to_html(response_items) if response_items['rubric_scores_complete']==True: - rubric_feedback = CombinedOpenEndedRubric.render_rubric(response_items['rubric_xml']) + rubric_feedback = CombinedOpenEndedRubric.render_rubric(response_items['rubric_xml'], system) if not response_items['success']: return system.render_template("open_ended_error.html", {'errors': feedback}) - feedback_template = render_to_string("open_ended_feedback.html", { + feedback_template = system.render_template("open_ended_feedback.html", { 'grader_type': response_items['grader_type'], 'score': "{0} / {1}".format(response_items['score'], self.max_score()), 'feedback': feedback, @@ -398,7 +397,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): return feedback_template - def _parse_score_msg(self, score_msg, join_feedback=True): + def _parse_score_msg(self, score_msg, system, 
join_feedback=True): """ Grader reply is a JSON-dump of the following dict { 'correct': True/False, @@ -450,7 +449,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'rubric_scores_complete' : score_result['rubric_scores_complete'], 'rubric_xml' : score_result['rubric_xml'], } - feedback_items.append(self._format_feedback(new_score_result)) + feedback_items.append(self._format_feedback(new_score_result, system)) if join_feedback: feedback = "".join(feedback_items) else: @@ -458,7 +457,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): score = int(median(score_result['score'])) else: #This is for instructor and ML grading - feedback = self._format_feedback(score_result) + feedback = self._format_feedback(score_result, system) score = score_result['score'] self.submission_id = score_result['submission_id'] @@ -466,7 +465,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): return {'valid': True, 'score': score, 'feedback': feedback} - def latest_post_assessment(self, short_feedback=False, join_feedback=True): + def latest_post_assessment(self, system, short_feedback=False, join_feedback=True): """ Gets the latest feedback, parses, and returns @param short_feedback: If the long feedback is wanted or not @@ -475,7 +474,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): if not self.history: return "" - feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), join_feedback=join_feedback) + feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system, join_feedback=join_feedback) if not short_feedback: return feedback_dict['feedback'] if feedback_dict['valid'] else '' if feedback_dict['valid']: @@ -483,14 +482,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild): json.loads(self.history[-1].get('post_assessment', ""))) return short_feedback if feedback_dict['valid'] else '' - def format_feedback_with_evaluation(self, feedback): + def format_feedback_with_evaluation(self, system, 
feedback): """ Renders a given html feedback into an evaluation template @param feedback: HTML feedback @return: Rendered html """ context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50} - html = render_to_string('open_ended_evaluation.html', context) + html = system.render_template('open_ended_evaluation.html', context) return html def handle_ajax(self, dispatch, get, system): @@ -582,7 +581,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): if self.state != self.INITIAL: latest = self.latest_answer() previous_answer = latest if latest is not None else self.initial_display - post_assessment = self.latest_post_assessment() + post_assessment = self.latest_post_assessment(system) score = self.latest_score() correct = 'correct' if self.is_submission_correct(score) else 'incorrect' else: diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py index 2ba9528237..88fed61c6d 100644 --- a/common/lib/xmodule/xmodule/openendedchild.py +++ b/common/lib/xmodule/xmodule/openendedchild.py @@ -35,7 +35,7 @@ MAX_ATTEMPTS = 1 # Overriden by max_score specified in xml. 
MAX_SCORE = 1 -class OpenEndedChild(): +class OpenEndedChild(object): """ States: @@ -123,7 +123,7 @@ class OpenEndedChild(): return None return self.history[-1].get('score') - def latest_post_assessment(self): + def latest_post_assessment(self, system): """None if not available""" if not self.history: return "" diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py index 940b61c557..3d88cb95f6 100644 --- a/common/lib/xmodule/xmodule/self_assessment_module.py +++ b/common/lib/xmodule/xmodule/self_assessment_module.py @@ -122,7 +122,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): if self.state == self.INITIAL: return '' - rubric_html = CombinedOpenEndedRubric.render_rubric(self.rubric) + rubric_html = CombinedOpenEndedRubric.render_rubric(self.rubric, system) # we'll render it context = {'rubric': rubric_html, @@ -147,7 +147,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): if self.state == self.DONE: # display the previous hint - latest = self.latest_post_assessment() + latest = self.latest_post_assessment(system) hint = latest if latest is not None else '' else: hint = ''