diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py
index aec330cb49..957b978407 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_module.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -218,11 +218,19 @@ class CombinedOpenEndedModule(XModule):
         last_post_assessment = task.latest_post_assessment(short_feedback=False)
         last_post_evaluation = task.format_feedback_with_evaluation(last_post_assessment)
         last_post_assessment = last_post_evaluation
+        last_correctness = task.is_last_response_correct()
         max_score = task.max_score()
         state = task.state
-        last_response_dict={'response' : last_response, 'score' : last_score,
-            'post_assessment' : last_post_assessment,
-            'type' : task_type, 'max_score' : max_score, 'state' : state, 'human_state' : task.HUMAN_NAMES[state]}
+        last_response_dict={
+            'response' : last_response,
+            'score' : last_score,
+            'post_assessment' : last_post_assessment,
+            'type' : task_type,
+            'max_score' : max_score,
+            'state' : state,
+            'human_state' : task.HUMAN_NAMES[state],
+            'correct' : last_correctness
+        }
         return last_response_dict
 
 
@@ -252,7 +260,7 @@ class CombinedOpenEndedModule(XModule):
         task_number=int(get['task_number'])
         self.update_task_states()
         response_dict=self.get_last_response(task_number)
-        context = {'results' : response_dict['post_assessment']}
+        context = {'results' : response_dict['post_assessment'], 'task_number' : task_number+1}
         html = render_to_string('combined_open_ended_results.html', context)
         return {'html' : html, 'success' : True}
 
diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py
index ba1c68511f..9cdbdd54e2 100644
--- a/common/lib/xmodule/xmodule/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_module.py
@@ -397,13 +397,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         short_feedback = self._convert_longform_feedback_to_html(json.loads(self.history[-1].get('post_assessment', "")))
         return short_feedback if feedback_dict['valid'] else ''
 
-    def is_submission_correct(self, score):
-        correct=False
-        if(isinstance(score,(int, long, float, complex))):
-            score_ratio = int(score) / float(self.max_score())
-            correct = (score_ratio >= 0.66)
-        return correct
-
     def format_feedback_with_evaluation(self,feedback):
         context={'msg' : feedback, 'id' : "1", 'rows' : 50, 'cols' : 50}
         html= render_to_string('open_ended_evaluation.html', context)
diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py
index 236bd03c4c..73bd8f3957 100644
--- a/common/lib/xmodule/xmodule/openendedchild.py
+++ b/common/lib/xmodule/xmodule/openendedchild.py
@@ -250,5 +250,16 @@ class OpenEndedChild():
     def handle_ajax(self):
         pass
 
+    def is_submission_correct(self, score):
+        correct=False
+        if(isinstance(score,(int, long, float, complex))):
+            score_ratio = int(score) / float(self.max_score())
+            correct = (score_ratio >= 0.66)
+        return correct
+
+    def is_last_response_correct(self):
+        score=self.get_score()
+        return self.is_submission_correct(score)
+
 
 
diff --git a/lms/templates/combined_open_ended.html b/lms/templates/combined_open_ended.html
index 4c0aaa1042..71c22085e3 100644
--- a/lms/templates/combined_open_ended.html
+++ b/lms/templates/combined_open_ended.html
@@ -17,7 +17,6 @@