From 091f34668b86caf0b9f2ce95d7234a56ff3901bf Mon Sep 17 00:00:00 2001
From: Vik Paruchuri
Date: Thu, 7 Feb 2013 16:40:25 -0500
Subject: [PATCH] Support centralized results

---
 common/lib/logsettings.py                    |  5 ++
 .../xmodule/combined_open_ended_modulev1.py  | 19 +++++++-
 .../xmodule/combined_open_ended_rubric.py    | 48 +++++++++++++++++++
 .../lib/xmodule/xmodule/open_ended_module.py |  5 +-
 4 files changed, 74 insertions(+), 3 deletions(-)

diff --git a/common/lib/logsettings.py b/common/lib/logsettings.py
index aa9dc6226f..2ed20a0bad 100644
--- a/common/lib/logsettings.py
+++ b/common/lib/logsettings.py
@@ -108,6 +108,11 @@ def get_logger_config(log_dir,
             'propagate': False,
             'level':'DEBUG',
         },
+        'pipeline.compilers' : {
+            'handlers': ['null'],
+            'propagate': False,
+            'level':'DEBUG',
+        },
         '': {
             'handlers': handlers,
             'level': 'DEBUG',
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py
index c854be3cd9..9989fb8614 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py
@@ -441,9 +441,12 @@ class CombinedOpenEndedV1Module():
             else:
                 last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment)
                 last_post_assessment = last_post_evaluation
-            rubric_scores = task._parse_score_msg(task.history[-1].get('post_assessment', ""), self.system)['rubric_scores']
+            rubric_data = task._parse_score_msg(task.history[-1].get('post_assessment', ""), self.system)
+            rubric_scores = rubric_data['rubric_scores']
+            grader_types = rubric_data['grader_types']
         elif task_type== "selfassessment":
             rubric_scores = last_post_assessment
+            grader_types = ['SA']
             last_post_assessment = ""
         last_correctness = task.is_last_response_correct()
         max_score = task.max_score()
@@ -471,7 +474,6 @@ class CombinedOpenEndedV1Module():
             'max_score_to_attempt': max_score_to_attempt,
             'rubric_scores' : rubric_scores,
         }
-        log.debug(last_response_dict)
         return last_response_dict

     def update_task_states(self):
@@ -507,6 +509,19 @@ class CombinedOpenEndedV1Module():
                 pass
         return return_html

+    def get_rubric_scores(self, get):
+        """
+        Gets the results of a given grader via ajax.
+        Input: AJAX get dictionary
+        Output: Dictionary to be rendered via ajax that contains the result html.
+        """
+        task_number = int(get['task_number'])
+        self.update_task_states()
+        response_dict = self.get_last_response(task_number)
+        context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1, 'task_name' : response_dict['human_task']}
+        html = self.system.render_template('combined_open_ended_results.html', context)
+        return {'html': html, 'success': True}
+
     def get_results(self, get):
         """
         Gets the results of a given grader via ajax.
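
Editor's sketch (illustrative, not part of the patch): `_parse_score_msg` now reports a grader type alongside each set of rubric scores (see the open_ended_module.py hunk below), and the new `get_rubric_scores` handler threads both through to an AJAX response. Key names below mirror the patch; the sample values are hypothetical.

    # Sketch only: key names mirror the patch, values are made up.
    rubric_data = {
        'valid': True,
        'score': 1,
        'feedback': '<div>...</div>',
        'rubric_scores': [[1, 0, 2]],  # per-grader lists of per-category points
        'grader_types': ['SA'],        # the patch uses 'SA' for self-assessment tasks
    }

    # Shape of the dict the new get_rubric_scores AJAX handler returns:
    ajax_response = {'html': '<rendered combined_open_ended_results.html>', 'success': True}
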
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
index 1ecdec14b7..44ae8cb0de 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
@@ -170,3 +170,51 @@ class CombinedOpenEndedRubric(object):
                 raise RubricParsingError("[extract_category]: found duplicate point values between two different options")
             else:
                 prev = option['points']
+
+    @staticmethod
+    def reformat_scores_for_rendering(scores, score_types):
+        success = False
+        if len(scores)==0:
+            return success
+
+        score_lists = []
+        score_type_list = []
+        for i in xrange(0,len(scores)):
+            score_cont_list = scores[i]
+            for j in xrange(0,len(score_cont_list)):
+                score_list = score_cont_list[j]
+                score_lists.append(score_list)
+                score_type_list.append(score_types[i])
+
+        score_list_len = len(score_lists[0])
+        for score_list in score_lists:
+            if len(score_list)!=score_list_len:
+                return success
+
+        score_tuples = []
+        for i in xrange(0,len(score_lists)):
+            for j in xrange(0,len(score_lists[i])):
+                tuple = (1,j,score_lists[i][j])
+                score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples,tuple)
+                score_tuples[tup_ind][0] += 1
+
+    @staticmethod
+    def check_for_tuple_matches(tuples, tuple):
+        category = tuple[1]
+        score = tuple[2]
+        tup_ind = -1
+        for t in xrange(0,len(tuples)):
+            if tuples[t][1] == category and tuples[t][2] == score:
+                tup_ind = t
+                break
+
+        if tup_ind == -1:
+            tuples.append([0,category,score])
+            tup_ind = len(tuples)-1
+        return tuples, tup_ind
+
+
+
+
+
+
diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py
index d8d80cc199..53c5e0278b 100644
--- a/common/lib/xmodule/xmodule/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_module.py
@@ -457,6 +457,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         if isinstance(score_result['score'], list):
             feedback_items = []
             rubric_scores = []
+            grader_types = []
             for i in xrange(0, len(score_result['score'])):
                 new_score_result = {
                     'score': score_result['score'][i],
@@ -471,6 +472,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                 feedback_template, rubric_score = self._format_feedback(new_score_result, system)
                 feedback_items.append(feedback_template)
                 rubric_scores.append(rubric_score)
+                grader_types.append(score_result['grader_type'])
             if join_feedback:
                 feedback = "".join(feedback_items)
             else:
@@ -481,11 +483,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             feedback, rubric_score = self._format_feedback(score_result, system)
             score = score_result['score']
             rubric_scores = [rubric_score]
+            grader_types = [score_result['grader_type']]

         self.submission_id = score_result['submission_id']
         self.grader_id = score_result['grader_id']

-        return {'valid': True, 'score': score, 'feedback': feedback, 'rubric_scores' : rubric_scores}
+        return {'valid': True, 'score': score, 'feedback': feedback, 'rubric_scores' : rubric_scores, 'grader_types' : grader_types}

     def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
         """
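
Editor's sketch (illustrative, not part of the patch): `check_for_tuple_matches` plus the loop in `reformat_scores_for_rendering` accumulate `[count, category, score]` entries, i.e. a tally of how many graders awarded each score on each rubric category. Assuming `score_lists` holds one list of per-category points per grader, the same tally can be expressed with `collections.Counter`:

    from collections import Counter

    # Hypothetical input: two graders, three rubric categories each.
    score_lists = [[1, 0, 2], [1, 1, 2]]

    # Count (category_index, points) pairs across graders -- the same
    # information the patch stores as [count, category, score] lists.
    counts = Counter(
        (category, points)
        for score_list in score_lists
        for category, points in enumerate(score_list)
    )

    assert counts[(0, 1)] == 2  # both graders gave 1 point on category 0
    assert counts[(1, 0)] == 1  # one grader gave 0 points on category 1
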