diff --git a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py index 8bd7df86c1..ce5d55d7b7 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py @@ -8,6 +8,7 @@ from lxml.html import rewrite_links from path import path import os import sys +import re from pkg_resources import resource_string @@ -21,7 +22,7 @@ from .xml_module import XmlDescriptor from xmodule.modulestore import Location import self_assessment_module import open_ended_module -from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError +from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST from .stringify import stringify_children import dateutil import dateutil.parser @@ -55,7 +56,7 @@ TRUE_DICT = ["True", True, "TRUE", "true"] HUMAN_TASK_TYPE = { 'selfassessment' : "Self Assessment", - 'openended' : "External Grader", + 'openended' : "edX Assessment", } class CombinedOpenEndedV1Module(): @@ -191,9 +192,9 @@ class CombinedOpenEndedV1Module(): # completion (doesn't matter if you self-assessed correct/incorrect). 
self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) - rubric_renderer = CombinedOpenEndedRubric(system, True) + self.rubric_renderer = CombinedOpenEndedRubric(system, True) rubric_string = stringify_children(definition['rubric']) - rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED, self._max_score) + self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED, self._max_score) #Static data is passed to the child modules to render self.static_data = { @@ -354,9 +355,10 @@ class CombinedOpenEndedV1Module(): 'state': self.state, 'task_count': len(self.task_xml), 'task_number': self.current_task_number + 1, - 'status': self.get_status(), + 'status': self.get_status(False), 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, + 'legend_list' : LEGEND_LIST, } return context @@ -431,6 +433,9 @@ class CombinedOpenEndedV1Module(): last_score = task.latest_score() last_post_assessment = task.latest_post_assessment(self.system) last_post_feedback = "" + feedback_dicts = [{}] + grader_ids = [0] + submission_ids = [0] if task_type == "openended": last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False) if isinstance(last_post_assessment, list): @@ -441,6 +446,18 @@ class CombinedOpenEndedV1Module(): else: last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment) last_post_assessment = last_post_evaluation + rubric_data = task._parse_score_msg(task.history[-1].get('post_assessment', ""), self.system) + rubric_scores = rubric_data['rubric_scores'] + grader_types = rubric_data['grader_types'] + feedback_items = rubric_data['feedback_items'] + feedback_dicts = rubric_data['feedback_dicts'] + grader_ids = rubric_data['grader_ids'] + submission_ids = rubric_data['submission_ids'] + elif task_type== "selfassessment": + rubric_scores = last_post_assessment + grader_types = ['SA'] + 
feedback_items = [''] + last_post_assessment = "" last_correctness = task.is_last_response_correct() max_score = task.max_score() state = task.state @@ -453,6 +470,16 @@ class CombinedOpenEndedV1Module(): human_state = task.HUMAN_NAMES[state] else: human_state = state + if len(grader_types)>0: + grader_type = grader_types[0] + else: + grader_type = "IN" + + if grader_type in HUMAN_GRADER_TYPE: + human_grader_name = HUMAN_GRADER_TYPE[grader_type] + else: + human_grader_name = grader_type + last_response_dict = { 'response': last_response, 'score': last_score, @@ -465,8 +492,15 @@ class CombinedOpenEndedV1Module(): 'correct': last_correctness, 'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt, + 'rubric_scores' : rubric_scores, + 'grader_types' : grader_types, + 'feedback_items' : feedback_items, + 'grader_type' : grader_type, + 'human_grader_type' : human_grader_name, + 'feedback_dicts' : feedback_dicts, + 'grader_ids' : grader_ids, + 'submission_ids' : submission_ids, } - return last_response_dict def update_task_states(self): @@ -502,19 +536,95 @@ class CombinedOpenEndedV1Module(): pass return return_html + def get_rubric(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. 
+ """ + all_responses = [] + loop_up_to_task = self.current_task_number+1 + for i in xrange(0,loop_up_to_task): + all_responses.append(self.get_last_response(i)) + rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['rubric_scores'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()] + grader_types = [all_responses[i]['grader_types'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['grader_types'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()] + feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['feedback_items'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()] + rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores, + grader_types, feedback_items) + + response_dict = all_responses[-1] + context = { + 'results': rubric_html, + 'task_name' : 'Scored Rubric', + 'class_name' : 'combined-rubric-container' + } + html = self.system.render_template('combined_open_ended_results.html', context) + return {'html': html, 'success': True} + + def get_legend(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. + """ + context = { + 'legend_list' : LEGEND_LIST, + } + html = self.system.render_template('combined_open_ended_legend.html', context) + return {'html': html, 'success': True} + def get_results(self, get): """ Gets the results of a given grader via ajax. Input: AJAX get dictionary Output: Dictionary to be rendered via ajax that contains the result html. 
""" - task_number = int(get['task_number']) self.update_task_states() - response_dict = self.get_last_response(task_number) - context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1} + loop_up_to_task = self.current_task_number+1 + all_responses =[] + for i in xrange(0,loop_up_to_task): + all_responses.append(self.get_last_response(i)) + context_list = [] + for ri in all_responses: + for i in xrange(0,len(ri['rubric_scores'])): + feedback = ri['feedback_dicts'][i].get('feedback','') + rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i]) + if rubric_data['success']: + rubric_html = rubric_data['html'] + else: + rubric_html = '' + context = { + 'rubric_html': rubric_html, + 'grader_type': ri['grader_type'], + 'feedback' : feedback, + 'grader_id' : ri['grader_ids'][i], + 'submission_id' : ri['submission_ids'][i], + } + context_list.append(context) + feedback_table = self.system.render_template('open_ended_result_table.html', { + 'context_list' : context_list, + 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT, + 'human_grader_types' : HUMAN_GRADER_TYPE, + 'rows': 50, + 'cols': 50, + }) + context = { + 'results': feedback_table, + 'task_name' : "Feedback", + 'class_name' : "result-container", + } html = self.system.render_template('combined_open_ended_results.html', context) return {'html': html, 'success': True} + def get_status_ajax(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. + """ + html = self.get_status(True) + return {'html': html, 'success': True} + def handle_ajax(self, dispatch, get): """ This is called by courseware.module_render, to handle an AJAX call. 
@@ -529,7 +639,10 @@ class CombinedOpenEndedV1Module(): handlers = { 'next_problem': self.next_problem, 'reset': self.reset, - 'get_results': self.get_results + 'get_results': self.get_results, + 'get_combined_rubric': self.get_rubric, + 'get_status' : self.get_status_ajax, + 'get_legend' : self.get_legend, } if dispatch not in handlers: @@ -593,7 +706,7 @@ class CombinedOpenEndedV1Module(): return json.dumps(state) - def get_status(self): + def get_status(self, render_via_ajax): """ Gets the status panel to be displayed at the top right. Input: None @@ -604,7 +717,13 @@ class CombinedOpenEndedV1Module(): task_data = self.get_last_response(i) task_data.update({'task_number': i + 1}) status.append(task_data) - context = {'status_list': status} + + context = { + 'status_list': status, + 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT, + 'legend_list' : LEGEND_LIST, + 'render_via_ajax' : render_via_ajax, + } status_html = self.system.render_template("combined_open_ended_status.html", context) return status_html diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py index 689103a86a..7c00c5f029 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py @@ -3,6 +3,27 @@ from lxml import etree log = logging.getLogger(__name__) +GRADER_TYPE_IMAGE_DICT = { + '8B' : '/static/images/random_grading_icon.png', + 'SA' : '/static/images/self_assessment_icon.png', + 'PE' : '/static/images/peer_grading_icon.png', + 'ML' : '/static/images/ml_grading_icon.png', + 'IN' : '/static/images/peer_grading_icon.png', + 'BC' : '/static/images/ml_grading_icon.png', + } + +HUMAN_GRADER_TYPE = { + '8B' : 'Magic-8-Ball-Assessment', + 'SA' : 'Self-Assessment', + 'PE' : 'Peer-Assessment', + 'IN' : 'Instructor-Assessment', + 'ML' : 'AI-Assessment', + 'BC' : 'AI-Assessment', + } + +DO_NOT_DISPLAY = ['BC', 'IN'] + +LEGEND_LIST = [{'name' : 
HUMAN_GRADER_TYPE[k], 'image' : GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() if k not in DO_NOT_DISPLAY ] class RubricParsingError(Exception): def __init__(self, msg): @@ -16,7 +37,7 @@ class CombinedOpenEndedRubric(object): self.view_only = view_only self.system = system - def render_rubric(self, rubric_xml): + def render_rubric(self, rubric_xml, score_list = None): ''' render_rubric: takes in an xml string and outputs the corresponding html for that xml, given the type of rubric we're generating @@ -29,22 +50,36 @@ class CombinedOpenEndedRubric(object): success = False try: rubric_categories = self.extract_categories(rubric_xml) + if score_list and len(score_list)==len(rubric_categories): + for i in xrange(0,len(rubric_categories)): + category = rubric_categories[i] + for j in xrange(0,len(category['options'])): + if score_list[i]==j: + rubric_categories[i]['options'][j]['selected'] = True + rubric_scores = [cat['score'] for cat in rubric_categories] max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) max_score = max(max_scores) - html = self.system.render_template('open_ended_rubric.html', + rubric_template = 'open_ended_rubric.html' + if self.view_only: + rubric_template = 'open_ended_view_only_rubric.html' + html = self.system.render_template(rubric_template, {'categories': rubric_categories, 'has_score': self.has_score, 'view_only': self.view_only, - 'max_score': max_score}) + 'max_score': max_score, + 'combined_rubric' : False + }) success = True except: error_message = "[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml) log.error(error_message) raise RubricParsingError(error_message) - return success, html + return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores} def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score): - success, rubric_feedback = self.render_rubric(rubric_string) + rubric_dict = 
self.render_rubric(rubric_string) + success = rubric_dict['success'] + rubric_feedback = rubric_dict['html'] if not success: error_message = "Could not parse rubric : {0} for location {1}".format(rubric_string, location.url()) log.error(error_message) @@ -149,7 +184,33 @@ class CombinedOpenEndedRubric(object): options = sorted(options, key=lambda option: option['points']) CombinedOpenEndedRubric.validate_options(options) - return {'description': description, 'options': options} + return {'description': description, 'options': options, 'score' : score} + + def render_combined_rubric(self,rubric_xml,scores,score_types,feedback_types): + success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores,score_types,feedback_types) + rubric_categories = self.extract_categories(rubric_xml) + max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) + max_score = max(max_scores) + for i in xrange(0,len(rubric_categories)): + category = rubric_categories[i] + for j in xrange(0,len(category['options'])): + rubric_categories[i]['options'][j]['grader_types'] = [] + for tuple in score_tuples: + if tuple[1] == i and tuple[2] ==j: + for grader_type in tuple[3]: + rubric_categories[i]['options'][j]['grader_types'].append(grader_type) + + log.debug(rubric_categories) + html = self.system.render_template('open_ended_combined_rubric.html', + {'categories': rubric_categories, + 'has_score': True, + 'view_only': True, + 'max_score': max_score, + 'combined_rubric' : True, + 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT, + 'human_grader_types' : HUMAN_GRADER_TYPE, + }) + return html @staticmethod @@ -167,3 +228,79 @@ class CombinedOpenEndedRubric(object): raise RubricParsingError("[extract_category]: found duplicate point values between two different options") else: prev = option['points'] + + @staticmethod + def reformat_scores_for_rendering(scores, score_types, feedback_types): + """ + Takes in a list of rubric scores, the types of those 
scores, and feedback associated with them + Outputs a reformatted list of score tuples (count, rubric category, rubric score, [graders that gave this score], [feedback types]) + @param scores: + @param score_types: + @param feedback_types: + @return: + """ + success = False + if len(scores)==0: + log.error("Score length is 0.") + return success, "" + + if len(scores) != len(score_types) or len(feedback_types) != len(scores): + log.error("Length mismatches.") + return success, "" + + score_lists = [] + score_type_list = [] + feedback_type_list = [] + for i in xrange(0,len(scores)): + score_cont_list = scores[i] + for j in xrange(0,len(score_cont_list)): + score_list = score_cont_list[j] + score_lists.append(score_list) + score_type_list.append(score_types[i][j]) + feedback_type_list.append(feedback_types[i][j]) + + score_list_len = len(score_lists[0]) + for i in xrange(0,len(score_lists)): + score_list = score_lists[i] + if len(score_list)!=score_list_len: + return success, "" + + score_tuples = [] + for i in xrange(0,len(score_lists)): + for j in xrange(0,len(score_lists[i])): + tuple = [1,j,score_lists[i][j],[],[]] + score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples,tuple) + score_tuples[tup_ind][0] += 1 + score_tuples[tup_ind][3].append(score_type_list[i]) + score_tuples[tup_ind][4].append(feedback_type_list[i]) + + success = True + return success, score_tuples + + @staticmethod + def check_for_tuple_matches(tuples, tuple): + """ + Checks to see if a tuple in a list of tuples is a match for tuple. + If not match, creates a new tuple matching tuple. 
+ @param tuples: list of tuples + @param tuple: tuples to match + @return: a new list of tuples, and the index of the tuple that matches tuple + """ + category = tuple[1] + score = tuple[2] + tup_ind = -1 + for t in xrange(0,len(tuples)): + if tuples[t][1] == category and tuples[t][2] == score: + tup_ind = t + break + + if tup_ind == -1: + tuples.append([0,category,score,[],[]]) + tup_ind = len(tuples)-1 + return tuples, tup_ind + + + + + + diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss index 8d921f828b..20700ab092 100644 --- a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss +++ b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss @@ -24,14 +24,11 @@ section.combined-open-ended { @include clearfix; .status-container { - float:right; - width:40%; + padding-bottom: 5px; } .item-container { - float:left; - width: 53%; - padding-bottom: 50px; + padding-bottom: 10px; } .result-container @@ -46,14 +43,26 @@ section.combined-open-ended { } } +section.legend-container { + .legenditem { + background-color : #d4d4d4; + font-size: .9em; + padding: 2px; + display: inline; + width: 20%; + } + margin-bottom: 5px; +} + section.combined-open-ended-status { .statusitem { - background-color: #FAFAFA; color: #2C2C2C; - font-family: monospace; - font-size: 1em; - padding: 10px; + background-color : #d4d4d4; + font-size: .9em; + padding: 2px; + display: inline; + width: 20%; .show-results { margin-top: .3em; text-align:right; @@ -61,12 +70,12 @@ section.combined-open-ended-status { .show-results-button { font: 1em monospace; } - } + } .statusitem-current { - background-color: #d4d4d4; + background-color: #B2B2B2; color: #222; - } + } span { &.unanswered { @@ -98,8 +107,29 @@ section.combined-open-ended-status { } } -div.result-container { +div.combined-rubric-container { + ul.rubric-list{ + list-style-type: none; + padding:0; + margin:0; + li { + &.rubric-list-item{ + 
margin-bottom: 2px; + padding: 0px; + } + } + } + span.rubric-category { + font-size: .9em; + } + padding-bottom: 5px; + padding-top: 10px; +} + +div.result-container { + padding-top: 10px; + padding-bottom: 5px; .evaluation { p { @@ -113,9 +143,8 @@ div.result-container { } .evaluation-response { - margin-bottom: 10px; + margin-bottom: 2px; header { - text-align: right; a { font-size: .85em; } @@ -198,20 +227,6 @@ div.result-container { } } - .result-correct { - background: url('../images/correct-icon.png') left 20px no-repeat; - .result-actual-output { - color: #090; - } - } - - .result-incorrect { - background: url('../images/incorrect-icon.png') left 20px no-repeat; - .result-actual-output { - color: #B00; - } - } - .markup-text{ margin: 5px; padding: 20px 0px 15px 50px; @@ -229,6 +244,16 @@ div.result-container { } } } + .rubric-result-container { + .rubric-result { + font-size: .9em; + padding: 2px; + display: inline-table; + } + padding: 2px; + margin: 0px; + display : inline; + } } @@ -404,7 +429,7 @@ section.open-ended-child { div.short-form-response { background: #F6F6F6; border: 1px solid #ddd; - margin-bottom: 20px; + margin-bottom: 0px; overflow-y: auto; height: 200px; @include clearfix; @@ -478,6 +503,18 @@ section.open-ended-child { margin-left: .75rem; } + ul.rubric-list{ + list-style-type: none; + padding:0; + margin:0; + li { + &.rubric-list-item{ + margin-bottom: 0px; + padding: 0px; + } + } + } + ol { list-style: decimal outside none; margin-bottom: lh(); @@ -503,9 +540,8 @@ section.open-ended-child { } li { - line-height: 1.4em; - margin-bottom: lh(.5); - + margin-bottom: 0px; + padding: 0px; &:last-child { margin-bottom: 0; } diff --git a/common/lib/xmodule/xmodule/grading_service_module.py b/common/lib/xmodule/xmodule/grading_service_module.py index 10c6f16adb..9af28a72c5 100644 --- a/common/lib/xmodule/xmodule/grading_service_module.py +++ b/common/lib/xmodule/xmodule/grading_service_module.py @@ -114,7 +114,9 @@ class 
GradingService(object): if 'rubric' in response_json: rubric = response_json['rubric'] rubric_renderer = CombinedOpenEndedRubric(self.system, view_only) - success, rubric_html = rubric_renderer.render_rubric(rubric) + rubric_dict = rubric_renderer.render_rubric(rubric) + success = rubric_dict['success'] + rubric_html = rubric_dict['html'] response_json['rubric'] = rubric_html return response_json # if we can't parse the rubric into HTML, diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee index ae63171ed4..fd0391450b 100644 --- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee @@ -4,11 +4,11 @@ class @Rubric # finds the scores for each rubric category @get_score_list: () => # find the number of categories: - num_categories = $('table.rubric tr').length + num_categories = $('.rubric-category').length score_lst = [] # get the score for each one - for i in [0..(num_categories-2)] + for i in [0..(num_categories-1)] score = $("input[name='score-selection-#{i}']:checked").val() score_lst.push(score) @@ -23,9 +23,8 @@ class @Rubric @check_complete: () -> # check to see whether or not any categories have not been scored - num_categories = $('table.rubric tr').length - # -2 because we want to skip the header - for i in [0..(num_categories-2)] + num_categories = $('.rubric-category').length + for i in [0..(num_categories-1)] score = $("input[name='score-selection-#{i}']:checked").val() if score == undefined return false @@ -52,22 +51,30 @@ class @CombinedOpenEnded @reset_button.click @reset @next_problem_button = @$('.next-step-button') @next_problem_button.click @next_problem + @status_container = @$('.status-elements') @show_results_button=@$('.show-results-button') @show_results_button.click @show_results + @question_header = @$('.question-header') + @question_header.click 
@collapse_question + # valid states: 'initial', 'assessing', 'post_assessment', 'done' Collapsible.setCollapsibles(@el) @submit_evaluation_button = $('.submit-evaluation-button') @submit_evaluation_button.click @message_post @results_container = $('.result-container') + @combined_rubric_container = $('.combined-rubric-container') + + @legend_container= $('.legend-container') + @show_legend_current() # Where to put the rubric once we load it @el = $(element).find('section.open-ended-child') @errors_area = @$('.error') @answer_area = @$('textarea.answer') - + @prompt_container = @$('.prompt') @rubric_wrapper = @$('.rubric-wrapper') @hint_wrapper = @$('.hint-wrapper') @message_wrapper = @$('.message-wrapper') @@ -82,11 +89,20 @@ class @CombinedOpenEnded @can_upload_files = false @open_ended_child= @$('.open-ended-child') + if @task_number>1 + @prompt_hide() + else if @task_number==1 and @child_state!='initial' + @prompt_hide() + @find_assessment_elements() @find_hint_elements() @rebind() + if @task_number>1 + @show_combined_rubric_current() + @show_results_current() + # locally scoped jquery. 
$: (selector) -> $(selector, @el) @@ -102,7 +118,7 @@ class @CombinedOpenEnded Collapsible.setCollapsibles(@results_container) show_results: (event) => - status_item = $(event.target).parent().parent() + status_item = $(event.target).parent() status_number = status_item.data('status-number') data = {'task_number' : status_number} $.postWithPrefix "#{@ajax_url}/get_results", data, (response) => @@ -115,6 +131,27 @@ class @CombinedOpenEnded else @gentle_alert response.error + show_combined_rubric_current: () => + data = {} + $.postWithPrefix "#{@ajax_url}/get_combined_rubric", data, (response) => + if response.success + @combined_rubric_container.after(response.html).remove() + @combined_rubric_container= $('div.combined_rubric_container') + + show_status_current: () => + data = {} + $.postWithPrefix "#{@ajax_url}/get_status", data, (response) => + if response.success + @status_container.after(response.html).remove() + @status_container= $('.status-elements') + + show_legend_current: () => + data = {} + $.postWithPrefix "#{@ajax_url}/get_legend", data, (response) => + if response.success + @legend_container.after(response.html).remove() + @legend_container= $('.legend-container') + message_post: (event)=> Logger.log 'message_post', @answers external_grader_message=$(event.target).parent().parent().parent() @@ -156,6 +193,11 @@ class @CombinedOpenEnded @next_problem_button.hide() @hide_file_upload() @hint_area.attr('disabled', false) + if @task_number>1 or @child_state!='initial' + @show_status_current() + + if @task_number==1 and @child_state=='assessing' + @prompt_hide() if @child_state == 'done' @rubric_wrapper.hide() if @child_type=="openended" @@ -257,7 +299,8 @@ class @CombinedOpenEnded event.preventDefault() if @child_state == 'assessing' && Rubric.check_complete() checked_assessment = Rubric.get_total_score() - data = {'assessment' : checked_assessment} + score_list = Rubric.get_score_list() + data = {'assessment' : checked_assessment, 'score_list' : 
score_list} $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => if response.success @child_state = response.state @@ -267,7 +310,6 @@ class @CombinedOpenEnded @find_hint_elements() else if @child_state == 'done' @rubric_wrapper.hide() - @message_wrapper.html(response.message_html) @rebind() else @@ -367,13 +409,13 @@ class @CombinedOpenEnded window.queuePollerID = window.setTimeout(@poll, 10000) setup_file_upload: => - if window.File and window.FileReader and window.FileList and window.Blob - if @accept_file_upload == "True" - @can_upload_files = true - @file_upload_area.html('') - @file_upload_area.show() - else - @gentle_alert 'File uploads are required for this question, but are not supported in this browser. Try the newest version of google chrome. Alternatively, if you have uploaded the image to the web, you can paste a link to it into the answer box.' + if @accept_file_upload == "True" + if window.File and window.FileReader and window.FileList and window.Blob + @can_upload_files = true + @file_upload_area.html('') + @file_upload_area.show() + else + @gentle_alert 'File uploads are required for this question, but are not supported in this browser. Try the newest version of google chrome. Alternatively, if you have uploaded the image to the web, you can paste a link to it into the answer box.' 
hide_file_upload: => if @accept_file_upload == "True" @@ -390,3 +432,26 @@ class @CombinedOpenEnded # wrap this so that it can be mocked reload: -> location.reload() + + collapse_question: () => + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + if @question_header.text() == "(Hide)" + new_text = "(Show)" + else + new_text = "(Hide)" + @question_header.text(new_text) + + prompt_show: () => + if @prompt_container.is(":hidden")==true + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + @question_header.text("(Hide)") + + prompt_hide: () => + if @prompt_container.is(":visible")==true + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + @question_header.text("(Show)") + + diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee index deeb82900b..5770238649 100644 --- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee +++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee @@ -180,12 +180,17 @@ class @PeerGradingProblem @content_panel = $('.content-panel') @grading_message = $('.grading-message') @grading_message.hide() + @question_header = $('.question-header') + @question_header.click @collapse_question @grading_wrapper =$('.grading-wrapper') @calibration_feedback_panel = $('.calibration-feedback') @interstitial_page = $('.interstitial-page') @interstitial_page.hide() + @calibration_interstitial_page = $('.calibration-interstitial-page') + @calibration_interstitial_page.hide() + @error_container = $('.error-container') @submission_key_input = $("input[name='submission-key']") @@ -201,7 +206,9 @@ class @PeerGradingProblem @action_button = $('.action-button') @calibration_feedback_button = $('.calibration-feedback-button') @interstitial_page_button = $('.interstitial-page-button') + @calibration_interstitial_page_button = 
$('.calibration-interstitial-page-button') @flag_student_checkbox = $('.flag-checkbox') + @collapse_question() Collapsible.setCollapsibles(@content_panel) @@ -210,12 +217,21 @@ class @PeerGradingProblem @calibration_feedback_button.click => @calibration_feedback_panel.hide() @grading_wrapper.show() + @gentle_alert "Calibration essay saved. Fetched the next essay." @is_calibrated_check() @interstitial_page_button.click => @interstitial_page.hide() @is_calibrated_check() + @calibration_interstitial_page_button.click => + @calibration_interstitial_page.hide() + @is_calibrated_check() + + @calibration_feedback_button.hide() + @calibration_feedback_panel.hide() + @error_container.hide() + @is_calibrated_check() @@ -233,6 +249,9 @@ class @PeerGradingProblem fetch_submission_essay: () => @backend.post('get_next_submission', {location: @location}, @render_submission) + gentle_alert: (msg) => + @grading_message.fadeIn() + @grading_message.html("
" + msg + "
") construct_data: () -> data = @@ -273,6 +292,9 @@ class @PeerGradingProblem else if response.calibrated and @calibration == true @calibration = false @render_interstitial_page() + else if not response.calibrated and @calibration==null + @calibration=true + @render_calibration_interstitial_page() else @calibration = true @fetch_calibration_essay() @@ -296,7 +318,7 @@ class @PeerGradingProblem if response.success @is_calibrated_check() @grading_message.fadeIn() - @grading_message.html("Grade sent successfully.
") + @grading_message.html("Successfully saved your feedback. Fetched the next essay.
") else if response.error @render_error(response.error) @@ -308,6 +330,7 @@ class @PeerGradingProblem # check to see whether or not any categories have not been scored if Rubric.check_complete() # show button if we have scores for all categories + @grading_message.hide() @show_submit_button() @grade = Rubric.get_total_score() @@ -323,7 +346,7 @@ class @PeerGradingProblem if response.success # load in all the data - @submission_container.html("Congratulations! Your score matches the actual score!
") + calibration_wrapper.append("Your score matches the actual score!
") else - calibration_wrapper.append("Please try to understand the grading critera better to be more accurate next time.
") + calibration_wrapper.append("You may want to review the rubric again.
") # disable score selection and submission from the grading interface $("input[name='score-selection']").attr('disabled', true) @submit_button.hide() + @calibration_feedback_button.show() render_interstitial_page: () => @content_panel.hide() + @grading_message.hide() @interstitial_page.show() + render_calibration_interstitial_page: () => + @content_panel.hide() + @action_button.hide() + @calibration_interstitial_page.show() + render_error: (error_message) => @error_container.show() @calibration_feedback_panel.hide() @@ -433,3 +465,12 @@ class @PeerGradingProblem setup_score_selection: (max_score) => # And now hook up an event handler again $("input[class='score-selection']").change @graded_callback + + collapse_question: () => + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + if @question_header.text() == "(Hide)" + new_text = "(Show)" + else + new_text = "(Hide)" + @question_header.text(new_text) diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py index 0ad6a26995..98260f3401 100644 --- a/common/lib/xmodule/xmodule/open_ended_module.py +++ b/common/lib/xmodule/xmodule/open_ended_module.py @@ -306,6 +306,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'grammar': 1, # needs to be after all the other feedback 'markup_text': 3} + do_not_render = ['topicality', 'prompt-overlap'] default_priority = 2 @@ -360,6 +361,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): if len(feedback) == 0: return format_feedback('errors', 'No feedback available') + for tag in do_not_render: + if tag in feedback: + feedback.pop(tag) + feedback_lst = sorted(feedback.items(), key=get_priority) feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst) else: @@ -381,9 +386,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): rubric_feedback = "" feedback = self._convert_longform_feedback_to_html(response_items) + rubric_scores = [] if 
response_items['rubric_scores_complete'] == True: rubric_renderer = CombinedOpenEndedRubric(system, True) - success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml']) + rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml']) + success = rubric_dict['success'] + rubric_feedback = rubric_dict['html'] + rubric_scores = rubric_dict['rubric_scores'] if not response_items['success']: return system.render_template("open_ended_error.html", @@ -396,7 +405,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'rubric_feedback': rubric_feedback }) - return feedback_template + return feedback_template, rubric_scores def _parse_score_msg(self, score_msg, system, join_feedback=True): @@ -420,7 +429,17 @@ class OpenEndedModule(openendedchild.OpenEndedChild): correct: Correctness of submission (Boolean) score: Points to be assigned (numeric, can be float) """ - fail = {'valid': False, 'score': 0, 'feedback': ''} + fail = { + 'valid': False, + 'score': 0, + 'feedback': '', + 'rubric_scores' : [[0]], + 'grader_types' : [''], + 'feedback_items' : [''], + 'feedback_dicts' : [{}], + 'grader_ids' : [0], + 'submission_ids' : [0], + } try: score_result = json.loads(score_msg) except (TypeError, ValueError): @@ -447,6 +466,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild): #This is to support peer grading if isinstance(score_result['score'], list): feedback_items = [] + rubric_scores = [] + grader_types = [] + feedback_dicts = [] + grader_ids = [] + submission_ids = [] for i in xrange(0, len(score_result['score'])): new_score_result = { 'score': score_result['score'][i], @@ -458,7 +482,17 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'rubric_scores_complete': score_result['rubric_scores_complete'][i], 'rubric_xml': score_result['rubric_xml'][i], } - feedback_items.append(self._format_feedback(new_score_result, system)) + feedback_template, rubric_score = self._format_feedback(new_score_result, system) + 
feedback_items.append(feedback_template) + rubric_scores.append(rubric_score) + grader_types.append(score_result['grader_type']) + try: + feedback_dict = json.loads(score_result['feedback'][i]) + except: + pass + feedback_dicts.append(feedback_dict) + grader_ids.append(score_result['grader_id'][i]) + submission_ids.append(score_result['submission_id']) if join_feedback: feedback = "".join(feedback_items) else: @@ -466,13 +500,33 @@ class OpenEndedModule(openendedchild.OpenEndedChild): score = int(median(score_result['score'])) else: #This is for instructor and ML grading - feedback = self._format_feedback(score_result, system) + feedback, rubric_score = self._format_feedback(score_result, system) score = score_result['score'] + rubric_scores = [rubric_score] + grader_types = [score_result['grader_type']] + feedback_items = [feedback] + try: + feedback_dict = json.loads(score_result['feedback']) + except: + pass + feedback_dicts = [feedback_dict] + grader_ids = [score_result['grader_id']] + submission_ids = [score_result['submission_id']] self.submission_id = score_result['submission_id'] self.grader_id = score_result['grader_id'] - return {'valid': True, 'score': score, 'feedback': feedback} + return { + 'valid': True, + 'score': score, + 'feedback': feedback, + 'rubric_scores' : rubric_scores, + 'grader_types' : grader_types, + 'feedback_items' : feedback_items, + 'feedback_dicts' : feedback_dicts, + 'grader_ids' : grader_ids, + 'submission_ids' : submission_ids, + } def latest_post_assessment(self, system, short_feedback=False, join_feedback=True): """ diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py index ba2de5c930..c83b0f0ea3 100644 --- a/common/lib/xmodule/xmodule/openendedchild.py +++ b/common/lib/xmodule/xmodule/openendedchild.py @@ -68,10 +68,10 @@ class OpenEndedChild(object): #This is used to tell students where they are at in the module HUMAN_NAMES = { - 'initial': 'Started', - 'assessing': 'Being 
scored', - 'post_assessment': 'Scoring finished', - 'done': 'Problem complete', + 'initial': 'Not started', + 'assessing': 'In progress', + 'post_assessment': 'Done', + 'done': 'Done', } def __init__(self, system, location, definition, descriptor, static_data, @@ -137,8 +137,6 @@ class OpenEndedChild(object): else: return False, {} - - def latest_answer(self): """Empty string if not available""" if not self.history: diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py index c8d1fe7a28..0d1092f96f 100644 --- a/common/lib/xmodule/xmodule/self_assessment_module.py +++ b/common/lib/xmodule/xmodule/self_assessment_module.py @@ -53,8 +53,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): @param descriptor: SelfAssessmentDescriptor @return: None """ - self.submit_message = definition['submitmessage'] - self.hint_prompt = definition['hintprompt'] self.prompt = stringify_children(self.prompt) self.rubric = stringify_children(self.rubric) @@ -76,8 +74,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): 'previous_answer': previous_answer, 'ajax_url': system.ajax_url, 'initial_rubric': self.get_rubric_html(system), - 'initial_hint': "", - 'initial_message': self.get_message_html(), 'state': self.state, 'allow_reset': self._allow_reset(), 'child_type': 'selfassessment', @@ -108,7 +104,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): if dispatch not in handlers: return 'Error' - log.debug(get) before = self.get_progress() d = handlers[dispatch](get, system) after = self.get_progress() @@ -126,7 +121,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): return '' rubric_renderer = CombinedOpenEndedRubric(system, False) - success, rubric_html = rubric_renderer.render_rubric(self.rubric) + rubric_dict = rubric_renderer.render_rubric(self.rubric) + success = rubric_dict['success'] + rubric_html = rubric_dict['html'] # we'll render it context = {'rubric': rubric_html, 
@@ -156,8 +153,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): else: hint = '' - context = {'hint_prompt': self.hint_prompt, - 'hint': hint} + context = {'hint': hint} if self.state == self.POST_ASSESSMENT: context['read_only'] = False @@ -168,15 +164,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): return system.render_template('self_assessment_hint.html', context) - def get_message_html(self): - """ - Return the appropriate version of the message view, based on state. - """ - if self.state != self.DONE: - return "" - - return """""".format(self.submit_message) - def save_answer(self, get, system): """ @@ -235,15 +222,19 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): try: score = int(get['assessment']) + score_list = get.getlist('score_list[]') + for i in xrange(0,len(score_list)): + score_list[i] = int(score_list[i]) except ValueError: - return {'success': False, 'error': "Non-integer score value"} + return {'success': False, 'error': "Non-integer score value, or no score list"} + #Record score as assessment and rubric scores as post assessment self.record_latest_score(score) + self.record_latest_post_assessment(json.dumps(score_list)) d = {'success': True, } self.change_state(self.DONE) - d['message_html'] = self.get_message_html() d['allow_reset'] = self._allow_reset() d['state'] = self.state @@ -251,6 +242,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): def save_hint(self, get, system): ''' + Not used currently, as hints have been removed from the system. Save the hint. 
Returns a dict { 'success': bool, 'message_html': message_html, @@ -268,9 +260,18 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): self.change_state(self.DONE) return {'success': True, - 'message_html': self.get_message_html(), + 'message_html': '', 'allow_reset': self._allow_reset()} + def latest_post_assessment(self, system): + latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system) + try: + rubric_scores = json.loads(latest_post_assessment) + except: + log.error("Cannot parse rubric scores in self assessment module from {0}".format(latest_post_assessment)) + rubric_scores = [] + return [rubric_scores] + class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): """ @@ -299,7 +300,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): 'hintprompt': 'some-html' } """ - expected_children = ['submitmessage', 'hintprompt'] + expected_children = [] for child in expected_children: if len(xml_object.xpath(child)) != 1: raise ValueError("Self assessment definition must include exactly one '{0}' tag".format(child)) @@ -308,9 +309,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): """Assumes that xml_object has child k""" return stringify_children(xml_object.xpath(k)[0]) - return {'submitmessage': parse('submitmessage'), - 'hintprompt': parse('hintprompt'), - } + return {} def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' @@ -321,7 +320,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): child_node = etree.fromstring(child_str) elt.append(child_node) - for child in ['submitmessage', 'hintprompt']: + for child in []: add_child(child) return elt diff --git a/common/static/images/grading_notification.png b/common/static/images/grading_notification.png new file mode 100644 index 0000000000..cd93857da9 Binary files /dev/null and b/common/static/images/grading_notification.png differ diff --git 
a/common/static/images/ml_grading_icon.png b/common/static/images/ml_grading_icon.png new file mode 100644 index 0000000000..283355814e Binary files /dev/null and b/common/static/images/ml_grading_icon.png differ diff --git a/common/static/images/peer_grading_icon.png b/common/static/images/peer_grading_icon.png new file mode 100644 index 0000000000..0ee7cf5f17 Binary files /dev/null and b/common/static/images/peer_grading_icon.png differ diff --git a/common/static/images/random_grading_icon.png b/common/static/images/random_grading_icon.png new file mode 100644 index 0000000000..d3737e61b0 Binary files /dev/null and b/common/static/images/random_grading_icon.png differ diff --git a/common/static/images/self_assessment_icon.png b/common/static/images/self_assessment_icon.png new file mode 100644 index 0000000000..c4b84e2ec8 Binary files /dev/null and b/common/static/images/self_assessment_icon.png differ diff --git a/lms/djangoapps/open_ended_grading/open_ended_notifications.py b/lms/djangoapps/open_ended_grading/open_ended_notifications.py index f79013e396..4055aab347 100644 --- a/lms/djangoapps/open_ended_grading/open_ended_notifications.py +++ b/lms/djangoapps/open_ended_grading/open_ended_notifications.py @@ -50,7 +50,7 @@ def staff_grading_notifications(course, user): log.info("Problem with getting notifications from staff grading service.") if pending_grading: - img_path = "/static/images/slider-handle.png" + img_path = "/static/images/grading_notification.png" notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications} @@ -83,7 +83,7 @@ def peer_grading_notifications(course, user): log.info("Problem with getting notifications from peer grading service.") if pending_grading: - img_path = "/static/images/slider-handle.png" + img_path = "/static/images/grading_notification.png" notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications} @@ -129,7 +129,7 @@ def 
combined_notifications(course, user): log.exception("Problem with getting notifications from controller query service.") if pending_grading: - img_path = "/static/images/slider-handle.png" + img_path = "/static/images/grading_notification.png" notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications} diff --git a/lms/static/coffee/src/staff_grading/staff_grading.coffee b/lms/static/coffee/src/staff_grading/staff_grading.coffee index d70608c1bf..8a1bf1adbb 100644 --- a/lms/static/coffee/src/staff_grading/staff_grading.coffee +++ b/lms/static/coffee/src/staff_grading/staff_grading.coffee @@ -181,6 +181,10 @@ class @StaffGrading @ml_error_info_container = $('.ml-error-info-container') @breadcrumbs = $('.breadcrumbs') + + @question_header = $('.question-header') + @question_header.click @collapse_question + @collapse_question() # model state @state = state_no_data @@ -392,10 +396,10 @@ class @StaffGrading else if @state == state_grading @ml_error_info_container.html(@ml_error_info) - meta_list = $("Score: ${score}
- % if grader_type == "ML": -Check below for full feedback:
- % endif -${notification['description']}
diff --git a/lms/templates/open_ended_result_table.html b/lms/templates/open_ended_result_table.html new file mode 100644 index 0000000000..24bf7a76fe --- /dev/null +++ b/lms/templates/open_ended_result_table.html @@ -0,0 +1,58 @@ +% for co in context_list: + % if co['grader_type'] in grader_type_image_dict: + <%grader_type=co['grader_type']%> + <% grader_image = grader_type_image_dict[grader_type] %> + % if grader_type in human_grader_types: + <% human_title = human_grader_types[grader_type] %> + % else: + <% human_title = grader_type %> + % endif +How accurate do you find this feedback?
+Additional comments:
+ + +Before you can do any proper peer grading, you first need to understand how your own grading compares to that of the instructor. Once your grades begin to match the instructor's, you will move on to grading your peers!
-You have successfully managed to calibrate your answers to that of the instructors and have moved onto the next step in the peer grading process.
-You cannot start grading until you have graded a sufficient number of training problems and have been able to demonstrate that your scores closely match that of the instructor.
-Now that you have finished your training, you are now allowed to grade your peers. Please keep in mind that students are allowed to respond to the grades and feedback they receive.
-You have now completed the calibration step. You are now ready to start grading.
+You have finished learning to grade, which means that you are now ready to start grading.
You have not yet finished learning to grade this problem.
+You will now be shown a series of instructor-scored essays, and will be asked to score them yourself.
+Once you can score the essays similarly to an instructor, you will be ready to grade your peers.
+ +