diff --git a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py
index f62c2d309a..8bd7df86c1 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py
@@ -23,6 +23,10 @@ import self_assessment_module
 import open_ended_module
 from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
 from .stringify import stringify_children
+import dateutil
+import dateutil.parser
+import datetime
+from timeparse import parse_timedelta
 
 log = logging.getLogger("mitx.courseware")
 
@@ -54,10 +58,6 @@ HUMAN_TASK_TYPE = {
     'openended' : "External Grader",
 }
 
-class IncorrectMaxScoreError(Exception):
-    def __init__(self, msg):
-        self.msg = msg
-
 class CombinedOpenEndedV1Module():
     """
     This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -165,19 +165,35 @@ class CombinedOpenEndedV1Module():
         self.is_scored = self.metadata.get('is_graded', IS_SCORED) in TRUE_DICT
         self.accept_file_upload = self.metadata.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
 
+        display_due_date_string = self.metadata.get('due', None)
+        if display_due_date_string is not None:
+            try:
+                self.display_due_date = dateutil.parser.parse(display_due_date_string)
+            except ValueError:
+                log.error("Could not parse due date {0} for location {1}".format(display_due_date_string, location))
+                raise
+        else:
+            self.display_due_date = None
+
+        grace_period_string = self.metadata.get('graceperiod', None)
+        if grace_period_string is not None and self.display_due_date:
+            try:
+                self.grace_period = parse_timedelta(grace_period_string)
+                self.close_date = self.display_due_date + self.grace_period
+            except:
+                log.error("Error parsing the grace period {0} for location {1}".format(grace_period_string, location))
+                raise
+        else:
+            self.grace_period = None
+            self.close_date = self.display_due_date
+
         # Used for progress / grading.  Currently get credit just for
         # completion (doesn't matter if you self-assessed correct/incorrect).
         self._max_score = int(self.metadata.get('max_score', MAX_SCORE))
 
-        if self._max_score > MAX_SCORE_ALLOWED:
-            error_message = "Max score {0} is higher than max score allowed {1} for location {2}".format(self._max_score,
-                MAX_SCORE_ALLOWED, location)
-            log.error(error_message)
-            raise IncorrectMaxScoreError(error_message)
-
         rubric_renderer = CombinedOpenEndedRubric(system, True)
         rubric_string = stringify_children(definition['rubric'])
-        rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
+        rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED, self._max_score)
 
         #Static data is passed to the child modules to render
         self.static_data = {
@@ -187,6 +203,7 @@ class CombinedOpenEndedV1Module():
             'rubric': definition['rubric'],
             'display_name': self.display_name,
             'accept_file_upload': self.accept_file_upload,
+            'close_date' : self.close_date,
         }
 
         self.task_xml = definition['task_xml']
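The hunk above wires the `due` and `graceperiod` metadata into a single `close_date`. A minimal sketch of the resulting arithmetic (not part of the diff; the metadata values here are made up, and the timedelta stands in for what `parse_timedelta`, added to `timeparse.py` later in this patch, would return):

```python
import dateutil.parser
from datetime import timedelta

display_due_date = dateutil.parser.parse("2013-04-01T12:00")  # metadata['due']
grace_period = timedelta(days=1, hours=2)  # parse_timedelta("1 day 2 hours")

# With both values present, submissions stay open through the grace period.
close_date = display_due_date + grace_period
print close_date  # 2013-04-02 14:00:00
```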
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
index 9a213299cd..689103a86a 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
@@ -29,10 +29,13 @@ class CombinedOpenEndedRubric(object):
         success = False
         try:
             rubric_categories = self.extract_categories(rubric_xml)
-            html = self.system.render_template('open_ended_rubric.html',
+            max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
+            max_score = max(max_scores)
+            html = self.system.render_template('open_ended_rubric.html',
                     {'categories': rubric_categories,
                      'has_score': self.has_score,
-                     'view_only': self.view_only})
+                     'view_only': self.view_only,
+                     'max_score': max_score})
             success = True
         except:
             error_message = "[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml)
@@ -40,7 +43,7 @@ class CombinedOpenEndedRubric(object):
             raise RubricParsingError(error_message)
         return success, html
 
-    def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed):
+    def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
         success, rubric_feedback = self.render_rubric(rubric_string)
         if not success:
             error_message = "Could not parse rubric : {0} for location {1}".format(rubric_string, location.url())
@@ -48,13 +51,21 @@ class CombinedOpenEndedRubric(object):
             raise RubricParsingError(error_message)
 
         rubric_categories = self.extract_categories(rubric_string)
+        total = 0
         for category in rubric_categories:
+            total = total + len(category['options']) - 1
             if len(category['options']) > (max_score_allowed + 1):
                 error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}".format(
                     len(category['options']), max_score_allowed)
                 log.error(error_message)
                 raise RubricParsingError(error_message)
 
+        if total != max_score:
+            error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}".format(
+                max_score, location, total)
+            log.error(error_msg)
+            raise RubricParsingError(error_msg)
+
     def extract_categories(self, element):
         '''
         Contstruct a list of categories such that the structure looks like:
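Two different maxima are in play here: `render_rubric` computes `max_score` as the highest point value of any single category (it drives the points column headers in the template), while `check_if_rubric_is_parseable` compares the problem's `max_score` metadata against the summed point total of all categories. A sketch of that arithmetic with a hypothetical two-category rubric:

```python
# Each category is worth (number of options - 1) points, since options carry
# zero-indexed point values; the problem's max_score metadata must equal the
# sum across categories or check_if_rubric_is_parseable raises
# RubricParsingError.
rubric_categories = [
    {'options': [{'points': 0}, {'points': 1}, {'points': 2}]},  # worth 2
    {'options': [{'points': 0}, {'points': 1}]},                 # worth 1
]

total = sum(len(cat['options']) - 1 for cat in rubric_categories)
column_max = max(cat['options'][-1]['points'] for cat in rubric_categories)

assert total == 3       # metadata max_score must be 3 for this rubric
assert column_max == 2  # rendered header runs from "0 points" to "2 points"
```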
diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss
index 38fd6ba01c..8d921f828b 100644
--- a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss
+++ b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss
@@ -231,47 +231,6 @@ div.result-container {
   }
 }
 
-div.result-container, section.open-ended-child {
-  .rubric {
-    margin-bottom:25px;
-    tr {
-      margin:10px 0px;
-      height: 100%;
-    }
-    td {
-      padding: 20px 0px 25px 0px;
-      margin: 10px 0px;
-      height: 100%;
-    }
-    th {
-      padding: 5px;
-      margin: 5px;
-    }
-    label,
-    .view-only {
-      margin:2px;
-      position: relative;
-      padding: 10px 15px 25px 15px;
-      width: 145px;
-      height:100%;
-      display: inline-block;
-      min-height: 50px;
-      min-width: 50px;
-      background-color: #CCC;
-      font-size: .85em;
-    }
-    .grade {
-      position: absolute;
-      bottom:0px;
-      right:0px;
-      margin:10px;
-    }
-    .selected-grade {
-      background: #666;
-      color: white;
-    }
-  }
-}
 
 section.open-ended-child {
 
   @media print {
@@ -445,7 +404,6 @@ section.open-ended-child {
   div.short-form-response {
     background: #F6F6F6;
     border: 1px solid #ddd;
-    border-top: 0;
     margin-bottom: 20px;
     overflow-y: auto;
     height: 200px;
@@ -586,11 +544,6 @@ section.open-ended-child {
   }
 
   .submission_feedback {
-    // background: #F3F3F3;
-    // border: 1px solid #ddd;
-    // @include border-radius(3px);
-    // padding: 8px 12px;
-    // margin-top: 10px;
     @include inline-block;
     font-style: italic;
     margin: 8px 0 0 10px;
diff --git a/common/lib/xmodule/xmodule/grading_service_module.py b/common/lib/xmodule/xmodule/grading_service_module.py
index a442f39f34..10c6f16adb 100644
--- a/common/lib/xmodule/xmodule/grading_service_module.py
+++ b/common/lib/xmodule/xmodule/grading_service_module.py
@@ -113,7 +113,7 @@ class GradingService(object):
         try:
             if 'rubric' in response_json:
                 rubric = response_json['rubric']
-                rubric_renderer = CombinedOpenEndedRubric(self.system, False)
+                rubric_renderer = CombinedOpenEndedRubric(self.system, view_only)
                 success, rubric_html = rubric_renderer.render_rubric(rubric)
                 response_json['rubric'] = rubric_html
             return response_json
diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
index cd85d93381..ae63171ed4 100644
--- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
@@ -1,3 +1,36 @@
+class @Rubric
+  constructor: () ->
+
+  # finds the scores for each rubric category
+  @get_score_list: () =>
+    # find the number of categories:
+    num_categories = $('table.rubric tr').length
+
+    score_lst = []
+    # get the score for each one
+    for i in [0..(num_categories-2)]
+      score = $("input[name='score-selection-#{i}']:checked").val()
+      score_lst.push(score)
+
+    return score_lst
+
+  @get_total_score: () ->
+    score_lst = @get_score_list()
+    tot = 0
+    for score in score_lst
+      tot += parseInt(score)
+    return tot
+
+  @check_complete: () ->
+    # check to see whether or not any categories have not been scored
+    num_categories = $('table.rubric tr').length
+    # -2 because we want to skip the header
+    for i in [0..(num_categories-2)]
+      score = $("input[name='score-selection-#{i}']:checked").val()
+      if score == undefined
+        return false
+    return true
+
 class @CombinedOpenEnded
   constructor: (element) ->
     @element=element
@@ -222,9 +255,9 @@ class @CombinedOpenEnded
 
   save_assessment: (event) =>
     event.preventDefault()
-    if @child_state == 'assessing'
-      checked_assessment = @$('input[name="grade-selection"]:checked')
-      data = {'assessment' : checked_assessment.val()}
+    if @child_state == 'assessing' && Rubric.check_complete()
+      checked_assessment = Rubric.get_total_score()
+      data = {'assessment' : checked_assessment}
       $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
         if response.success
           @child_state = response.state
diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
index ee98905cda..deeb82900b 100644
--- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
@@ -233,23 +233,11 @@ class @PeerGradingProblem
   fetch_submission_essay: () =>
     @backend.post('get_next_submission', {location: @location}, @render_submission)
 
-  # finds the scores for each rubric category
-  get_score_list: () =>
-    # find the number of categories:
-    num_categories = $('table.rubric tr').length
-
-    score_lst = []
-    # get the score for each one
-    for i in [0..(num_categories-1)]
-      score = $("input[name='score-selection-#{i}']:checked").val()
-      score_lst.push(score)
-
-    return score_lst
 
   construct_data: () ->
     data =
-      rubric_scores: @get_score_list()
-      score: @grade
+      rubric_scores: Rubric.get_score_list()
+      score: Rubric.get_total_score()
       location: @location
       submission_id: @essay_id_input.val()
       submission_key: @submission_key_input.val()
@@ -317,17 +305,11 @@ class @PeerGradingProblem
 
   # called after a grade is selected on the interface
   graded_callback: (event) =>
-    @grade = $("input[name='grade-selection']:checked").val()
-    if @grade == undefined
-      return
     # check to see whether or not any categories have not been scored
-    num_categories = $('table.rubric tr').length
-    for i in [0..(num_categories-1)]
-      score = $("input[name='score-selection-#{i}']:checked").val()
-      if score == undefined
-        return
-    # show button if we have scores for all categories
-    @show_submit_button()
+    if Rubric.check_complete()
+      # show button if we have scores for all categories
+      @show_submit_button()
+      @grade = Rubric.get_total_score()
 
@@ -401,6 +383,7 @@ class @PeerGradingProblem
   # render common information between calibration and grading
   render_submission_data: (response) =>
     @content_panel.show()
+    @error_container.hide()
 
     @submission_container.append(@make_paragraphs(response.student_response))
     @prompt_container.html(response.prompt)
@@ -448,28 +431,5 @@ class @PeerGradingProblem
     @submit_button.show()
 
   setup_score_selection: (max_score) =>
-
-    # first, get rid of all the old inputs, if any.
-    @score_selection_container.html("""
-        <h3>Overall Score</h3>
-
-        <p>Choose an overall score for this submission.</p>
-        """)
-
-    # Now create new labels and inputs for each possible score.
-    for score in [0..max_score]
-      id = 'score-' + score
-      label = """<label for="#{id}">#{score}</label>"""
-
-      input = """
-              <input type="radio" name="grade-selection" id="#{id}" value="#{score}"/>
-              """    # " fix broken parsing in emacs
-      @score_selection_container.append(input + label)
-    # And now hook up an event handler again
-    $("input[name='score-selection']").change @graded_callback
-    $("input[name='grade-selection']").change @graded_callback
-
-#mock_backend = false
-#ajax_url = $('.peer-grading').data('ajax_url')
-#backend = new PeerGradingProblemBackend(ajax_url, mock_backend)
-#$(document).ready(() -> new PeerGradingProblem(backend))
+    $("input[class='score-selection']").change @graded_callback
diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py
index 072a7153fb..0ad6a26995 100644
--- a/common/lib/xmodule/xmodule/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_module.py
@@ -549,14 +549,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         @param system: modulesystem
         @return: Success indicator
         """
-        if self.attempts > self.max_attempts:
-            # If too many attempts, prevent student from saving answer and
-            # seeing rubric. In normal use, students shouldn't see this because
-            # they won't see the reset button once they're out of attempts.
-            return {
-                'success': False,
-                'error': 'Too many attempts.'
-            }
+        # Once we close the problem, we should not allow students
+        # to save answers
+        closed, msg = self.check_if_closed()
+        if closed:
+            return msg
 
         if self.state != self.INITIAL:
             return self.out_of_sync_error(get)
diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py
index c5a6ed4f72..ba2de5c930 100644
--- a/common/lib/xmodule/xmodule/openendedchild.py
+++ b/common/lib/xmodule/xmodule/openendedchild.py
@@ -74,7 +74,7 @@ class OpenEndedChild(object):
         'done': 'Problem complete',
     }
 
-    def __init__(self, system, location, definition, descriptor, static_data,
+    def __init__(self, system, location, definition, descriptor, static_data,
                  instance_state=None, shared_state=None, **kwargs):
         # Load instance state
         if instance_state is not None:
@@ -99,6 +99,7 @@ class OpenEndedChild(object):
         self.rubric = static_data['rubric']
         self.display_name = static_data['display_name']
         self.accept_file_upload = static_data['accept_file_upload']
+        self.close_date = static_data['close_date']
 
         # Used for progress / grading.  Currently get credit just for
         # completion (doesn't matter if you self-assessed correct/incorrect).
@@ -117,6 +118,27 @@ class OpenEndedChild(object):
         """
         pass
 
+    def closed(self):
+        if self.close_date is not None and datetime.utcnow() > self.close_date:
+            return True
+        return False
+
+    def check_if_closed(self):
+        if self.closed():
+            return True, {
+                'success': False,
+                'error': 'This problem is now closed.'
+            }
+        elif self.attempts > self.max_attempts:
+            return True, {
+                'success': False,
+                'error': 'Too many attempts.'
+            }
+        else:
+            return False, {}
+
+
+
     def latest_answer(self):
         """Empty string if not available"""
         if not self.history:
diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py
index 07cc68a83a..c8d1fe7a28 100644
--- a/common/lib/xmodule/xmodule/self_assessment_module.py
+++ b/common/lib/xmodule/xmodule/self_assessment_module.py
@@ -125,7 +125,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
         if self.state == self.INITIAL:
             return ''
 
-        rubric_renderer = CombinedOpenEndedRubric(system, True)
+        rubric_renderer = CombinedOpenEndedRubric(system, False)
         success, rubric_html = rubric_renderer.render_rubric(self.rubric)
 
         # we'll render it
@@ -190,15 +190,10 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
             Dictionary with keys 'success' and either 'error' (if not success),
             or 'rubric_html' (if success).
         """
-        # Check to see if attempts are less than max
-        if self.attempts > self.max_attempts:
-            # If too many attempts, prevent student from saving answer and
-            # seeing rubric. In normal use, students shouldn't see this because
-            # they won't see the reset button once they're out of attempts.
-            return {
-                'success': False,
-                'error': 'Too many attempts.'
-            }
+        # Check to see if this problem is closed
+        closed, msg = self.check_if_closed()
+        if closed:
+            return msg
 
         if self.state != self.INITIAL:
             return self.out_of_sync_error(get)
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
index 556bf2953b..c2b27e4953 100644
--- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -31,9 +31,10 @@ class OpenEndedChildTest(unittest.TestCase):
         Response Quality
+
     '''
 
-    max_score = 4
+    max_score = 1
 
     static_data = {
         'max_attempts': 20,
@@ -42,6 +43,7 @@
         'max_score': max_score,
         'display_name': 'Name',
         'accept_file_upload': False,
+        'close_date': None
     }
     definition = Mock()
     descriptor = Mock()
@@ -158,6 +160,7 @@ class OpenEndedModuleTest(unittest.TestCase):
         'display_name': 'Name',
         'accept_file_upload': False,
         'rewrite_content_links' : "",
+        'close_date': None,
    }
 
     oeparam = etree.XML('''
@@ -274,9 +277,10 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
         Response Quality
+
     '''
 
-    max_score = 3
+    max_score = 1
 
     metadata = {'attempts': '10', 'max_score': max_score}
 
@@ -288,6 +292,7 @@
         'display_name': 'Name',
         'accept_file_upload' : False,
         'rewrite_content_links' : "",
+        'close_date' : "",
     }
 
     oeparam = etree.XML('''
@@ -320,7 +325,7 @@
     descriptor = Mock()
 
     def setUp(self):
-        self.combinedoe = CombinedOpenEndedV1Module(test_system, self.location, self.definition, self.descriptor, self.static_data, metadata=self.metadata)
+        self.combinedoe = CombinedOpenEndedV1Module(test_system, self.location, self.definition, self.descriptor, static_data = self.static_data, metadata=self.metadata)
 
     def test_get_tag_name(self):
         name = self.combinedoe.get_tag_name("Tag")
diff --git a/common/lib/xmodule/xmodule/tests/test_self_assessment.py b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
index 78dbb082ac..617b2b142a 100644
--- a/common/lib/xmodule/xmodule/tests/test_self_assessment.py
+++ b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
@@ -46,11 +46,13 @@ class SelfAssessmentTest(unittest.TestCase):
             'max_score': 1,
             'display_name': "Name",
             'accept_file_upload': False,
+            'close_date': None
         }
 
         self.module = SelfAssessmentModule(test_system, self.location,
                                            self.definition, self.descriptor,
-                                           static_data, state, metadata=self.metadata)
+                                           static_data,
+                                           state, metadata=self.metadata)
 
     def test_get_html(self):
         html = self.module.get_html(test_system)
diff --git a/common/lib/xmodule/xmodule/timeparse.py b/common/lib/xmodule/xmodule/timeparse.py
index 1c3a780ad8..15a8233ccb 100644
--- a/common/lib/xmodule/xmodule/timeparse.py
+++ b/common/lib/xmodule/xmodule/timeparse.py
@@ -2,9 +2,12 @@
 Helper functions for handling time in the format we like.
 """
 import time
+import re
+from datetime import timedelta
 
 TIME_FORMAT = "%Y-%m-%dT%H:%M"
 
+TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')
+
 def parse_time(time_str):
     """
@@ -22,3 +25,23 @@ def stringify_time(time_struct):
     Convert a time struct to a string
     """
     return time.strftime(TIME_FORMAT, time_struct)
+
+def parse_timedelta(time_str):
+    """
+    time_str: A string with the following components:
+        day[s] (optional)
+        hour[s] (optional)
+        minute[s] (optional)
+        second[s] (optional)
+
+    Returns a datetime.timedelta parsed from the string
+    """
+    parts = TIMEDELTA_REGEX.match(time_str)
+    if not parts:
+        return
+    parts = parts.groupdict()
+    time_params = {}
+    for (name, param) in parts.iteritems():
+        if param:
+            time_params[name] = int(param)
+    return timedelta(**time_params)
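A few illustrative calls to the new helper (the input strings are made up; this assumes it is run from a context where `timeparse` is importable, as in the relative import used by `combined_open_ended_modulev1.py` above):

```python
from timeparse import parse_timedelta

print parse_timedelta("1 day 12 hours")    # 1 day, 12:00:00
print parse_timedelta("30 minutes")        # 0:30:00
print parse_timedelta("2 days 3 minutes")  # 2 days, 0:03:00
print parse_timedelta("tomorrow-ish")      # None -- the regex does not match
```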

diff --git a/lms/static/coffee/src/staff_grading/staff_grading.coffee b/lms/static/coffee/src/staff_grading/staff_grading.coffee
index 005a8e682e..117388bab0 100644
--- a/lms/static/coffee/src/staff_grading/staff_grading.coffee
+++ b/lms/static/coffee/src/staff_grading/staff_grading.coffee
@@ -212,55 +212,19 @@ class @StaffGrading
 
   setup_score_selection: =>
-    # first, get rid of all the old inputs, if any.
-    @grade_selection_container.html("""
-        <h3>Overall Score</h3>
-
-        <p>Choose an overall score for this submission.</p>
-        """)
-    # Now create new labels and inputs for each possible score.
-    for score in [0..@max_score]
-      id = 'score-' + score
-      label = """<label for="#{id}">#{score}</label>"""
-      input = """
-              <input type="radio" name="grade-selection" id="#{id}" value="#{score}"/>
-              """    # " fix broken parsing in emacs
-      @grade_selection_container.append(input + label)
-    $('.grade-selection').click => @graded_callback()
-
     @score_selection_container.html(@rubric)
     $('.score-selection').click => @graded_callback()
 
   graded_callback: () =>
-    @grade = $("input[name='grade-selection']:checked").val()
-    if @grade == undefined
-      return
-    # check to see whether or not any categories have not been scored
-    num_categories = $('table.rubric tr').length
-    for i in [0..(num_categories-1)]
-      score = $("input[name='score-selection-#{i}']:checked").val()
-      if score == undefined
-        return
-    # show button if we have scores for all categories
-    @state = state_graded
-    @submit_button.show()
+    # show button if we have scores for all categories
+    if Rubric.check_complete()
+      @state = state_graded
+      @submit_button.show()
 
   set_button_text: (text) =>
     @action_button.attr('value', text)
 
-  # finds the scores for each rubric category
-  get_score_list: () =>
-    # find the number of categories:
-    num_categories = $('table.rubric tr').length
-
-    score_lst = []
-    # get the score for each one
-    for i in [0..(num_categories-1)]
-      score = $("input[name='score-selection-#{i}']:checked").val()
-      score_lst.push(score)
-
-    return score_lst
-
   ajax_callback: (response) =>
     # always clear out errors and messages on transition.
     @error_msg = ''
@@ -285,8 +249,8 @@ class @StaffGrading
 
   skip_and_get_next: () =>
     data =
-      score: @grade
-      rubric_scores: @get_score_list()
+      score: Rubric.get_total_score()
+      rubric_scores: Rubric.get_score_list()
       feedback: @feedback_area.val()
       submission_id: @submission_id
       location: @location
@@ -299,8 +263,8 @@ class @StaffGrading
 
   submit_and_get_next: () ->
     data =
-      score: @grade
-      rubric_scores: @get_score_list()
+      score: Rubric.get_total_score()
+      rubric_scores: Rubric.get_score_list()
       feedback: @feedback_area.val()
       submission_id: @submission_id
       location: @location
diff --git a/lms/static/sass/course.scss b/lms/static/sass/course.scss
index 17406782fe..33caeba802 100644
--- a/lms/static/sass/course.scss
+++ b/lms/static/sass/course.scss
@@ -44,9 +44,9 @@
 @import "course/gradebook";
 @import "course/tabs";
 @import "course/staff_grading";
+@import "course/rubric";
 @import "course/open_ended_grading";
 
-
 // instructor
 @import "course/instructor/instructor";
diff --git a/lms/static/sass/course/_rubric.scss b/lms/static/sass/course/_rubric.scss
new file mode 100644
index 0000000000..5048d70253
--- /dev/null
+++ b/lms/static/sass/course/_rubric.scss
@@ -0,0 +1,52 @@
+.rubric {
+  margin: 40px 0px;
+  tr {
+    margin:10px 0px;
+    height: 100%;
+  }
+  td {
+    padding: 20px 0px 25px 0px;
+    height: 100%;
+    border: 1px black solid;
+    text-align: center;
+  }
+  th {
+    padding: 5px;
+    margin: 5px;
+    text-align: center;
+  }
+  .points-header th {
+    padding: 0px;
+  }
+  .rubric-label
+  {
+    position: relative;
+    padding: 0px 15px 15px 15px;
+    width: 130px;
+    min-height: 50px;
+    min-width: 50px;
+    font-size: .9em;
+    background-color: white;
+    display: block;
+  }
+  .grade {
+    position: absolute;
+    bottom:0px;
+    right:0px;
+    margin:10px;
+  }
+  .selected-grade,
+  .selected-grade .rubric-label {
+    background: #666;
+    color: white;
+  }
+  input[type=radio]:checked + .rubric-label {
+    background: white;
+    color: $base-font-color; }
+  input[class='score-selection'] {
+    position: relative;
+    margin-left: 10px;
+    font-size: 16px;
+  }
+}
+
diff --git a/lms/static/sass/course/_staff_grading.scss b/lms/static/sass/course/_staff_grading.scss
index e3de7c8eab..1aaca8f077 100644
--- a/lms/static/sass/course/_staff_grading.scss
+++ b/lms/static/sass/course/_staff_grading.scss
@@ -12,7 +12,7 @@ div.peer-grading{
   label {
     margin: 10px;
     padding: 5px;
-    display: inline-block;
+    @include inline-block;
     min-width: 50px;
     background-color: #CCC;
     text-size: 1.5em;
@@ -176,49 +176,4 @@ div.peer-grading{
     }
   }
   padding: 40px;
-  .rubric {
-    tr {
-      margin:10px 0px;
-      height: 100%;
-    }
-    td {
-      padding: 20px 0px 25px 0px;
-      height: 100%;
-    }
-    th {
-      padding: 5px;
-      margin: 5px;
-    }
-    label,
-    .view-only {
-      margin:2px;
-      position: relative;
-      padding: 15px 15px 25px 15px;
-      width: 150px;
-      height:100%;
-      display: inline-block;
-      min-height: 50px;
-      min-width: 50px;
-      background-color: #CCC;
-      font-size: .9em;
-    }
-    .grade {
-      position: absolute;
-      bottom:0px;
-      right:0px;
-      margin:10px;
-    }
-    .selected-grade {
-      background: #666;
-      color: white;
-    }
-    input[type=radio]:checked + label {
-      background: #666;
-      color: white; }
-    input[class='score-selection'] {
-      display: none;
-    }
-  }
-}
-
diff --git a/lms/templates/instructor/staff_grading.html b/lms/templates/instructor/staff_grading.html
index 56aed5a54a..dcfece34b8 100644
--- a/lms/templates/instructor/staff_grading.html
+++ b/lms/templates/instructor/staff_grading.html
@@ -75,6 +75,7 @@
 
+
 
         Written Feedback
 
diff --git a/lms/templates/open_ended_rubric.html b/lms/templates/open_ended_rubric.html
index 8d40c7d2b8..eb3fc564b4 100644
--- a/lms/templates/open_ended_rubric.html
+++ b/lms/templates/open_ended_rubric.html
@@ -8,26 +8,33 @@
 
   Select the criteria you feel best represents this submission in each category.
 
 % endif
+
+
+  % for i in range(max_score + 1):
+    ${i} points
+  % endfor
+
+
 % for i in range(len(categories)):
   <% category = categories[i] %>
   ${category['description']}
   % for j in range(len(category['options'])):
     <% option = category['options'][j] %>
+    %if option['selected']:
+
+    %else:
+
+    % endif
     % if view_only:
       ## if this is the selected rubric block, show it highlighted
-      % if option['selected']:
-
-      % else:
-
-      % endif
       ${option['text']}
-
      [${option['points']} points]
     % else:
-
+
     % endif
   % endfor
diff --git a/lms/templates/peer_grading/peer_grading_problem.html b/lms/templates/peer_grading/peer_grading_problem.html
index 007fd42c8d..5963691700 100644
--- a/lms/templates/peer_grading/peer_grading_problem.html
+++ b/lms/templates/peer_grading/peer_grading_problem.html
@@ -52,7 +52,9 @@
 
-
 
       Flag this submission for review by course staff (use if the submission contains inappropriate content):
 
diff --git a/lms/templates/self_assessment_rubric.html b/lms/templates/self_assessment_rubric.html
index b4fc125232..2986c5041a 100644
--- a/lms/templates/self_assessment_rubric.html
+++ b/lms/templates/self_assessment_rubric.html
@@ -2,20 +2,4 @@
     ${rubric | n }
-
-  % if not read_only:
-
-      Scoring
-
-      Please select a score below:
-
-
-      %for i in xrange(0,max_score+1):
-        <% id = "score-{0}".format(i) %>
-
-
-      %endfor
-
-  % endif
-
+
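Taken together, the template changes above remove the separate overall-score radio group from every grading front end (self assessment, peer grading, staff grading); the overall score is now derived by summing the per-category rubric selections. One illustrative calculation of that contract (hypothetical selections, mirroring `Rubric.get_total_score()` from display.coffee):

```python
# One selected point value per rubric category, as collected by
# Rubric.get_score_list() on the client:
rubric_scores = [2, 0, 1]

# Equivalent of Rubric.get_total_score(); this sum is what gets posted as
# 'score' (staff/peer grading) or 'assessment' (self assessment).
score = sum(rubric_scores)
assert score == 3
```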