Working on making a combined rubric
@@ -53,7 +53,7 @@ def get_logger_config(log_dir,
                            logging_env=logging_env,
                            hostname=hostname)
 
-    handlers = ['console', 'local'] if debug else ['console',
+    handlers = ['console', 'local', 'null'] if debug else ['console',
                                                    'syslogger-remote', 'local']
 
     logger_config = {
@@ -84,6 +84,12 @@ def get_logger_config(log_dir,
                 'level': 'ERROR',
                 'class': 'newrelic_logging.NewRelicHandler',
                 'formatter': 'raw',
             },
+            'null' : {
+                'level': 'CRITICAL',
+                'class': 'logging.handlers.SysLogHandler',
+                'address': syslog_addr,
+                'formatter': 'syslog_format',
+            }
         },
         'loggers': {
@@ -92,11 +98,21 @@ def get_logger_config(log_dir,
                 'level': 'DEBUG',
                 'propagate': False,
             },
+            'django.db.backends': {
+                'handlers': ['null'],
+                'propagate': False,
+                'level':'DEBUG',
+            },
+            'django_comment_client.utils' : {
+                'handlers': ['null'],
+                'propagate': False,
+                'level':'DEBUG',
+            },
             '': {
                 'handlers': handlers,
                 'level': 'DEBUG',
                 'propagate': False
             },
         }
     }
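
The 'null' handler above silences chatty loggers (the SQL echo from django.db.backends, comment-client noise) without touching the root logger's handlers. Below is a minimal, self-contained sketch of the same pattern using the stdlib's logging.NullHandler instead of the CRITICAL-level SysLogHandler this commit wires up; the config names here are illustrative, not the edx-platform ones.

    import logging
    import logging.config

    LOGGING = {
        'version': 1,
        'handlers': {
            'console': {'class': 'logging.StreamHandler'},
            # NullHandler drops every record it receives; the commit gets a
            # similar effect by pointing 'null' at a CRITICAL-level syslog handler.
            'null': {'class': 'logging.NullHandler'},
        },
        'loggers': {
            # Route the noisy logger to 'null' and stop propagation so the
            # root handlers never see its records.
            'django.db.backends': {'handlers': ['null'], 'propagate': False, 'level': 'DEBUG'},
            '': {'handlers': ['console'], 'level': 'DEBUG'},
        },
    }

    logging.config.dictConfig(LOGGING)
    logging.getLogger('django.db.backends').debug('silenced')  # dropped
    logging.getLogger('app').debug('visible')                  # printed to console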
@@ -29,6 +29,7 @@ class CombinedOpenEndedRubric(object):
         success = False
         try:
             rubric_categories = self.extract_categories(rubric_xml)
+            rubric_scores = [cat['score'] for cat in rubric_categories]
             max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
             max_score = max(max_scores)
             html = self.system.render_template('open_ended_rubric.html',
@@ -41,7 +42,7 @@ class CombinedOpenEndedRubric(object):
             error_message = "[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml)
             log.error(error_message)
             raise RubricParsingError(error_message)
-        return success, html
+        return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores}
 
     def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
         success, rubric_feedback = self.render_rubric(rubric_string)
@@ -149,7 +150,7 @@ class CombinedOpenEndedRubric(object):
         options = sorted(options, key=lambda option: option['points'])
         CombinedOpenEndedRubric.validate_options(options)
 
-        return {'description': description, 'options': options}
+        return {'description': description, 'options': options, 'score' : score}
 
     @staticmethod
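
render_rubric now returns one dict instead of a (success, html) tuple so it can carry the per-category scores as well. A stripped-down sketch of the new contract; only render_rubric and the three dict keys come from the commit, the helper parameters are illustrative stand-ins for the instance's own methods:

    def render_rubric(rubric_xml, extract_categories, render_template):
        """Sketch: parse the rubric, render it, return one result dict."""
        rubric_categories = extract_categories(rubric_xml)
        # each category dict now carries the 'score' added in extract_categories
        rubric_scores = [cat['score'] for cat in rubric_categories]
        html = render_template('open_ended_rubric.html', {'categories': rubric_categories})
        return {'success': True, 'html': html, 'rubric_scores': rubric_scores}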
@@ -114,7 +114,9 @@ class GradingService(object):
         if 'rubric' in response_json:
             rubric = response_json['rubric']
             rubric_renderer = CombinedOpenEndedRubric(self.system, view_only)
-            success, rubric_html = rubric_renderer.render_rubric(rubric)
+            rubric_dict = rubric_renderer.render_rubric(rubric)
+            success = rubric_dict['success']
+            rubric_html = rubric_dict['html']
             response_json['rubric'] = rubric_html
         return response_json
     # if we can't parse the rubric into HTML,
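
Every call site changes the same way: dict lookups replace tuple unpacking. The payoff is that new keys like 'rubric_scores' are additive, whereas widening a (success, html) tuple would break every existing caller at once. A small caller-side sketch, assuming the dict contract above:

    def unpack_rubric(rubric_dict):
        """Sketch of the caller-side change during this refactor."""
        success = rubric_dict['success']
        rubric_html = rubric_dict['html']
        # .get() with a default keeps callers working if a code path still
        # returns a dict without the new key while the refactor is in flight
        rubric_scores = rubric_dict.get('rubric_scores', [])
        return success, rubric_html, rubric_scores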
@@ -266,7 +266,8 @@ class @CombinedOpenEnded
     event.preventDefault()
     if @child_state == 'assessing' && Rubric.check_complete()
       checked_assessment = Rubric.get_total_score()
-      data = {'assessment' : checked_assessment}
+      score_list = Rubric.get_score_list()
+      data = {'assessment' : checked_assessment, 'score_list' : score_list}
       $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
         if response.success
           @child_state = response.state
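
On the wire, jQuery's default (non-traditional) parameter serialization turns the score_list array into repeated score_list[] fields, which is why the Python handler further down reads them with getlist('score_list[]'). A hedged sketch of what the save_assessment POST body looks like to Django, assuming $.postWithPrefix is a thin wrapper over $.post:

    from django.http import QueryDict

    # What the browser sends for {'assessment': 3, 'score_list': [1, 2]}:
    body = QueryDict('assessment=3&score_list[]=1&score_list[]=2')

    assert body['assessment'] == '3'                   # single value
    assert body.getlist('score_list[]') == ['1', '2']  # repeated field -> list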
@@ -388,7 +388,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         feedback = self._convert_longform_feedback_to_html(response_items)
         if response_items['rubric_scores_complete'] == True:
             rubric_renderer = CombinedOpenEndedRubric(system, True)
-            success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])
+            rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml'])
+            success = rubric_dict['success']
+            rubric_feedback = rubric_dict['html']
+            rubric_scores = rubric_dict['rubric_scores']
 
         if not response_items['success']:
             return system.render_template("open_ended_error.html",
@@ -401,7 +404,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                 'rubric_feedback': rubric_feedback
             })
 
-        return feedback_template
+        return feedback_template, rubric_scores
 
 
     def _parse_score_msg(self, score_msg, system, join_feedback=True):
@@ -452,6 +455,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         #This is to support peer grading
         if isinstance(score_result['score'], list):
             feedback_items = []
+            rubric_scores = []
             for i in xrange(0, len(score_result['score'])):
                 new_score_result = {
                     'score': score_result['score'][i],
@@ -463,7 +467,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                     'rubric_scores_complete': score_result['rubric_scores_complete'][i],
                     'rubric_xml': score_result['rubric_xml'][i],
                 }
-                feedback_items.append(self._format_feedback(new_score_result, system))
+                feedback_template, rubric_score = self._format_feedback(new_score_result, system)
+                feedback_items.append(feedback_template)
+                rubric_scores.append(rubric_score)
             if join_feedback:
                 feedback = "".join(feedback_items)
             else:
@@ -471,13 +477,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
             score = int(median(score_result['score']))
         else:
             #This is for instructor and ML grading
-            feedback = self._format_feedback(score_result, system)
+            feedback, rubric_score = self._format_feedback(score_result, system)
             score = score_result['score']
+            rubric_scores = [rubric_score]
 
         self.submission_id = score_result['submission_id']
         self.grader_id = score_result['grader_id']
 
-        return {'valid': True, 'score': score, 'feedback': feedback}
+        return {'valid': True, 'score': score, 'feedback': feedback, 'rubric_scores' : rubric_scores}
 
     def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
         """
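
For peer grading, _format_feedback now returns a (feedback, rubric_score) pair per grader, and _parse_score_msg accumulates them alongside the median of the numeric scores. A minimal sketch of that aggregation shape under illustrative names; only the general pattern comes from the commit, not this signature:

    def aggregate_peer_results(per_grader_results, format_feedback, join_feedback=True):
        """Sketch: one (feedback, rubric_score) pair per grader -> one result dict."""
        feedback_items, rubric_scores, scores = [], [], []
        for result in per_grader_results:
            feedback_template, rubric_score = format_feedback(result)
            feedback_items.append(feedback_template)
            rubric_scores.append(rubric_score)
            scores.append(result['score'])
        feedback = "".join(feedback_items) if join_feedback else feedback_items
        scores.sort()
        median_score = scores[len(scores) // 2]  # simple median, odd-length lists
        return {'valid': True, 'score': median_score,
                'feedback': feedback, 'rubric_scores': rubric_scores}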
@@ -137,8 +137,6 @@ class OpenEndedChild(object):
         else:
             return False, {}
 
-
-
     def latest_answer(self):
         """Empty string if not available"""
         if not self.history:
@@ -108,7 +108,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
         if dispatch not in handlers:
             return 'Error'
 
-        log.debug(get)
         before = self.get_progress()
         d = handlers[dispatch](get, system)
         after = self.get_progress()
@@ -126,7 +125,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
             return ''
 
         rubric_renderer = CombinedOpenEndedRubric(system, False)
-        success, rubric_html = rubric_renderer.render_rubric(self.rubric)
+        rubric_dict = rubric_renderer.render_rubric(self.rubric)
+        success = rubric_dict['success']
+        rubric_html = rubric_dict['html']
 
         # we'll render it
         context = {'rubric': rubric_html,
@@ -235,8 +236,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
 
         try:
             score = int(get['assessment'])
+            score_list = get.getlist('score_list[]')
        except ValueError:
-            return {'success': False, 'error': "Non-integer score value"}
+            return {'success': False, 'error': "Non-integer score value, or no score list"}
 
         self.record_latest_score(score)
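
One subtlety in that last hunk: QueryDict.getlist() returns an empty list for a missing key rather than raising, so the except ValueError branch only fires on a non-integer 'assessment', never on an absent score list, despite the new error message. A stricter version would test for emptiness explicitly; this is a sketch of that check, not code from the commit:

    def parse_assessment(get):
        """Sketch: validate the overall score and the per-category list."""
        score_list = get.getlist('score_list[]')
        if not score_list:  # getlist() never raises; check emptiness explicitly
            return {'success': False, 'error': "No score list"}
        try:
            score = int(get['assessment'])
            score_list = [int(s) for s in score_list]
        except (KeyError, ValueError):
            return {'success': False, 'error': "Non-integer score value"}
        return {'success': True, 'score': score, 'score_list': score_list}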