Document image class, fix logging and scoring issues
@@ -38,9 +38,12 @@ MAX_SCORE = 1
#The highest score allowed for the overall xmodule and for each rubric point
MAX_SCORE_ALLOWED = 3

IS_SCORED=False
IS_SCORED = False
ACCEPT_FILE_UPLOAD = True

#Contains all reasonable bool and case combinations of True
TRUE_DICT = ["True", True, "TRUE", "true"]

class CombinedOpenEndedModule(XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -141,31 +144,31 @@ class CombinedOpenEndedModule(XModule):
#Allow reset is true if student has failed the criteria to move to the next child task
self.allow_reset = instance_state.get('ready_to_reset', False)
self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS))
self.is_scored = (self.metadata.get('is_graded', IS_SCORED) in [True, "True"])
self.accept_file_upload = (self.metadata.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in [True, "True"])

log.debug(self.metadata.get('is_graded', IS_SCORED))
self.is_scored = self.metadata.get('is_graded', IS_SCORED) in TRUE_DICT
self.accept_file_upload = self.metadata.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT

# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = int(self.metadata.get('max_score', MAX_SCORE))

if self._max_score>MAX_SCORE_ALLOWED:
error_message="Max score {0} is higher than max score allowed {1}".format(self._max_score, MAX_SCORE_ALLOWED)
if self._max_score > MAX_SCORE_ALLOWED:
error_message = "Max score {0} is higher than max score allowed {1}".format(self._max_score,
MAX_SCORE_ALLOWED)
log.exception(error_message)
raise Exception

rubric_renderer = CombinedOpenEndedRubric(system, True)
success, rubric_feedback = rubric_renderer.render_rubric(stringify_children(definition['rubric']))
if not success:
error_message="Could not parse rubric : {0}".format(definition['rubric'])
error_message = "Could not parse rubric : {0}".format(definition['rubric'])
log.exception(error_message)
raise Exception

rubric_categories = rubric_renderer.extract_categories(stringify_children(definition['rubric']))
for category in rubric_categories:
if len(category['options'])>(MAX_SCORE_ALLOWED+1):
error_message="Number of score points in rubric {0} higher than the max allowed, which is {1}".format(len(category['options']) , MAX_SCORE_ALLOWED)
if len(category['options']) > (MAX_SCORE_ALLOWED + 1):
error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}".format(
len(category['options']), MAX_SCORE_ALLOWED)
log.exception(error_message)
raise Exception
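A standalone sketch of the boolean-flag normalization this hunk introduces; TRUE_DICT, IS_SCORED and the key names come from the diff, while the metadata dict here is a made-up example:

# Metadata flags can arrive either as real booleans or as strings such as
# "true"/"TRUE"; membership in TRUE_DICT covers both forms.
TRUE_DICT = ["True", True, "TRUE", "true"]
IS_SCORED = False

def parse_bool_flag(metadata, key, default):
    return metadata.get(key, default) in TRUE_DICT

metadata = {'is_graded': 'TRUE'}                          # hypothetical instance metadata
parse_bool_flag(metadata, 'is_graded', IS_SCORED)         # True
parse_bool_flag(metadata, 'accept_file_upload', False)    # False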
@@ -176,7 +179,7 @@ class CombinedOpenEndedModule(XModule):
'prompt': definition['prompt'],
'rubric': definition['rubric'],
'display_name': self.display_name,
'accept_file_upload' : self.accept_file_upload,
'accept_file_upload': self.accept_file_upload,
}

self.task_xml = definition['task_xml']
@@ -269,13 +272,13 @@ class CombinedOpenEndedModule(XModule):
elif current_task_state is None and self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
last_response = last_response_data['response']
current_task_state=json.dumps({
'state' : self.ASSESSING,
'version' : self.STATE_VERSION,
'max_score' : self._max_score,
'attempts' : 0,
'created' : True,
'history' : [{'answer' : last_response}],
current_task_state = json.dumps({
'state': self.ASSESSING,
'version': self.STATE_VERSION,
'max_score': self._max_score,
'attempts': 0,
'created': True,
'history': [{'answer': last_response}],
})
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
@@ -327,8 +330,8 @@ class CombinedOpenEndedModule(XModule):
'task_count': len(self.task_xml),
'task_number': self.current_task_number + 1,
'status': self.get_status(),
'display_name': self.display_name ,
'accept_file_upload' : self.accept_file_upload,
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
}

return context
@@ -587,12 +590,11 @@ class CombinedOpenEndedModule(XModule):
last_response = self.get_last_response(self.current_task_number)
max_score = last_response['max_score']
score = last_response['score']
log.debug(last_response)

score_dict = {
'score' : score,
'total' : max_score,
}
'score': score,
'total': max_score,
}

return score_dict

@@ -621,6 +623,7 @@ class CombinedOpenEndedModule(XModule):

return progress_object


class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""
Module for adding combined open ended questions
@@ -7,7 +7,7 @@ from django.conf import settings
import pickle
import logging

log=logging.getLogger(__name__)
log = logging.getLogger(__name__)

TRUSTED_IMAGE_DOMAINS = [
'wikipedia.com',
@@ -28,17 +28,29 @@ MAX_COLORS_TO_COUNT = 16
MAX_COLORS = 20

class ImageProperties(object):
"""
Class to check properties of an image and to validate if they are allowed.
"""
def __init__(self, image):
"""
Initializes class variables
@param image: Image object (from PIL)
@return: None
"""
self.image = image
image_size = self.image.size
self.image_too_large = False
if image_size[0]> MAX_ALLOWED_IMAGE_DIM or image_size[1] > MAX_ALLOWED_IMAGE_DIM:
if image_size[0] > MAX_ALLOWED_IMAGE_DIM or image_size[1] > MAX_ALLOWED_IMAGE_DIM:
self.image_too_large = True
if image_size[0]> MAX_IMAGE_DIM or image_size[1] > MAX_IMAGE_DIM:
if image_size[0] > MAX_IMAGE_DIM or image_size[1] > MAX_IMAGE_DIM:
self.image = self.image.resize((MAX_IMAGE_DIM, MAX_IMAGE_DIM))
self.image_size = self.image.size

def count_colors(self):
"""
Counts the number of colors in an image, and matches them to the max allowed
@return: boolean true if color count is acceptable, false otherwise
"""
colors = self.image.getcolors(MAX_COLORS_TO_COUNT)
if colors is None:
colors = MAX_COLORS_TO_COUNT
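A self-contained version of the size check that __init__ performs above; the PIL import and the two constants' values are assumptions here, since only the constant names appear in the diff:

from PIL import Image

MAX_ALLOWED_IMAGE_DIM = 2000  # assumed value
MAX_IMAGE_DIM = 150           # assumed value

def normalize_image(image):
    # Flag images that exceed the hard limit, and downscale anything larger
    # than the working dimension, mirroring __init__ above.
    width, height = image.size
    too_large = width > MAX_ALLOWED_IMAGE_DIM or height > MAX_ALLOWED_IMAGE_DIM
    if width > MAX_IMAGE_DIM or height > MAX_IMAGE_DIM:
        image = image.resize((MAX_IMAGE_DIM, MAX_IMAGE_DIM))
    return image, too_large

image, too_large = normalize_image(Image.open("submission.png"))  # hypothetical file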
@@ -50,9 +62,15 @@ class ImageProperties(object):
return too_many_colors

def get_skin_ratio(self):
"""
Gets the ratio of skin tone colors in an image
@return: True if the ratio is low enough to be acceptable, false otherwise
"""
im = self.image
skin = sum([count for count, rgb in im.getcolors(im.size[0]*im.size[1]) if rgb[0]>60 and rgb[1]<(rgb[0]*0.85) and rgb[2]<(rgb[0]*0.7) and rgb[1]>(rgb[0]*0.4) and rgb[2]>(rgb[0]*0.2)])
bad_color_val = float(skin)/float(im.size[0]*im.size[1])
skin = sum([count for count, rgb in im.getcolors(im.size[0] * im.size[1]) if
rgb[0] > 60 and rgb[1] < (rgb[0] * 0.85) and rgb[2] < (rgb[0] * 0.7) and rgb[1] > (rgb[0] * 0.4) and
rgb[2] > (rgb[0] * 0.2)])
bad_color_val = float(skin) / float(im.size[0] * im.size[1])
if bad_color_val > .4:
is_okay = False
else:
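Pulled out of the class, the same ratio computation reads as follows; this sketch assumes an RGB PIL image and uses the 0.4 threshold visible in the hunk:

def skin_ratio_ok(image, threshold=0.4):
    width, height = image.size
    # getcolors(width * height) guarantees every distinct colour is returned
    # as a (count, rgb) pair instead of being truncated to None.
    skin = sum(count for count, rgb in image.getcolors(width * height)
               if rgb[0] > 60
               and rgb[0] * 0.4 < rgb[1] < rgb[0] * 0.85
               and rgb[0] * 0.2 < rgb[2] < rgb[0] * 0.7)
    return float(skin) / float(width * height) <= threshold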
@@ -61,16 +79,29 @@ class ImageProperties(object):
return is_okay

def run_tests(self):
"""
Does all available checks on an image to ensure that it is okay (size, skin ratio, colors)
@return: Boolean indicating whether or not image passes all checks
"""
image_is_okay = self.count_colors() and self.get_skin_ratio() and not self.image_too_large
log.debug("Image too large: {0}".format(self.image_too_large))
log.debug("Image Okay: {0}".format(image_is_okay))
return image_is_okay


class URLProperties(object):
"""
Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
links to the peer grading image functionality of the external grading service.
"""
def __init__(self, url_string):
self.url_string = url_string

def check_if_parses(self):
"""
Check to see if a URL parses properly
@return: success (True if parses, false if not)
"""
success = False
try:
self.parsed_url = urlparse.urlparse(url_string)
@@ -81,6 +112,10 @@ class URLProperties(object):
return success

def check_suffix(self):
"""
Checks the suffix of a url to make sure that it is allowed
@return: True if suffix is okay, false if not
"""
good_suffix = False
for suffix in ALLOWABLE_IMAGE_SUFFIXES:
if self.url_string.endswith(suffix):
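Condensed into one helper, the two URL checks above look roughly like this; the suffix list contents are an assumption, since only the name ALLOWABLE_IMAGE_SUFFIXES appears in the diff:

import urlparse  # Python 2, as in the module above

ALLOWABLE_IMAGE_SUFFIXES = ['.jpg', '.jpeg', '.png', '.gif']  # assumed values

def url_looks_like_image(url_string):
    # The URL has to both parse and end with an allowed image suffix.
    try:
        urlparse.urlparse(url_string)
        parses = True
    except Exception:
        parses = False
    has_suffix = any(url_string.endswith(suffix) for suffix in ALLOWABLE_IMAGE_SUFFIXES)
    return parses and has_suffix

url_looks_like_image("http://wikipedia.com/some_image.png")  # True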
@@ -89,17 +124,34 @@ class URLProperties(object):
return good_suffix

def run_tests(self):
"""
Runs all available url tests
@return: True if URL passes tests, false if not.
"""
url_is_okay = self.check_suffix() and self.check_if_parses()
return url_is_okay


def run_url_tests(url_string):
"""
Creates a URLProperties object and runs all tests
@param url_string: A URL in string format
@return: Boolean indicating whether or not URL has passed all tests
"""
url_properties = URLProperties(url_string)
return url_properties.run_tests()


def run_image_tests(image):
"""
Runs all available image tests
@param image: PIL Image object
@return: Boolean indicating whether or not all tests have been passed
"""
image_properties = ImageProperties(image)
return image_properties.run_tests()


def upload_to_s3(file_to_upload, keyname):
'''
Upload file to S3 using provided keyname.
@@ -124,19 +176,22 @@ def upload_to_s3(file_to_upload, keyname):
#k.set_metadata("Content-Type", 'images/png')

k.set_acl("public-read")
public_url = k.generate_url(60*60*24*365) # URL timeout in seconds.
public_url = k.generate_url(60 * 60 * 24 * 365) # URL timeout in seconds.

return True, public_url
except:
return False, "Could not connect to S3."


def get_from_s3(s3_public_url):
"""
Gets an image from a given S3 url
@param s3_public_url: The URL where an image is located
@return: The image data
"""
r = requests.get(s3_public_url, timeout=2)
data=r.text
data = r.text
return data

def convert_image_to_string(image):
return image.tostring()
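Only a fragment of upload_to_s3 appears in the hunk above; a minimal sketch of that upload path, assuming boto's S3 API and made-up credential and bucket parameters, would be:

import boto
from boto.s3.key import Key

def upload_and_share(aws_access_key, aws_secret_key, bucket_name, keyname, file_to_upload):
    # Connect, write the file under the given key, make it world-readable,
    # and hand back a URL that stays valid for a year (expressed in seconds).
    conn = boto.connect_s3(aws_access_key, aws_secret_key)
    bucket = conn.create_bucket(bucket_name.lower())
    k = Key(bucket)
    k.key = keyname
    k.set_contents_from_file(file_to_upload)
    k.set_acl("public-read")
    return k.generate_url(60 * 60 * 24 * 365)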
@@ -378,9 +378,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
Return error message or feedback template
"""

rubric_feedback=""
rubric_feedback = ""
feedback = self._convert_longform_feedback_to_html(response_items)
if response_items['rubric_scores_complete']==True:
if response_items['rubric_scores_complete'] == True:
rubric_renderer = CombinedOpenEndedRubric(system, True)
success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])

@@ -392,7 +392,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'grader_type': response_items['grader_type'],
'score': "{0} / {1}".format(response_items['score'], self.max_score()),
'feedback': feedback,
'rubric_feedback' : rubric_feedback
'rubric_feedback': rubric_feedback
})

return feedback_template
@@ -436,7 +436,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
log.error(error_message)
fail['feedback'] = error_message
return fail
#This is to support peer grading
#This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
for i in xrange(0, len(score_result['score'])):
@@ -447,8 +447,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
'rubric_scores_complete' : score_result['rubric_scores_complete'][i],
'rubric_xml' : score_result['rubric_xml'][i],
'rubric_scores_complete': score_result['rubric_scores_complete'][i],
'rubric_xml': score_result['rubric_xml'][i],
}
feedback_items.append(self._format_feedback(new_score_result, system))
if join_feedback:
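When the grader returns lists (one entry per peer reviewer), the loop above fans the batched result out into one dict per grader; a stripped-down sketch of that fan-out, keeping only the per-grader fields shown in the diff:

def split_peer_scores(score_result):
    # Parallel lists in one batched result become one dict per grader.
    items = []
    for i in xrange(0, len(score_result['score'])):
        items.append({
            'score': score_result['score'][i],
            'grader_id': score_result['grader_id'][i],
            'rubric_scores_complete': score_result['rubric_scores_complete'][i],
            'rubric_xml': score_result['rubric_xml'][i],
        })
    return items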
@@ -475,7 +475,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
if not self.history:
return ""

feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system, join_feedback=join_feedback)
feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system,
join_feedback=join_feedback)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
@@ -565,8 +566,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):

return {
'success': True,
'error' : error_message,
'student_response' : get['student_answer']
'error': error_message,
'student_response': get['student_answer']
}

def update_score(self, get, system):
@@ -611,7 +612,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'msg': post_assessment,
'child_type': 'openended',
'correct': correct,
'accept_file_upload' : self.accept_file_upload,
'accept_file_upload': self.accept_file_upload,
}
html = system.render_template('open_ended.html', context)
return html
@@ -140,7 +140,9 @@ class OpenEndedChild(object):
def sanitize_html(answer):
try:
answer = autolink_html(answer)
cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True, host_whitelist = open_ended_image_submission.TRUSTED_IMAGE_DOMAINS, whitelist_tags = set(['embed', 'iframe', 'a', 'img']))
cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True,
host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
clean_html = cleaner.clean_html(answer)
clean_html = re.sub(r'</p>$', '', re.sub(r'^<p>', '', clean_html))
except:
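A self-contained version of the sanitization step this hunk rewraps, using lxml's Cleaner and autolink_html with the whitelist inlined; the domain list here is just the one entry visible earlier in the diff:

from lxml.html.clean import Cleaner, autolink_html

TRUSTED_IMAGE_DOMAINS = ['wikipedia.com']  # partial; the full list lives in open_ended_image_submission

def sanitize(answer):
    # Turn bare URLs into links, then strip everything outside the whitelist.
    answer = autolink_html(answer)
    cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True,
                      safe_attrs_only=True,
                      host_whitelist=TRUSTED_IMAGE_DOMAINS,
                      whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
    return cleaner.clean_html(answer)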
@@ -309,10 +311,10 @@ class OpenEndedChild(object):

def check_for_image_and_upload(self, get_data):
has_file_to_upload = False
success=False
image_tag=""
success = False
image_tag = ""
if 'can_upload_files' in get_data:
if get_data['can_upload_files'] =='true':
if get_data['can_upload_files'] == 'true':
has_file_to_upload = True
file = get_data['student_file'][0]
success, s3_public_url = self.upload_image_to_s3(file)

@@ -80,7 +80,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'state': self.state,
'allow_reset': self._allow_reset(),
'child_type': 'selfassessment',
'accept_file_upload' : self.accept_file_upload,
'accept_file_upload': self.accept_file_upload,
}

html = system.render_template('self_assessment_prompt.html', context)
@@ -125,7 +125,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return ''

rubric_renderer = CombinedOpenEndedRubric(system, True)
success, rubric_html = rubric_renderer.render_rubric(self.rubric)
success, rubric_html = rubric_renderer.render_rubric(self.rubric)

# we'll render it
context = {'rubric': rubric_html,
@@ -215,8 +215,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return {
'success': success,
'rubric_html': self.get_rubric_html(system),
'error' : error_message,
'student_response' : get['student_answer'],
'error': error_message,
'student_response': get['student_answer'],
}

def save_assessment(self, get, system):