Fix tests, address review feedback
@@ -171,7 +171,6 @@ class @CombinedOpenEnded
     @results_container = @$(@result_container_sel)
     @combined_rubric_container = @$(@combined_rubric_sel)
-
 
     # Where to put the rubric once we load it
     @oe = @$(@open_ended_child_sel)
@@ -9,7 +9,7 @@ import self_assessment_module
 import open_ended_module
 from functools import partial
 from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST
-from peer_grading_service import PeerGradingService, MockPeerGradingService
+from peer_grading_service import PeerGradingService, MockPeerGradingService, GradingServiceError
 
 log = logging.getLogger("mitx.courseware")
 
@@ -40,10 +40,10 @@ HUMAN_TASK_TYPE = {
 }
 
 HUMAN_STATES = {
-    'intitial' : "Not started.",
-    'assessing' : "Being scored.",
-    'intermediate_done' : "Scoring finished.",
-    'done' : "Complete."
+    'intitial': "Not started.",
+    'assessing': "Being scored.",
+    'intermediate_done': "Scoring finished.",
+    'done': "Complete.",
 }
 
 # Default value that controls whether or not to skip basic spelling checks in the controller
@@ -438,6 +438,7 @@ class CombinedOpenEndedV1Module():
             grader_type = grader_types[0]
         else:
             grader_type = "IN"
+            grader_types = ["IN"]
 
         if grader_type in HUMAN_GRADER_TYPE:
             human_grader_name = HUMAN_GRADER_TYPE[grader_type]
@@ -514,33 +515,46 @@ class CombinedOpenEndedV1Module():
         return return_html
 
     def check_if_student_has_done_needed_grading(self):
         """
         Checks with the ORA server to see if the student has completed the needed peer grading to be shown their grade.
         For example, if a student submits one response, and three peers grade their response, the student
         cannot see their grades and feedback unless they reciprocate.
         Output:
         success - boolean indicator of success
         allowed_to_submit - boolean indicator of whether student has done their needed grading or not
         error_message - If not success, explains why
         """
         student_id = self.system.anonymous_student_id
         success = False
         allowed_to_submit = True
-        error_string = ("<h4>Feedback not available yet</h4>"
-                        "<p>You need to peer grade {0} more submissions in order to see your feedback.</p>"
-                        "<p>You have graded responses from {1} students, and {2} students have graded your submissions. </p>"
-                        "<p>You have made {3} submissions.</p>")
         try:
             response = self.peer_gs.get_data_for_location(self.location.url(), student_id)
             log.info(response)
             count_graded = response['count_graded']
             count_required = response['count_required']
             student_sub_count = response['student_sub_count']
             count_available = response['count_available']
             success = True
-        except:
+        except GradingServiceError:
             # This is a dev_facing_error
             log.error("Could not contact external open ended graders for location {0} and student {1}".format(
                 self.location, student_id))
             # This is a student_facing_error
             error_message = "Could not contact the graders. Please notify course staff."
             return success, allowed_to_submit, error_message
+        except KeyError:
+            log.error("Invalid response from grading server for location {0} and student {1}".format(self.location, student_id))
+            error_message = "Received invalid response from the graders. Please notify course staff."
+            return success, allowed_to_submit, error_message
         if count_graded >= count_required or count_available==0:
-            return success, allowed_to_submit, ""
+            error_message = ""
+            return success, allowed_to_submit, error_message
         else:
             allowed_to_submit = False
             # This is a student_facing_error
+            error_string = ("<h4>Feedback not available yet</h4>"
+                            "<p>You need to peer grade {0} more submissions in order to see your feedback.</p>"
+                            "<p>You have graded responses from {1} students, and {2} students have graded your submissions. </p>"
+                            "<p>You have made {3} submissions.</p>")
             error_message = error_string.format(count_required - count_graded, count_graded, count_required,
                                                 student_sub_count)
             return success, allowed_to_submit, error_message
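
The method above boils down to a single rule on the three counts returned by the ORA service. A minimal standalone sketch of that rule (hypothetical helper, not code from this commit):

    def needed_grading_status(count_graded, count_required, count_available):
        # A student may see feedback once they have graded at least as many
        # peers as required, or when there is nothing left for them to grade.
        if count_graded >= count_required or count_available == 0:
            return True, ""
        remaining = count_required - count_graded
        return False, "You need to peer grade {0} more submissions.".format(remaining)

    # One response graded out of three required, with work still available:
    print(needed_grading_status(1, 3, 5))
    # (False, 'You need to peer grade 2 more submissions.')
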
@@ -562,9 +576,12 @@ class CombinedOpenEndedV1Module():
             rubric_number+=1
         response = self.get_last_response(rubric_number)
         score_length = len(response['grader_types'])
-        for z in xrange(0,score_length):
-            feedback = response['feedback_dicts'][z].get('feedback', '')
-            if response['grader_types'][z] in HUMAN_GRADER_TYPE.keys():
+        for z in xrange(score_length):
+            if response['grader_types'][z] in HUMAN_GRADER_TYPE:
+                try:
+                    feedback = response['feedback_dicts'][z].get('feedback', '')
+                except TypeError:
+                    return {'success' : False}
                 rubric_scores = [[response['rubric_scores'][z]]]
                 grader_types = [[response['grader_types'][z]]]
                 feedback_items = [[response['feedback_items'][z]]]
@@ -664,7 +681,7 @@ class CombinedOpenEndedV1Module():
         self.student_attempts +=1
         self.state = self.INITIAL
         self.ready_to_reset = False
-        for i in xrange(0, len(self.task_xml)):
+        for i in xrange(len(self.task_xml)):
             self.current_task_number = i
             self.setup_next_task(reset=True)
             self.current_task.reset(self.system)
 
@@ -206,27 +206,39 @@ class CombinedOpenEndedRubric(object):
     def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types):
         success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types,
                                                                                       feedback_types)
+        #Get all the categories in the rubric
         rubric_categories = self.extract_categories(rubric_xml)
+        #Get a list of max scores, each entry belonging to a rubric category
         max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
         actual_scores = []
+        #Get the highest possible score across all categories
         max_score = max(max_scores)
-        for i in xrange(0, len(rubric_categories)):
-            category = rubric_categories[i]
-            for j in xrange(0, len(category['options'])):
+        #Loop through each category
+        for i,category in enumerate(rubric_categories):
+            #Loop through each option in the category
+            for j in xrange(len(category['options'])):
+                #Intialize empty grader types list
                 rubric_categories[i]['options'][j]['grader_types'] = []
-                for tuple in score_tuples:
-                    if tuple[1] == i and tuple[2] == j:
-                        for grader_type in tuple[3]:
+                #Score tuples are a flat data structure with (category, option, grader_type_list) for selected graders
+                for tup in score_tuples:
+                    if tup[1] == i and tup[2] == j:
+                        for grader_type in tup[3]:
+                            #Set the rubric grader type to the tuple grader types
                             rubric_categories[i]['options'][j]['grader_types'].append(grader_type)
+                            #Grab the score and add it to the actual scores. J will be the score for the selected
+                            #grader type
                             if len(actual_scores)<=i:
+                                #Initialize a new list in the list of lists
                                 actual_scores.append([j])
                             else:
+                                #If a list in the list of lists for this position exists, append to it
                                 actual_scores[i] += [j]
 
         actual_scores = [sum(i)/len(i) for i in actual_scores]
         correct = []
+        #Define if the student is "correct" (1) "incorrect" (0) or "partially correct" (.5)
         for (i,a) in enumerate(actual_scores):
-            if int(a)/max_scores[i]==1:
+            if int(a) == max_scores[i]:
                 correct.append(1)
             elif int(a)==0:
                 correct.append(0)
 
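
The scoring arithmetic in this hunk is easy to check in isolation. A standalone sketch with illustrative data (not from the commit), assuming the partial-credit branch implied by the comment above:

    # Each inner list holds the option index every grader chose for one category.
    actual_scores = [[2, 2], [0, 1]]
    max_scores = [2, 2]  # top option's points per category

    # Mirror the module's integer averaging (sum(i)/len(i) floors on Python 2).
    averaged = [int(sum(s) / float(len(s))) for s in actual_scores]

    correct = []
    for i, a in enumerate(averaged):
        if a == max_scores[i]:
            correct.append(1)
        elif a == 0:
            correct.append(0)
        else:
            correct.append(0.5)  # assumed partially-correct branch

    print(averaged)  # [2, 0]
    print(correct)   # [1, 0]
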
@@ -62,7 +62,6 @@ class GradingService(object):
         """
         Make a get request to the grading controller
         """
-        log.debug(params)
         op = lambda: self.session.get(url,
                                       allow_redirects=allow_redirects,
                                       params=params)
 
@@ -227,7 +227,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
             count_graded = self.student_data_for_location['count_graded']
             count_required = self.student_data_for_location['count_required']
         except:
-            success, response = self.query_data_for_location()
+            success, response = self.query_data_for_location(self.location)
             if not success:
                 log.exception(
                     "No instance data found and could not get data from controller for loc {0} student {1}".format(
@@ -311,7 +311,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
         """
 
         required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown']
-        if 'submission_flagged' not in data or data['submission_flagged'] in ["false", False, "False"]:
+        if data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]:
             required.append("rubric_scores[]")
         success, message = self._check_required(data, set(required))
         if not success:
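
The rewritten condition changes the outcome only for the string "FALSE"; when the key is missing, both forms treat the submission as unflagged. A quick standalone check (illustrative only):

    for data in [{}, {"submission_flagged": "FALSE"}]:
        old = ('submission_flagged' not in data
               or data['submission_flagged'] in ["false", False, "False"])
        new = data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]
        print("{0!r}: old={1} new={2}".format(data, old, new))
    # {}: old=True new=True                             -> same result for a missing key
    # {'submission_flagged': 'FALSE'}: old=False new=True  -> only the new form catches "FALSE"
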
@@ -325,6 +325,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
         try:
             response = self.peer_gs.save_grade(**data_dict)
+            success, location_data = self.query_data_for_location(data_dict['location'])
+            #Don't check for success above because the response = statement will raise the same Exception as the one that will cause success to be false.
             response.update({'required_done' : False})
             if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded'])>=int(location_data['count_required']):
                 response['required_done'] = True
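
The new required_done flag is a pure function of the counts returned by the grading service. A minimal sketch of that check with assumed response data (counts may come back as strings, hence the int() coercion in the module):

    location_data = {'count_graded': '3', 'count_required': '3'}

    required_done = ('count_graded' in location_data
                     and 'count_required' in location_data
                     and int(location_data['count_graded']) >= int(location_data['count_required']))
    print(required_done)  # True
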
@@ -507,7 +509,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
             error_text = "Could not get list of problems to peer grade. Please notify course staff."
             log.error(error_text)
             success = False
-        except:
+        except Exception:
             log.exception("Could not contact peer grading service.")
             success = False
 
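
Narrowing bare except: clauses to except Exception, as in this and the following hunks, matters because a bare clause also traps exits and interrupts. A small standalone illustration (not from the commit):

    def swallow_all(action):
        # Old style: a bare except hides even KeyboardInterrupt and SystemExit.
        try:
            action()
        except:
            return "swallowed"

    def swallow_errors(action):
        # New style: except Exception lets interrupts and exits propagate.
        try:
            action()
        except Exception:
            return "swallowed"

    def interrupt():
        raise KeyboardInterrupt

    print(swallow_all(interrupt))       # 'swallowed' -- the interrupt disappears
    try:
        swallow_errors(interrupt)
    except KeyboardInterrupt:
        print("interrupt propagated")   # the narrowed clause lets it through
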
@@ -518,7 +520,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
         '''
         try:
             return modulestore().get_instance(self.system.course_id, location)
-        except:
+        except Exception:
             # the linked problem doesn't exist
             log.error("Problem {0} does not exist in this course".format(location))
             raise
@@ -528,14 +530,14 @@ class PeerGradingModule(PeerGradingFields, XModule):
             problem_location = problem['location']
             try:
                 descriptor = _find_corresponding_module_for_location(problem_location)
-            except:
+            except Exception:
                 continue
             if descriptor:
                 problem['due'] = descriptor.lms.due
                 grace_period = descriptor.lms.graceperiod
                 try:
                     problem_timeinfo = TimeInfo(problem['due'], grace_period)
-                except:
+                except Exception:
                     log.error("Malformed due date or grace period string for location {0}".format(problem_location))
                     raise
                 if self._closed(problem_timeinfo):
 
@@ -73,6 +73,7 @@ class OpenEndedChildTest(unittest.TestCase):
 
     def setUp(self):
         self.test_system = get_test_system()
+        self.test_system.open_ended_grading_interface = None
         self.openendedchild = OpenEndedChild(self.test_system, self.location,
                                              self.definition, self.descriptor, self.static_data, self.metadata)
 
@@ -203,7 +204,7 @@ class OpenEndedModuleTest(unittest.TestCase):
 
     def setUp(self):
         self.test_system = get_test_system()
-
+        self.test_system.open_ended_grading_interface = None
         self.test_system.location = self.location
         self.mock_xqueue = MagicMock()
         self.mock_xqueue.send_to_queue.return_value = (None, "Message")
@@ -378,6 +379,7 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
         full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
         descriptor = Mock(data=full_definition)
         test_system = get_test_system()
+        test_system.open_ended_grading_interface = None
         combinedoe_container = CombinedOpenEndedModule(
             test_system,
             descriptor,
@@ -504,6 +506,7 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
 
     def setUp(self):
         self.test_system = get_test_system()
+        self.test_system.open_ended_grading_interface = None
         self.test_system.xqueue['interface'] = Mock(
             send_to_queue=Mock(side_effect=[1, "queued"])
         )
@@ -537,9 +540,9 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
         module = self.get_module_from_location(self.problem_location, COURSE)
 
         #Simulate a student saving an answer
-        module.handle_ajax("save_answer", {"student_answer": self.answer})
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
-        html = module.handle_ajax("get_html", {})
+        module.handle_ajax("save_answer", {"student_answer": self.answer, "can_upload_files" : False, "student_file" : None})
+        html = module.handle_ajax("get_html", {})
 
         #Mock a student submitting an assessment
         assessment_dict = MockQueryDict()
@@ -547,8 +550,7 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
         module.handle_ajax("save_assessment", assessment_dict)
         task_one_json = json.loads(module.task_states[0])
         self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
+        rubric = module.handle_ajax("get_combined_rubric", {})
 
         #Move to the next step in the problem
         module.handle_ajax("next_problem", {})
@@ -585,7 +587,6 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
         module.handle_ajax("save_assessment", assessment_dict)
         task_one_json = json.loads(module.task_states[0])
         self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
-        module.handle_ajax("get_status", {})
 
         #Move to the next step in the problem
         try:
@@ -628,12 +629,8 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
 
         #Get html and other data client will request
         module.get_html()
         legend = module.handle_ajax("get_legend", {})
         self.assertTrue(isinstance(legend, basestring))
-
-        module.handle_ajax("get_status", {})
-        module.handle_ajax("skip_post_assessment", {})
-        self.assertTrue(isinstance(legend, basestring))
 
         #Get all results
         module.handle_ajax("get_combined_rubric", {})
@@ -654,6 +651,7 @@ class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
 
     def setUp(self):
         self.test_system = get_test_system()
+        self.test_system.open_ended_grading_interface = None
         self.test_system.xqueue['interface'] = Mock(
             send_to_queue=Mock(side_effect=[1, "queued"])
         )
@@ -670,8 +668,6 @@ class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
 
         #Simulate a student saving an answer
         module.handle_ajax("save_answer", {"student_answer": self.answer})
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
 
         #Mock a student submitting an assessment
         assessment_dict = MockQueryDict()
@@ -679,8 +675,6 @@ class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
         module.handle_ajax("save_assessment", assessment_dict)
         task_one_json = json.loads(module.task_states[0])
         self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
-        status = module.handle_ajax("get_status", {})
-        self.assertTrue(isinstance(status, basestring))
 
         #Move to the next step in the problem
         module.handle_ajax("next_problem", {})
 
@@ -61,7 +61,7 @@ class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
         Try getting data from the external grading service
         @return:
         """
-        success, data = self.peer_grading.query_data_for_location()
+        success, data = self.peer_grading.query_data_for_location(self.problem_location.url())
         self.assertEqual(success, True)
 
     def test_get_score(self):