diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index 67ff206e89..f4074283fe 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -104,11 +104,14 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule): icon_class = 'problem' - js = {'coffee': - [resource_string(__name__, 'js/src/combinedopenended/display.coffee'), - resource_string(__name__, 'js/src/collapsible.coffee'), - resource_string(__name__, 'js/src/javascript_loader.coffee'), - ]} + js = { + 'coffee': + [ + resource_string(__name__, 'js/src/combinedopenended/display.coffee'), + resource_string(__name__, 'js/src/collapsible.coffee'), + resource_string(__name__, 'js/src/javascript_loader.coffee'), + ] + } js_module_name = "CombinedOpenEnded" css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index 12f90ed1b3..1404f52300 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -294,9 +294,8 @@ class CombinedOpenEndedV1Module(): if self.current_task_number > 0: last_response_data = self.get_last_response(self.current_task_number - 1) current_response_data = self.get_current_attributes(self.current_task_number) - if (current_response_data['min_score_to_attempt'] > last_response_data['score'] - or current_response_data['max_score_to_attempt'] < last_response_data['score']): + or current_response_data['max_score_to_attempt'] < last_response_data['score']): self.state = self.DONE self.ready_to_reset = True @@ -662,9 +661,10 @@ class CombinedOpenEndedV1Module(): return { 'success': False, #This 
is a student_facing_error - 'error': ('You have attempted this question {0} times. ' - 'You are only allowed to attempt it {1} times.').format( - self.student_attempts, self.attempts) + 'error': ( + 'You have attempted this question {0} times. ' + 'You are only allowed to attempt it {1} times.' + ).format(self.student_attempts, self.attempts) } self.state = self.INITIAL self.ready_to_reset = False @@ -803,6 +803,17 @@ class CombinedOpenEndedV1Module(): return progress_object + def out_of_sync_error(self, get, msg=''): + """ + return dict out-of-sync error message, and also log. + """ + #This is a dev_facing_error + log.warning("Combined module state out sync. state: %r, get: %r. %s", + self.state, get, msg) + #This is a student_facing_error + return {'success': False, + 'error': 'The problem state got out-of-sync. Please try reloading the page.'} + class CombinedOpenEndedV1Descriptor(): """ @@ -849,7 +860,6 @@ class CombinedOpenEndedV1Descriptor(): return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')} - def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' elt = etree.Element('combinedopenended') diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py index f3f6568b1e..b16f0618bb 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py @@ -76,7 +76,6 @@ class GradingService(object): return r.text - def _try_with_login(self, operation): """ Call operation(), which should return a requests response object. 
If @@ -87,7 +86,7 @@ class GradingService(object): """ response = operation() if (response.json - and response.json.get('success') == False + and response.json.get('success') is False and response.json.get('error') == 'login_required'): # apparrently we aren't logged in. Try to fix that. r = self._login() diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py index 8373700837..7ba046b2ad 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py @@ -72,7 +72,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): self._parse(oeparam, self.child_prompt, self.child_rubric, system) - if self.child_created == True and self.child_state == self.ASSESSING: + if self.child_created is True and self.child_state == self.ASSESSING: self.child_created = False self.send_to_grader(self.latest_answer(), system) self.child_created = False @@ -159,9 +159,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild): score = int(survey_responses['score']) except: #This is a dev_facing_error - error_message = ("Could not parse submission id, grader id, " - "or feedback from message_post ajax call. Here is the message data: {0}".format( - survey_responses)) + error_message = ( + "Could not parse submission id, grader id, " + "or feedback from message_post ajax call. " + "Here is the message data: {0}".format(survey_responses) + ) log.exception(error_message) #This is a student_facing_error return {'success': False, 'msg': "There was an error saving your feedback. 
Please contact course staff."} @@ -179,8 +181,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild): queue_name=self.message_queue_name ) - student_info = {'anonymous_student_id': anonymous_student_id, - 'submission_time': qtime, + student_info = { + 'anonymous_student_id': anonymous_student_id, + 'submission_time': qtime, } contents = { 'feedback': feedback, @@ -190,8 +193,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'student_info': json.dumps(student_info), } - (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) + (error, msg) = qinterface.send_to_queue( + header=xheader, + body=json.dumps(contents) + ) #Convert error to a success value success = True @@ -224,15 +229,18 @@ class OpenEndedModule(openendedchild.OpenEndedChild): anonymous_student_id + str(len(self.child_history))) - xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['construct_callback'](), - lms_key=queuekey, - queue_name=self.queue_name) + xheader = xqueue_interface.make_xheader( + lms_callback_url=system.xqueue['construct_callback'](), + lms_key=queuekey, + queue_name=self.queue_name + ) contents = self.payload.copy() # Metadata related to the student submission revealed to the external grader - student_info = {'anonymous_student_id': anonymous_student_id, - 'submission_time': qtime, + student_info = { + 'anonymous_student_id': anonymous_student_id, + 'submission_time': qtime, } #Update contents with student response and student info @@ -243,12 +251,16 @@ class OpenEndedModule(openendedchild.OpenEndedChild): }) # Submit request. 
When successful, 'msg' is the prior length of the queue - (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) + qinterface.send_to_queue( + header=xheader, + body=json.dumps(contents) + ) # State associated with the queueing request - queuestate = {'key': queuekey, - 'time': qtime, } + queuestate = { + 'key': queuekey, + 'time': qtime, + } return True def _update_score(self, score_msg, queuekey, system): @@ -302,11 +314,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # We want to display available feedback in a particular order. # This dictionary specifies which goes first--lower first. - priorities = {# These go at the start of the feedback - 'spelling': 0, - 'grammar': 1, - # needs to be after all the other feedback - 'markup_text': 3} + priorities = { + # These go at the start of the feedback + 'spelling': 0, + 'grammar': 1, + # needs to be after all the other feedback + 'markup_text': 3 + } do_not_render = ['topicality', 'prompt-overlap'] default_priority = 2 @@ -393,7 +407,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): rubric_feedback = "" feedback = self._convert_longform_feedback_to_html(response_items) rubric_scores = [] - if response_items['rubric_scores_complete'] == True: + if response_items['rubric_scores_complete'] is True: rubric_renderer = CombinedOpenEndedRubric(system, True) rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml']) success = rubric_dict['success'] @@ -401,8 +415,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): rubric_scores = rubric_dict['rubric_scores'] if not response_items['success']: - return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR), - {'errors': feedback}) + return system.render_template( + "{0}/open_ended_error.html".format(self.TEMPLATE_DIR), + {'errors': feedback} + ) feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), { 'grader_type': 
response_items['grader_type'], @@ -496,8 +512,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild): grader_types.append(score_result['grader_type']) try: feedback_dict = json.loads(score_result['feedback'][i]) - except: - pass + except Exception: + feedback_dict = score_result['feedback'][i] feedback_dicts.append(feedback_dict) grader_ids.append(score_result['grader_id'][i]) submission_ids.append(score_result['submission_id']) @@ -515,8 +531,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild): feedback_items = [feedback] try: feedback_dict = json.loads(score_result['feedback']) - except: - pass + except Exception: + feedback_dict = score_result.get('feedback', '') feedback_dicts = [feedback_dict] grader_ids = [score_result['grader_id']] submission_ids = [score_result['submission_id']] @@ -545,8 +561,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild): if not self.child_history: return "" - feedback_dict = self._parse_score_msg(self.child_history[-1].get('post_assessment', ""), system, - join_feedback=join_feedback) + feedback_dict = self._parse_score_msg( + self.child_history[-1].get('post_assessment', ""), + system, + join_feedback=join_feedback + ) if not short_feedback: return feedback_dict['feedback'] if feedback_dict['valid'] else '' if feedback_dict['valid']: @@ -711,7 +730,7 @@ class OpenEndedDescriptor(): template_dir_name = "openended" def __init__(self, system): - self.system =system + self.system = system @classmethod def definition_from_xml(cls, xml_object, system): @@ -734,8 +753,9 @@ class OpenEndedDescriptor(): """Assumes that xml_object has child k""" return xml_object.xpath(k)[0] - return {'oeparam': parse('openendedparam')} - + return { + 'oeparam': parse('openendedparam') + } def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py 
b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py index d5889636ed..7dc8d99451 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py @@ -101,8 +101,9 @@ class OpenEndedChild(object): # completion (doesn't matter if you self-assessed correct/incorrect). if system.open_ended_grading_interface: self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system) - self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface, - system) + self.controller_qs = controller_query_service.ControllerQueryService( + system.open_ended_grading_interface,system + ) else: self.peer_gs = MockPeerGradingService() self.controller_qs = None diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py index 85c7a98132..56bd1ec0a8 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py @@ -37,7 +37,7 @@ class PeerGradingService(GradingService): def get_next_submission(self, problem_location, grader_id): response = self.get(self.get_next_submission_url, - {'location': problem_location, 'grader_id': grader_id}) + {'location': problem_location, 'grader_id': grader_id}) return self.try_to_decode(self._render_rubric(response)) def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, @@ -100,29 +100,29 @@ without making actual service calls to the grading controller class MockPeerGradingService(object): def get_next_submission(self, problem_location, grader_id): - return json.dumps({'success': True, - 'submission_id': 1, - 'submission_key': "", - 'student_response': 'fake student response', - 'prompt': 'fake submission prompt', - 
'rubric': 'fake rubric', - 'max_score': 4}) + return {'success': True, + 'submission_id': 1, + 'submission_key': "", + 'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4} def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged): - return json.dumps({'success': True}) + return {'success': True} def is_student_calibrated(self, problem_location, grader_id): - return json.dumps({'success': True, 'calibrated': True}) + return {'success': True, 'calibrated': True} def show_calibration_essay(self, problem_location, grader_id): - return json.dumps({'success': True, - 'submission_id': 1, - 'submission_key': '', - 'student_response': 'fake student response', - 'prompt': 'fake submission prompt', - 'rubric': 'fake rubric', - 'max_score': 4}) + return {'success': True, + 'submission_id': 1, + 'submission_key': '', + 'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4} def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key, score, @@ -130,10 +130,9 @@ class MockPeerGradingService(object): return {'success': True, 'actual_score': 2} def get_problem_list(self, course_id, grader_id): - return json.dumps({'success': True, - 'problem_list': [ - json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', - 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5}), - json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', - 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5}) - ]}) + return {'success': True, + 'problem_list': [ + ]} + + def get_data_for_location(self, problem_location, student_id): + return {"version": 1, "count_graded": 3, "count_required": 3, "success": True, "student_sub_count": 1} diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py 
b/common/lib/xmodule/xmodule/peer_grading_module.py index 1ad31922f5..eebfbe22e5 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -498,7 +498,6 @@ class PeerGradingModule(PeerGradingFields, XModule): log.error("Problem {0} does not exist in this course".format(location)) raise - for problem in problem_list: problem_location = problem['location'] descriptor = _find_corresponding_module_for_location(problem_location) diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index 1a10654f6c..0a2f22aa68 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -20,7 +20,7 @@ from xmodule.x_module import ModuleSystem from mock import Mock open_ended_grading_interface = { - 'url': 'http://sandbox-grader-001.m.edx.org/peer_grading', + 'url': 'blah/', 'username': 'incorrect_user', 'password': 'incorrect_pass', 'staff_grading' : 'staff_grading', @@ -52,7 +52,7 @@ def test_system(): user=Mock(is_staff=False), filestore=Mock(), debug=True, - xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10}, + xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10, 'construct_callback' : Mock(side_effect="/")}, node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), xblock_model_data=lambda descriptor: descriptor._model_data, anonymous_student_id='student', diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index 59f0e222ee..917e90e575 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -2,12 +2,15 @@ import json from mock import Mock, MagicMock, ANY import unittest +from test_util_open_ended import MockQueryDict, DummyModulestore + from 
xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module +from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError from xmodule.combined_open_ended_module import CombinedOpenEndedModule - from xmodule.modulestore import Location + from lxml import etree import capa.xqueue_interface as xqueue_interface from datetime import datetime @@ -17,6 +20,9 @@ log = logging.getLogger(__name__) from . import test_system +ORG = 'edX' +COURSE = 'open_ended' # name of directory with course data + import test_util_open_ended """ @@ -29,6 +35,9 @@ OpenEndedModule class OpenEndedChildTest(unittest.TestCase): + """ + Test the open ended child class + """ location = Location(["i4x", "edX", "sa_test", "selfassessment", "SampleQuestion"]) @@ -63,7 +72,6 @@ class OpenEndedChildTest(unittest.TestCase): self.openendedchild = OpenEndedChild(self.test_system, self.location, self.definition, self.descriptor, self.static_data, self.metadata) - def test_latest_answer_empty(self): answer = self.openendedchild.latest_answer() self.assertEqual(answer, "") @@ -144,6 +152,9 @@ class OpenEndedChildTest(unittest.TestCase): class OpenEndedModuleTest(unittest.TestCase): + """ + Test the open ended module class + """ location = Location(["i4x", "edX", "sa_test", "selfassessment", "SampleQuestion"]) @@ -285,6 +296,9 @@ class OpenEndedModuleTest(unittest.TestCase): class CombinedOpenEndedModuleTest(unittest.TestCase): + """ + Unit tests for the combined open ended xmodule + """ location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"]) definition_template = """ @@ -361,7 +375,6 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): descriptor, model_data={'data': full_definition, 'weight': '1'}) - def setUp(self): # TODO: this constructor call is 
definitely wrong, but neither branch # of the merge matches the module constructor. Someone (Vik?) should fix this. @@ -467,5 +480,148 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): self.assertEqual(score_dict['total'], 15.0) +class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): + """ + Test the student flow in the combined open ended xmodule + """ + problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"]) + answer = "blah blah" + assessment = [0, 1] + hint = "blah" + def setUp(self): + self.test_system = test_system() + self.test_system.xqueue['interface'] = Mock( + send_to_queue=Mock(side_effect=[1, "queued"]) + ) + self.setup_modulestore(COURSE) + def test_open_ended_load_and_save(self): + """ + See if we can load the module and save an answer + @return: + """ + #Load the module + module = self.get_module_from_location(self.problem_location, COURSE) + + #Try saving an answer + module.handle_ajax("save_answer", {"student_answer": self.answer}) + task_one_json = json.loads(module.task_states[0]) + self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer) + + module = self.get_module_from_location(self.problem_location, COURSE) + task_one_json = json.loads(module.task_states[0]) + self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer) + + def test_open_ended_flow_reset(self): + """ + Test the flow of the module if we complete the self assessment step and then reset + @return: + """ + assessment = [0, 1] + module = self.get_module_from_location(self.problem_location, COURSE) + + #Simulate a student saving an answer + module.handle_ajax("save_answer", {"student_answer": self.answer}) + status = module.handle_ajax("get_status", {}) + self.assertTrue(isinstance(status, basestring)) + + #Mock a student submitting an assessment + assessment_dict = MockQueryDict() + assessment_dict.update({'assessment': sum(assessment), 'score_list[]': assessment}) + 
module.handle_ajax("save_assessment", assessment_dict) + task_one_json = json.loads(module.task_states[0]) + self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment) + status = module.handle_ajax("get_status", {}) + self.assertTrue(isinstance(status, basestring)) + + #Move to the next step in the problem + module.handle_ajax("next_problem", {}) + self.assertEqual(module.current_task_number, 0) + + html = module.get_html() + self.assertTrue(isinstance(html, basestring)) + + rubric = module.handle_ajax("get_combined_rubric", {}) + self.assertTrue(isinstance(rubric, basestring)) + self.assertEqual(module.state, "assessing") + module.handle_ajax("reset", {}) + self.assertEqual(module.current_task_number, 0) + + def test_open_ended_flow_correct(self): + """ + Test a two step problem where the student first goes through the self assessment step, and then the + open ended step. + @return: + """ + assessment = [1, 1] + #Load the module + module = self.get_module_from_location(self.problem_location, COURSE) + + #Simulate a student saving an answer + module.handle_ajax("save_answer", {"student_answer": self.answer}) + status = module.handle_ajax("get_status", {}) + self.assertTrue(isinstance(status, basestring)) + + #Mock a student submitting an assessment + assessment_dict = MockQueryDict() + assessment_dict.update({'assessment': sum(assessment), 'score_list[]': assessment}) + module.handle_ajax("save_assessment", assessment_dict) + task_one_json = json.loads(module.task_states[0]) + self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment) + module.handle_ajax("get_status", {}) + + #Move to the next step in the problem + try: + module.handle_ajax("next_problem", {}) + except GradingServiceError: + #This error is okay. We don't have a grading service to connect to! + pass + self.assertEqual(module.current_task_number, 1) + try: + module.get_html() + except GradingServiceError: + #This error is okay. 
We don't have a grading service to connect to! + pass + + #Try to get the rubric from the module + module.handle_ajax("get_combined_rubric", {}) + + #Make a fake reply from the queue + queue_reply = { + 'queuekey': "", + 'xqueue_body': json.dumps({ + 'score': 0, + 'feedback': json.dumps({"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.", + "markup-text": " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we have no books left on the shelf for any of us . katherine paterson , author write a persuasive essay to a newspaper reflecting your vies on censorship in libraries . do you believe that certain materials , such as books , music , movies , magazines , etc . , should be removed from the shelves if they are found offensive ? support your position with convincing arguments from your own experience , observations , and or reading . 
"}), + 'grader_type': "ML", + 'success': True, + 'grader_id': 1, + 'submission_id': 1, + 'rubric_xml': "Writing Applications0 Language Conventions 0", + 'rubric_scores_complete': True, + }) + } + + module.handle_ajax("check_for_score", {}) + + #Update the module with the fake queue reply + module.handle_ajax("score_update", queue_reply) + self.assertFalse(module.ready_to_reset) + self.assertEqual(module.current_task_number, 1) + + #Get html and other data client will request + html = module.get_html() + legend = module.handle_ajax("get_legend", {}) + self.assertTrue(isinstance(legend, basestring)) + + status = module.handle_ajax("get_status", {}) + module.handle_ajax("skip_post_assessment", {}) + self.assertTrue(isinstance(legend, basestring)) + + #Get all results + module.handle_ajax("get_results", {}) + + #reset the problem + module.handle_ajax("reset", {}) + self.assertEqual(module.state, "initial") diff --git a/common/lib/xmodule/xmodule/tests/test_peer_grading.py b/common/lib/xmodule/xmodule/tests/test_peer_grading.py new file mode 100644 index 0000000000..a0877eab81 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_peer_grading.py @@ -0,0 +1,160 @@ +import unittest +from xmodule.modulestore import Location +from .import test_system +from test_util_open_ended import MockQueryDict, DummyModulestore +import json + +from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor +from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError + +import logging + +log = logging.getLogger(__name__) + +ORG = "edX" +COURSE = "open_ended" + + +class PeerGradingModuleTest(unittest.TestCase, DummyModulestore): + """ + Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an + external grading service. 
+ """ + problem_location = Location(["i4x", "edX", "open_ended", "peergrading", + "PeerGradingSample"]) + calibrated_dict = {'location': "blah"} + save_dict = MockQueryDict() + save_dict.update({ + 'location': "blah", + 'submission_id': 1, + 'submission_key': "", + 'score': 1, + 'feedback': "", + 'rubric_scores[]': [0, 1], + 'submission_flagged': False, + }) + + def setUp(self): + """ + Create a peer grading module from a test system + @return: + """ + self.test_system = test_system() + self.test_system.open_ended_grading_interface = None + self.setup_modulestore(COURSE) + self.peer_grading = self.get_module_from_location(self.problem_location, COURSE) + + def test_module_closed(self): + """ + Test if peer grading is closed + @return: + """ + closed = self.peer_grading.closed() + self.assertEqual(closed, False) + + def test_get_html(self): + """ + Test to see if the module can be rendered + @return: + """ + html = self.peer_grading.get_html() + + def test_get_data(self): + """ + Try getting data from the external grading service + @return: + """ + success, data = self.peer_grading.query_data_for_location() + self.assertEqual(success, True) + + def test_get_score(self): + """ + Test getting the score + @return: + """ + score = self.peer_grading.get_score() + self.assertEquals(score['score'], None) + + def test_get_max_score(self): + """ + Test getting the max score + @return: + """ + max_score = self.peer_grading.max_score() + self.assertEquals(max_score, None) + + def get_next_submission(self): + """ + Test to see if we can get the next mock submission + @return: + """ + success, next_submission = self.peer_grading.get_next_submission({'location': 'blah'}) + self.assertEqual(success, True) + + def test_save_grade(self): + """ + Test if we can save the grade + @return: + """ + response = self.peer_grading.save_grade(self.save_dict) + self.assertEqual(response['success'], True) + + def test_is_student_calibrated(self): + """ + Check to see if the student has 
calibrated yet + @return: + """ + calibrated_dict = {'location': "blah"} + response = self.peer_grading.is_student_calibrated(self.calibrated_dict) + self.assertEqual(response['success'], True) + + def test_show_calibration_essay(self): + """ + Test showing the calibration essay + @return: + """ + response = self.peer_grading.show_calibration_essay(self.calibrated_dict) + self.assertEqual(response['success'], True) + + def test_save_calibration_essay(self): + """ + Test saving the calibration essay + @return: + """ + response = self.peer_grading.save_calibration_essay(self.save_dict) + self.assertEqual(response['success'], True) + + def test_peer_grading_problem(self): + """ + See if we can render a single problem + @return: + """ + response = self.peer_grading.peer_grading_problem(self.calibrated_dict) + self.assertEqual(response['success'], True) + + def test_get_instance_state(self): + """ + Get the instance state dict + @return: + """ + self.peer_grading.get_instance_state() + +class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore): + """ + Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an + external grading service. 
+ """ + problem_location = Location(["i4x", "edX", "open_ended", "peergrading", + "PeerGradingScored"]) + def setUp(self): + """ + Create a peer grading module from a test system + @return: + """ + self.test_system = test_system() + self.test_system.open_ended_grading_interface = None + self.setup_modulestore(COURSE) + + def test_metadata_load(self): + peer_grading = self.get_module_from_location(self.problem_location, COURSE) + self.assertEqual(peer_grading.closed(), False) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py index db580f1e0e..8f3ad5b980 100644 --- a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py @@ -1,5 +1,10 @@ +from .import test_system +from xmodule.modulestore import Location +from xmodule.modulestore.xml import ImportSystem, XMLModuleStore +from xmodule.tests.test_export import DATA_DIR + OPEN_ENDED_GRADING_INTERFACE = { - 'url': 'http://127.0.0.1:3033/', + 'url': 'blah/', 'username': 'incorrect', 'password': 'incorrect', 'staff_grading': 'staff_grading', @@ -11,4 +16,40 @@ S3_INTERFACE = { 'aws_access_key': "", 'aws_secret_key': "", "aws_bucket_name": "", -} \ No newline at end of file +} + + +class MockQueryDict(dict): + """ + Mock a query dict so that it can be used in test classes. This will only work with the combinedopenended tests, + and does not mock the full query dict, only the behavior that is needed there (namely get_list). 
+ """ + def getlist(self, key, default=None): + try: + return super(MockQueryDict, self).__getitem__(key) + except KeyError: + if default is None: + return [] + return default + + +class DummyModulestore(object): + """ + A mixin that allows test classes to have convenience functions to get a module given a location + """ + test_system = test_system() + + def setup_modulestore(self, name): + self.modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name]) + + def get_course(self, name): + """Get a test course by directory name. If there's more than one, error.""" + courses = self.modulestore.get_courses() + return courses[0] + + def get_module_from_location(self, location, course): + course = self.get_course(course) + if not isinstance(location, Location): + location = Location(location) + descriptor = self.modulestore.get_instance(course.id, location, depth=None) + return descriptor.xmodule(self.test_system) diff --git a/common/test/data/open_ended/README.md b/common/test/data/open_ended/README.md new file mode 100644 index 0000000000..ed1d5c771d --- /dev/null +++ b/common/test/data/open_ended/README.md @@ -0,0 +1 @@ +This is a very very simple course, useful for debugging open ended grading code. diff --git a/common/test/data/open_ended/combinedopenended/SampleQuestion.xml b/common/test/data/open_ended/combinedopenended/SampleQuestion.xml new file mode 100644 index 0000000000..5dbe285526 --- /dev/null +++ b/common/test/data/open_ended/combinedopenended/SampleQuestion.xml @@ -0,0 +1,33 @@ + + + + + Writing Applications + + + + + Language Conventions + + + + + + +

Censorship in the Libraries

+

"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author

+

Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.

+
+ + + + + + + Enter essay here. + This is the answer. + {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} + + + +
\ No newline at end of file diff --git a/common/test/data/open_ended/course.xml b/common/test/data/open_ended/course.xml new file mode 100644 index 0000000000..9848343f58 --- /dev/null +++ b/common/test/data/open_ended/course.xml @@ -0,0 +1 @@ + diff --git a/common/test/data/open_ended/course/2012_Fall.xml b/common/test/data/open_ended/course/2012_Fall.xml new file mode 100644 index 0000000000..32c810174b --- /dev/null +++ b/common/test/data/open_ended/course/2012_Fall.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/common/test/data/open_ended/peergrading/PeerGradingSample.xml b/common/test/data/open_ended/peergrading/PeerGradingSample.xml new file mode 100644 index 0000000000..7e3afddf3a --- /dev/null +++ b/common/test/data/open_ended/peergrading/PeerGradingSample.xml @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/common/test/data/open_ended/peergrading/PeerGradingScored.xml b/common/test/data/open_ended/peergrading/PeerGradingScored.xml new file mode 100644 index 0000000000..6398a9b4c5 --- /dev/null +++ b/common/test/data/open_ended/peergrading/PeerGradingScored.xml @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/common/test/data/open_ended/policies/2012_Fall.json b/common/test/data/open_ended/policies/2012_Fall.json new file mode 100644 index 0000000000..8f8ba13437 --- /dev/null +++ b/common/test/data/open_ended/policies/2012_Fall.json @@ -0,0 +1,17 @@ +{ + "course/2012_Fall": { + "graceperiod": "2 days 5 hours 59 minutes 59 seconds", + "start": "2015-07-17T12:00", + "display_name": "Self Assessment Test", + "graded": "true" + }, + "chapter/Overview": { + "display_name": "Overview" + }, + "combinedopenended/SampleQuestion": { + "display_name": "Sample Question" + }, + "peergrading/PeerGradingSample": { + "display_name": "Sample Question" + } +} diff --git a/common/test/data/open_ended/roots/2012_Fall.xml b/common/test/data/open_ended/roots/2012_Fall.xml new file mode 100644 index 0000000000..ea7d5c420d --- /dev/null +++ 
b/common/test/data/open_ended/roots/2012_Fall.xml @@ -0,0 +1 @@ + diff --git a/common/test/data/self_assessment/selfassessment/SampleQuestion.xml b/common/test/data/self_assessment/selfassessment/SampleQuestion.xml index 6c383763b1..f8affa903d 100644 --- a/common/test/data/self_assessment/selfassessment/SampleQuestion.xml +++ b/common/test/data/self_assessment/selfassessment/SampleQuestion.xml @@ -1,14 +1,14 @@ - - What is the meaning of life? - - - This is a rubric. - - - Thanks for your submission! - - - Enter a hint below: - - + + What is the meaning of life? + + + This is a rubric. + + + Thanks for your submission! + + + Enter a hint below: + + \ No newline at end of file diff --git a/lms/djangoapps/open_ended_grading/tests.py b/lms/djangoapps/open_ended_grading/tests.py index 93d27d8e24..ffc02608d5 100644 --- a/lms/djangoapps/open_ended_grading/tests.py +++ b/lms/djangoapps/open_ended_grading/tests.py @@ -84,7 +84,9 @@ class TestStaffGradingService(LoginEnrollmentTestCase): data = {'location': self.location} r = self.check_for_post_code(200, url, data) + d = json.loads(r.content) + self.assertTrue(d['success']) self.assertEquals(d['submission_id'], self.mock_service.cnt) self.assertIsNotNone(d['submission']) @@ -130,6 +132,7 @@ class TestStaffGradingService(LoginEnrollmentTestCase): r = self.check_for_post_code(200, url, data) d = json.loads(r.content) + self.assertTrue(d['success'], str(d)) self.assertIsNotNone(d['problem_list']) @@ -179,7 +182,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase): data = {'location': self.location} r = self.peer_module.get_next_submission(data) - d = json.loads(r) + d = r + self.assertTrue(d['success']) self.assertIsNotNone(d['submission_id']) self.assertIsNotNone(d['prompt']) @@ -213,7 +217,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase): qdict.keys = data.keys r = self.peer_module.save_grade(qdict) - d = json.loads(r) + d = r + self.assertTrue(d['success']) def test_save_grade_missing_keys(self): @@ -225,7 
+230,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase): def test_is_calibrated_success(self): data = {'location': self.location} r = self.peer_module.is_student_calibrated(data) - d = json.loads(r) + d = r + self.assertTrue(d['success']) self.assertTrue('calibrated' in d) @@ -239,9 +245,8 @@ class TestPeerGradingService(LoginEnrollmentTestCase): data = {'location': self.location} r = self.peer_module.show_calibration_essay(data) - d = json.loads(r) - log.debug(d) - log.debug(type(d)) + d = r + self.assertTrue(d['success']) self.assertIsNotNone(d['submission_id']) self.assertIsNotNone(d['prompt'])