From c2e5c607136e93a8c63d1aa073fe0ef1756a24f3 Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 13 Jun 2013 15:34:00 -0400 Subject: [PATCH 01/31] Remove correct/incorrect message, and auto-add in needed markdown for studio --- .../lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee | 4 ++++ lms/templates/combinedopenended/openended/open_ended.html | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee index 1b7f9bb4fb..2f58e86cb2 100644 --- a/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee +++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee @@ -50,6 +50,10 @@ Write a persuasive essay to a newspaper reflecting your vies on censorship in li mode: null }) @setCurrentEditor(@markdown_editor) + selection = @markdown_editor.getSelection() + #Auto-add in the needed template if it isn't already in there. + if(@markdown_editor.getValue() == "") + @markdown_editor.setValue(OpenEndedMarkdownEditingDescriptor.promptTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.rubricTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.tasksTemplate) # Add listeners for toolbar buttons (only present for markdown editor) @element.on('click', '.xml-tab', @onShowXMLButton) @element.on('click', '.format-buttons a', @onToolbarButton) diff --git a/lms/templates/combinedopenended/openended/open_ended.html b/lms/templates/combinedopenended/openended/open_ended.html index 9fb136cee6..909ef15838 100644 --- a/lms/templates/combinedopenended/openended/open_ended.html +++ b/lms/templates/combinedopenended/openended/open_ended.html @@ -10,10 +10,6 @@
% if state == 'initial': Unanswered - % elif state in ['done', 'post_assessment'] and correct == 'correct': -

Correct

- % elif state in ['done', 'post_assessment'] and correct == 'incorrect': -

Incorrect.

% elif state == 'assessing': Submitted for grading. % if eta_message is not None: From cd49e1bb686f747969d13e875111f448cf5b71bb Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 13 Jun 2013 16:00:28 -0400 Subject: [PATCH 02/31] Check to see if a location is valid before displaying it for instructor grading --- .../open_ended_grading/staff_grading_service.py | 11 ++++++++++- lms/djangoapps/open_ended_grading/utils.py | 16 ++++++++++++++++ lms/djangoapps/open_ended_grading/views.py | 2 +- 3 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 lms/djangoapps/open_ended_grading/utils.py diff --git a/lms/djangoapps/open_ended_grading/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py index 2c611b4481..a2d905be0b 100644 --- a/lms/djangoapps/open_ended_grading/staff_grading_service.py +++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py @@ -15,6 +15,7 @@ from xmodule.course_module import CourseDescriptor from student.models import unique_id_for_user from xmodule.x_module import ModuleSystem from mitxmako.shortcuts import render_to_string +from utils import does_location_exist log = logging.getLogger(__name__) @@ -240,7 +241,6 @@ def get_next(request, course_id): return HttpResponse(_get_next(course_id, grader_id, location), mimetype="application/json") - def get_problem_list(request, course_id): """ Get all the problems for the given course id @@ -266,6 +266,15 @@ def get_problem_list(request, course_id): _check_access(request.user, course_id) try: response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user)) + response = json.loads(response) + problem_list = response['problem_list'] + valid_problem_list = [] + for i in xrange(0,len(problem_list)): + if does_location_exist(course_id, problem_list[i]['location']): + valid_problem_list.append(problem_list[i]) + response['problem_list'] = valid_problem_list + response = json.dumps(response) + return HttpResponse(response, 
mimetype="application/json") except GradingServiceError: diff --git a/lms/djangoapps/open_ended_grading/utils.py b/lms/djangoapps/open_ended_grading/utils.py new file mode 100644 index 0000000000..7634977397 --- /dev/null +++ b/lms/djangoapps/open_ended_grading/utils.py @@ -0,0 +1,16 @@ +from xmodule.modulestore import search +from xmodule.modulestore.django import modulestore +from xmodule.modulestore.exceptions import ItemNotFoundError + +def does_location_exist(course_id, location): + """ + Checks to see if a valid module exists at a given location (ie has not been deleted) + course_id - string course id + location - string location + """ + try: + search.path_to_location(modulestore(), course_id, location) + return True + except ItemNotFoundError: + #If the problem cannot be found at the location received from the grading controller server, it has been deleted by the course author. + return False diff --git a/lms/djangoapps/open_ended_grading/views.py b/lms/djangoapps/open_ended_grading/views.py index a914e434a9..797c90f9a4 100644 --- a/lms/djangoapps/open_ended_grading/views.py +++ b/lms/djangoapps/open_ended_grading/views.py @@ -179,6 +179,7 @@ def student_problem_list(request, course_id): error_text = "" problem_list = [] base_course_url = reverse('courses') + list_to_remove = [] try: #Get list of all open ended problems that the grading server knows about @@ -192,7 +193,6 @@ def student_problem_list(request, course_id): problem_list = problem_list_dict['problem_list'] #A list of problems to remove (problems that can't be found in the course) - list_to_remove = [] for i in xrange(0, len(problem_list)): try: #Try to load each problem in the courseware to get links to them From 9b14ea790c30f25792c05bf6901c3ae44cc4c603 Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 27 Jun 2013 16:20:58 -0400 Subject: [PATCH 03/31] Properly increment student attempts --- .../open_ended_grading_classes/combined_open_ended_modulev1.py | 1 + 1 file changed, 1 insertion(+) 
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index 1fe62035e6..c99d0915cb 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -653,6 +653,7 @@ class CombinedOpenEndedV1Module(): ).format(self.student_attempts, self.attempts) } self.state = self.INITIAL + self.student_attempts +=1 self.ready_to_reset = False for i in xrange(0, len(self.task_xml)): self.current_task_number = i From 8a6c8b5ab0ac79d5e458e95fc743351dd0c6f8cc Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Fri, 28 Jun 2013 14:10:17 -0400 Subject: [PATCH 04/31] Add test for incrementing student attempts --- .../combined_open_ended_modulev1.py | 4 +- .../xmodule/tests/test_combined_open_ended.py | 51 +++++++++++++++++++ .../SampleQuestion1Attempt.xml | 24 +++++++++ .../test/data/open_ended/course/2012_Fall.xml | 1 + 4 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 common/test/data/open_ended/combinedopenended/SampleQuestion1Attempt.xml diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index c99d0915cb..62c9417660 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -643,7 +643,8 @@ class CombinedOpenEndedV1Module(): if not self.ready_to_reset: return self.out_of_sync_error(data) - if self.student_attempts > self.attempts: + self.student_attempts +=1 + if self.student_attempts >= self.attempts: return { 'success': False, # This is a student_facing_error @@ -653,7 +654,6 @@ class CombinedOpenEndedV1Module(): 
).format(self.student_attempts, self.attempts) } self.state = self.INITIAL - self.student_attempts +=1 self.ready_to_reset = False for i in xrange(0, len(self.task_xml)): self.current_task_number = i diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index e1f8d135de..dc0023f80d 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -629,3 +629,54 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): #reset the problem module.handle_ajax("reset", {}) self.assertEqual(module.state, "initial") + +class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): + """ + Test the student flow in the combined open ended xmodule + """ + problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion1Attempt"]) + answer = "blah blah" + assessment = [0, 1] + hint = "blah" + + def setUp(self): + self.test_system = get_test_system() + self.test_system.xqueue['interface'] = Mock( + send_to_queue=Mock(side_effect=[1, "queued"]) + ) + self.setup_modulestore(COURSE) + + def test_reset_fail(self): + """ + Test the flow of the module if we complete the self assessment step and then reset + @return: + """ + assessment = [0, 1] + module = self.get_module_from_location(self.problem_location, COURSE) + + #Simulate a student saving an answer + module.handle_ajax("save_answer", {"student_answer": self.answer}) + status = module.handle_ajax("get_status", {}) + self.assertTrue(isinstance(status, basestring)) + + #Mock a student submitting an assessment + assessment_dict = MockQueryDict() + assessment_dict.update({'assessment': sum(assessment), 'score_list[]': assessment}) + module.handle_ajax("save_assessment", assessment_dict) + task_one_json = json.loads(module.task_states[0]) + self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), 
assessment) + status = module.handle_ajax("get_status", {}) + self.assertTrue(isinstance(status, basestring)) + + #Move to the next step in the problem + module.handle_ajax("next_problem", {}) + self.assertEqual(module.current_task_number, 0) + + html = module.get_html() + self.assertTrue(isinstance(html, basestring)) + + rubric = module.handle_ajax("get_combined_rubric", {}) + self.assertTrue(isinstance(rubric, basestring)) + self.assertEqual(module.state, "done") + reset_data = json.loads(module.handle_ajax("reset", {})) + self.assertEqual(reset_data['success'], False) diff --git a/common/test/data/open_ended/combinedopenended/SampleQuestion1Attempt.xml b/common/test/data/open_ended/combinedopenended/SampleQuestion1Attempt.xml new file mode 100644 index 0000000000..9bfabca191 --- /dev/null +++ b/common/test/data/open_ended/combinedopenended/SampleQuestion1Attempt.xml @@ -0,0 +1,24 @@ + + + + + Writing Applications + + + + + Language Conventions + + + + + + +

Censorship in the Libraries

+

"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author

+

Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.

+
+ + + +
\ No newline at end of file diff --git a/common/test/data/open_ended/course/2012_Fall.xml b/common/test/data/open_ended/course/2012_Fall.xml index 32c810174b..609d12f94c 100644 --- a/common/test/data/open_ended/course/2012_Fall.xml +++ b/common/test/data/open_ended/course/2012_Fall.xml @@ -1,6 +1,7 @@ + From b38a36d44a5600c44da9840b59eb2dbef08a6bdf Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Fri, 28 Jun 2013 14:38:13 -0400 Subject: [PATCH 05/31] Fix test --- .../lib/xmodule/xmodule/tests/test_combined_open_ended.py | 8 ++++++-- .../open_ended_grading/staff_grading_service.py | 5 +++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index dc0023f80d..af1b6aa12b 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -630,9 +630,9 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): module.handle_ajax("reset", {}) self.assertEqual(module.state, "initial") -class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): +class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore): """ - Test the student flow in the combined open ended xmodule + Test if student is able to reset the problem """ problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion1Attempt"]) answer = "blah blah" @@ -649,6 +649,7 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): def test_reset_fail(self): """ Test the flow of the module if we complete the self assessment step and then reset + Since the problem only allows one attempt, should fail. 
@return: """ assessment = [0, 1] @@ -675,8 +676,11 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): html = module.get_html() self.assertTrue(isinstance(html, basestring)) + #Module should now be done rubric = module.handle_ajax("get_combined_rubric", {}) self.assertTrue(isinstance(rubric, basestring)) self.assertEqual(module.state, "done") + + #Try to reset, should fail because only 1 attempt is allowed reset_data = json.loads(module.handle_ajax("reset", {})) self.assertEqual(reset_data['success'], False) diff --git a/lms/djangoapps/open_ended_grading/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py index eda78d0b09..a6a2847bc1 100644 --- a/lms/djangoapps/open_ended_grading/staff_grading_service.py +++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py @@ -270,6 +270,11 @@ def get_problem_list(request, course_id): problem_list = response['problem_list'] valid_problem_list = [] for i in xrange(0,len(problem_list)): + #Needed to ensure that the 'location' key can be accessed + try: + problem_list[i] = json.loads(problem_list[i]) + except Exception: + pass if does_location_exist(course_id, problem_list[i]['location']): valid_problem_list.append(problem_list[i]) response['problem_list'] = valid_problem_list From 8930e753e4c225a16e862825aac84cb15469fb8d Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Fri, 28 Jun 2013 14:59:39 -0400 Subject: [PATCH 06/31] Set default weights --- .../xmodule/combined_open_ended_module.py | 70 ++++++++++++++----- .../xmodule/xmodule/peer_grading_module.py | 29 +++++--- 2 files changed, 75 insertions(+), 24 deletions(-) diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index 52d98f032e..fdc85369a1 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -51,47 +51,85 @@ class CombinedOpenEndedFields(object): 
display_name = String( display_name="Display Name", help="This name appears in the horizontal navigation at the top of the page.", - default="Open Ended Grading", scope=Scope.settings + default="Open Ended Grading", + scope=Scope.settings + ) + current_task_number = Integer( + help="Current task that the student is on.", + default=0, + scope=Scope.user_state + ) + task_states = List( + help="List of state dictionaries of each task within this module.", + scope=Scope.user_state + ) + state = String( + help="Which step within the current task that the student is on.", + default="initial", + scope=Scope.user_state + ) + student_attempts = Integer( + help="Number of attempts taken by the student on this problem", + default=0, + scope=Scope.user_state ) - current_task_number = Integer(help="Current task that the student is on.", default=0, scope=Scope.user_state) - task_states = List(help="List of state dictionaries of each task within this module.", scope=Scope.user_state) - state = String(help="Which step within the current task that the student is on.", default="initial", - scope=Scope.user_state) - student_attempts = Integer(help="Number of attempts taken by the student on this problem", default=0, - scope=Scope.user_state) ready_to_reset = Boolean( - help="If the problem is ready to be reset or not.", default=False, + help="If the problem is ready to be reset or not.", + default=False, scope=Scope.user_state ) attempts = Integer( display_name="Maximum Attempts", - help="The number of times the student can try to answer this problem.", default=1, + help="The number of times the student can try to answer this problem.", + default=1, scope=Scope.settings, values={"min" : 1 } ) - is_graded = Boolean(display_name="Graded", help="Whether or not the problem is graded.", default=False, scope=Scope.settings) + is_graded = Boolean( + display_name="Graded", + help="Whether or not the problem is graded.", + default=False, + scope=Scope.settings + ) accept_file_upload = 
Boolean( display_name="Allow File Uploads", - help="Whether or not the student can submit files as a response.", default=False, scope=Scope.settings + help="Whether or not the student can submit files as a response.", + default=False, + scope=Scope.settings ) skip_spelling_checks = Boolean( display_name="Disable Quality Filter", help="If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed.", default=False, scope=Scope.settings ) - due = Date(help="Date that this problem is due by", default=None, scope=Scope.settings) + due = Date( + help="Date that this problem is due by", + default=None, + scope=Scope.settings + ) graceperiod = String( help="Amount of time after the due date that submissions will be accepted", default=None, scope=Scope.settings ) - version = VersionInteger(help="Current version number", default=DEFAULT_VERSION, scope=Scope.settings) - data = String(help="XML data for the problem", scope=Scope.content) + version = VersionInteger( + help="Current version number", + default=DEFAULT_VERSION, + scope=Scope.settings + ) + data = String( + help="XML data for the problem", + scope=Scope.content + ) weight = Float( display_name="Problem Weight", help="Defines the number of points each problem is worth. 
If the value is not set, each problem is worth one point.", - scope=Scope.settings, values={"min" : 0 , "step": ".1"} + scope=Scope.settings, values={"min" : 0 , "step": ".1"}, + default=1 + ) + markdown = String( + help="Markdown source of this module", + scope=Scope.settings ) - markdown = String(help="Markdown source of this module", scope=Scope.settings) class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule): diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index 7df444a892..f92971796c 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -32,23 +32,35 @@ class PeerGradingFields(object): display_name="Show Single Problem", help='When True, only the single problem specified by "Link to Problem Location" is shown. ' 'When False, a panel is displayed with all problems available for peer grading.', - default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings + default=USE_FOR_SINGLE_LOCATION, + scope=Scope.settings ) link_to_location = String( display_name="Link to Problem Location", help='The location of the problem being graded. Only used when "Show Single Problem" is True.', - default=LINK_TO_LOCATION, scope=Scope.settings + default=LINK_TO_LOCATION, + scope=Scope.settings ) is_graded = Boolean( display_name="Graded", help='Defines whether the student gets credit for grading this problem. 
Only used when "Show Single Problem" is True.', - default=IS_GRADED, scope=Scope.settings + default=IS_GRADED, + scope=Scope.settings + ) + due_date = Date( + help="Due date that should be displayed.", + default=None, + scope=Scope.settings) + grace_period_string = String( + help="Amount of grace to give on the due date.", + default=None, + scope=Scope.settings ) - due_date = Date(help="Due date that should be displayed.", default=None, scope=Scope.settings) - grace_period_string = String(help="Amount of grace to give on the due date.", default=None, scope=Scope.settings) max_grade = Integer( - help="The maximum grade that a student can receive for this problem.", default=MAX_SCORE, - scope=Scope.settings, values={"min": 0} + help="The maximum grade that a student can receive for this problem.", + default=MAX_SCORE, + scope=Scope.settings, + values={"min": 0} ) student_data_for_location = Dict( help="Student data for a given peer grading problem.", @@ -57,7 +69,8 @@ class PeerGradingFields(object): weight = Float( display_name="Problem Weight", help="Defines the number of points each problem is worth. 
If the value is not set, each problem is worth one point.", - scope=Scope.settings, values={"min": 0, "step": ".1"} + scope=Scope.settings, values={"min": 0, "step": ".1"}, + default=1 ) From 2246944594cf2199e4c57fe371755011b4de5be8 Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Fri, 28 Jun 2013 15:09:19 -0400 Subject: [PATCH 07/31] Set a default weight --- .../xmodule/combined_open_ended_module.py | 9 ++++++--- .../combined_open_ended_modulev1.py | 11 ++++++++--- .../lib/xmodule/xmodule/peer_grading_module.py | 16 ++++++++++------ 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index fdc85369a1..736b82e21d 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -82,7 +82,8 @@ class CombinedOpenEndedFields(object): display_name="Maximum Attempts", help="The number of times the student can try to answer this problem.", default=1, - scope=Scope.settings, values={"min" : 1 } + scope=Scope.settings, + values={"min" : 1 } ) is_graded = Boolean( display_name="Graded", @@ -99,7 +100,8 @@ class CombinedOpenEndedFields(object): skip_spelling_checks = Boolean( display_name="Disable Quality Filter", help="If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed.", - default=False, scope=Scope.settings + default=False, + scope=Scope.settings ) due = Date( help="Date that this problem is due by", @@ -123,7 +125,8 @@ class CombinedOpenEndedFields(object): weight = Float( display_name="Problem Weight", help="Defines the number of points each problem is worth. 
If the value is not set, each problem is worth one point.", - scope=Scope.settings, values={"min" : 0 , "step": ".1"}, + scope=Scope.settings, + values={"min" : 0 , "step": ".1"}, default=1 ) markdown = String( diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index 62c9417660..926bd780f7 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -727,7 +727,12 @@ class CombinedOpenEndedV1Module(): """ max_score = None score = None - if self.is_scored and self.weight is not None: + + #The old default was None, so set to 1 if it is the old default weight + weight = self.weight + if weight is None: + weight = 1 + if self.is_scored: # Finds the maximum score of all student attempts and keeps it. score_mat = [] for i in xrange(0, len(self.task_states)): @@ -740,7 +745,7 @@ class CombinedOpenEndedV1Module(): for z in xrange(0, len(score)): if score[z] is None: score[z] = 0 - score[z] *= float(self.weight) + score[z] *= float(weight) score_mat.append(score) if len(score_mat) > 0: @@ -754,7 +759,7 @@ class CombinedOpenEndedV1Module(): if max_score is not None: # Weight the max score if it is not None - max_score *= float(self.weight) + max_score *= float(weight) else: # Without a max_score, we cannot have a score! 
score = None diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index f92971796c..6164a4d635 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -57,7 +57,7 @@ class PeerGradingFields(object): scope=Scope.settings ) max_grade = Integer( - help="The maximum grade that a student can receive for this problem.", + help="The maximum grade that a student can receive for this problem.", default=MAX_SCORE, scope=Scope.settings, values={"min": 0} @@ -214,6 +214,11 @@ class PeerGradingModule(PeerGradingFields, XModule): def get_score(self): max_score = None score = None + weight = self.weight + + #The old default was None, so set to 1 if it is the old default weight + if weight is None: + weight = 1 score_dict = { 'score': score, 'total': max_score, @@ -238,11 +243,10 @@ class PeerGradingModule(PeerGradingFields, XModule): # Ensures that once a student receives a final score for peer grading, that it does not change. self.student_data_for_location = response - if self.weight is not None: - score = int(count_graded >= count_required and count_graded > 0) * float(self.weight) - total = self.max_grade * float(self.weight) - score_dict['score'] = score - score_dict['total'] = total + score = int(count_graded >= count_required and count_graded > 0) * float(weight) + total = self.max_grade * float(weight) + score_dict['score'] = score + score_dict['total'] = total return score_dict From ecbf148688fc3d54c9ccf4ea268bbf4216f1fc29 Mon Sep 17 00:00:00 2001 From: Chris Dodge Date: Thu, 20 Jun 2013 11:46:16 -0400 Subject: [PATCH 08/31] add a new django-admin command to dump out a course structure document. This is response to an emergency request from Harvard researchers. 
--- .../commands/dump_course_structure.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 cms/djangoapps/contentstore/management/commands/dump_course_structure.py diff --git a/cms/djangoapps/contentstore/management/commands/dump_course_structure.py b/cms/djangoapps/contentstore/management/commands/dump_course_structure.py new file mode 100644 index 0000000000..d9b7c55cbd --- /dev/null +++ b/cms/djangoapps/contentstore/management/commands/dump_course_structure.py @@ -0,0 +1,55 @@ +from django.core.management.base import BaseCommand, CommandError +from xmodule.course_module import CourseDescriptor +from xmodule.modulestore.django import modulestore +from json import dumps +from xmodule.modulestore.inheritance import own_metadata +from django.conf import settings + +filter_list = ['xml_attributes', 'checklists'] + + +class Command(BaseCommand): + help = '''Write out to stdout a structural and metadata information about a course in a flat dictionary serialized + in a JSON format. 
This can be used for analytics.''' + + def handle(self, *args, **options): + if len(args) < 2 or len(args) > 3: + raise CommandError("dump_course_structure requires two or more arguments: ||") + + course_id = args[0] + outfile = args[1] + + # use a user-specified database name, if present + # this is useful for doing dumps from databases restored from prod backups + if len(args) == 3: + settings.MODULESTORE['direct']['OPTIONS']['db'] = args[2] + + loc = CourseDescriptor.id_to_location(course_id) + + store = modulestore() + + course = None + try: + course = store.get_item(loc, depth=4) + except: + print 'Could not find course at {0}'.format(course_id) + return + + info = {} + + def dump_into_dict(module, info): + filtered_metadata = dict((key, value) for key, value in own_metadata(module).iteritems() + if key not in filter_list) + info[module.location.url()] = { + 'category': module.location.category, + 'children': module.children if hasattr(module, 'children') else [], + 'metadata': filtered_metadata + } + + for child in module.get_children(): + dump_into_dict(child, info) + + dump_into_dict(course, info) + + with open(outfile, 'w') as f: + f.write(dumps(info)) From 07aad296841bf741e300f5f77ac826c4f9eaa9dc Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Fri, 12 Jul 2013 17:58:44 -0400 Subject: [PATCH 09/31] The clean_history management command to remove excess courseware_studentmodulehistory records. 
--- .../management/commands/clean_history.py | 217 +++++++++ .../management/tests/test_clean_history.py | 459 ++++++++++++++++++ 2 files changed, 676 insertions(+) create mode 100644 lms/djangoapps/courseware/management/commands/clean_history.py create mode 100644 lms/djangoapps/courseware/management/tests/test_clean_history.py diff --git a/lms/djangoapps/courseware/management/commands/clean_history.py b/lms/djangoapps/courseware/management/commands/clean_history.py new file mode 100644 index 0000000000..2c243a43be --- /dev/null +++ b/lms/djangoapps/courseware/management/commands/clean_history.py @@ -0,0 +1,217 @@ +"""A command to clean the StudentModuleHistory table. + +When we added XBlock storage, each field modification wrote a new history row +to the db. Now that we have bulk saves to avoid that database hammering, we +need to clean out the unnecessary rows from the database. + +This command that does that. + +""" + +import datetime +import json +from optparse import make_option +import traceback + +from django.core.management.base import NoArgsCommand +from django.db import connection + + +class Command(NoArgsCommand): + """The actual clean_history command to clean history rows.""" + + help = "Deletes unneeded rows from the StudentModuleHistory table." + + option_list = NoArgsCommand.option_list + ( + make_option( + '--dry-run', + action='store_true', + default=False, + help="Don't change the database, just show what would be done.", + ), + ) + + def handle_noargs(self, **options): + smhc = StudentModuleHistoryCleaner( + dry_run=options["dry_run"], + ) + smhc.main() + + +class StudentModuleHistoryCleaner(object): + """Logic to clean rows from the StudentModuleHistory table.""" + + DELETE_GAP_SECS = 0.5 # Rows this close can be discarded. 
+ STATE_FILE = "clean_history.json" + BATCH_SIZE = 100 + + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.next_student_module_id = 0 + self.last_student_module_id = 0 + + def main(self, batch_size=None): + """Invoked from the management command to do all the work.""" + + batch_size = batch_size or self.BATCH_SIZE + + self.last_student_module_id = self.get_last_student_module_id() + self.load_state() + + while self.next_student_module_id <= self.last_student_module_id: + for smid in self.module_ids_to_check(batch_size): + try: + self.clean_one_student_module(smid) + except Exception: # pylint: disable=W0703 + trace = traceback.format_exc() + self.say("Couldn't clean student_module_id {}:\n{}".format(smid, trace)) + self.commit() + self.save_state() + + def say(self, message): + """ + Display a message to the user. + + The message will have a trailing newline added to it. + + """ + print message + + def commit(self): + """ + Commit the transaction. + """ + self.say("Committing") + connection.commit() + + def load_state(self): + """ + Load the latest state from disk. + """ + try: + state_file = open(self.STATE_FILE) + except IOError: + self.say("No stored state") + self.next_student_module_id = 0 + else: + with state_file: + state = json.load(state_file) + self.say( + "Loaded stored state: {}".format( + json.dumps(state, sort_keys=True) + ) + ) + self.next_student_module_id = state['next_student_module_id'] + + def save_state(self): + """ + Save the state to disk. + """ + state = { + 'next_student_module_id': self.next_student_module_id, + } + with open(self.STATE_FILE, "w") as state_file: + json.dump(state, state_file) + self.say("Saved state: {}".format(json.dumps(state, sort_keys=True))) + + def get_last_student_module_id(self): + """ + Return the id of the last student_module. 
+ """ + cursor = connection.cursor() + cursor.execute(""" + SELECT max(student_module_id) FROM courseware_studentmodulehistory + """) + last = cursor.fetchone()[0] + self.say("Last student_module_id is {}".format(last)) + return last + + def module_ids_to_check(self, batch_size): + """Produce a sequence of student module ids to check. + + `batch_size` is how many module ids to produce, max. + + The sequence starts with `next_student_module_id`, and goes up to + and including `last_student_module_id`. + + `next_student_module_id` is updated as each id is yielded. + + """ + start = self.next_student_module_id + for smid in range(start, start+batch_size): + if smid > self.last_student_module_id: + break + yield smid + self.next_student_module_id = smid+1 + + def get_history_for_student_modules(self, student_module_id): + """ + Get the history rows for a student module. + + ```student_module_id```: the id of the student module we're + interested in. + + Return a list: [(id, created), ...], all the rows of history. + + """ + cursor = connection.cursor() + cursor.execute(""" + SELECT id, created FROM courseware_studentmodulehistory + WHERE student_module_id = %s + ORDER BY created + """, + [student_module_id] + ) + history = cursor.fetchall() + return history + + def delete_history(self, ids_to_delete): + """ + Delete history rows. + + ```ids_to_delete```: a non-empty list (or set...) of history row ids to delete. + + """ + assert ids_to_delete + cursor = connection.cursor() + cursor.execute(""" + DELETE FROM courseware_studentmodulehistory + WHERE id IN ({ids}) + """.format(ids=",".join(str(i) for i in ids_to_delete)) + ) + + def clean_one_student_module(self, student_module_id): + """Clean one StudentModule's-worth of history. + + `student_module_id`: the id of the StudentModule to process. 
+ + """ + delete_gap = datetime.timedelta(seconds=self.DELETE_GAP_SECS) + + history = self.get_history_for_student_modules(student_module_id) + if not history: + self.say("No history for student_module_id {}".format(student_module_id)) + return + + ids_to_delete = [] + next_created = None + for history_id, created in reversed(history): + if next_created is not None: + # Compare this timestamp with the next one. + if (next_created - created) < delete_gap: + # This row is followed closely by another, we can discard + # this one. + ids_to_delete.append(history_id) + + next_created = created + + verb = "Would have deleted" if self.dry_run else "Deleting" + self.say("{verb} {to_delete} rows of {total} for student_module_id {id}".format( + verb=verb, + to_delete=len(ids_to_delete), + total=len(history), + id=student_module_id, + )) + + if ids_to_delete and not self.dry_run: + self.delete_history(ids_to_delete) diff --git a/lms/djangoapps/courseware/management/tests/test_clean_history.py b/lms/djangoapps/courseware/management/tests/test_clean_history.py new file mode 100644 index 0000000000..cf681a7b0a --- /dev/null +++ b/lms/djangoapps/courseware/management/tests/test_clean_history.py @@ -0,0 +1,459 @@ +"""Test the clean_history management command.""" + +import fnmatch +from mock import Mock +import os.path +import textwrap + +import dateutil.parser + +from django.test import TestCase +from django.db import connection + +from courseware.management.commands.clean_history import StudentModuleHistoryCleaner + +# In lots of places in this file, smhc == StudentModuleHistoryCleaner + +def parse_date(sdate): + """Parse a string date into a datetime.""" + parsed = dateutil.parser.parse(sdate) + parsed = parsed.replace(tzinfo=dateutil.tz.gettz('UTC')) + return parsed + +class SmhcSayStubbed(StudentModuleHistoryCleaner): + """StudentModuleHistoryCleaner, but with .say() stubbed for testing.""" + def __init__(self, **kwargs): + super(SmhcSayStubbed, self).__init__(**kwargs) + 
self.said_lines = [] + + def say(self, msg): + self.said_lines.append(msg) + + +class SmhcDbMocked(SmhcSayStubbed): + """StudentModuleHistoryCleaner, but with db access mocked.""" + def __init__(self, **kwargs): + super(SmhcDbMocked, self).__init__(**kwargs) + self.get_history_for_student_modules = Mock() + self.delete_history = Mock() + + def set_rows(self, rows): + """Set the mocked history rows.""" + rows = [(row_id, parse_date(created)) for row_id, created in rows] + self.get_history_for_student_modules.return_value = rows + + +class HistoryCleanerTest(TestCase): + """Base class for all history cleaner tests.""" + + maxDiff = None + + def setUp(self): + super(HistoryCleanerTest, self).setUp() + self.addCleanup(self.clean_up_state_file) + + def write_state_file(self, state): + """Write the string `state` into the state file read by StudentModuleHistoryCleaner.""" + with open(StudentModuleHistoryCleaner.STATE_FILE, "w") as state_file: + state_file.write(state) + + def read_state_file(self): + """Return the string contents of the state file read by StudentModuleHistoryCleaner.""" + with open(StudentModuleHistoryCleaner.STATE_FILE) as state_file: + return state_file.read() + + def clean_up_state_file(self): + """Remove any state file lying around.""" + if os.path.exists(StudentModuleHistoryCleaner.STATE_FILE): + os.remove(StudentModuleHistoryCleaner.STATE_FILE) + + def assert_said(self, smhc, *msgs): + """Fail if the `smhc` didn't say `msgs`. + + The messages passed here are `fnmatch`-style patterns: "*" means anything. 
+ + """ + for said, pattern in zip(smhc.said_lines, msgs): + if not fnmatch.fnmatch(said, pattern): + fmt = textwrap.dedent("""\ + Messages: + + {msgs} + + don't match patterns: + + {patterns} + + Failed at {said!r} and {pattern!r} + """) + + msg = fmt.format( + msgs="\n".join(smhc.said_lines), + patterns="\n".join(msgs), + said=said, + pattern=pattern + ) + self.fail(msg) + + def parse_rows(self, rows): + """Parse convenient rows into real data.""" + rows = [ + (row_id, parse_date(created), student_module_id) + for row_id, created, student_module_id in rows + ] + return rows + + def write_history(self, rows): + """Write history rows to the db. + + Each row should be (id, created, student_module_id). + + """ + cursor = connection.cursor() + cursor.executemany(""" + INSERT INTO courseware_studentmodulehistory + (id, created, student_module_id) + VALUES (%s, %s, %s) + """, + self.parse_rows(rows), + ) + + def read_history(self): + """Read the history from the db, and return it as a list of tuples. + + Returns [(id, created, student_module_id), ...] + + """ + cursor = connection.cursor() + cursor.execute(""" + SELECT id, created, student_module_id FROM courseware_studentmodulehistory + """) + return cursor.fetchall() + + def assert_history(self, rows): + """Assert that the history rows are the same as `rows`.""" + self.assertEqual(self.parse_rows(rows), self.read_history()) + + +class HistoryCleanerNoDbTest(HistoryCleanerTest): + """Tests of StudentModuleHistoryCleaner with db access mocked.""" + + def test_empty(self): + smhc = SmhcDbMocked() + smhc.set_rows([]) + + smhc.clean_one_student_module(1) + self.assert_said(smhc, "No history for student_module_id 1") + + # Nothing to delete, so delete_history wasn't called. 
+ self.assertFalse(smhc.delete_history.called) + + def test_one_row(self): + smhc = SmhcDbMocked() + smhc.set_rows([ + (1, "2013-07-13 12:11:10.987"), + ]) + smhc.clean_one_student_module(1) + self.assert_said(smhc, "Deleting 0 rows of 1 for student_module_id 1") + # Nothing to delete, so delete_history wasn't called. + self.assertFalse(smhc.delete_history.called) + + def test_one_row_dry_run(self): + smhc = SmhcDbMocked(dry_run=True) + smhc.set_rows([ + (1, "2013-07-13 12:11:10.987"), + ]) + smhc.clean_one_student_module(1) + self.assert_said(smhc, "Would have deleted 0 rows of 1 for student_module_id 1") + # Nothing to delete, so delete_history wasn't called. + self.assertFalse(smhc.delete_history.called) + + def test_two_rows_close(self): + smhc = SmhcDbMocked() + smhc.set_rows([ + (7, "2013-07-13 12:34:56.789"), + (9, "2013-07-13 12:34:56.987"), + ]) + smhc.clean_one_student_module(1) + self.assert_said(smhc, "Deleting 1 rows of 2 for student_module_id 1") + smhc.delete_history.assert_called_once_with([7]) + + def test_two_rows_far(self): + smhc = SmhcDbMocked() + smhc.set_rows([ + (7, "2013-07-13 12:34:56.789"), + (9, "2013-07-13 12:34:57.890"), + ]) + smhc.clean_one_student_module(1) + self.assert_said(smhc, "Deleting 0 rows of 2 for student_module_id 1") + self.assertFalse(smhc.delete_history.called) + + def test_a_bunch_of_rows(self): + smhc = SmhcDbMocked() + smhc.set_rows([ + ( 4, "2013-07-13 16:30:00.000"), # keep + ( 8, "2013-07-13 16:30:01.100"), + (15, "2013-07-13 16:30:01.200"), + (16, "2013-07-13 16:30:01.300"), # keep + (23, "2013-07-13 16:30:02.400"), + (42, "2013-07-13 16:30:02.500"), + (98, "2013-07-13 16:30:02.600"), # keep + (99, "2013-07-13 16:30:59.000"), # keep + ]) + smhc.clean_one_student_module(17) + self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 17") + smhc.delete_history.assert_called_once_with([42, 23, 15, 8]) + + +class HistoryCleanerWitDbTest(HistoryCleanerTest): + """Tests of StudentModuleHistoryCleaner with a 
real db.""" + + def test_no_history(self): + # Cleaning a student_module_id with no history leaves the db unchanged. + smhc = SmhcSayStubbed() + self.write_history([ + ( 4, "2013-07-13 16:30:00.000", 11), # keep + ( 8, "2013-07-13 16:30:01.100", 11), + (15, "2013-07-13 16:30:01.200", 11), + (16, "2013-07-13 16:30:01.300", 11), # keep + (23, "2013-07-13 16:30:02.400", 11), + (42, "2013-07-13 16:30:02.500", 11), + (98, "2013-07-13 16:30:02.600", 11), # keep + (99, "2013-07-13 16:30:59.000", 11), # keep + ]) + + smhc.clean_one_student_module(22) + self.assert_said(smhc, "No history for student_module_id 22") + self.assert_history([ + ( 4, "2013-07-13 16:30:00.000", 11), # keep + ( 8, "2013-07-13 16:30:01.100", 11), + (15, "2013-07-13 16:30:01.200", 11), + (16, "2013-07-13 16:30:01.300", 11), # keep + (23, "2013-07-13 16:30:02.400", 11), + (42, "2013-07-13 16:30:02.500", 11), + (98, "2013-07-13 16:30:02.600", 11), # keep + (99, "2013-07-13 16:30:59.000", 11), # keep + ]) + + def test_a_bunch_of_rows(self): + # Cleaning a student_module_id with 8 records, 4 to delete. + smhc = SmhcSayStubbed() + self.write_history([ + ( 4, "2013-07-13 16:30:00.000", 11), # keep + ( 8, "2013-07-13 16:30:01.100", 11), + (15, "2013-07-13 16:30:01.200", 11), + (16, "2013-07-13 16:30:01.300", 11), # keep + (17, "2013-07-13 16:30:01.310", 22), # other student_module_id! + (23, "2013-07-13 16:30:02.400", 11), + (42, "2013-07-13 16:30:02.500", 11), + (98, "2013-07-13 16:30:02.600", 11), # keep + (99, "2013-07-13 16:30:59.000", 11), # keep + ]) + + smhc.clean_one_student_module(11) + self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 11") + self.assert_history([ + ( 4, "2013-07-13 16:30:00.000", 11), # keep + (16, "2013-07-13 16:30:01.300", 11), # keep + (17, "2013-07-13 16:30:01.310", 22), # other student_module_id! 
+ (98, "2013-07-13 16:30:02.600", 11), # keep + (99, "2013-07-13 16:30:59.000", 11), # keep + ]) + + def test_a_bunch_of_rows_dry_run(self): + # Cleaning a student_module_id with 8 records, 4 to delete, + # but don't really do it. + smhc = SmhcSayStubbed(dry_run=True) + self.write_history([ + ( 4, "2013-07-13 16:30:00.000", 11), # keep + ( 8, "2013-07-13 16:30:01.100", 11), + (15, "2013-07-13 16:30:01.200", 11), + (16, "2013-07-13 16:30:01.300", 11), # keep + (23, "2013-07-13 16:30:02.400", 11), + (42, "2013-07-13 16:30:02.500", 11), + (98, "2013-07-13 16:30:02.600", 11), # keep + (99, "2013-07-13 16:30:59.000", 11), # keep + ]) + + smhc.clean_one_student_module(11) + self.assert_said(smhc, "Would have deleted 4 rows of 8 for student_module_id 11") + self.assert_history([ + ( 4, "2013-07-13 16:30:00.000", 11), # keep + ( 8, "2013-07-13 16:30:01.100", 11), + (15, "2013-07-13 16:30:01.200", 11), + (16, "2013-07-13 16:30:01.300", 11), # keep + (23, "2013-07-13 16:30:02.400", 11), + (42, "2013-07-13 16:30:02.500", 11), + (98, "2013-07-13 16:30:02.600", 11), # keep + (99, "2013-07-13 16:30:59.000", 11), # keep + ]) + + def test_a_bunch_of_rows_in_jumbled_order(self): + # Cleaning a student_module_id with 8 records, 4 to delete. + smhc = SmhcSayStubbed() + self.write_history([ + (23, "2013-07-13 16:30:01.100", 11), + (24, "2013-07-13 16:30:01.300", 11), # keep + (27, "2013-07-13 16:30:02.500", 11), + (30, "2013-07-13 16:30:01.350", 22), # other student_module_id! + (32, "2013-07-13 16:30:59.000", 11), # keep + (50, "2013-07-13 16:30:02.400", 11), + (51, "2013-07-13 16:30:02.600", 11), # keep + (56, "2013-07-13 16:30:00.000", 11), # keep + (57, "2013-07-13 16:30:01.200", 11), + ]) + + smhc.clean_one_student_module(11) + self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 11") + self.assert_history([ + (24, "2013-07-13 16:30:01.300", 11), # keep + (30, "2013-07-13 16:30:01.350", 22), # other student_module_id! 
+ (32, "2013-07-13 16:30:59.000", 11), # keep + (51, "2013-07-13 16:30:02.600", 11), # keep + (56, "2013-07-13 16:30:00.000", 11), # keep + ]) + + def test_get_last_student_module(self): + # Can we find the last student_module_id properly? + smhc = SmhcSayStubbed() + self.write_history([ + (23, "2013-07-13 16:30:01.100", 11), + (24, "2013-07-13 16:30:01.300", 44), + (27, "2013-07-13 16:30:02.500", 11), + (30, "2013-07-13 16:30:01.350", 22), + (32, "2013-07-13 16:30:59.000", 11), + (51, "2013-07-13 16:30:02.600", 33), + (56, "2013-07-13 16:30:00.000", 11), + ]) + last = smhc.get_last_student_module_id() + self.assertEqual(last, 44) + self.assert_said(smhc, "Last student_module_id is 44") + + def test_load_state_with_no_stored_state(self): + smhc = SmhcSayStubbed() + self.assertFalse(os.path.exists(smhc.STATE_FILE)) + smhc.load_state() + self.assertEqual(smhc.next_student_module_id, 0) + self.assert_said(smhc, "No stored state") + + def test_load_stored_state(self): + self.write_state_file('{"next_student_module_id": 23}') + smhc = SmhcSayStubbed() + smhc.load_state() + self.assertEqual(smhc.next_student_module_id, 23) + self.assert_said(smhc, 'Loaded stored state: {"next_student_module_id": 23}') + + def test_save_state(self): + smhc = SmhcSayStubbed() + smhc.next_student_module_id = 47 + smhc.save_state() + state = self.read_state_file() + self.assertEqual(state, '{"next_student_module_id": 47}') + + +class SmhcForTestingMain(SmhcSayStubbed): + """A StudentModuleHistoryCleaner with a few function stubbed for testing main.""" + + def __init__(self, *args, **kwargs): + self.exception_smids = kwargs.pop('exception_smids', ()) + super(SmhcForTestingMain, self).__init__(*args, **kwargs) + + def clean_one_student_module(self, smid): + self.say("(not really cleaning {})".format(smid)) + if smid in self.exception_smids: + raise Exception("Something went wrong!") + + def commit(self): + self.say("(not really committing)") + + +class 
HistoryCleanerMainTest(HistoryCleanerTest): + """Tests of StudentModuleHistoryCleaner.main(), using SmhcForTestingMain.""" + + def test_only_one_record(self): + smhc = SmhcForTestingMain() + self.write_history([ + (1, "2013-07-15 11:47:00.000", 1), + ]) + smhc.main() + self.assert_said(smhc, + 'Last student_module_id is 1', + 'No stored state', + '(not really cleaning 0)', + '(not really cleaning 1)', + '(not really committing)', + 'Saved state: {"next_student_module_id": 2}', + ) + + def test_already_processed_some(self): + smhc = SmhcForTestingMain() + self.write_state_file('{"next_student_module_id": 25}') + self.write_history([ + (1, "2013-07-15 15:04:00.000", 23), + (2, "2013-07-15 15:04:11.000", 23), + (3, "2013-07-15 15:04:01.000", 24), + (4, "2013-07-15 15:04:00.000", 25), + (5, "2013-07-15 15:04:00.000", 26), + ]) + smhc.main() + self.assert_said(smhc, + 'Last student_module_id is 26', + 'Loaded stored state: {"next_student_module_id": 25}', + '(not really cleaning 25)', + '(not really cleaning 26)', + '(not really committing)', + 'Saved state: {"next_student_module_id": 27}' + ) + + def test_working_in_batches(self): + smhc = SmhcForTestingMain() + self.write_state_file('{"next_student_module_id": 25}') + self.write_history([ + (3, "2013-07-15 15:04:01.000", 24), + (4, "2013-07-15 15:04:00.000", 25), + (5, "2013-07-15 15:04:00.000", 26), + (6, "2013-07-15 15:04:00.000", 27), + (7, "2013-07-15 15:04:00.000", 28), + (8, "2013-07-15 15:04:00.000", 29), + ]) + smhc.main(batch_size=3) + self.assert_said(smhc, + 'Last student_module_id is 29', + 'Loaded stored state: {"next_student_module_id": 25}', + '(not really cleaning 25)', + '(not really cleaning 26)', + '(not really cleaning 27)', + '(not really committing)', + 'Saved state: {"next_student_module_id": 28}', + '(not really cleaning 28)', + '(not really cleaning 29)', + '(not really committing)', + 'Saved state: {"next_student_module_id": 30}', + ) + + def test_something_failing_while_cleaning(self): + 
smhc = SmhcForTestingMain(exception_smids=[26]) + self.write_state_file('{"next_student_module_id": 25}') + self.write_history([ + (3, "2013-07-15 15:04:01.000", 24), + (4, "2013-07-15 15:04:00.000", 25), + (5, "2013-07-15 15:04:00.000", 26), + (6, "2013-07-15 15:04:00.000", 27), + (7, "2013-07-15 15:04:00.000", 28), + (8, "2013-07-15 15:04:00.000", 29), + ]) + smhc.main(batch_size=3) + self.assert_said(smhc, + 'Last student_module_id is 29', + 'Loaded stored state: {"next_student_module_id": 25}', + '(not really cleaning 25)', + '(not really cleaning 26)', + "Couldn't clean student_module_id 26:\nTraceback*Exception: Something went wrong!\n", + '(not really cleaning 27)', + '(not really committing)', + 'Saved state: {"next_student_module_id": 28}', + '(not really cleaning 28)', + '(not really cleaning 29)', + '(not really committing)', + 'Saved state: {"next_student_module_id": 30}', + ) From 3a49136f03ee436eef36829bc8d0891424c9d9f1 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 15 Jul 2013 16:57:28 -0400 Subject: [PATCH 10/31] Quiet some debug output, get transactions right. --- .../courseware/management/commands/clean_history.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lms/djangoapps/courseware/management/commands/clean_history.py b/lms/djangoapps/courseware/management/commands/clean_history.py index 2c243a43be..8b4f3d4d6d 100644 --- a/lms/djangoapps/courseware/management/commands/clean_history.py +++ b/lms/djangoapps/courseware/management/commands/clean_history.py @@ -10,7 +10,8 @@ This command that does that. import datetime import json -from optparse import make_option +import logging +import optparse import traceback from django.core.management.base import NoArgsCommand @@ -23,7 +24,7 @@ class Command(NoArgsCommand): help = "Deletes unneeded rows from the StudentModuleHistory table." 
option_list = NoArgsCommand.option_list + ( - make_option( + optparse.make_option( '--dry-run', action='store_true', default=False, @@ -32,6 +33,9 @@ class Command(NoArgsCommand): ) def handle_noargs(self, **options): + # We don't want to see the SQL output from the db layer. + logging.getLogger("").setLevel(logging.INFO) + smhc = StudentModuleHistoryCleaner( dry_run=options["dry_run"], ) @@ -55,6 +59,8 @@ class StudentModuleHistoryCleaner(object): batch_size = batch_size or self.BATCH_SIZE + connection.enter_transaction_management() + self.last_student_module_id = self.get_last_student_module_id() self.load_state() @@ -65,7 +71,8 @@ class StudentModuleHistoryCleaner(object): except Exception: # pylint: disable=W0703 trace = traceback.format_exc() self.say("Couldn't clean student_module_id {}:\n{}".format(smid, trace)) - self.commit() + if not self.dry_run: + self.commit() self.save_state() def say(self, message): From 83cb3d192444172158b1a242a41b5edbebdf0dd9 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 18 Jul 2013 09:56:07 -0400 Subject: [PATCH 11/31] Add batch and sleep arguments so we can control the speed from the command line. --- .../management/commands/clean_history.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/lms/djangoapps/courseware/management/commands/clean_history.py b/lms/djangoapps/courseware/management/commands/clean_history.py index 8b4f3d4d6d..30b0df8627 100644 --- a/lms/djangoapps/courseware/management/commands/clean_history.py +++ b/lms/djangoapps/courseware/management/commands/clean_history.py @@ -12,6 +12,7 @@ import datetime import json import logging import optparse +import time import traceback from django.core.management.base import NoArgsCommand @@ -24,12 +25,24 @@ class Command(NoArgsCommand): help = "Deletes unneeded rows from the StudentModuleHistory table." 
option_list = NoArgsCommand.option_list + ( + optparse.make_option( + '--batch', + type='int', + default=100, + help="Batch size, number of module_ids to examine in a transaction.", + ), optparse.make_option( '--dry-run', action='store_true', default=False, help="Don't change the database, just show what would be done.", ), + optparse.make_option( + '--sleep', + type='float', + default=0, + help="Seconds to sleep between batches.", + ), ) def handle_noargs(self, **options): @@ -39,7 +52,7 @@ class Command(NoArgsCommand): smhc = StudentModuleHistoryCleaner( dry_run=options["dry_run"], ) - smhc.main() + smhc.main(batch_size=options["batch"], sleep=options["sleep"]) class StudentModuleHistoryCleaner(object): @@ -54,7 +67,7 @@ class StudentModuleHistoryCleaner(object): self.next_student_module_id = 0 self.last_student_module_id = 0 - def main(self, batch_size=None): + def main(self, batch_size=None, sleep=0): """Invoked from the management command to do all the work.""" batch_size = batch_size or self.BATCH_SIZE @@ -74,6 +87,8 @@ class StudentModuleHistoryCleaner(object): if not self.dry_run: self.commit() self.save_state() + if sleep: + time.sleep(sleep) def say(self, message): """ From 10f062cf1810da61a2cfafe250bd1d2c903f77eb Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 18 Jul 2013 10:34:40 -0400 Subject: [PATCH 12/31] Records should be sorted by created,id so that timestamp ties will be broken by id. 
--- .../management/commands/clean_history.py | 2 +- .../management/tests/test_clean_history.py | 24 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/lms/djangoapps/courseware/management/commands/clean_history.py b/lms/djangoapps/courseware/management/commands/clean_history.py index 30b0df8627..c266e2df53 100644 --- a/lms/djangoapps/courseware/management/commands/clean_history.py +++ b/lms/djangoapps/courseware/management/commands/clean_history.py @@ -180,7 +180,7 @@ class StudentModuleHistoryCleaner(object): cursor.execute(""" SELECT id, created FROM courseware_studentmodulehistory WHERE student_module_id = %s - ORDER BY created + ORDER BY created, id """, [student_module_id] ) diff --git a/lms/djangoapps/courseware/management/tests/test_clean_history.py b/lms/djangoapps/courseware/management/tests/test_clean_history.py index cf681a7b0a..0636279c26 100644 --- a/lms/djangoapps/courseware/management/tests/test_clean_history.py +++ b/lms/djangoapps/courseware/management/tests/test_clean_history.py @@ -313,6 +313,30 @@ class HistoryCleanerWitDbTest(HistoryCleanerTest): (56, "2013-07-13 16:30:00.000", 11), # keep ]) + def test_a_bunch_of_rows_with_timestamp_ties(self): + # Sometimes rows are written with identical timestamps. The one with + # the greater id is the winner in that case. + smhc = SmhcSayStubbed() + self.write_history([ + (21, "2013-07-13 16:30:01.100", 11), + (24, "2013-07-13 16:30:01.100", 11), # keep + (22, "2013-07-13 16:30:01.100", 11), + (23, "2013-07-13 16:30:01.100", 11), + (27, "2013-07-13 16:30:02.500", 11), + (30, "2013-07-13 16:30:01.350", 22), # other student_module_id! + (32, "2013-07-13 16:30:59.000", 11), # keep + (50, "2013-07-13 16:30:02.500", 11), # keep + ]) + + smhc.clean_one_student_module(11) + self.assert_said(smhc, "Deleting 4 rows of 7 for student_module_id 11") + self.assert_history([ + (24, "2013-07-13 16:30:01.100", 11), # keep + (30, "2013-07-13 16:30:01.350", 22), # other student_module_id! 
+ (32, "2013-07-13 16:30:59.000", 11), # keep + (50, "2013-07-13 16:30:02.500", 11), # keep + ]) + def test_get_last_student_module(self): # Can we find the last student_module_id properly? smhc = SmhcSayStubbed() From 3d67c506a363e3f213e5318421687e0b99bd5d12 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 18 Jul 2013 11:16:01 -0400 Subject: [PATCH 13/31] Use TransactionTestCase, to keep other tests from failing, even though it slows them down. --- .../courseware/management/tests/test_clean_history.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lms/djangoapps/courseware/management/tests/test_clean_history.py b/lms/djangoapps/courseware/management/tests/test_clean_history.py index 0636279c26..2a70556ffe 100644 --- a/lms/djangoapps/courseware/management/tests/test_clean_history.py +++ b/lms/djangoapps/courseware/management/tests/test_clean_history.py @@ -7,7 +7,7 @@ import textwrap import dateutil.parser -from django.test import TestCase +from django.test import TransactionTestCase from django.db import connection from courseware.management.commands.clean_history import StudentModuleHistoryCleaner @@ -43,7 +43,7 @@ class SmhcDbMocked(SmhcSayStubbed): self.get_history_for_student_modules.return_value = rows -class HistoryCleanerTest(TestCase): +class HistoryCleanerTest(TransactionTestCase): """Base class for all history cleaner tests.""" maxDiff = None From 900e794c0224c2812fde92a677770e8a65644d61 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 18 Jul 2013 11:53:21 -0400 Subject: [PATCH 14/31] Only quiet the particular logger we think is too noisy. 
--- lms/djangoapps/courseware/management/commands/clean_history.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lms/djangoapps/courseware/management/commands/clean_history.py b/lms/djangoapps/courseware/management/commands/clean_history.py index c266e2df53..4286e17658 100644 --- a/lms/djangoapps/courseware/management/commands/clean_history.py +++ b/lms/djangoapps/courseware/management/commands/clean_history.py @@ -47,7 +47,7 @@ class Command(NoArgsCommand): def handle_noargs(self, **options): # We don't want to see the SQL output from the db layer. - logging.getLogger("").setLevel(logging.INFO) + logging.getLogger("django.db.backends").setLevel(logging.INFO) smhc = StudentModuleHistoryCleaner( dry_run=options["dry_run"], From 46ae2f9c26acefe8387782ded51948d80a655b2f Mon Sep 17 00:00:00 2001 From: RobertMarks Date: Thu, 27 Jun 2013 17:14:49 -0700 Subject: [PATCH 15/31] Added support for a new problem type: ChoicetextResponse --- AUTHORS | 1 + CHANGELOG.rst | 5 +- common/lib/capa/capa/inputtypes.py | 208 +++++++++++ common/lib/capa/capa/responsetypes.py | 332 +++++++++++++++++- .../lib/capa/capa/templates/choicetext.html | 76 ++++ .../capa/capa/tests/response_xml_factory.py | 106 ++++++ .../capa/capa/tests/test_input_templates.py | 167 +++++++++ common/lib/capa/capa/tests/test_inputtypes.py | 91 +++++ .../lib/capa/capa/tests/test_responsetypes.py | 283 +++++++++++++++ common/lib/xmodule/xmodule/capa_module.py | 19 +- .../lib/xmodule/xmodule/css/capa/display.scss | 28 ++ .../xmodule/js/spec/capa/display_spec.coffee | 52 +++ .../xmodule/js/src/capa/display.coffee | 12 + common/static/js/capa/choicetextinput.js | 75 ++++ .../courseware/features/problems.feature | 10 + .../courseware/features/problems_setup.py | 83 ++++- 16 files changed, 1544 insertions(+), 4 deletions(-) create mode 100644 common/lib/capa/capa/templates/choicetext.html create mode 100644 common/static/js/capa/choicetextinput.js diff --git a/AUTHORS b/AUTHORS index 
70af9f318d..89fc2d959b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -81,3 +81,4 @@ Felix Sun Adam Palay Ian Hoover Mukul Goyal +Robert Marks diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 04c8a5baae..20642bbb1f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ These are notable changes in edx-platform. This is a rolling list of changes, in roughly chronological order, most recent first. Add your entries at or near the top. Include a label indicating the component affected. + Common: Added *experimental* support for jsinput type. Common: Added setting to specify Celery Broker vhost @@ -21,6 +22,8 @@ Studio: Added support for uploading and managing PDF textbooks Common: Student information is now passed to the tracking log via POST instead of GET. +Blades: Added functionality and tests for new capa input type: choicetextresponse. + Common: Add tests for documentation generation to test suite Blades: User answer now preserved (and changeable) after clicking "show answer" in choice problems @@ -43,7 +46,7 @@ history of background tasks for a given problem and student. Blades: Small UX fix on capa multiple-choice problems. Make labels only as wide as the text to reduce accidental choice selections. -Studio: +Studio: - use xblock field defaults to initialize all new instances' fields and only use templates as override samples. - create new instances via in memory create_xmodule and related methods rather diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 9bb72ad4e1..f26909b633 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -1368,3 +1368,211 @@ class AnnotationInput(InputTypeBase): return extra_context registry.register(AnnotationInput) + + +class ChoiceTextGroup(InputTypeBase): + """ + Groups of radiobutton/checkboxes with text inputs. + Allows for a "not enough information" option to be added + to problems with numerical answers. 
+ + Examples: + RadioButton problem + + + A person rolls a standard die 100 times and records the results. + On the first roll they received a "1". Given this information + select the correct choice and fill in numbers to make it accurate. + + + + The lowest number rolled was: + and the highest number rolled was: + . + The lowest number rolled was + and there is not enough information to determine the highest number rolled. + + There is not enough information to determine the lowest + number rolled, and the highest number rolled was: + . + + + + + + CheckboxProblem: + + + A person randomly selects 100 times, with replacement, from the list of numbers \(\sqrt{2}\) , 2, 3, 4 ,5 ,6 + and records the results. The first number they pick is \(\sqrt{2}\) Given this information + select the correct choices and fill in numbers to make them accurate. + + + + + The lowest number selected was + + + The highest number selected was . + + There is not enough information given to determine the highest number + which was selected. + + There is not enough information given to determine the lowest number + selected. + + + + + + In the preceding examples the is used to generate a textinput html element + in the problem's display. Since it is inside of an incorrect choice, no answer given + for it will be correct, and thus specifying an answer for it is not needed. + """ + template = "choicetext.html" + tags = ['radiotextgroup', 'checkboxtextgroup'] + + def setup(self): + """ + Performs setup for the initial rendering of the problem. + `self.html_input_type` determines whether this problem is displayed + with radiobuttons or checkboxes + + If the initial value of `self.value` is '' change it to {} so that + the template has an empty dictionary to work with. 
+ + sets the value of self.choices to be equal to the return value of + `self.extract_choices` + """ + self.text_input_values = {} + if self.tag == 'radiotextgroup': + self.html_input_type = "radio" + elif self.tag == 'checkboxtextgroup': + self.html_input_type = "checkbox" + else: + raise Exception("ChoiceGroup: unexpected tag {0}".format(self.tag)) + + if self.value == '': + # Make `value` an empty dictionary, if it currently has an empty + # value. This is necessary because the template expects a + # dictionary. + self.value = {} + self.choices = self.extract_choices(self.xml) + + @classmethod + def get_attributes(cls): + """ + Returns a list of `Attribute` for this problem type + """ + return [ + Attribute("show_correctness", "always"), + Attribute("submitted_message", "Answer received.") + ] + + def _extra_context(self): + """ + Returns a dictionary of extra content necessary for rendering this InputType. + + `input_type` is either 'radio' or 'checkbox' indicating whether the choices for + this problem will have radiobuttons or checkboxes. + """ + return { + 'input_type': self.html_input_type, + 'choices': self.choices + } + + @staticmethod + def extract_choices(element): + """ + Extracts choices from the xml for this problem type. + If we have xml that is as follows(choice names will have been assigned + by now) + + + The number + + Is the mean of the list. + + " + + "tag; got {0} instead".format(choice.tag) + ) + + components = [] + choice_text = '' + if choice.text is not None: + choice_text += choice.text + # Initialize our dict for the next content + adder = { + 'type': 'text', + 'contents': choice_text, + 'tail_text': '', + 'value': '' + } + components.append(adder) + + for elt in choice: + # for elements in the choice e.g. + adder = { + 'type': 'text', + 'contents': '', + 'tail_text': '', + 'value': '' + } + tag_type = elt.tag + # If the current `elt` is a set the + # `adder`type to 'numtolerance_input', and 'contents' to + # the `elt`'s name. 
+ # Treat decoy_inputs and numtolerance_inputs the same in order + # to prevent students from reading the Html and figuring out + # which inputs are valid + if tag_type in ('numtolerance_input', 'decoy_input'): + # We set this to textinput, so that we get a textinput html + # element. + adder['type'] = 'textinput' + adder['contents'] = elt.get('name') + else: + adder['contents'] = elt.text + + # Add any tail text("is the mean" in the example) + adder['tail_text'] = elt.tail if elt.tail else '' + components.append(adder) + + # Add the tuple for the current choice to the list of choices + choices.append((choice.get("name"), components)) + return choices + +registry.register(ChoiceTextGroup) diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index 3762c21976..51bae0b215 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -2097,6 +2097,335 @@ class AnnotationResponse(LoncapaResponse): return option_ids[0] return None + +class ChoiceTextResponse(LoncapaResponse): + """ + Allows for multiple choice responses with text inputs + Desired semantics match those of NumericalResponse and + ChoiceResponse. + """ + + response_tag = 'choicetextresponse' + max_inputfields = 1 + allowed_inputfields = ['choicetextgroup', + 'checkboxtextgroup', + 'radiotextgroup' + ] + + def setup_response(self): + """ + Sets up three dictionaries for use later: + `correct_choices`: These are the correct binary choices(radio/checkbox) + `correct_inputs`: These are the numerical/string answers for required + inputs. + `answer_values`: This is a dict, keyed by the name of the binary choice + which contains the correct answers for the text inputs separated by + commas e.g. "1, 0.5" + + `correct_choices` and `correct_inputs` are used for grading the problem + and `answer_values` is used for displaying correct answers. 
+ + """ + context = self.context + self.correct_choices = {} + self.assign_choice_names() + self.correct_inputs = {} + self.answer_values = {self.answer_id: []} + correct_xml = self.xml.xpath('//*[@id=$id]//choice[@correct="true"]', + id=self.xml.get('id')) + for node in correct_xml: + # For each correct choice, set the `parent_name` to the + # current choice's name + parent_name = node.get('name') + # Add the name of the correct binary choice to the + # correct choices list as a key. The value is not important. + self.correct_choices[parent_name] = {'answer': ''} + # Add the name of the parent to the list of correct answers + self.answer_values[self.answer_id].append(parent_name) + answer_list = [] + # Loop over elements inside of the correct choices + for child in node: + answer = child.get('answer', None) + if not answer: + # If the question creator does not specify an answer for a + # inside of a correct choice, raise an error + raise LoncapaProblemError( + "Answer not provided for numtolerance_input" + ) + # Contextualize the answer to allow script generated answers. + answer = contextualize_text(answer, context) + input_name = child.get('name') + # Contextualize the tolerance to value. + tolerance = contextualize_text( + child.get('tolerance', '0'), + context + ) + # Add the answer and tolerance information for the current + # numtolerance_input to `correct_inputs` + self.correct_inputs[input_name] = { + 'answer': answer, + 'tolerance': tolerance + } + # Add the correct answer for this input to the list for show + answer_list.append(answer) + # Turn the list of numtolerance_input answers into a comma separated string. + self.answer_values[parent_name] = ', '.join(answer_list) + # Turn correct choices into a set. Allows faster grading. + self.correct_choices = set(self.correct_choices.keys()) + + def assign_choice_names(self): + """ + Initialize name attributes in and tags + for this response. 
+ + Example: + Assuming for simplicity that `self.answer_id` = '1_2_1' + + Before the function is called `self.xml` = + + + The number + + Is the mean of the list. + + + False demonstration choice + + + + After this is called the choices and numtolerance_inputs will have a name + attribute initialized and self.xml will be: + + + + The number + + Is the mean of the list. + + + # "bc" is appended at the end to indicate that this is a + # binary choice as opposed to a numtolerance_input, this convention + # is used when grading the problem + choice.set( + "name", + self.answer_id + "_choiceinput_" + str(index) + "bc" + ) + # Set Name attributes for elements + # Look for all inside this choice. + numtolerance_inputs = choice.findall('numtolerance_input') + # Look for all inside this choice + decoys = choice.findall('decoy_input') + # would only be used in choices which do not contain + # + inputs = numtolerance_inputs if numtolerance_inputs else decoys + # Give each input inside of the choice a name combining + # The ordinality of the choice, and the ordinality of the input + # within that choice e.g. 1_2_1_choiceinput_0_numtolerance_input_1 + for ind, child in enumerate(inputs): + child.set( + "name", + self.answer_id + "_choiceinput_" + str(index) + + "_numtolerance_input_" + str(ind) + ) + + def get_score(self, student_answers): + """ + Returns a `CorrectMap` showing whether `student_answers` are correct. + + `student_answers` contains keys for binary inputs(radiobutton, + checkbox) and numerical inputs. Keys ending with 'bc' are binary + choice inputs otherwise they are text fields. + + This method first separates the two + types of answers and then grades them in separate methods. + + The student is only correct if they have both the binary inputs and + numerical inputs correct. + """ + answer_dict = student_answers.get(self.answer_id, "") + binary_choices, numtolerance_inputs = self._split_answers_dict(answer_dict) + # Check the binary choices first. 
+ choices_correct = self._check_student_choices(binary_choices) + inputs_correct = True + inputs_correct = self._check_student_inputs(numtolerance_inputs) + # Only return correct if the student got both the binary + # and numtolerance_inputs are correct + correct = choices_correct and inputs_correct + + return CorrectMap( + self.answer_id, + 'correct' if correct else 'incorrect' + ) + + def get_answers(self): + """ + Returns a dictionary containing the names of binary choices as keys + and a string of answers to any numtolerance_inputs which they may have + e.g {choice_1bc : "answer1, answer2", choice_2bc : ""} + """ + return self.answer_values + + def _split_answers_dict(self, a_dict): + """ + Returns two dicts: + `binary_choices` : dictionary {input_name: input_value} for + the binary choices which the student selected. + and + `numtolerance_choices` : a dictionary {input_name: input_value} + for the numtolerance_inputs inside of choices which were selected + + Determines if an input is inside of a binary input by looking at + the beginning of it's name. + + For example. If a binary_choice was named '1_2_1_choiceinput_0bc' + All of the numtolerance_inputs in it would have an idea that begins + with '1_2_1_choice_input_0_numtolerance_input' + + Splits the name of the numtolerance_input at the occurence of + '_numtolerance_input_' and appends 'bc' to the end to get the name + of the choice it is contained in. + + Example: + `a_dict` = { + '1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc', + '1_2_1_choiceinput_0_numtolerance_input_0': '1', + '1_2_1_choiceinput_0_numtolerance_input_1': '2' + '1_2_1_choiceinput_1_numtolerance_input_0': '3' + } + + In this case, the binary choice is '1_2_1_choiceinput_0bc', and + the numtolerance_inputs associated with it are + '1_2_1_choiceinput_0_numtolerance_input_0', and + '1_2_1_choiceinput_0_numtolerance_input_1'. 
+ + so the two return dictionaries would be + `binary_choices` = {'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc'} + and + `numtolerance_choices` ={ + '1_2_1_choiceinput_0_numtolerance_input_0': '1', + '1_2_1_choiceinput_0_numtolerance_input_1': '2' + } + + The entry '1_2_1_choiceinput_1_numtolerance_input_0': '3' is discarded + because it was not inside of a selected binary choice, and no validation + should be performed on numtolerance_inputs inside of non-selected choices. + """ + + # Initialize the two dictionaries that are returned + numtolerance_choices = {} + binary_choices = {} + + # `selected_choices` is a list of binary choices which were "checked/selected" + # when the student submitted the problem. + # Keys in a_dict ending with 'bc' refer to binary choices. + selected_choices = [key for key in a_dict if key.endswith("bc")] + for key in selected_choices: + binary_choices[key] = a_dict[key] + + # Convert the name of a numtolerance_input into the name of the binary + # choice that it is contained within, and append it to the list if + # the numtolerance_input's parent binary_choice is contained in + # `selected_choices`. + selected_numtolerance_inputs = [ + key for key in a_dict if key.partition("_numtolerance_input_")[0] + "bc" + in selected_choices + ] + + for key in selected_numtolerance_inputs: + numtolerance_choices[key] = a_dict[key] + + return (binary_choices, numtolerance_choices) + + def _check_student_choices(self, choices): + """ + Compares student submitted checkbox/radiobutton answers against + the correct answers. Returns True or False. + + True if all of the correct choices are selected and no incorrect + choices are selected. 
+ """ + student_choices = set(choices) + required_selected = len(self.correct_choices - student_choices) == 0 + no_extra_selected = len(student_choices - self.correct_choices) == 0 + correct = required_selected and no_extra_selected + return correct + + def _check_student_inputs(self, numtolerance_inputs): + """ + Compares student submitted numerical answers against the correct + answers and tolerances. + + `numtolerance_inputs` is a dictionary {answer_name : answer_value} + + Performs numerical validation by means of calling + `compare_with_tolerance()` on all of `numtolerance_inputs` + + Performs a call to `compare_with_tolerance` even on values for + decoy_inputs. This is used to validate their numericality and + raise an error if the student entered a non numerical expression. + + Returns True if and only if all student inputs are correct. + """ + + inputs_correct = True + for answer_name, answer_value in numtolerance_inputs.iteritems(): + # If `self.corrrect_inputs` does not contain an entry for + # `answer_name`, this means that answer_name is a decoy + # input's value, and validation of its numericality is the + # only thing of interest from the later call to + # `compare_with_tolerance`. + params = self.correct_inputs.get(answer_name, {'answer': 0}) + + correct_ans = params['answer'] + # Set the tolerance to '0' if it was not specified in the xml + tolerance = params.get('tolerance', '0') + # Make sure that the staff answer is a valid number + try: + correct_ans = complex(correct_ans) + except ValueError: + log.debug( + "Content error--answer" + + "'{0}' is not a valid complex number".format(correct_ans) + ) + raise StudentInputError( + "The Staff answer could not be interpreted as a number." 
+ ) + # Compare the student answer to the staff answer/ or to 0 + # if all that is important is verifying numericality + try: + partial_correct = compare_with_tolerance( + evaluator(dict(), dict(), answer_value), + correct_ans, + tolerance + ) + except: + # Use the traceback-preserving version of re-raising with a + # different type + _, _, trace = sys.exc_info() + + raise StudentInputError( + "Could not interpret '{0}' as a number{1}".format( + cgi.escape(answer_value), + trace + ) + ) + # Ignore the results of the comparisons which were just for + # Numerical Validation. + if answer_name in self.correct_inputs and not partial_correct: + # If any input is not correct, set the return value to False + inputs_correct = False + return inputs_correct + #----------------------------------------------------------------------------- # TEMPORARY: List of all response subclasses @@ -2116,4 +2445,5 @@ __all__ = [CodeResponse, MultipleChoiceResponse, TrueFalseResponse, JavascriptResponse, - AnnotationResponse] + AnnotationResponse, + ChoiceTextResponse] diff --git a/common/lib/capa/capa/templates/choicetext.html b/common/lib/capa/capa/templates/choicetext.html new file mode 100644 index 0000000000..5f587e214a --- /dev/null +++ b/common/lib/capa/capa/templates/choicetext.html @@ -0,0 +1,76 @@ +<% element_checked = False %> +% for choice_id, _ in choices: + <%choice_id = choice_id %> + %if choice_id in value: + <% element_checked = True %> + %endif +%endfor +
+
+
+
+ % if input_type == 'checkbox' or not element_checked: + % if status == 'unsubmitted': + + % elif status == 'correct': + + % elif status == 'incorrect': + + % elif status == 'incomplete': + + % endif + % endif +
+ +
+ % for choice_id, choice_description in choices: + <%choice_id= choice_id %> +
+ % if correctness: + class="choicetextgroup_${correctness}" + % endif + % endif + > + + + % for content_node in choice_description: + % if content_node['type'] == 'text': + + ${content_node['contents']} + + % else: + <% my_id = content_node.get('contents','') %> + <% my_val = value.get(my_id,'') %> + + %endif + + ${content_node['tail_text']} + + + % endfor +

+
+ + % endfor + +
+ + % if show_correctness == "never" and (value or status not in ['unsubmitted']): +
${submitted_message}
+ %endif + +
diff --git a/common/lib/capa/capa/tests/response_xml_factory.py b/common/lib/capa/capa/tests/response_xml_factory.py index 35c12800ae..38c4a00caa 100644 --- a/common/lib/capa/capa/tests/response_xml_factory.py +++ b/common/lib/capa/capa/tests/response_xml_factory.py @@ -779,3 +779,109 @@ class SymbolicResponseXMLFactory(ResponseXMLFactory): def create_input_element(self, **kwargs): return ResponseXMLFactory.textline_input_xml(**kwargs) + + +class ChoiceTextResponseXMLFactory(ResponseXMLFactory): + """ Factory for producing xml """ + + def create_response_element(self, **kwargs): + """ Create a element """ + return etree.Element("choicetextresponse") + + def create_input_element(self, **kwargs): + """ Create a element. + choices can be specified in the following format: + [("true", [{"answer": "5", "tolerance": 0}]), + ("false", [{"answer": "5", "tolerance": 0}]) + ] + + This indicates that the first checkbox/radio is correct and it + contains a numtolerance_input with an answer of 5 and a tolerance of 0 + + It also indicates that the second has a second incorrect radiobutton + or checkbox with a numtolerance_input. + """ + choices = kwargs.get('choices', [("true", {})]) + choice_inputs = [] + # Ensure that the first element of choices is an ordered + # collection. It will start as a list, a tuple, or not a Container. + if type(choices[0]) not in [list, tuple]: + choices = [choices] + + for choice in choices: + correctness, answers = choice + numtolerance_inputs = [] + # If the current `choice` contains any("answer": number) + # elements, turn those into numtolerance_inputs + if answers: + # `answers` will be a list or tuple of answers or a single + # answer, representing the answers for numtolerance_inputs + # inside of this specific choice. + + # Make sure that `answers` is an ordered collection for + # convenience. 
+ if type(answers) not in [list, tuple]: + answers = [answers] + + numtolerance_inputs = [ + self._create_numtolerance_input_element(answer) + for answer in answers + ] + + choice_inputs.append( + self._create_choice_element( + correctness=correctness, + inputs=numtolerance_inputs + ) + ) + # Default type is 'radiotextgroup' + input_type = kwargs.get('type', 'radiotextgroup') + input_element = etree.Element(input_type) + + for ind, choice in enumerate(choice_inputs): + # Give each choice text equal to it's position(0,1,2...) + choice.text = "choice_{0}".format(ind) + input_element.append(choice) + + return input_element + + def _create_choice_element(self, **kwargs): + """ + Creates a choice element for a choictextproblem. + Defaults to a correct choice with no numtolerance_input + """ + text = kwargs.get('text', '') + correct = kwargs.get('correctness', "true") + inputs = kwargs.get('inputs', []) + choice_element = etree.Element("choice") + choice_element.set("correct", correct) + choice_element.text = text + for inp in inputs: + # Add all of the inputs as children of this element + choice_element.append(inp) + + return choice_element + + def _create_numtolerance_input_element(self, params): + """ + Creates a element with optionally + specified tolerance and answer. + """ + answer = params['answer'] if 'answer' in params else None + # If there is not an answer specified, Then create a + # otherwise create a and set its tolerance + # and answer attributes. 
+ if answer: + text_input = etree.Element("numtolerance_input") + text_input.set('answer', answer) + # If tolerance was specified, was specified use it, otherwise + # Set the tolerance to "0" + text_input.set( + 'tolerance', + params['tolerance'] if 'tolerance' in params else "0" + ) + + else: + text_input = etree.Element("decoy_input") + + return text_input diff --git a/common/lib/capa/capa/tests/test_input_templates.py b/common/lib/capa/capa/tests/test_input_templates.py index 00a9b3f6c2..dcab279614 100644 --- a/common/lib/capa/capa/tests/test_input_templates.py +++ b/common/lib/capa/capa/tests/test_input_templates.py @@ -714,3 +714,170 @@ class DragAndDropTemplateTest(TemplateTestCase): # escaping the HTML. We should be able to traverse the XML tree. xpath = "//div[@class='drag_and_drop_problem_json']/p/b" self.assert_has_text(xml, xpath, 'HTML') + + +class ChoiceTextGroupTemplateTest(TemplateTestCase): + """Test mako template for `` input""" + + TEMPLATE_NAME = 'choicetext.html' + VALUE_DICT = {'1_choiceinput_0bc': '1_choiceinput_0bc', '1_choiceinput_0_textinput_0': '0', + '1_choiceinput_1_textinput_0': '0'} + EMPTY_DICT = {'1_choiceinput_0_textinput_0': '', + '1_choiceinput_1_textinput_0': ''} + BOTH_CHOICE_CHECKBOX = {'1_choiceinput_0bc': 'choiceinput_0', + '1_choiceinput_1bc': 'choiceinput_1', + '1_choiceinput_0_textinput_0': '0', + '1_choiceinput_1_textinput_0': '0'} + WRONG_CHOICE_CHECKBOX = {'1_choiceinput_1bc': 'choiceinput_1', + '1_choiceinput_0_textinput_0': '0', + '1_choiceinput_1_textinput_0': '0'} + + def setUp(self): + choices = [('1_choiceinput_0bc', + [{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''}, + {'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_0_textinput_0'}]), + ('1_choiceinput_1bc', [{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''}, + {'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_1_textinput_0'}])] + self.context = {'id': '1', + 'choices': choices, + 
'status': 'correct', + 'input_type': 'radio', + 'value': self.VALUE_DICT} + + super(ChoiceTextGroupTemplateTest, self).setUp() + + def test_grouping_tag(self): + """ + Tests whether we are using a section or a label to wrap choice elements. + Section is used for checkbox, so inputting text does not deselect + """ + input_tags = ('radio', 'checkbox') + self.context['status'] = 'correct' + xpath = "//section[@id='forinput1_choiceinput_0bc']" + + self.context['value'] = {} + for input_type in input_tags: + self.context['input_type'] = input_type + xml = self.render_to_xml(self.context) + self.assert_has_xpath(xml, xpath, self.context) + + def test_problem_marked_correct(self): + """Test conditions under which the entire problem + (not a particular option) is marked correct""" + + self.context['status'] = 'correct' + self.context['input_type'] = 'checkbox' + self.context['value'] = self.VALUE_DICT + + # Should mark the entire problem correct + xml = self.render_to_xml(self.context) + xpath = "//div[@class='indicator_container']/span[@class='correct']" + self.assert_has_xpath(xml, xpath, self.context) + + # Should NOT mark individual options + self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']", + self.context) + + self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']", + self.context) + + def test_problem_marked_incorrect(self): + """Test all conditions under which the entire problem + (not a particular option) is marked incorrect""" + grouping_tags = {'radio': 'label', 'checkbox': 'section'} + conditions = [ + {'status': 'incorrect', 'input_type': 'radio', 'value': {}}, + {'status': 'incorrect', 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX}, + {'status': 'incorrect', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}, + {'status': 'incorrect', 'input_type': 'checkbox', 'value': self.VALUE_DICT}, + {'status': 'incomplete', 'input_type': 'radio', 'value': {}}, + {'status': 'incomplete', 'input_type': 
'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX}, + {'status': 'incomplete', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}, + {'status': 'incomplete', 'input_type': 'checkbox', 'value': self.VALUE_DICT}] + + for test_conditions in conditions: + self.context.update(test_conditions) + xml = self.render_to_xml(self.context) + xpath = "//div[@class='indicator_container']/span[@class='incorrect']" + self.assert_has_xpath(xml, xpath, self.context) + + # Should NOT mark individual options + grouping_tag = grouping_tags[test_conditions['input_type']] + self.assert_no_xpath(xml, + "//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag), + self.context) + + self.assert_no_xpath(xml, + "//{0}[@class='choicetextgroup_correct']".format(grouping_tag), + self.context) + + def test_problem_marked_unsubmitted(self): + """Test all conditions under which the entire problem + (not a particular option) is marked unanswered""" + grouping_tags = {'radio': 'label', 'checkbox': 'section'} + + conditions = [ + {'status': 'unsubmitted', 'input_type': 'radio', 'value': {}}, + {'status': 'unsubmitted', 'input_type': 'radio', 'value': self.EMPTY_DICT}, + {'status': 'unsubmitted', 'input_type': 'checkbox', 'value': {}}, + {'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.EMPTY_DICT}, + {'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.VALUE_DICT}, + {'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}] + + self.context['status'] = 'unanswered' + + for test_conditions in conditions: + self.context.update(test_conditions) + xml = self.render_to_xml(self.context) + xpath = "//div[@class='indicator_container']/span[@class='unanswered']" + self.assert_has_xpath(xml, xpath, self.context) + + # Should NOT mark individual options + grouping_tag = grouping_tags[test_conditions['input_type']] + self.assert_no_xpath(xml, + "//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag), + self.context) + + 
self.assert_no_xpath(xml, + "//{0}[@class='choicetextgroup_correct']".format(grouping_tag), + self.context) + + def test_option_marked_correct(self): + """Test conditions under which a particular option + (not the entire problem) is marked correct.""" + + conditions = [ + {'input_type': 'radio', 'value': self.VALUE_DICT}] + + self.context['status'] = 'correct' + + for test_conditions in conditions: + self.context.update(test_conditions) + xml = self.render_to_xml(self.context) + xpath = "//section[@id='forinput1_choiceinput_0bc' and\ + @class='choicetextgroup_correct']" + self.assert_has_xpath(xml, xpath, self.context) + + # Should NOT mark the whole problem + xpath = "//div[@class='indicator_container']/span" + self.assert_no_xpath(xml, xpath, self.context) + + def test_option_marked_incorrect(self): + """Test conditions under which a particular option + (not the entire problem) is marked incorrect.""" + + conditions = [ + {'input_type': 'radio', 'value': self.VALUE_DICT}] + + self.context['status'] = 'incorrect' + + for test_conditions in conditions: + self.context.update(test_conditions) + xml = self.render_to_xml(self.context) + xpath = "//section[@id='forinput1_choiceinput_0bc' and\ + @class='choicetextgroup_incorrect']" + self.assert_has_xpath(xml, xpath, self.context) + + # Should NOT mark the whole problem + xpath = "//div[@class='indicator_container']/span" + self.assert_no_xpath(xml, xpath, self.context) diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 1b52d41890..48e34dea09 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -860,3 +860,94 @@ class AnnotationInputTest(unittest.TestCase): self.maxDiff = None self.assertDictEqual(context, expected) + + +class TestChoiceText(unittest.TestCase): + """ + Tests for checkboxtextgroup inputs + """ + @staticmethod + def build_choice_element(node_type, contents, tail_text, value): + """ + 
Builds a content node for a choice. + """ + # When xml is being parsed numtolerance_input and decoy_input tags map to textinput type + # in order to provide the template with correct rendering information. + if node_type in ('numtolerance_input', 'decoy_input'): + node_type = 'textinput' + choice = {'type': node_type, 'contents': contents, 'tail_text': tail_text, 'value': value} + return choice + + def check_group(self, tag, choice_tag, expected_input_type): + """ + Build a radio or checkbox group, parse it and check the resuls against the + expected output. + + `tag` should be 'checkboxtextgroup' or 'radiotextgroup' + `choice_tag` is either 'choice' for proper xml, or any other value to trigger an error. + `expected_input_type` is either 'radio' or 'checkbox'. + """ + xml_str = """ + <{tag}> + <{choice_tag} correct="false" name="choiceinput_0">this isfalse + Is a number! + + """.format(tag=tag, choice_tag=choice_tag) + element = etree.fromstring(xml_str) + state = { + 'value': '{}', + 'id': 'choicetext_input', + 'status': 'answered' + } + + first_input = self.build_choice_element('numtolerance_input', 'choiceinput_0_textinput_0', 'false', '') + second_input = self.build_choice_element('decoy_input', 'choiceinput_1_textinput_0', '', '') + first_choice_content = self.build_choice_element('text', 'this is', '', '') + second_choice_content = self.build_choice_element('text', 'Is a number', '', '') + second_choice_text = self.build_choice_element('text', "!", '', '') + + choices = [ + ('choiceinput_0', [first_choice_content, first_input]), + ('choiceinput_1', [second_choice_content, second_input, second_choice_text]) + ] + + expected = { + 'msg': '', + 'input_type': expected_input_type, + 'choices': choices, + 'show_correctness': 'always', + 'submitted_message': 'Answer received.' 
+ } + expected.update(state) + the_input = lookup_tag(tag)(test_system(), element, state) + context = the_input._get_render_context() + self.assertEqual(context, expected) + + def test_radiotextgroup(self): + """ + Test that a properly formatted radiotextgroup problem generates + expected ouputs + """ + self.check_group('radiotextgroup', 'choice', 'radio') + + def test_checkboxtextgroup(self): + """ + Test that a properly formatted checkboxtextgroup problem generates + expected ouput + """ + self.check_group('checkboxtextgroup', 'choice', 'checkbox') + + def test_invalid_tag(self): + """ + Test to ensure that an unrecognized inputtype tag causes an error + """ + with self.assertRaises(Exception): + self.check_group('invalid', 'choice', 'checkbox') + + def test_invalid_input_tag(self): + """ + Test to ensure having a tag other than inside of + a checkbox or radiotextgroup problem raises an error. + """ + with self.assertRaisesRegexp(Exception, "Error in xml"): + self.check_group('checkboxtextgroup', 'invalid', 'checkbox') diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py index 594e2ca629..4353f5615b 100644 --- a/common/lib/capa/capa/tests/test_responsetypes.py +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -1429,3 +1429,286 @@ class AnnotationResponseTest(ResponseTest): msg="%s should be marked %s" % (answer_id, expected_correctness)) self.assertEqual(expected_points, actual_points, msg="%s should have %d points" % (answer_id, expected_points)) + + +class ChoiceTextResponseTest(ResponseTest): + + from response_xml_factory import ChoiceTextResponseXMLFactory + xml_factory_class = ChoiceTextResponseXMLFactory + + one_choice_one_input = lambda itype, inst: inst._make_problem( + ("true", {"answer": "123", "tolerance": "1"}), + itype + ) + + one_choice_two_inputs = lambda itype, inst: inst._make_problem( + [("true", ({"answer": "123", "tolerance": "1"}, + {"answer": "456", "tolerance": "10"})) + ], 
+ itype + ) + + one_input_script = lambda itype, inst: inst._make_problem( + ("true", {"answer": "$computed_response", "tolerance": "1"}), + itype, + "computed_response = math.sqrt(4)" + ) + + one_choice_no_input = lambda itype, inst: inst._make_problem( + ("true", {}), + itype + ) + + two_choices_no_inputs = lambda itype, inst: inst._make_problem( + [("false", {}), ("true", {})], + itype + ) + + two_choices_one_input_1 = lambda itype, inst: inst._make_problem( + [("false", {}), ("true", {"answer": "123", "tolerance": "0"})], + itype + ) + + two_choices_one_input_2 = lambda itype, inst: inst._make_problem( + [("true", {}), ("false", {"answer": "123", "tolerance": "0"})], + itype + ) + + two_choices_two_inputs = lambda itype, inst: inst._make_problem( + [("true", {"answer": "123", "tolerance": "0"}), + ("false", {"answer": "999", "tolerance": "0"})], + itype + ) + + TEST_INPUTS = { + "1_choice_0_input_correct": [(True, [])], + "1_choice_0_input_incorrect": [(False, [])], + "1_choice_0_input_invalid_choice": [(False, []), (True, [])], + "1_choice_1_input_correct": [(True, ["123"])], + "1_input_script_correct": [(True, ["2"])], + "1_input_script_incorrect": [(True, ["3.25"])], + "1_choice_2_inputs_correct": [(True, ["123", "456"])], + "1_choice_2_inputs_tolerance": [(True, ["123 + .5", "456 + 9"])], + "1_choice_2_inputs_1_wrong": [(True, ["0", "456"])], + "1_choice_2_inputs_both_wrong": [(True, ["0", "0"])], + "1_choice_2_inputs_inputs_blank": [(True, ["", ""])], + "1_choice_2_inputs_empty": [(False, [])], + "1_choice_2_inputs_fail_tolerance": [(True, ["123 + 1.5", "456 + 9"])], + "1_choice_1_input_within_tolerance": [(True, ["122.5"])], + "1_choice_1_input_answer_incorrect": [(True, ["345"])], + "1_choice_1_input_choice_incorrect": [(False, ["123"])], + "2_choices_0_inputs_correct": [(False, []), (True, [])], + "2_choices_0_inputs_incorrect": [(True, []), (False, [])], + "2_choices_0_inputs_blank": [(False, []), (False, [])], + "2_choices_1_input_1_correct": [(False, 
[]), (True, ["123"])], + "2_choices_1_input_1_incorrect": [(True, []), (False, ["123"])], + "2_choices_1_input_input_wrong": [(False, []), (True, ["321"])], + "2_choices_1_input_1_blank": [(False, []), (False, [])], + "2_choices_1_input_2_correct": [(True, []), (False, ["123"])], + "2_choices_1_input_2_incorrect": [(False, []), (True, ["123"])], + "2_choices_2_inputs_correct": [(True, ["123"]), (False, [])], + "2_choices_2_inputs_wrong_choice": [(False, ["123"]), (True, [])], + "2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])] + } + + TEST_SCENARIOS = { + "1_choice_0_input_correct": ("1_choice_0_input", "correct"), + "1_choice_0_input_incorrect": ("1_choice_0_input", "incorrect"), + "1_choice_0_input_invalid_choice": ("1_choice_0_input", "incorrect"), + "1_input_script_correct": ("1_input_script", "correct"), + "1_input_script_incorrect": ("1_input_script", "incorrect"), + "1_choice_2_inputs_correct": ("1_choice_2_inputs", "correct"), + "1_choice_2_inputs_tolerance": ("1_choice_2_inputs", "correct"), + "1_choice_2_inputs_1_wrong": ("1_choice_2_inputs", "incorrect"), + "1_choice_2_inputs_both_wrong": ("1_choice_2_inputs", "incorrect"), + "1_choice_2_inputs_inputs_blank": ("1_choice_2_inputs", "incorrect"), + "1_choice_2_inputs_empty": ("1_choice_2_inputs", "incorrect"), + "1_choice_2_inputs_fail_tolerance": ("1_choice_2_inputs", "incorrect"), + "1_choice_1_input_correct": ("1_choice_1_input", "correct"), + "1_choice_1_input_within_tolerance": ("1_choice_1_input", "correct"), + "1_choice_1_input_answer_incorrect": ("1_choice_1_input", "incorrect"), + "1_choice_1_input_choice_incorrect": ("1_choice_1_input", "incorrect"), + "2_choices_0_inputs_correct": ("2_choices_0_inputs", "correct"), + "2_choices_0_inputs_incorrect": ("2_choices_0_inputs", "incorrect"), + "2_choices_0_inputs_blank": ("2_choices_0_inputs", "incorrect"), + "2_choices_1_input_1_correct": ("2_choices_1_input_1", "correct"), + "2_choices_1_input_1_incorrect": ("2_choices_1_input_1", 
"incorrect"), + "2_choices_1_input_input_wrong": ("2_choices_1_input_1", "incorrect"), + "2_choices_1_input_1_blank": ("2_choices_1_input_1", "incorrect"), + "2_choices_1_input_2_correct": ("2_choices_1_input_2", "correct"), + "2_choices_1_input_2_incorrect": ("2_choices_1_input_2", "incorrect"), + "2_choices_2_inputs_correct": ("2_choices_2_inputs", "correct"), + "2_choices_2_inputs_wrong_choice": ("2_choices_2_inputs", "incorrect"), + "2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect") + } + + TEST_PROBLEMS = { + "1_choice_0_input": one_choice_no_input, + "1_choice_1_input": one_choice_one_input, + "1_input_script": one_input_script, + "1_choice_2_inputs": one_choice_two_inputs, + "2_choices_0_inputs": two_choices_no_inputs, + "2_choices_1_input_1": two_choices_one_input_1, + "2_choices_1_input_2": two_choices_one_input_2, + "2_choices_2_inputs": two_choices_two_inputs + } + + def _make_problem(self, choices, in_type='radiotextgroup', script=''): + """ + Convenience method to fill in default values for script and + type if needed, then call self.build_problem + """ + return self.build_problem( + choices=choices, + type=in_type, + script=script + ) + + def _make_answer_dict(self, choice_list): + """ + Convenience method to make generation of answers less tedious, + pass in an iterable argument with elements of the form: [bool, [ans,]] + Will generate an answer dict for those options + """ + + answer_dict = {} + for index, choice_answers_pair in enumerate(choice_list): + # Choice is whether this choice is correct + # Answers contains a list of answers to textinpts for the choice + choice, answers = choice_answers_pair + + if choice: + # Radio/Checkbox inputs in choicetext problems follow + # a naming convention that gives them names ending with "bc" + choice_id = "1_2_1_choiceinput_{index}bc".format(index=index) + choice_value = "choiceinput_{index}".format(index=index) + answer_dict[choice_id] = choice_value + # Build the names for the 
numtolerance_inputs and add their answers + # to `answer_dict`. + for ind, answer in enumerate(answers): + # In `answer_id` `index` represents the ordinality of the + # choice and `ind` represents the ordinality of the + # numtolerance_input inside the parent choice. + answer_id = "1_2_1_choiceinput_{index}_numtolerance_input_{ind}".format( + index=index, + ind=ind + ) + answer_dict[answer_id] = answer + + return answer_dict + + def test_invalid_xml(self): + with self.assertRaises(Exception): + self.build_problem(type="invalidtextgroup") + + def test_valid_xml(self): + self.build_problem() + self.assertTrue(True) + + def test_interpret_error(self): + one_choice_one_input = lambda itype: self._make_problem( + ("true", {"answer": "123", "tolerance": "1"}), + itype + ) + + with self.assertRaisesRegexp(StudentInputError, "Could not interpret"): + self.assert_grade( + one_choice_one_input('radiotextgroup'), + self._make_answer_dict([(True, ["Platypus"])]), + "correct" + ) + + def test_staff_answer_error(self): + broken_problem = self._make_problem( + [("true", {"answer": "Platypus", "tolerance": "0"}), + ("true", {"answer": "edX", "tolerance": "0"}) + ], + "checkboxtextgroup" + ) + with self.assertRaisesRegexp( + StudentInputError, + "The Staff answer could not be interpreted as a number." 
+ ): + self.assert_grade( + broken_problem, + self._make_answer_dict( + [(True, ["1"]), (True, ["1"])] + ), + "correct" + ) + + def test_radio_grades(self): + + for name, inputs in self.TEST_INPUTS.iteritems(): + submission = self._make_answer_dict(inputs) + problem_name, correctness = self.TEST_SCENARIOS[name] + problem = self.TEST_PROBLEMS[problem_name] + + self.assert_grade( + problem('radiotextgroup', self), + submission, + correctness, + msg="{0} should be {1}".format( + name, + correctness + ) + ) + + def test_checkbox_grades(self): + scenarios = { + "2_choices_correct": ("checkbox_two_choices", "correct"), + "2_choices_incorrect": ("checkbox_two_choices", "incorrect"), + "2_choices_2_inputs_correct": ( + "checkbox_2_choices_2_inputs", + "correct" + ), + + "2_choices_2_inputs_missing_choice": ( + "checkbox_2_choices_2_inputs", + "incorrect" + ), + + "2_choices_2_inputs_wrong_input": ( + "checkbox_2_choices_2_inputs", + "incorrect" + ) + } + inputs = { + "2_choices_correct": [(True, []), (True, [])], + "2_choices_incorrect": [(True, []), (False, [])], + "2_choices_2_inputs_correct": [(True, ["123"]), (True, ["456"])], + "2_choices_2_inputs_missing_choice": [ + (True, ["123"]), (False, ["456"]) + ], + "2_choices_2_inputs_wrong_input": [ + (True, ["123"]), (True, ["654"]) + ] + } + + checkbox_two_choices = self._make_problem( + [("true", {}), ("true", {})], "checkboxtextgroup" + ) + checkbox_two_choices_two_inputs = self._make_problem( + [("true", {"answer": "123", "tolerance": "0"}), + ("true", {"answer": "456", "tolerance": "0"}) + ], + "checkboxtextgroup" + ) + + problems = { + "checkbox_two_choices": checkbox_two_choices, + "checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs + } + problems.update(self.TEST_PROBLEMS) + + for name, inputs in inputs.iteritems(): + submission = self._make_answer_dict(inputs) + problem_name, correctness = scenarios[name] + problem = problems[problem_name] + + self.assert_grade( + problem, + submission, + correctness, + 
msg="{0} should be {1}".format(name, correctness) + ) diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index 51c1a396c3..d2a20675a5 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -776,6 +776,13 @@ class CapaModule(CapaFields, XModule): then the output dict would contain {'1': ['test'] } (the value is a list). + Some other inputs such as ChoiceTextInput expect a dict of values in the returned + dict If the key ends with '{}' then we will assume that the value is a json + encoded dict and deserialize it. + For example, if the `data` dict contains {'input_1{}': '{"1_2_1": 1}'} + then the output dict would contain {'1': {"1_2_1": 1} } + (the value is a dictionary) + Raises an exception if: -A key in the `data` dictionary does not contain at least one underscore @@ -802,11 +809,21 @@ class CapaModule(CapaFields, XModule): # the same form input (e.g. checkbox inputs). The convention is that # if the name ends with '[]' (which looks like an array), then the # answer will be an array. 
+ # if the name ends with '{}' (Which looks like a dict), + # then the answer will be a dict is_list_key = name.endswith('[]') - name = name[:-2] if is_list_key else name + is_dict_key = name.endswith('{}') + name = name[:-2] if is_list_key or is_dict_key else name if is_list_key: val = data.getlist(key) + elif is_dict_key: + try: + val = json.loads(data[key]) + except(KeyError, ValueError): + # Send this information along to be reported by + # The grading method + val = {"error": "error"} else: val = data[key] diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index c326c79b76..9e6826242f 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -929,4 +929,32 @@ section.problem { } } } + .choicetextgroup{ + input[type="text"]{ + margin-bottom: 0.5em; + } + @extend .choicegroup; + + label.choicetextgroup_correct, section.choicetextgroup_correct{ + @extend label.choicegroup_correct; + + input[type="text"] { + border-color: green; + } + } + + label.choicetextgroup_incorrect, section.choicetextgroup_incorrect{ + @extend label.choicegroup_incorrect; + } + + label.choicetextgroup_show_correct, section.choicetextgroup_show_correct{ + &:after{ + content: url('../images/correct-icon.png'); + margin-left:15px; + } + } + span.mock_label{ + cursor : default; + } + } } diff --git a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee index 1efaa6c852..bca89b0dea 100644 --- a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee +++ b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee @@ -223,6 +223,58 @@ describe 'Problem', -> expect($('label[for="input_1_1_3"]')).toHaveAttr 'correct_answer', 'true' expect($('label[for="input_1_2_1"]')).not.toHaveAttr 'correct_answer', 'true' + describe 'radio text question', -> + radio_text_xml=''' +
+

+ +
+
+ +
+
+
+ + +

+ +
+ + +

+
+
+ + +

+
+
+
+''' + beforeEach -> + # Append a radiotextresponse problem to the problem, so we can check it's javascript functionality + @problem.el.prepend(radio_text_xml) + + it 'sets the correct class on the section for the correct choice', -> + spyOn($, 'postWithPrefix').andCallFake (url, callback) -> + callback answers: "1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3" + @problem.show() + + expect($('#forinput1_2_1_choiceinput_0bc').attr('class')).toEqual( + 'choicetextgroup_show_correct') + expect($('#answer_1_2_1_choiceinput_0bc').text()).toEqual('3') + expect($('#answer_1_2_1_choiceinput_1bc').text()).toEqual('') + expect($('#answer_1_2_1_choiceinput_2bc').text()).toEqual('') + + it 'Should not disable input fields', -> + spyOn($, 'postWithPrefix').andCallFake (url, callback) -> + callback answers: "1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3" + @problem.show() + expect($('input#1_2_1_choiceinput_0bc').attr('disabled')).not.toEqual('disabled') + expect($('input#1_2_1_choiceinput_1bc').attr('disabled')).not.toEqual('disabled') + expect($('input#1_2_1_choiceinput_2bc').attr('disabled')).not.toEqual('disabled') + expect($('input#1_2_1').attr('disabled')).not.toEqual('disabled') + describe 'when the answers are already shown', -> beforeEach -> @problem.el.addClass 'showed' diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee index b7dbf6864d..601fb749ac 100644 --- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee @@ -403,6 +403,14 @@ class @Problem answer = JSON.parse(answers[answer_id]) display.showAnswer(answer) + choicetextgroup: (element, display, answers) => + element = $(element) + + input_id = element.attr('id').replace(/inputtype_/,'') + answer = answers[input_id] + for choice in answer + element.find("section#forinput#{choice}").addClass 'choicetextgroup_show_correct' + inputtypeHideAnswerMethods: 
choicegroup: (element, display) => element = $(element) @@ -410,3 +418,7 @@ class @Problem javascriptinput: (element, display) => display.hideAnswer() + + choicetextgroup: (element, display) => + element = $(element) + element.find("section[id^='forinput']").removeClass('choicetextgroup_show_correct') diff --git a/common/static/js/capa/choicetextinput.js b/common/static/js/capa/choicetextinput.js new file mode 100644 index 0000000000..4d7540f938 --- /dev/null +++ b/common/static/js/capa/choicetextinput.js @@ -0,0 +1,75 @@ +(function () { + var update = function () { + // Whenever a value changes create a new serialized version of this + // problem's inputs and set the hidden input fields value to equal it. + var parent = $(this).closest('.problems-wrapper'); + // find the closest parent problems-wrapper and use that as the problem + // grab the input id from the input + // real_input is the hidden input field + var real_input = $('input.choicetextvalue', parent); + var all_inputs = $('.choicetextinput .ctinput', parent); + var user_inputs = {}; + $(all_inputs).each(function (index, elt) { + var node = $(elt); + var name = node.attr('id'); + var val = node.val(); + var radio_value = node.attr('value'); + var type = node.attr('type'); + var is_checked = node.attr('checked'); + if (type === "radio" || type === "checkbox") { + if (is_checked === "checked" || is_checked === "true") { + user_inputs[name] = radio_value; + } + } else { + user_inputs[name] = val; + } + }); + var val_string = JSON.stringify(user_inputs); + //this is what gets submitted as the answer, we deserialize it later + real_input.val(val_string); + }; + + var check_parent = function (event) { + // This looks for the containing choice of a textinput + // and sets it to be checked. 
+ var elt = $(event.target); + var parent_container = elt.closest('section[id^="forinput"]'); + var choice = parent_container.find("input[type='checkbox'], input[type='radio']"); + choice.attr("checked", "checked"); + choice.change(); + //need to check it then trigger the change event + }; + + var imitate_label = function (event) { + // This causes a section to check and uncheck + // a radiobutton/checkbox whenever a user clicks on it + // If the button/checkbox is disabled, nothing happens + var elt = $(event.target); + var parent_container = elt.closest('section[id^="forinput"]'); + var choice = parent_container.find("input[type='checkbox'], input[type='radio']"); + if (choice.attr("type") === "radio") { + choice.attr("checked", "checked"); + } else { + if (choice.attr('checked')) { + choice.prop("checked", false); + } else { + choice.prop("checked", true); + } + + } + choice.change(); + update(); + + }; + var choices = $('.mock_label'); + var inputs = $('.choicetextinput .ctinput'); + var text_inputs = $('.choicetextinput .ctinput[type="text"]'); + // update on load + inputs.each(update); + // and on every change + // This allows text inside of choices to behave as if they were part of + // a label for the choice's button/checkbox + choices.click(imitate_label); + inputs.bind("change", update); + text_inputs.click(check_parent); +}).call(this); diff --git a/lms/djangoapps/courseware/features/problems.feature b/lms/djangoapps/courseware/features/problems.feature index 4a5e64e9f4..fe6a695475 100644 --- a/lms/djangoapps/courseware/features/problems.feature +++ b/lms/djangoapps/courseware/features/problems.feature @@ -21,6 +21,8 @@ Feature: Answer problems | formula | | script | | code | + | radio_text | + | checkbox_text | Scenario: I can answer a problem incorrectly Given External graders respond "incorrect" @@ -40,6 +42,8 @@ Feature: Answer problems | formula | | script | | code | + | radio_text | + | checkbox_text | Scenario: I can submit a blank answer Given I 
am viewing a "" problem @@ -57,6 +61,8 @@ Feature: Answer problems | numerical | | formula | | script | + | radio_text | + | checkbox_text | Scenario: I can reset a problem @@ -84,6 +90,10 @@ Feature: Answer problems | formula | incorrect | | script | correct | | script | incorrect | + | radio_text | correct | + | radio_text | incorrect | + | checkbox_text | correct | + | checkbox_text | incorrect | Scenario: I can answer a problem with one attempt correctly and not reset diff --git a/lms/djangoapps/courseware/features/problems_setup.py b/lms/djangoapps/courseware/features/problems_setup.py index 6086d7fa5e..aacdec90f8 100644 --- a/lms/djangoapps/courseware/features/problems_setup.py +++ b/lms/djangoapps/courseware/features/problems_setup.py @@ -18,7 +18,7 @@ from capa.tests.response_xml_factory import OptionResponseXMLFactory, \ ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \ StringResponseXMLFactory, NumericalResponseXMLFactory, \ FormulaResponseXMLFactory, CustomResponseXMLFactory, \ - CodeResponseXMLFactory + CodeResponseXMLFactory, ChoiceTextResponseXMLFactory from nose.tools import assert_true @@ -131,6 +131,32 @@ PROBLEM_DICT = { 'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', }, 'correct': ['span.correct'], 'incorrect': ['span.incorrect'], + 'unanswered': ['span.unanswered']}, + + 'radio_text': { + 'factory': ChoiceTextResponseXMLFactory(), + 'kwargs': { + 'question_text': 'The correct answer is Choice 0 and input 8', + 'type': 'radiotextgroup', + 'choices': [("true", {"answer": "8", "tolerance": "1"}), + ("false", {"answer": "8", "tolerance": "1"}) + ] + }, + 'correct': ['section.choicetextgroup_correct'], + 'incorrect': ['span.incorrect', 'section.choicetextgroup_incorrect'], + 'unanswered': ['span.unanswered']}, + + 'checkbox_text': { + 'factory': ChoiceTextResponseXMLFactory(), + 'kwargs': { + 'question_text': 'The correct answer is Choice 0 and input 8', + 'type': 'checkboxtextgroup', + 'choices': [("true", {"answer": 
"8", "tolerance": "1"}), + ("false", {"answer": "8", "tolerance": "1"}) + ] + }, + 'correct': ['span.correct'], + 'incorrect': ['span.incorrect'], 'unanswered': ['span.unanswered']} } @@ -196,6 +222,19 @@ def answer_problem(problem_type, correctness): # (configured in the problem XML above) pass + elif problem_type == 'radio_text' or problem_type == 'checkbox_text': + + input_value = "8" if correctness == 'correct' else "5" + choice = "choiceinput_0bc" if correctness == 'correct' else "choiceinput_1bc" + world.css_check(inputfield(problem_type, choice=choice)) + world.css_fill( + inputfield( + problem_type, + choice="choiceinput_0_numtolerance_input_0" + ), + input_value + ) + def problem_has_answer(problem_type, answer_class): if problem_type == "drop down": @@ -244,6 +283,17 @@ def problem_has_answer(problem_type, answer_class): expected = "x^2+2*x+y" if answer_class == 'correct' else 'x^2' assert_textfield('formula', expected) + elif problem_type in ("radio_text", "checkbox_text"): + if answer_class == 'blank': + expected = ('', '') + assert_choicetext_values(problem_type, (), expected) + elif answer_class == 'incorrect': + expected = ('5', '') + assert_choicetext_values(problem_type, ["choiceinput_1bc"], expected) + else: + expected = ('8', '') + assert_choicetext_values(problem_type, ["choiceinput_0bc"], expected) + else: # The other response types use random data, # which would be difficult to check @@ -292,6 +342,12 @@ def inputfield(problem_type, choice=None, input_num=1): sel = ("input#input_i4x-edx-model_course-problem-%s_2_%s" % (problem_type.replace(" ", "_"), str(input_num))) + # this is necessary due to naming requirement for this problem type + if problem_type in ("radio_text", "checkbox_text"): + sel = "input#i4x-edx-model_course-problem-{0}_2_{1}".format( + problem_type.replace(" ", "_"), str(input_num) + ) + if choice is not None: base = "_choice_" if problem_type == "multiple choice" else "_" sel = sel + base + str(choice) @@ -325,3 +381,28 @@ 
def assert_checked(problem_type, choices): def assert_textfield(problem_type, expected_text, input_num=1): element_value = world.css_value(inputfield(problem_type, input_num=input_num)) assert element_value == expected_text + + +def assert_choicetext_values(problem_type, choices, expected_values): + """ + Asserts that only the given choices are checked, and given + text fields have a desired value + """ + + all_choices = ['choiceinput_0bc', 'choiceinput_1bc'] + all_inputs = [ + "choiceinput_0_numtolerance_input_0", + "choiceinput_1_numtolerance_input_0" + ] + for this_choice in all_choices: + element = world.css_find(inputfield(problem_type, choice=this_choice)) + + if this_choice in choices: + assert element.checked + else: + assert not element.checked + + for (name, expected) in zip(all_inputs, expected_values): + element = world.css_find(inputfield(problem_type, name)) + # Remove any trailing spaces that may have been added + assert element.value.strip() == expected From 4d880db1b551641e82d30b5550825190e284ede4 Mon Sep 17 00:00:00 2001 From: RobertMarks Date: Thu, 4 Jul 2013 21:30:59 -0700 Subject: [PATCH 16/31] Updated behavior for split_answer_dict, get_score, and check_student_inputs (responsetypes.py) --- common/lib/capa/capa/inputtypes.py | 19 +- common/lib/capa/capa/responsetypes.py | 2 - .../capa/capa/tests/response_xml_factory.py | 6 +- .../lib/capa/capa/tests/test_responsetypes.py | 193 ++++++++++++------ common/lib/xmodule/xmodule/capa_module.py | 7 +- .../courseware/features/problems_setup.py | 3 +- 6 files changed, 150 insertions(+), 80 deletions(-) diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index f26909b633..29800a211b 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -460,10 +460,10 @@ class JSInput(InputTypeBase): DO NOT USE! HAS NOT BEEN TESTED BEYOND 700X PROBLEMS, AND MAY CHANGE IN BACKWARDS-INCOMPATIBLE WAYS. Inputtype for general javascript inputs. 
Intended to be used with - customresponse. + customresponse. Loads in a sandboxed iframe to help prevent css and js conflicts between - frame and top-level window. - + frame and top-level window. + iframe sandbox whitelist: - allow-scripts - allow-popups @@ -474,9 +474,9 @@ class JSInput(InputTypeBase): window elements. Example: - See the documentation in the /doc/public folder for more information. @@ -500,7 +500,7 @@ class JSInput(InputTypeBase): Attribute('width', "400"), # iframe width Attribute('height', "300")] # iframe height - + def _extra_context(self): context = { @@ -510,11 +510,12 @@ class JSInput(InputTypeBase): return context - + registry.register(JSInput) #----------------------------------------------------------------------------- + class TextLine(InputTypeBase): """ A text line input. Can do math preview if "math"="1" is specified. @@ -1373,8 +1374,6 @@ registry.register(AnnotationInput) class ChoiceTextGroup(InputTypeBase): """ Groups of radiobutton/checkboxes with text inputs. - Allows for a "not enough information" option to be added - to problems with numerical answers. Examples: RadioButton problem diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index 51bae0b215..f99518c8ce 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -2256,7 +2256,6 @@ class ChoiceTextResponse(LoncapaResponse): binary_choices, numtolerance_inputs = self._split_answers_dict(answer_dict) # Check the binary choices first. choices_correct = self._check_student_choices(binary_choices) - inputs_correct = True inputs_correct = self._check_student_inputs(numtolerance_inputs) # Only return correct if the student got both the binary # and numtolerance_inputs are correct @@ -2376,7 +2375,6 @@ class ChoiceTextResponse(LoncapaResponse): Returns True if and only if all student inputs are correct. 
""" - inputs_correct = True for answer_name, answer_value in numtolerance_inputs.iteritems(): # If `self.corrrect_inputs` does not contain an entry for diff --git a/common/lib/capa/capa/tests/response_xml_factory.py b/common/lib/capa/capa/tests/response_xml_factory.py index 38c4a00caa..4c015d6699 100644 --- a/common/lib/capa/capa/tests/response_xml_factory.py +++ b/common/lib/capa/capa/tests/response_xml_factory.py @@ -857,15 +857,15 @@ class ChoiceTextResponseXMLFactory(ResponseXMLFactory): choice_element.set("correct", correct) choice_element.text = text for inp in inputs: - # Add all of the inputs as children of this element + # Add all of the inputs as children of this choice choice_element.append(inp) return choice_element def _create_numtolerance_input_element(self, params): """ - Creates a element with optionally - specified tolerance and answer. + Creates a or element with + optionally specified tolerance and answer. """ answer = params['answer'] if 'answer' in params else None # If there is not an answer specified, Then create a diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py index 4353f5615b..2a15145579 100644 --- a/common/lib/capa/capa/tests/test_responsetypes.py +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -1432,54 +1432,15 @@ class AnnotationResponseTest(ResponseTest): class ChoiceTextResponseTest(ResponseTest): + """ + Class containing setup and tests for ChoiceText responsetype. 
+ """ from response_xml_factory import ChoiceTextResponseXMLFactory xml_factory_class = ChoiceTextResponseXMLFactory - one_choice_one_input = lambda itype, inst: inst._make_problem( - ("true", {"answer": "123", "tolerance": "1"}), - itype - ) - - one_choice_two_inputs = lambda itype, inst: inst._make_problem( - [("true", ({"answer": "123", "tolerance": "1"}, - {"answer": "456", "tolerance": "10"})) - ], - itype - ) - - one_input_script = lambda itype, inst: inst._make_problem( - ("true", {"answer": "$computed_response", "tolerance": "1"}), - itype, - "computed_response = math.sqrt(4)" - ) - - one_choice_no_input = lambda itype, inst: inst._make_problem( - ("true", {}), - itype - ) - - two_choices_no_inputs = lambda itype, inst: inst._make_problem( - [("false", {}), ("true", {})], - itype - ) - - two_choices_one_input_1 = lambda itype, inst: inst._make_problem( - [("false", {}), ("true", {"answer": "123", "tolerance": "0"})], - itype - ) - - two_choices_one_input_2 = lambda itype, inst: inst._make_problem( - [("true", {}), ("false", {"answer": "123", "tolerance": "0"})], - itype - ) - - two_choices_two_inputs = lambda itype, inst: inst._make_problem( - [("true", {"answer": "123", "tolerance": "0"}), - ("false", {"answer": "999", "tolerance": "0"})], - itype - ) - + # `TEST_INPUTS` is a dictionary mapping from + # test_name to a representation of inputs for a test problem. TEST_INPUTS = { "1_choice_0_input_correct": [(True, [])], "1_choice_0_input_incorrect": [(False, [])], @@ -1511,6 +1472,10 @@ class ChoiceTextResponseTest(ResponseTest): "2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])] } + # `TEST_SCENARIOS` is a dictionary of the form + # {Test_Name" : (Test_Problem_name, correctness)} + # correctness represents whether the problem should be graded as + # correct or incorrect when the test is run. 
TEST_SCENARIOS = { "1_choice_0_input_correct": ("1_choice_0_input", "correct"), "1_choice_0_input_incorrect": ("1_choice_0_input", "incorrect"), @@ -1542,15 +1507,53 @@ class ChoiceTextResponseTest(ResponseTest): "2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect") } - TEST_PROBLEMS = { - "1_choice_0_input": one_choice_no_input, - "1_choice_1_input": one_choice_one_input, - "1_input_script": one_input_script, - "1_choice_2_inputs": one_choice_two_inputs, - "2_choices_0_inputs": two_choices_no_inputs, - "2_choices_1_input_1": two_choices_one_input_1, - "2_choices_1_input_2": two_choices_one_input_2, - "2_choices_2_inputs": two_choices_two_inputs + # Dictionary that maps from problem_name to arguments for + # _make_problem, that will create the problem. + TEST_PROBLEM_ARGS = { + "1_choice_0_input": {"choices": ("true", {}), "script": ''}, + "1_choice_1_input": { + "choices": ("true", {"answer": "123", "tolerance": "1"}), + "script": '' + }, + + "1_input_script": { + "choices": ("true", {"answer": "$computed_response", "tolerance": "1"}), + "script": "computed_response = math.sqrt(4)" + }, + + "1_choice_2_inputs": { + "choices": [ + ( + "true", ( + {"answer": "123", "tolerance": "1"}, + {"answer": "456", "tolerance": "10"} + ) + ) + ], + "script": '' + }, + "2_choices_0_inputs": { + "choices": [("false", {}), ("true", {})], + "script": '' + + }, + "2_choices_1_input_1": { + "choices": [ + ("false", {}), ("true", {"answer": "123", "tolerance": "0"}) + ], + "script": '' + }, + "2_choices_1_input_2": { + "choices": [("true", {}), ("false", {"answer": "123", "tolerance": "0"})], + "script": '' + }, + "2_choices_2_inputs": { + "choices": [ + ("true", {"answer": "123", "tolerance": "0"}), + ("false", {"answer": "999", "tolerance": "0"}) + ], + "script": '' + } } def _make_problem(self, choices, in_type='radiotextgroup', script=''): @@ -1598,26 +1601,69 @@ class ChoiceTextResponseTest(ResponseTest): return answer_dict def test_invalid_xml(self): + """ + Test 
that build problem raises errors for invalid options + """ with self.assertRaises(Exception): self.build_problem(type="invalidtextgroup") def test_valid_xml(self): + """ + Test that `build_problem` builds valid xml + """ self.build_problem() self.assertTrue(True) + def test_unchecked_input_not_validated(self): + """ + Test that a student can have a non numeric answer in an unselected + choice without causing an error to be raised when the problem is + checked. + """ + + two_choice_two_input = self._make_problem( + [ + ("true", {"answer": "123", "tolerance": "1"}), + ("false", {}) + ], + "checkboxtextgroup" + ) + + self.assert_grade( + two_choice_two_input, + self._make_answer_dict([(True, ["1"]), (False, ["Platypus"])]), + "incorrect" + ) + def test_interpret_error(self): - one_choice_one_input = lambda itype: self._make_problem( - ("true", {"answer": "123", "tolerance": "1"}), - itype + """ + Test that student answers that cannot be interpeted as numbers + cause the response type to raise an error. + """ + two_choice_two_input = self._make_problem( + [ + ("true", {"answer": "123", "tolerance": "1"}), + ("false", {}) + ], + "checkboxtextgroup" ) with self.assertRaisesRegexp(StudentInputError, "Could not interpret"): + # Test that error is raised for input in selected correct choice. self.assert_grade( - one_choice_one_input('radiotextgroup'), + two_choice_two_input, self._make_answer_dict([(True, ["Platypus"])]), "correct" ) + with self.assertRaisesRegexp(StudentInputError, "Could not interpret"): + # Test that error is raised for input in selected incorrect choice. 
+ self.assert_grade( + two_choice_two_input, + self._make_answer_dict([(True, ["1"]), (True, ["Platypus"])]), + "correct" + ) + def test_staff_answer_error(self): broken_problem = self._make_problem( [("true", {"answer": "Platypus", "tolerance": "0"}), @@ -1638,14 +1684,26 @@ class ChoiceTextResponseTest(ResponseTest): ) def test_radio_grades(self): + """ + Test that confirms correct operation of grading when the inputtag is + radiotextgroup. + """ for name, inputs in self.TEST_INPUTS.iteritems(): + # Turn submission into the form expected when grading this problem. submission = self._make_answer_dict(inputs) + # Lookup the problem_name, and the whether this test problem + # and inputs should be graded as correct or incorrect. problem_name, correctness = self.TEST_SCENARIOS[name] - problem = self.TEST_PROBLEMS[problem_name] - + # Load the args needed to build the problem for this test. + problem_args = self.TEST_PROBLEM_ARGS[problem_name] + test_choices = problem_args["choices"] + test_script = problem_args["script"] + # Build the actual problem for the test. + test_problem = self._make_problem(test_choices, 'radiotextgroup', test_script) + # Make sure the actual grade matches the expected grade. self.assert_grade( - problem('radiotextgroup', self), + test_problem, submission, correctness, msg="{0} should be {1}".format( @@ -1655,9 +1713,16 @@ class ChoiceTextResponseTest(ResponseTest): ) def test_checkbox_grades(self): + """ + Test that confirms correct operation of grading when the inputtag is + checkboxtextgroup. 
+ """ + # Dictionary from name of test_scenario to (problem_name, correctness) + # Correctness is used to test whether the problem was graded properly scenarios = { "2_choices_correct": ("checkbox_two_choices", "correct"), "2_choices_incorrect": ("checkbox_two_choices", "incorrect"), + "2_choices_2_inputs_correct": ( "checkbox_2_choices_2_inputs", "correct" @@ -1673,6 +1738,7 @@ class ChoiceTextResponseTest(ResponseTest): "incorrect" ) } + # Dictionary scenario_name: test_inputs inputs = { "2_choices_correct": [(True, []), (True, [])], "2_choices_incorrect": [(True, []), (False, [])], @@ -1685,9 +1751,11 @@ class ChoiceTextResponseTest(ResponseTest): ] } + # Two choice zero input problem with both choices being correct. checkbox_two_choices = self._make_problem( [("true", {}), ("true", {})], "checkboxtextgroup" ) + # Two choice two input problem with both choices correct. checkbox_two_choices_two_inputs = self._make_problem( [("true", {"answer": "123", "tolerance": "0"}), ("true", {"answer": "456", "tolerance": "0"}) @@ -1695,17 +1763,20 @@ class ChoiceTextResponseTest(ResponseTest): "checkboxtextgroup" ) + # Dictionary problem_name: problem problems = { "checkbox_two_choices": checkbox_two_choices, "checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs } - problems.update(self.TEST_PROBLEMS) for name, inputs in inputs.iteritems(): submission = self._make_answer_dict(inputs) + # Load the test problem's name and desired correctness problem_name, correctness = scenarios[name] + # Load the problem problem = problems[problem_name] + # Make sure the actual grade matches the expected grade self.assert_grade( problem, submission, diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index d2a20675a5..d76b62dc06 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -820,10 +820,11 @@ class CapaModule(CapaFields, XModule): elif is_dict_key: try: val = 
json.loads(data[key]) + # If the submission wasn't deserializable, raise an error. except(KeyError, ValueError): - # Send this information along to be reported by - # The grading method - val = {"error": "error"} + raise ValueError( + u"Invalid submission: {val} for {key}".format(val=data[key], key=key) + ) else: val = data[key] diff --git a/lms/djangoapps/courseware/features/problems_setup.py b/lms/djangoapps/courseware/features/problems_setup.py index aacdec90f8..67dfbf0dc5 100644 --- a/lms/djangoapps/courseware/features/problems_setup.py +++ b/lms/djangoapps/courseware/features/problems_setup.py @@ -388,8 +388,9 @@ def assert_choicetext_values(problem_type, choices, expected_values): Asserts that only the given choices are checked, and given text fields have a desired value """ - + # Names of the radio buttons or checkboxes all_choices = ['choiceinput_0bc', 'choiceinput_1bc'] + # Names of the numtolerance_inputs all_inputs = [ "choiceinput_0_numtolerance_input_0", "choiceinput_1_numtolerance_input_0" From d17486a9c7c0d1acb0f35be99f5f45b713f06f2f Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 18 Jul 2013 14:39:52 -0400 Subject: [PATCH 17/31] Update default handling --- .../xmodule/combined_open_ended_module.py | 193 ++++++++++++++---- 1 file changed, 154 insertions(+), 39 deletions(-) diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index 7d3a35c084..df8c7e716c 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -29,36 +29,124 @@ VERSION_TUPLES = { DEFAULT_VERSION = 1 DEFAULT_DATA = textwrap.dedent("""\ - + + +

Censorship in the Libraries

+ +

'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author +

+ +

 Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.

+ +
+ - - - Category 1 - - - - + + + Ideas + + + + + + + + + Content + + + + + + + + + Organization + + + + + + + + Style + + + + + + + + Voice + + + + + + - -

Why is the sky blue?

-
- - - - - - - Enter essay here. - This is the answer. - {"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"} - - - -
+ + + + + + + + + Enter essay here. + This is the answer. + {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} + + + + + + + + Enter essay here. + This is the answer. + {"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} + + + + +
""") @@ -159,17 +247,44 @@ class CombinedOpenEndedFields(object): markdown = String( help="Markdown source of this module", default=textwrap.dedent("""\ - [rubric] - + Category 1 - - The response does not incorporate what is needed for a one response. - - The response is correct for category 1. - [rubric] - [prompt] -

Why is the sky blue?

- [prompt] - [tasks] - (Self), ({1-2}AI) - [tasks] + [prompt] +

Censorship in the Libraries

+ +

'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author +

+ +

 Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.

+ [prompt] + [rubric] + + Ideas + - Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus. + - Attempts a main idea. Sometimes loses focus or ineffectively displays focus. + - Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task. + - Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task. + + Content + - Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic. + - Includes little information and few or no details. Explores only one or two facets of the topic. + - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic. + - Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic. + + Organization + - Ideas organized illogically, transitions weak, and response difficult to follow. + - Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions. + - Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions. + + Style + - Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns. + - Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns). + - Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences. + + Voice + - Demonstrates language and tone that may be inappropriate to task and reader. + - Demonstrates an attempt to adjust language and tone to task and reader. 
+ - Demonstrates effective adjustment of language and tone to task and reader. + [rubric] + [tasks] + (Self), ({4-12}AI), ({9-12}Peer) + [tasks] + """), scope=Scope.settings ) From 480e97a3fdfe4250738e24c35ae02a928494ed13 Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 18 Jul 2013 14:55:59 -0400 Subject: [PATCH 18/31] Cleanup and change attribute names --- .../xmodule/combined_open_ended_module.py | 55 ++++++------------- .../combined_open_ended_modulev1.py | 38 ++----------- .../xmodule/xmodule/peer_grading_module.py | 39 +++++-------- 3 files changed, 34 insertions(+), 98 deletions(-) diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index df8c7e716c..e01ae49149 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -13,7 +13,7 @@ import textwrap log = logging.getLogger("mitx.courseware") -V1_SETTINGS_ATTRIBUTES = ["display_name", "attempts", "is_graded", "accept_file_upload", +V1_SETTINGS_ATTRIBUTES = ["display_name", "max_attempts", "graded", "accept_file_upload", "skip_spelling_checks", "due", "graceperiod", "weight"] V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state", @@ -172,7 +172,7 @@ class CombinedOpenEndedFields(object): display_name = String( display_name="Display Name", help="This name appears in the horizontal navigation at the top of the page.", - default="Open Ended Grading", + default="Open Response Assessment", scope=Scope.settings ) current_task_number = Integer( @@ -189,6 +189,12 @@ class CombinedOpenEndedFields(object): default="initial", scope=Scope.user_state ) + graded = Boolean( + display_name="Graded", + help='Defines whether the student gets credit for grading this problem.', + default=False, + scope=Scope.settings + ) student_attempts = Integer( help="Number of attempts taken by the student on this problem", default=0, @@ -199,19 +205,13 @@ class 
CombinedOpenEndedFields(object): default=False, scope=Scope.user_state ) - attempts = Integer( + max_attempts = Integer( display_name="Maximum Attempts", help="The number of times the student can try to answer this problem.", default=1, scope=Scope.settings, values={"min" : 1 } ) - is_graded = Boolean( - display_name="Graded", - help="Whether or not the problem is graded.", - default=False, - scope=Scope.settings - ) accept_file_upload = Boolean( display_name="Allow File Uploads", help="Whether or not the student can submit files as a response.", @@ -339,37 +339,9 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule): def __init__(self, *args, **kwargs): """ - Definition file should have one or many task blocks, a rubric block, and a prompt block: + Definition file should have one or many task blocks, a rubric block, and a prompt block. - Sample file: - - - Blah blah rubric. - - - Some prompt. - - - - - What hint about this problem would you give to someone? - - - Save Succcesful. Thanks for participating! - - - - - - - Enter essay here. - This is the answer. - {"grader_settings" : "ml_grading.conf", - "problem_id" : "6.002x/Welcome/OETest"} - - - - + See DEFAULT_DATA for a sample. 
""" XModule.__init__(self, *args, **kwargs) @@ -450,6 +422,11 @@ class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor): js_module_name = "OpenEndedMarkdownEditingDescriptor" css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/combinedopenended/edit.scss')]} + metadata_translations = { + 'is_graded': 'graded', + 'attempts': 'max_attempts', + } + def get_context(self): _context = RawDescriptor.get_context(self) _context.update({'markdown': self.markdown, diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index a9feca24f0..fbc846ecd3 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -78,37 +78,7 @@ class CombinedOpenEndedV1Module(): instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs): """ - Definition file should have one or many task blocks, a rubric block, and a prompt block: - - Sample file: - - - Blah blah rubric. - - - Some prompt. - - - - - What hint about this problem would you give to someone? - - - Save Succcesful. Thanks for participating! - - - - - - - Enter essay here. - This is the answer. - {"grader_settings" : "ml_grading.conf", - "problem_id" : "6.002x/Welcome/OETest"} - - - - + Definition file should have one or many task blocks, a rubric block, and a prompt block. See DEFAULT_DATA in combined_open_ended_module for a sample. 
""" @@ -131,8 +101,8 @@ class CombinedOpenEndedV1Module(): # Allow reset is true if student has failed the criteria to move to the next child task self.ready_to_reset = instance_state.get('ready_to_reset', False) - self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS) - self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT + self.max_attempts = self.instance_state.get('max_attempts', MAX_ATTEMPTS) + self.is_scored = self.instance_state.get('graded', IS_SCORED) in TRUE_DICT self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT @@ -153,7 +123,7 @@ class CombinedOpenEndedV1Module(): # Static data is passed to the child modules to render self.static_data = { 'max_score': self._max_score, - 'max_attempts': self.attempts, + 'max_attempts': self.max_attempts, 'prompt': definition['prompt'], 'rubric': definition['rubric'], 'display_name': self.display_name, diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index 0df47758dc..8f5f93318a 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -19,32 +19,27 @@ from django.utils.timezone import UTC log = logging.getLogger(__name__) -USE_FOR_SINGLE_LOCATION = False -LINK_TO_LOCATION = "" -MAX_SCORE = 1 -IS_GRADED = False EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff." - class PeerGradingFields(object): use_for_single_location = Boolean( display_name="Show Single Problem", help='When True, only the single problem specified by "Link to Problem Location" is shown. 
' 'When False, a panel is displayed with all problems available for peer grading.', - default=USE_FOR_SINGLE_LOCATION, + default=False, scope=Scope.settings ) link_to_location = String( display_name="Link to Problem Location", help='The location of the problem being graded. Only used when "Show Single Problem" is True.', - default=LINK_TO_LOCATION, + default="", scope=Scope.settings ) - is_graded = Boolean( + graded = Boolean( display_name="Graded", help='Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.', - default=IS_GRADED, + default=False, scope=Scope.settings ) due_date = Date( @@ -56,12 +51,6 @@ class PeerGradingFields(object): default=None, scope=Scope.settings ) - max_grade = Integer( - help="The maximum grade that a student can receive for this problem.", - default=MAX_SCORE, - scope=Scope.settings, - values={"min": 0} - ) student_data_for_location = Dict( help="Student data for a given peer grading problem.", scope=Scope.user_state @@ -136,10 +125,6 @@ class PeerGradingModule(PeerGradingFields, XModule): if not self.ajax_url.endswith("/"): self.ajax_url = self.ajax_url + "/" - # Integer could return None, so keep this check. 
- if not isinstance(self.max_grade, int): - raise TypeError("max_grade needs to be an integer.") - def closed(self): return self._closed(self.timeinfo) @@ -232,7 +217,7 @@ class PeerGradingModule(PeerGradingFields, XModule): 'score': score, 'total': max_score, } - if not self.use_for_single_location or not self.is_graded: + if not self.use_for_single_location or not self.graded: return score_dict try: @@ -253,7 +238,7 @@ class PeerGradingModule(PeerGradingFields, XModule): self.student_data_for_location = response score = int(count_graded >= count_required and count_graded > 0) * float(weight) - total = self.max_grade * float(weight) + total = float(weight) score_dict['score'] = score score_dict['total'] = total @@ -266,8 +251,8 @@ class PeerGradingModule(PeerGradingFields, XModule): randomization, and 5/7 on another ''' max_grade = None - if self.use_for_single_location and self.is_graded: - max_grade = self.max_grade + if self.use_for_single_location and self.graded: + max_grade = self.weight return max_grade def get_next_submission(self, data): @@ -634,9 +619,13 @@ class PeerGradingDescriptor(PeerGradingFields, RawDescriptor): #Specify whether or not to pass in open ended interface needs_open_ended_interface = True + metadata_translations = { + 'is_graded': 'graded', + 'attempts': 'max_attempts', + } + @property def non_editable_metadata_fields(self): non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields - non_editable_fields.extend([PeerGradingFields.due_date, PeerGradingFields.grace_period_string, - PeerGradingFields.max_grade]) + non_editable_fields.extend([PeerGradingFields.due_date, PeerGradingFields.grace_period_string]) return non_editable_fields From 84c4b7f1390721704ba0269c5e20af12fa4cf91f Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 18 Jul 2013 15:08:52 -0400 Subject: [PATCH 19/31] Test fixes --- cms/djangoapps/contentstore/tests/test_contentstore.py | 2 +- .../combined_open_ended_modulev1.py | 4 ++-- 
common/lib/xmodule/xmodule/tests/test_combined_open_ended.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py index 500db414f4..3e1b046a98 100644 --- a/cms/djangoapps/contentstore/tests/test_contentstore.py +++ b/cms/djangoapps/contentstore/tests/test_contentstore.py @@ -134,7 +134,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.check_components_on_page(ADVANCED_COMPONENT_TYPES, ['Video Alpha', 'Word cloud', 'Annotation', - 'Open Ended Grading', + 'Open Response Assessment', 'Peer Grading Interface']) def test_advanced_components_require_two_clicks(self): diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index fbc846ecd3..4d406ddd83 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -614,14 +614,14 @@ class CombinedOpenEndedV1Module(): return self.out_of_sync_error(data) self.student_attempts +=1 - if self.student_attempts >= self.attempts: + if self.student_attempts >= self.max_attempts: return { 'success': False, # This is a student_facing_error 'error': ( 'You have attempted this question {0} times. ' 'You are only allowed to attempt it {1} times.' 
- ).format(self.student_attempts, self.attempts) + ).format(self.student_attempts, self.max_attempts) } self.state = self.INITIAL self.ready_to_reset = False diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index af1b6aa12b..0a14608edc 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -335,7 +335,7 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): 's3_interface': test_util_open_ended.S3_INTERFACE, 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, 'skip_basic_checks': False, - 'is_graded': True, + 'graded': True, } oeparam = etree.XML(''' From 0b991f9bbd2bc6d809527a31f4c7d3bc7548397d Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 18 Jul 2013 15:19:13 -0400 Subject: [PATCH 20/31] Fix reset and score tests --- .../combined_open_ended_modulev1.py | 18 ++++++++++-------- .../xmodule/tests/test_combined_open_ended.py | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index 4d406ddd83..933eb0b5bb 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -101,14 +101,14 @@ class CombinedOpenEndedV1Module(): # Allow reset is true if student has failed the criteria to move to the next child task self.ready_to_reset = instance_state.get('ready_to_reset', False) - self.max_attempts = self.instance_state.get('max_attempts', MAX_ATTEMPTS) - self.is_scored = self.instance_state.get('graded', IS_SCORED) in TRUE_DICT - self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT - 
self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT + self.max_attempts = instance_state.get('max_attempts', MAX_ATTEMPTS) + self.is_scored = instance_state.get('graded', IS_SCORED) in TRUE_DICT + self.accept_file_upload = instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT + self.skip_basic_checks = instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT - due_date = self.instance_state.get('due', None) + due_date = instance_state.get('due', None) - grace_period_string = self.instance_state.get('graceperiod', None) + grace_period_string = instance_state.get('graceperiod', None) try: self.timeinfo = TimeInfo(due_date, grace_period_string) except Exception: @@ -613,8 +613,9 @@ class CombinedOpenEndedV1Module(): if not self.ready_to_reset: return self.out_of_sync_error(data) - self.student_attempts +=1 - if self.student_attempts >= self.max_attempts: + if self.student_attempts >= self.max_attempts-1: + if self.student_attempts==self.max_attempts-1: + self.student_attempts +=1 return { 'success': False, # This is a student_facing_error @@ -623,6 +624,7 @@ class CombinedOpenEndedV1Module(): 'You are only allowed to attempt it {1} times.' 
).format(self.student_attempts, self.max_attempts) } + self.student_attempts +=1 self.state = self.INITIAL self.ready_to_reset = False for i in xrange(0, len(self.task_xml)): diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index 0a14608edc..7d369a8429 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -453,7 +453,7 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): self.assertFalse(changed) def test_get_score_realistic(self): - instance_state = r"""{"ready_to_reset": false, "skip_spelling_checks": true, "current_task_number": 1, "weight": 5.0, "graceperiod": "1 day 12 hours 59 minutes 59 seconds", "is_graded": "True", "task_states": ["{\"child_created\": false, \"child_attempts\": 4, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the table below . starting mass g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . 
after reading the groups procedure , describe what additional information you would need in order to replicate the expe\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. 
Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"\", \"post_assessment\": \"[3]\", \"score\": 3}], \"max_score\": 3, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the table below . starting mass g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . 
after reading the groups procedure , describe what additional information you would need in order to replicate the expe\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"{\\\"submission_id\\\": 3098, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the table below . starting mass g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3235, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. 
Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"{\\\"submission_id\\\": 3099, \\\"score\\\": 3, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"to replicate the experiment , the procedure would require more detail . one piece of information that is omitted is the amount of vinegar used in the experiment . it is also important to know what temperature the experiment was kept at during the hours . finally , the procedure needs to include details about the experiment , for example if the whole sample must be submerged .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3237, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality3\\\"}\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"{\\\"submission_id\\\": 3100, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"e the mass of four different samples . pour vinegar in each of four separate , but identical , containers . place a sample of one material into one container and label . repeat with remaining samples , placing a single sample into a single container . 
after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the table below . \\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3239, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"\", \"post_assessment\": \"{\\\"submission_id\\\": 3101, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"invalid essay .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3241, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}], \"max_score\": 3, \"child_state\": \"done\"}"], "attempts": "10000", "student_attempts": 0, "due": null, "state": "done", "accept_file_upload": false, "display_name": "Science Question -- Machine Assessed"}""" + instance_state = r"""{"ready_to_reset": false, "skip_spelling_checks": true, "current_task_number": 1, "weight": 5.0, "graceperiod": "1 day 12 hours 59 minutes 59 seconds", "graded": "True", "task_states": ["{\"child_created\": false, \"child_attempts\": 4, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", 
\\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the table below . starting mass g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the groups procedure , describe what additional information you would need in order to replicate the expe\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. 
Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"\", \"post_assessment\": \"[3]\", \"score\": 3}], \"max_score\": 3, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the table below . starting mass g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . 
after reading the groups procedure , describe what additional information you would need in order to replicate the expe\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"{\\\"submission_id\\\": 3098, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the table below . starting mass g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3235, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. 
Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"{\\\"submission_id\\\": 3099, \\\"score\\\": 3, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"to replicate the experiment , the procedure would require more detail . one piece of information that is omitted is the amount of vinegar used in the experiment . it is also important to know what temperature the experiment was kept at during the hours . finally , the procedure needs to include details about the experiment , for example if the whole sample must be submerged .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3237, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality3\\\"}\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"{\\\"submission_id\\\": 3100, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"e the mass of four different samples . pour vinegar in each of four separate , but identical , containers . place a sample of one material into one container and label . repeat with remaining samples , placing a single sample into a single container . 
after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the table below . \\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3239, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}, {\"answer\": \"\", \"post_assessment\": \"{\\\"submission_id\\\": 3101, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"invalid essay .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3241, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"Response Quality0\\\"}\", \"score\": 0}], \"max_score\": 3, \"child_state\": \"done\"}"], "attempts": "10000", "student_attempts": 0, "due": null, "state": "done", "accept_file_upload": false, "display_name": "Science Question -- Machine Assessed"}""" instance_state = json.loads(instance_state) rubric = """ From f0c9aa3916f448bacf32848d15f1d2d421cbe638 Mon Sep 17 00:00:00 2001 From: Sarina Canelake Date: Mon, 1 Jul 2013 12:27:11 -0400 Subject: [PATCH 21/31] Provide `set_many` methods for Lms and Mongo KeyValueStores Refactor new set_many and update XBlock version number. 
--- .../xmodule/xmodule/modulestore/mongo/base.py | 8 ++ .../xmodule/modulestore/tests/factories.py | 3 + lms/djangoapps/courseware/model_data.py | 58 ++++++++++++- .../courseware/tests/test_model_data.py | 81 +++++++++++++++++-- requirements/edx/github.txt | 2 +- 5 files changed, 143 insertions(+), 9 deletions(-) diff --git a/common/lib/xmodule/xmodule/modulestore/mongo/base.py b/common/lib/xmodule/xmodule/modulestore/mongo/base.py index ab63243aaf..7fc903623a 100644 --- a/common/lib/xmodule/xmodule/modulestore/mongo/base.py +++ b/common/lib/xmodule/xmodule/modulestore/mongo/base.py @@ -105,6 +105,14 @@ class MongoKeyValueStore(KeyValueStore): else: raise InvalidScopeError(key.scope) + def set_many(self, update_dict): + """set_many method. Implementations should accept an `update_dict` of + key-value pairs, and set all the `keys` to the given `value`s.""" + # It appears that `set` simply updates an in-memory db, rather than calling down + # to a real db; need to figure out if this is the case. 
+ for key, value in update_dict.iteritems(): + self.set(key, value) + def delete(self, key): if key.scope == Scope.children: self._children = [] diff --git a/common/lib/xmodule/xmodule/modulestore/tests/factories.py b/common/lib/xmodule/xmodule/modulestore/tests/factories.py index be705149f3..e442944805 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/factories.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/factories.py @@ -53,6 +53,9 @@ class XModuleCourseFactory(Factory): for k, v in kwargs.iteritems(): setattr(new_course, k, v) + # Save the data we've just created before we update mongo datastore + new_course.save() + # Update the data in the mongo datastore store.save_xmodule(new_course) return new_course diff --git a/lms/djangoapps/courseware/model_data.py b/lms/djangoapps/courseware/model_data.py index 790f1fd721..dec4805ced 100644 --- a/lms/djangoapps/courseware/model_data.py +++ b/lms/djangoapps/courseware/model_data.py @@ -12,9 +12,14 @@ from .models import ( XModuleStudentPrefsField, XModuleStudentInfoField ) +import logging + +from django.db import DatabaseError from xblock.runtime import KeyValueStore, InvalidScopeError -from xblock.core import Scope +from xblock.core import KeyValueMultiSaveError, Scope + +log = logging.getLogger(__name__) class InvalidWriteError(Exception): @@ -244,7 +249,7 @@ class ModelDataCache(object): module_state_key=key.block_scope_id.url(), defaults={'state': json.dumps({}), 'module_type': key.block_scope_id.category, - }, + }, ) elif key.scope == Scope.content: field_object, _ = XModuleContentField.objects.get_or_create( @@ -345,6 +350,55 @@ class LmsKeyValueStore(KeyValueStore): field_object.save() + def set_many(self, kv_dict): + """ + Provide a bulk save mechanism. 
+ + `kv_dict`: A dictionary of dirty fields that maps + xblock.DbModel._key : value + + """ + saved_fields = [] + # field_objects maps a field_object to a list of associated fields + field_objects = dict() + for field in kv_dict: + # check field for validity + if field.field_name in self._descriptor_model_data: + raise InvalidWriteError("Not allowed to overwrite descriptor model data", field.field_name) + + if field.scope not in self._allowed_scopes: + raise InvalidScopeError(field.scope) + + # if the field is valid + field_object = self._model_data_cache.find_or_create(field) + # if this field_object isn't already in the dictionary + # add it + if field_object not in field_objects.keys(): + field_objects[field_object] = [] + # update the list of associated fields + field_objects[field_object].append(field) + + # special case when scope is for the user state + if field.scope == Scope.user_state: + state = json.loads(field_object.state) + state[field.field_name] = kv_dict[field] + field_object.state = json.dumps(state) + else: + # The remaining scopes save fields on different rows, so + # we don't have to worry about conflicts + field_object.value = json.dumps(kv_dict[field]) + + for field_object in field_objects: + try: + # Save the field object that we made above + field_object.save() + # If save is successful on this scope, add the saved fields to + # the list of successful saves + saved_fields.extend([field.field_name for field in field_objects[field_object]]) + except DatabaseError: + log.error('Error saving fields %r', field_objects[field_object]) + raise KeyValueMultiSaveError(saved_fields) + def delete(self, key): if key.field_name in self._descriptor_model_data: raise InvalidWriteError("Not allowed to deleted descriptor model data", key.field_name) diff --git a/lms/djangoapps/courseware/tests/test_model_data.py b/lms/djangoapps/courseware/tests/test_model_data.py index e961f80939..07aaa66473 100644 --- a/lms/djangoapps/courseware/tests/test_model_data.py 
+++ b/lms/djangoapps/courseware/tests/test_model_data.py @@ -1,5 +1,5 @@ import json -from mock import Mock +from mock import Mock, patch from functools import partial from courseware.model_data import LmsKeyValueStore, InvalidWriteError @@ -15,6 +15,8 @@ from courseware.tests.factories import StudentPrefsFactory, StudentInfoFactory from xblock.core import Scope, BlockScope from xmodule.modulestore import Location from django.test import TestCase +from django.db import DatabaseError +from xblock.core import KeyValueMultiSaveError def mock_field(scope, name): @@ -93,7 +95,7 @@ class TestStudentModuleStorage(TestCase): def setUp(self): self.desc_md = {} - student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value'})) + student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value', 'b_field': 'b_value'})) self.user = student_module.student self.mdc = ModelDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user) self.kvs = LmsKeyValueStore(self.desc_md, self.mdc) @@ -110,13 +112,13 @@ class TestStudentModuleStorage(TestCase): "Test that setting an existing user_state field changes the value" self.kvs.set(user_state_key('a_field'), 'new_value') self.assertEquals(1, StudentModule.objects.all().count()) - self.assertEquals({'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state)) + self.assertEquals({'b_field': 'b_value', 'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state)) def test_set_missing_field(self): "Test that setting a new user_state field changes the value" self.kvs.set(user_state_key('not_a_field'), 'new_value') self.assertEquals(1, StudentModule.objects.all().count()) - self.assertEquals({'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state)) + self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state)) def 
test_delete_existing_field(self): "Test that deleting an existing field removes it from the StudentModule" @@ -128,7 +130,7 @@ class TestStudentModuleStorage(TestCase): "Test that deleting a missing field from an existing StudentModule raises a KeyError" self.assertRaises(KeyError, self.kvs.delete, user_state_key('not_a_field')) self.assertEquals(1, StudentModule.objects.all().count()) - self.assertEquals({'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state)) + self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state)) def test_has_existing_field(self): "Test that `has` returns True for existing fields in StudentModules" @@ -138,6 +140,35 @@ class TestStudentModuleStorage(TestCase): "Test that `has` returns False for missing fields in StudentModule" self.assertFalse(self.kvs.has(user_state_key('not_a_field'))) + def construct_kv_dict(self): + """ construct a kv_dict that can be passed to set_many """ + key1 = user_state_key('field_a') + key2 = user_state_key('field_b') + new_value = 'new value' + newer_value = 'newer value' + return {key1: new_value, key2: newer_value} + + def test_set_many(self): + """Test setting many fields that are scoped to Scope.user_state """ + kv_dict = self.construct_kv_dict() + self.kvs.set_many(kv_dict) + + for key in kv_dict: + self.assertEquals(self.kvs.get(key), kv_dict[key]) + + def test_set_many_failure(self): + """Test failures when setting many fields that are scoped to Scope.user_state """ + kv_dict = self.construct_kv_dict() + # because we're patching the underlying save, we need to ensure the + # fields are in the cache + for key in kv_dict: + self.kvs.set(key, 'test_value') + + with patch('django.db.models.Model.save', side_effect=DatabaseError): + with self.assertRaises(KeyValueMultiSaveError) as exception_context: + self.kvs.set_many(kv_dict) + self.assertEquals(len(exception_context.exception.saved_field_names), 0) + class 
TestMissingStudentModule(TestCase): def setUp(self): @@ -176,6 +207,10 @@ class TestMissingStudentModule(TestCase): class StorageTestBase(object): + """ + A base class for that gets subclassed when testing each of the scopes. + + """ factory = None scope = None key_factory = None @@ -188,7 +223,10 @@ class StorageTestBase(object): else: self.user = UserFactory.create() self.desc_md = {} - self.mdc = ModelDataCache([mock_descriptor([mock_field(self.scope, 'existing_field')])], course_id, self.user) + self.mock_descriptor = mock_descriptor([ + mock_field(self.scope, 'existing_field'), + mock_field(self.scope, 'other_existing_field')]) + self.mdc = ModelDataCache([self.mock_descriptor], course_id, self.user) self.kvs = LmsKeyValueStore(self.desc_md, self.mdc) def test_set_and_get_existing_field(self): @@ -234,6 +272,37 @@ class StorageTestBase(object): "Test that `has` return False for an existing Storage Field" self.assertFalse(self.kvs.has(self.key_factory('missing_field'))) + def construct_kv_dict(self): + key1 = self.key_factory('existing_field') + key2 = self.key_factory('other_existing_field') + new_value = 'new value' + newer_value = 'newer value' + return {key1: new_value, key2: newer_value} + + def test_set_many(self): + """Test that setting many regular fields at the same time works""" + kv_dict = self.construct_kv_dict() + + self.kvs.set_many(kv_dict) + for key in kv_dict: + self.assertEquals(self.kvs.get(key), kv_dict[key]) + + def test_set_many_failure(self): + """Test that setting many regular fields with a DB error """ + kv_dict = self.construct_kv_dict() + for key in kv_dict: + self.kvs.set(key, 'test value') + + with patch('django.db.models.Model.save', side_effect=[None, DatabaseError]): + with self.assertRaises(KeyValueMultiSaveError) as exception_context: + self.kvs.set_many(kv_dict) + + exception = exception_context.exception + self.assertEquals(len(exception.saved_field_names), 1) + self.assertEquals(exception.saved_field_names[0], 
'existing_field') + + + class TestSettingsStorage(StorageTestBase, TestCase): factory = SettingsFactory diff --git a/requirements/edx/github.txt b/requirements/edx/github.txt index f64568dc10..62a44f1307 100644 --- a/requirements/edx/github.txt +++ b/requirements/edx/github.txt @@ -8,6 +8,6 @@ -e git://github.com/eventbrite/zendesk.git@d53fe0e81b623f084e91776bcf6369f8b7b63879#egg=zendesk # Our libraries: --e git+https://github.com/edx/XBlock.git@4d8735e883#egg=XBlock +-e git+https://github.com/edx/XBlock.git@0b71db6ee6f9b216d0dd85cd76b55f2354b93dff#egg=XBlock -e git+https://github.com/edx/codejail.git@0a1b468#egg=codejail -e git+https://github.com/edx/diff-cover.git@v0.1.3#egg=diff_cover From 2a4976cf8a46046fe411da7c35cb2c7ce01532ef Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 18 Jul 2013 15:36:42 -0400 Subject: [PATCH 22/31] Cleanup how peer grading handles due date --- .../xmodule/xmodule/peer_grading_module.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index 8f5f93318a..ff5d41dd8a 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -42,7 +42,7 @@ class PeerGradingFields(object): default=False, scope=Scope.settings ) - due_date = Date( + due = Date( help="Due date that should be displayed.", default=None, scope=Scope.settings) @@ -100,25 +100,25 @@ class PeerGradingModule(PeerGradingFields, XModule): if self.use_for_single_location: try: self.linked_problem = modulestore().get_instance(self.system.course_id, self.link_to_location) - except: + except Exception: log.error("Linked location {0} for peer grading module {1} does not exist".format( self.link_to_location, self.location)) raise - due_date = self.linked_problem._model_data.get('peer_grading_due', None) + due_date = self.linked_problem._model_data.get('due', None) if due_date: 
self._model_data['due'] = due_date try: - self.timeinfo = TimeInfo(self.due_date, self.grace_period_string) - except: - log.error("Error parsing due date information in location {0}".format(location)) + self.timeinfo = TimeInfo(self.due, self.grace_period_string) + except Exception: + log.error("Error parsing due date information in location {0}".format(self.location)) raise self.display_due_date = self.timeinfo.display_due_date try: self.student_data_for_location = json.loads(self.student_data_for_location) - except: + except Exception: pass self.ajax_url = self.system.ajax_url @@ -532,7 +532,7 @@ class PeerGradingModule(PeerGradingFields, XModule): problem_location = problem['location'] descriptor = _find_corresponding_module_for_location(problem_location) if descriptor: - problem['due'] = descriptor._model_data.get('peer_grading_due', None) + problem['due'] = descriptor._model_data.get('due', None) grace_period_string = descriptor._model_data.get('graceperiod', None) try: problem_timeinfo = TimeInfo(problem['due'], grace_period_string) @@ -622,10 +622,11 @@ class PeerGradingDescriptor(PeerGradingFields, RawDescriptor): metadata_translations = { 'is_graded': 'graded', 'attempts': 'max_attempts', + 'due_data' : 'due' } @property def non_editable_metadata_fields(self): non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields - non_editable_fields.extend([PeerGradingFields.due_date, PeerGradingFields.grace_period_string]) + non_editable_fields.extend([PeerGradingFields.due, PeerGradingFields.grace_period_string]) return non_editable_fields From 3f9431e8cff9ba0ad0cf2308e3290334cd8624fa Mon Sep 17 00:00:00 2001 From: Diana Huang Date: Wed, 3 Jul 2013 14:38:22 -0400 Subject: [PATCH 23/31] Provide implicit saves for XBlocks and XModules. Update existing tests and provide new ones to test new paradigm. 
--- CHANGELOG.rst | 2 + .../contentstore/tests/test_checklists.py | 2 + .../contentstore/tests/test_contentstore.py | 13 ++++ .../tests/test_course_settings.py | 65 ++++++++++++++++ .../contentstore/tests/test_textbooks.py | 6 ++ cms/djangoapps/contentstore/views/course.py | 17 ++++- cms/djangoapps/contentstore/views/item.py | 3 + cms/djangoapps/contentstore/views/preview.py | 9 ++- cms/djangoapps/contentstore/views/tabs.py | 3 + .../models/settings/course_details.py | 4 + .../models/settings/course_grading.py | 45 +++++++---- .../models/settings/course_metadata.py | 7 ++ common/djangoapps/xmodule_modifiers.py | 15 ++++ .../xmodule/xmodule/modulestore/mongo/base.py | 13 +++- .../xmodule/modulestore/tests/django_utils.py | 21 +++-- .../xmodule/modulestore/tests/factories.py | 4 - common/lib/xmodule/xmodule/modulestore/xml.py | 4 + .../xmodule/tests/test_combined_open_ended.py | 6 +- .../xmodule/xmodule/tests/test_conditional.py | 7 +- lms/djangoapps/courseware/model_data.py | 37 +++------ lms/djangoapps/courseware/module_render.py | 76 ++++++++++++------- .../courseware/tests/test_model_data.py | 30 ++++++-- .../courseware/tests/test_module_render.py | 61 ++++++++++++++- lms/djangoapps/courseware/views.py | 2 + requirements/edx/github.txt | 2 +- 25 files changed, 349 insertions(+), 105 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 04c8a5baae..13015d6ce6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,8 @@ Common: Added *experimental* support for jsinput type. Common: Added setting to specify Celery Broker vhost +Common: Utilize new XBlock bulk save API in LMS and CMS. + Studio: Add table for tracking course creator permissions (not yet used). Update rake django-admin[syncdb] and rake django-admin[migrate] so they run for both LMS and CMS. 
diff --git a/cms/djangoapps/contentstore/tests/test_checklists.py b/cms/djangoapps/contentstore/tests/test_checklists.py index 99ffb8678d..02999f6567 100644 --- a/cms/djangoapps/contentstore/tests/test_checklists.py +++ b/cms/djangoapps/contentstore/tests/test_checklists.py @@ -46,6 +46,8 @@ class ChecklistTestCase(CourseTestCase): # Now delete the checklists from the course and verify they get repopulated (for courses # created before checklists were introduced). self.course.checklists = None + # Save the changed `checklists` to the underlying KeyValueStore before updating the modulestore + self.course.save() modulestore = get_modulestore(self.course.location) modulestore.update_metadata(self.course.location, own_metadata(self.course)) self.assertEqual(self.get_persisted_checklists(), None) diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py index 500db414f4..839175f04d 100644 --- a/cms/djangoapps/contentstore/tests/test_contentstore.py +++ b/cms/djangoapps/contentstore/tests/test_contentstore.py @@ -87,6 +87,8 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.user.is_active = True # Staff has access to view all courses self.user.is_staff = True + + # Save the data that we've just changed to the db. self.user.save() self.client = Client() @@ -117,6 +119,10 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): course.advanced_modules = component_types + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + course.save() + store.update_metadata(course.location, own_metadata(course)) # just pick one vertical @@ -239,6 +245,9 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.assertNotIn('graceperiod', own_metadata(html_module)) html_module.lms.graceperiod = new_graceperiod + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. 
+ html_module.save() self.assertIn('graceperiod', own_metadata(html_module)) self.assertEqual(html_module.lms.graceperiod, new_graceperiod) @@ -883,6 +892,9 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): # add a bool piece of unknown metadata so we can verify we don't throw an exception metadata['new_metadata'] = True + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + course.save() module_store.update_metadata(location, metadata) print 'Exporting to tempdir = {0}'.format(root_dir) @@ -1299,6 +1311,7 @@ class ContentStoreTest(ModuleStoreTestCase): # now let's define an override at the leaf node level # new_module.lms.graceperiod = timedelta(1) + new_module.save() module_store.update_metadata(new_module.location, own_metadata(new_module)) # flush the cache and refetch diff --git a/cms/djangoapps/contentstore/tests/test_course_settings.py b/cms/djangoapps/contentstore/tests/test_course_settings.py index 44eb16436d..0862eb462d 100644 --- a/cms/djangoapps/contentstore/tests/test_course_settings.py +++ b/cms/djangoapps/contentstore/tests/test_course_settings.py @@ -290,6 +290,71 @@ class CourseGradingTest(CourseTestCase): altered_grader = CourseGradingModel.update_grader_from_json(test_grader.course_location, test_grader.graders[1]) self.assertDictEqual(test_grader.graders[1], altered_grader, "drop_count[1] + 2") + def test_update_cutoffs_from_json(self): + test_grader = CourseGradingModel.fetch(self.course.location) + CourseGradingModel.update_cutoffs_from_json(test_grader.course_location, test_grader.grade_cutoffs) + # Unlike other tests, need to actually perform a db fetch for this test since update_cutoffs_from_json + # simply returns the cutoffs you send into it, rather than returning the db contents. 
+ altered_grader = CourseGradingModel.fetch(self.course.location) + self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "Noop update") + + test_grader.grade_cutoffs['D'] = 0.3 + CourseGradingModel.update_cutoffs_from_json(test_grader.course_location, test_grader.grade_cutoffs) + altered_grader = CourseGradingModel.fetch(self.course.location) + self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff add D") + + test_grader.grade_cutoffs['Pass'] = 0.75 + CourseGradingModel.update_cutoffs_from_json(test_grader.course_location, test_grader.grade_cutoffs) + altered_grader = CourseGradingModel.fetch(self.course.location) + self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff change 'Pass'") + + def test_delete_grace_period(self): + test_grader = CourseGradingModel.fetch(self.course.location) + CourseGradingModel.update_grace_period_from_json(test_grader.course_location, test_grader.grace_period) + # update_grace_period_from_json doesn't return anything, so query the db for its contents. + altered_grader = CourseGradingModel.fetch(self.course.location) + self.assertEqual(test_grader.grace_period, altered_grader.grace_period, "Noop update") + + test_grader.grace_period = {'hours': 15, 'minutes': 5, 'seconds': 30} + CourseGradingModel.update_grace_period_from_json(test_grader.course_location, test_grader.grace_period) + altered_grader = CourseGradingModel.fetch(self.course.location) + self.assertDictEqual(test_grader.grace_period, altered_grader.grace_period, "Adding in a grace period") + + test_grader.grace_period = {'hours': 1, 'minutes': 10, 'seconds': 0} + # Now delete the grace period + CourseGradingModel.delete_grace_period(test_grader.course_location) + # update_grace_period_from_json doesn't return anything, so query the db for its contents. 
+ altered_grader = CourseGradingModel.fetch(self.course.location) + # Once deleted, the grace period should simply be None + self.assertEqual(None, altered_grader.grace_period, "Delete grace period") + + def test_update_section_grader_type(self): + # Get the descriptor and the section_grader_type and assert they are the default values + descriptor = get_modulestore(self.course.location).get_item(self.course.location) + section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location) + + self.assertEqual('Not Graded', section_grader_type['graderType']) + self.assertEqual(None, descriptor.lms.format) + self.assertEqual(False, descriptor.lms.graded) + + # Change the default grader type to Homework, which should also mark the section as graded + CourseGradingModel.update_section_grader_type(self.course.location, {'graderType': 'Homework'}) + descriptor = get_modulestore(self.course.location).get_item(self.course.location) + section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location) + + self.assertEqual('Homework', section_grader_type['graderType']) + self.assertEqual('Homework', descriptor.lms.format) + self.assertEqual(True, descriptor.lms.graded) + + # Change the grader type back to Not Graded, which should also unmark the section as graded + CourseGradingModel.update_section_grader_type(self.course.location, {'graderType': 'Not Graded'}) + descriptor = get_modulestore(self.course.location).get_item(self.course.location) + section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location) + + self.assertEqual('Not Graded', section_grader_type['graderType']) + self.assertEqual(None, descriptor.lms.format) + self.assertEqual(False, descriptor.lms.graded) + class CourseMetadataEditingTest(CourseTestCase): """ diff --git a/cms/djangoapps/contentstore/tests/test_textbooks.py b/cms/djangoapps/contentstore/tests/test_textbooks.py index 02c64e9413..a21a1b1023 100644 --- 
a/cms/djangoapps/contentstore/tests/test_textbooks.py +++ b/cms/djangoapps/contentstore/tests/test_textbooks.py @@ -62,6 +62,9 @@ class TextbookIndexTestCase(CourseTestCase): } ] self.course.pdf_textbooks = content + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + self.course.save() store = get_modulestore(self.course.location) store.update_metadata(self.course.location, own_metadata(self.course)) @@ -220,6 +223,9 @@ class TextbookByIdTestCase(CourseTestCase): 'tid': 2, }) self.course.pdf_textbooks = [self.textbook1, self.textbook2] + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + self.course.save() self.store = get_modulestore(self.course.location) self.store.update_metadata(self.course.location, own_metadata(self.course)) self.url_nonexist = reverse('textbook_by_id', kwargs={ diff --git a/cms/djangoapps/contentstore/views/course.py b/cms/djangoapps/contentstore/views/course.py index 0e16624c42..3791e6779a 100644 --- a/cms/djangoapps/contentstore/views/course.py +++ b/cms/djangoapps/contentstore/views/course.py @@ -1,10 +1,9 @@ """ Views related to operations on course objects """ -#pylint: disable=W0402 import json import random -import string +import string # pylint: disable=W0402 from django.contrib.auth.decorators import login_required from django_future.csrf import ensure_csrf_cookie @@ -496,6 +495,9 @@ def textbook_index(request, org, course, name): if not any(tab['type'] == 'pdf_textbooks' for tab in course_module.tabs): course_module.tabs.append({"type": "pdf_textbooks"}) course_module.pdf_textbooks = textbooks + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. 
+ course_module.save() store.update_metadata(course_module.location, own_metadata(course_module)) return JsonResponse(course_module.pdf_textbooks) else: @@ -542,6 +544,9 @@ def create_textbook(request, org, course, name): tabs = course_module.tabs tabs.append({"type": "pdf_textbooks"}) course_module.tabs = tabs + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + course_module.save() store.update_metadata(course_module.location, own_metadata(course_module)) resp = JsonResponse(textbook, status=201) resp["Location"] = reverse("textbook_by_id", kwargs={ @@ -585,10 +590,13 @@ def textbook_by_id(request, org, course, name, tid): i = course_module.pdf_textbooks.index(textbook) new_textbooks = course_module.pdf_textbooks[0:i] new_textbooks.append(new_textbook) - new_textbooks.extend(course_module.pdf_textbooks[i+1:]) + new_textbooks.extend(course_module.pdf_textbooks[i + 1:]) course_module.pdf_textbooks = new_textbooks else: course_module.pdf_textbooks.append(new_textbook) + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. 
+ course_module.save() store.update_metadata(course_module.location, own_metadata(course_module)) return JsonResponse(new_textbook, status=201) elif request.method == 'DELETE': @@ -596,7 +604,8 @@ def textbook_by_id(request, org, course, name, tid): return JsonResponse(status=404) i = course_module.pdf_textbooks.index(textbook) new_textbooks = course_module.pdf_textbooks[0:i] - new_textbooks.extend(course_module.pdf_textbooks[i+1:]) + new_textbooks.extend(course_module.pdf_textbooks[i + 1:]) course_module.pdf_textbooks = new_textbooks + course_module.save() store.update_metadata(course_module.location, own_metadata(course_module)) return JsonResponse() diff --git a/cms/djangoapps/contentstore/views/item.py b/cms/djangoapps/contentstore/views/item.py index 90dae10c23..9a5e40e967 100644 --- a/cms/djangoapps/contentstore/views/item.py +++ b/cms/djangoapps/contentstore/views/item.py @@ -70,6 +70,9 @@ def save_item(request): delattr(existing_item, metadata_key) else: setattr(existing_item, metadata_key, value) + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. 
+ existing_item.save() # commit to datastore store.update_metadata(item_location, own_metadata(existing_item)) diff --git a/cms/djangoapps/contentstore/views/preview.py b/cms/djangoapps/contentstore/views/preview.py index ba393e72f4..35af3e9ac3 100644 --- a/cms/djangoapps/contentstore/views/preview.py +++ b/cms/djangoapps/contentstore/views/preview.py @@ -7,7 +7,7 @@ from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from mitxmako.shortcuts import render_to_response -from xmodule_modifiers import replace_static_urls, wrap_xmodule +from xmodule_modifiers import replace_static_urls, wrap_xmodule, save_module # pylint: disable=F0401 from xmodule.error_module import ErrorDescriptor from xmodule.errortracker import exc_info_to_str from xmodule.exceptions import NotFoundError, ProcessingError @@ -47,6 +47,8 @@ def preview_dispatch(request, preview_id, location, dispatch=None): # Let the module handle the AJAX try: ajax_return = instance.handle_ajax(dispatch, request.POST) + # Save any module data that has changed to the underlying KeyValueStore + instance.save() except NotFoundError: log.exception("Module indicating to user that request doesn't exist") @@ -166,6 +168,11 @@ def load_preview_module(request, preview_id, descriptor): course_namespace=Location([module.location.tag, module.location.org, module.location.course, None, None]) ) + module.get_html = save_module( + module.get_html, + module + ) + return module diff --git a/cms/djangoapps/contentstore/views/tabs.py b/cms/djangoapps/contentstore/views/tabs.py index 154f9fb55d..d55932e33d 100644 --- a/cms/djangoapps/contentstore/views/tabs.py +++ b/cms/djangoapps/contentstore/views/tabs.py @@ -76,6 +76,9 @@ def reorder_static_tabs(request): # OK, re-assemble the static tabs in the new order course.tabs = reordered_tabs + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. 
+ course.save() modulestore('direct').update_metadata(course.location, own_metadata(course)) return HttpResponse() diff --git a/cms/djangoapps/models/settings/course_details.py b/cms/djangoapps/models/settings/course_details.py index 8ce8c2db34..7c3b883283 100644 --- a/cms/djangoapps/models/settings/course_details.py +++ b/cms/djangoapps/models/settings/course_details.py @@ -122,6 +122,10 @@ class CourseDetails(object): descriptor.enrollment_end = converted if dirty: + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() + get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor)) # NOTE: below auto writes to the db w/o verifying that any of the fields actually changed diff --git a/cms/djangoapps/models/settings/course_grading.py b/cms/djangoapps/models/settings/course_grading.py index e529a284c6..0746fc7a90 100644 --- a/cms/djangoapps/models/settings/course_grading.py +++ b/cms/djangoapps/models/settings/course_grading.py @@ -7,6 +7,9 @@ class CourseGradingModel(object): """ Basically a DAO and Model combo for CRUD operations pertaining to grading policy. """ + # Within this class, allow access to protected members of client classes. + # This comes up when accessing kvs data and caches during kvs saves and modulestore writes. 
+ # pylint: disable=W0212 def __init__(self, course_descriptor): self.course_location = course_descriptor.location self.graders = [CourseGradingModel.jsonize_grader(i, grader) for i, grader in enumerate(course_descriptor.raw_grader)] # weights transformed to ints [0..100] @@ -83,13 +86,16 @@ class CourseGradingModel(object): """ course_location = Location(jsondict['course_location']) descriptor = get_modulestore(course_location).get_item(course_location) - graders_parsed = [CourseGradingModel.parse_grader(jsonele) for jsonele in jsondict['graders']] descriptor.raw_grader = graders_parsed descriptor.grade_cutoffs = jsondict['grade_cutoffs'] + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() get_modulestore(course_location).update_item(course_location, descriptor.xblock_kvs._data) + CourseGradingModel.update_grace_period_from_json(course_location, jsondict['grace_period']) return CourseGradingModel.fetch(course_location) @@ -116,6 +122,9 @@ class CourseGradingModel(object): else: descriptor.raw_grader.append(grader) + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index]) @@ -131,6 +140,10 @@ class CourseGradingModel(object): descriptor = get_modulestore(course_location).get_item(course_location) descriptor.grade_cutoffs = cutoffs + + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. 
+ descriptor.save() get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) return cutoffs @@ -156,6 +169,10 @@ class CourseGradingModel(object): descriptor = get_modulestore(course_location).get_item(course_location) descriptor.lms.graceperiod = grace_timedelta + + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata) @staticmethod @@ -172,23 +189,12 @@ class CourseGradingModel(object): del descriptor.raw_grader[index] # force propagation to definition descriptor.raw_grader = descriptor.raw_grader + + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) - # NOTE cannot delete cutoffs. May be useful to reset - @staticmethod - def delete_cutoffs(course_location, cutoffs): - """ - Resets the cutoffs to the defaults - """ - if not isinstance(course_location, Location): - course_location = Location(course_location) - - descriptor = get_modulestore(course_location).get_item(course_location) - descriptor.grade_cutoffs = descriptor.defaut_grading_policy['GRADE_CUTOFFS'] - get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) - - return descriptor.grade_cutoffs - @staticmethod def delete_grace_period(course_location): """ @@ -199,6 +205,10 @@ class CourseGradingModel(object): descriptor = get_modulestore(course_location).get_item(course_location) del descriptor.lms.graceperiod + + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. 
+ descriptor.save() get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata) @staticmethod @@ -225,6 +235,9 @@ class CourseGradingModel(object): del descriptor.lms.format del descriptor.lms.graded + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() get_modulestore(location).update_metadata(location, descriptor._model_data._kvs._metadata) @staticmethod diff --git a/cms/djangoapps/models/settings/course_metadata.py b/cms/djangoapps/models/settings/course_metadata.py index 5fb07fe806..8d9a292867 100644 --- a/cms/djangoapps/models/settings/course_metadata.py +++ b/cms/djangoapps/models/settings/course_metadata.py @@ -76,6 +76,9 @@ class CourseMetadata(object): setattr(descriptor.lms, key, value) if dirty: + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor)) @@ -97,6 +100,10 @@ class CourseMetadata(object): elif hasattr(descriptor.lms, key): delattr(descriptor.lms, key) + # Save the data that we've just changed to the underlying + # MongoKeyValueStore before we update the mongo datastore. + descriptor.save() + get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor)) diff --git a/common/djangoapps/xmodule_modifiers.py b/common/djangoapps/xmodule_modifiers.py index 3914892bbf..3efc04789e 100644 --- a/common/djangoapps/xmodule_modifiers.py +++ b/common/djangoapps/xmodule_modifiers.py @@ -89,6 +89,21 @@ def grade_histogram(module_id): return grades +def save_module(get_html, module): + """ + Updates the given get_html function for the given module to save the fields + after rendering. 
+ """ + @wraps(get_html) + def _get_html(): + """Cache the rendered output, save, then return the output.""" + rendered_html = get_html() + module.save() + return rendered_html + + return _get_html + + def add_histogram(get_html, module, user): """ Updates the supplied module with a new get_html function that wraps diff --git a/common/lib/xmodule/xmodule/modulestore/mongo/base.py b/common/lib/xmodule/xmodule/modulestore/mongo/base.py index 7fc903623a..ae879ba3e8 100644 --- a/common/lib/xmodule/xmodule/modulestore/mongo/base.py +++ b/common/lib/xmodule/xmodule/modulestore/mongo/base.py @@ -108,8 +108,9 @@ class MongoKeyValueStore(KeyValueStore): def set_many(self, update_dict): """set_many method. Implementations should accept an `update_dict` of key-value pairs, and set all the `keys` to the given `value`s.""" - # It appears that `set` simply updates an in-memory db, rather than calling down - # to a real db; need to figure out if this is the case. + # `set` simply updates an in-memory db, rather than calling down to a real db, + # as mongo bulk save is handled elsewhere. A future improvement would be to pull + # the mongo-specific bulk save logic into this method. for key, value in update_dict.iteritems(): self.set(key, value) @@ -647,6 +648,8 @@ class MongoModuleStore(ModuleStoreBase): :param xmodule: """ + # Save any changes to the xmodule to the MongoKeyValueStore + xmodule.save() # split mongo's persist_dag is more general and useful. 
self.collection.save({ '_id': xmodule.location.dict(), @@ -691,6 +694,8 @@ class MongoModuleStore(ModuleStoreBase): 'url_slug': new_object.location.name }) course.tabs = existing_tabs + # Save any changes to the course to the MongoKeyValueStore + course.save() self.update_metadata(course.location, course.xblock_kvs._metadata) def fire_updated_modulestore_signal(self, course_id, location): @@ -797,6 +802,8 @@ class MongoModuleStore(ModuleStoreBase): tab['name'] = metadata.get('display_name') break course.tabs = existing_tabs + # Save the updates to the course to the MongoKeyValueStore + course.save() self.update_metadata(course.location, own_metadata(course)) self._update_single_item(location, {'metadata': metadata}) @@ -819,6 +826,8 @@ class MongoModuleStore(ModuleStoreBase): course = self.get_course_for_item(item.location) existing_tabs = course.tabs or [] course.tabs = [tab for tab in existing_tabs if tab.get('url_slug') != location.name] + # Save the updates to the course to the MongoKeyValueStore + course.save() self.update_metadata(course.location, own_metadata(course)) # Must include this to avoid the django debug toolbar (which defines the deprecated "safe=False") diff --git a/common/lib/xmodule/xmodule/modulestore/tests/django_utils.py b/common/lib/xmodule/xmodule/modulestore/tests/django_utils.py index 564aac141d..4f998d57fb 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/django_utils.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/django_utils.py @@ -165,34 +165,31 @@ class ModuleStoreTestCase(TestCase): # Call superclass implementation super(ModuleStoreTestCase, self)._post_teardown() + def assert2XX(self, status_code, msg=None): """ Assert that the given value is a success status (between 200 and 299) """ - if not 200 <= status_code < 300: - msg = self._formatMessage(msg, "%s is not a success status" % safe_repr(status_code)) - raise self.failureExecption(msg) + msg = self._formatMessage(msg, "%s is not a success status" % 
safe_repr(status_code)) + self.assertTrue(status_code >= 200 and status_code < 300, msg=msg) def assert3XX(self, status_code, msg=None): """ Assert that the given value is a redirection status (between 300 and 399) """ - if not 300 <= status_code < 400: - msg = self._formatMessage(msg, "%s is not a redirection status" % safe_repr(status_code)) - raise self.failureExecption(msg) + msg = self._formatMessage(msg, "%s is not a redirection status" % safe_repr(status_code)) + self.assertTrue(status_code >= 300 and status_code < 400, msg=msg) def assert4XX(self, status_code, msg=None): """ Assert that the given value is a client error status (between 400 and 499) """ - if not 400 <= status_code < 500: - msg = self._formatMessage(msg, "%s is not a client error status" % safe_repr(status_code)) - raise self.failureExecption(msg) + msg = self._formatMessage(msg, "%s is not a client error status" % safe_repr(status_code)) + self.assertTrue(status_code >= 400 and status_code < 500, msg=msg) def assert5XX(self, status_code, msg=None): """ Assert that the given value is a server error status (between 500 and 599) """ - if not 500 <= status_code < 600: - msg = self._formatMessage(msg, "%s is not a server error status" % safe_repr(status_code)) - raise self.failureExecption(msg) + msg = self._formatMessage(msg, "%s is not a server error status" % safe_repr(status_code)) + self.assertTrue(status_code >= 500 and status_code < 600, msg=msg) diff --git a/common/lib/xmodule/xmodule/modulestore/tests/factories.py b/common/lib/xmodule/xmodule/modulestore/tests/factories.py index e442944805..8c3b5d59dd 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/factories.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/factories.py @@ -53,9 +53,6 @@ class XModuleCourseFactory(Factory): for k, v in kwargs.iteritems(): setattr(new_course, k, v) - # Save the data we've just created before we update mongo datastore - new_course.save() - # Update the data in the mongo datastore 
store.save_xmodule(new_course) return new_course @@ -138,7 +135,6 @@ class XModuleItemFactory(Factory): # replace the display name with an optional parameter passed in from the caller if display_name is not None: metadata['display_name'] = display_name - # note that location comes from above lazy_attribute store.create_and_save_xmodule(location, metadata=metadata, definition_data=data) if location.category not in DETACHED_CATEGORIES: diff --git a/common/lib/xmodule/xmodule/modulestore/xml.py b/common/lib/xmodule/xmodule/modulestore/xml.py index 012740ff9a..8bc3142c77 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml.py +++ b/common/lib/xmodule/xmodule/modulestore/xml.py @@ -194,6 +194,10 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): if hasattr(descriptor, 'children'): for child in descriptor.get_children(): parent_tracker.add_parent(child.location, descriptor.location) + + # After setting up the descriptor, save any changes that we have + # made to attributes on the descriptor to the underlying KeyValueStore. 
+ descriptor.save() return descriptor render_template = lambda: '' diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index e1f8d135de..bed9cd86ba 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -504,11 +504,13 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): See if we can load the module and save an answer @return: """ - #Load the module + # Load the module module = self.get_module_from_location(self.problem_location, COURSE) - #Try saving an answer + # Try saving an answer module.handle_ajax("save_answer", {"student_answer": self.answer}) + # Save our modifications to the underlying KeyValueStore so they can be persisted + module.save() task_one_json = json.loads(module.task_states[0]) self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer) diff --git a/common/lib/xmodule/xmodule/tests/test_conditional.py b/common/lib/xmodule/xmodule/tests/test_conditional.py index abb8b4941a..b28d236369 100644 --- a/common/lib/xmodule/xmodule/tests/test_conditional.py +++ b/common/lib/xmodule/xmodule/tests/test_conditional.py @@ -217,8 +217,11 @@ class ConditionalModuleXmlTest(unittest.TestCase): html = ajax['html'] self.assertFalse(any(['This is a secret' in item for item in html])) - # now change state of the capa problem to make it completed - inner_get_module(Location('i4x://HarvardX/ER22x/problem/choiceprob')).attempts = 1 + # Now change state of the capa problem to make it completed + inner_module = inner_get_module(Location('i4x://HarvardX/ER22x/problem/choiceprob')) + inner_module.attempts = 1 + # Save our modifications to the underlying KeyValueStore so they can be persisted + inner_module.save() ajax = json.loads(module.handle_ajax('', '')) print "post-attempt ajax: ", ajax diff --git a/lms/djangoapps/courseware/model_data.py 
b/lms/djangoapps/courseware/model_data.py index dec4805ced..44be16e441 100644 --- a/lms/djangoapps/courseware/model_data.py +++ b/lms/djangoapps/courseware/model_data.py @@ -247,9 +247,10 @@ class ModelDataCache(object): course_id=self.course_id, student=self.user, module_state_key=key.block_scope_id.url(), - defaults={'state': json.dumps({}), - 'module_type': key.block_scope_id.category, - }, + defaults={ + 'state': json.dumps({}), + 'module_type': key.block_scope_id.category, + }, ) elif key.scope == Scope.content: field_object, _ = XModuleContentField.objects.get_or_create( @@ -333,22 +334,10 @@ class LmsKeyValueStore(KeyValueStore): return json.loads(field_object.value) def set(self, key, value): - if key.field_name in self._descriptor_model_data: - raise InvalidWriteError("Not allowed to overwrite descriptor model data", key.field_name) - - field_object = self._model_data_cache.find_or_create(key) - - if key.scope not in self._allowed_scopes: - raise InvalidScopeError(key.scope) - - if key.scope == Scope.user_state: - state = json.loads(field_object.state) - state[key.field_name] = value - field_object.state = json.dumps(state) - else: - field_object.value = json.dumps(value) - - field_object.save() + """ + Set a single value in the KeyValueStore + """ + self.set_many({key: value}) def set_many(self, kv_dict): """ @@ -362,23 +351,21 @@ class LmsKeyValueStore(KeyValueStore): # field_objects maps a field_object to a list of associated fields field_objects = dict() for field in kv_dict: - # check field for validity + # Check field for validity if field.field_name in self._descriptor_model_data: raise InvalidWriteError("Not allowed to overwrite descriptor model data", field.field_name) if field.scope not in self._allowed_scopes: raise InvalidScopeError(field.scope) - # if the field is valid + # If the field is valid and isn't already in the dictionary, add it. 
field_object = self._model_data_cache.find_or_create(field) - # if this field_object isn't already in the dictionary - # add it if field_object not in field_objects.keys(): field_objects[field_object] = [] - # update the list of associated fields + # Update the list of associated fields field_objects[field_object].append(field) - # special case when scope is for the user state + # Special case when scope is for the user state, because this scope saves fields in a single row if field.scope == Scope.user_state: state = json.loads(field_object.state) state[field.field_name] = kv_dict[field] diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py index c343701a94..de709f7652 100644 --- a/lms/djangoapps/courseware/module_render.py +++ b/lms/djangoapps/courseware/module_render.py @@ -27,7 +27,7 @@ from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.x_module import ModuleSystem -from xmodule_modifiers import replace_course_urls, replace_static_urls, add_histogram, wrap_xmodule +from xmodule_modifiers import replace_course_urls, replace_static_urls, add_histogram, wrap_xmodule, save_module # pylint: disable=F0401 import static_replace from psychometrics.psychoanalyze import make_psychometrics_data_update_handler @@ -36,6 +36,8 @@ from student.models import unique_id_for_user from courseware.access import has_access from courseware.masquerade import setup_masquerade from courseware.model_data import LmsKeyValueStore, LmsUsage, ModelDataCache +from xblock.runtime import KeyValueStore +from xblock.core import Scope from courseware.models import StudentModule from util.sandboxing import can_execute_unsafe_code from util.json_request import JsonResponse @@ -226,7 +228,7 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours userid=str(user.id), mod_id=descriptor.location.url(), 
dispatch=dispatch), - ) + ) return xqueue_callback_url_prefix + relative_xqueue_callback_url # Default queuename is course-specific and is derived from the course that @@ -234,11 +236,12 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours # TODO: Queuename should be derived from 'course_settings.json' of each course xqueue_default_queuename = descriptor.location.org + '-' + descriptor.location.course - xqueue = {'interface': xqueue_interface, - 'construct_callback': make_xqueue_callback, - 'default_queuename': xqueue_default_queuename.replace(' ', '_'), - 'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS - } + xqueue = { + 'interface': xqueue_interface, + 'construct_callback': make_xqueue_callback, + 'default_queuename': xqueue_default_queuename.replace(' ', '_'), + 'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS + } # This is a hacky way to pass settings to the combined open ended xmodule # It needs an S3 interface to upload images to S3 @@ -286,18 +289,24 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours ) def publish(event): + """A function that allows XModules to publish events. 
This only supports grade changes right now.""" if event.get('event_name') != 'grade': return - student_module, created = StudentModule.objects.get_or_create( - course_id=course_id, - student=user, - module_type=descriptor.location.category, - module_state_key=descriptor.location.url(), - defaults={'state': '{}'}, + usage = LmsUsage(descriptor.location, descriptor.location) + # Construct the key for the module + key = KeyValueStore.Key( + scope=Scope.user_state, + student_id=user.id, + block_scope_id=usage.id, + field_name='grade' ) + + student_module = model_data_cache.find_or_create(key) + # Update the grades student_module.grade = event.get('value') student_module.max_grade = event.get('max_value') + # Save all changes to the underlying KeyValueStore student_module.save() # Bin score into range and increment stats @@ -388,9 +397,31 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours if has_access(user, module, 'staff', course_id): module.get_html = add_histogram(module.get_html, module, user) + # force the module to save after rendering + module.get_html = save_module(module.get_html, module) return module +def find_target_student_module(request, user_id, course_id, mod_id): + """ + Retrieve target StudentModule + """ + user = User.objects.get(id=user_id) + model_data_cache = ModelDataCache.cache_for_descriptor_descendents( + course_id, + user, + modulestore().get_instance(course_id, mod_id), + depth=0, + select_for_update=True + ) + instance = get_module(user, request, mod_id, model_data_cache, course_id, grade_bucket_type='xqueue') + if instance is None: + msg = "No module {0} for user {1}--access denied?".format(mod_id, user) + log.debug(msg) + raise Http404 + return instance + + @csrf_exempt def xqueue_callback(request, course_id, userid, mod_id, dispatch): ''' @@ -409,20 +440,7 @@ def xqueue_callback(request, course_id, userid, mod_id, dispatch): if not isinstance(header, dict) or 'lms_key' not in header: raise Http404 - # 
Retrieve target StudentModule - user = User.objects.get(id=userid) - model_data_cache = ModelDataCache.cache_for_descriptor_descendents( - course_id, - user, - modulestore().get_instance(course_id, mod_id), - depth=0, - select_for_update=True - ) - instance = get_module(user, request, mod_id, model_data_cache, course_id, grade_bucket_type='xqueue') - if instance is None: - msg = "No module {0} for user {1}--access denied?".format(mod_id, user) - log.debug(msg) - raise Http404 + instance = find_target_student_module(request, userid, course_id, mod_id) # Transfer 'queuekey' from xqueue response header to the data. # This is required to use the interface defined by 'handle_ajax' @@ -433,6 +451,8 @@ def xqueue_callback(request, course_id, userid, mod_id, dispatch): try: # Can ignore the return value--not used for xqueue_callback instance.handle_ajax(dispatch, data) + # Save any state that has changed to the underlying KeyValueStore + instance.save() except: log.exception("error processing ajax call") raise @@ -504,6 +524,8 @@ def modx_dispatch(request, dispatch, location, course_id): # Let the module handle the AJAX try: ajax_return = instance.handle_ajax(dispatch, data) + # Save any fields that have changed to the underlying KeyValueStore + instance.save() # If we can't find the module, respond with a 404 except NotFoundError: diff --git a/lms/djangoapps/courseware/tests/test_model_data.py b/lms/djangoapps/courseware/tests/test_model_data.py index 07aaa66473..0368bb040b 100644 --- a/lms/djangoapps/courseware/tests/test_model_data.py +++ b/lms/djangoapps/courseware/tests/test_model_data.py @@ -1,3 +1,6 @@ +""" +Test for lms courseware app, module data (runtime data storage for XBlocks) +""" import json from mock import Mock, patch from functools import partial @@ -68,12 +71,17 @@ class TestDescriptorFallback(TestCase): self.assertRaises(InvalidWriteError, self.kvs.set, settings_key('field_b'), 'foo') self.assertEquals('settings', self.desc_md['field_b']) + 
self.assertRaises(InvalidWriteError, self.kvs.set_many, {content_key('field_a'): 'foo'}) + self.assertEquals('content', self.desc_md['field_a']) + self.assertRaises(InvalidWriteError, self.kvs.delete, content_key('field_a')) self.assertEquals('content', self.desc_md['field_a']) self.assertRaises(InvalidWriteError, self.kvs.delete, settings_key('field_b')) self.assertEquals('settings', self.desc_md['field_b']) + + class TestInvalidScopes(TestCase): def setUp(self): self.desc_md = {} @@ -85,10 +93,13 @@ class TestInvalidScopes(TestCase): for scope in (Scope(user=True, block=BlockScope.DEFINITION), Scope(user=False, block=BlockScope.TYPE), Scope(user=False, block=BlockScope.ALL)): - self.assertRaises(InvalidScopeError, self.kvs.get, LmsKeyValueStore.Key(scope, None, None, 'field')) - self.assertRaises(InvalidScopeError, self.kvs.set, LmsKeyValueStore.Key(scope, None, None, 'field'), 'value') - self.assertRaises(InvalidScopeError, self.kvs.delete, LmsKeyValueStore.Key(scope, None, None, 'field')) - self.assertRaises(InvalidScopeError, self.kvs.has, LmsKeyValueStore.Key(scope, None, None, 'field')) + key = LmsKeyValueStore.Key(scope, None, None, 'field') + + self.assertRaises(InvalidScopeError, self.kvs.get, key) + self.assertRaises(InvalidScopeError, self.kvs.set, key, 'value') + self.assertRaises(InvalidScopeError, self.kvs.delete, key) + self.assertRaises(InvalidScopeError, self.kvs.has, key) + self.assertRaises(InvalidScopeError, self.kvs.set_many, {key: 'value'}) class TestStudentModuleStorage(TestCase): @@ -141,7 +152,7 @@ class TestStudentModuleStorage(TestCase): self.assertFalse(self.kvs.has(user_state_key('not_a_field'))) def construct_kv_dict(self): - """ construct a kv_dict that can be passed to set_many """ + """Construct a kv_dict that can be passed to set_many""" key1 = user_state_key('field_a') key2 = user_state_key('field_b') new_value = 'new value' @@ -149,7 +160,7 @@ class TestStudentModuleStorage(TestCase): return {key1: new_value, key2: newer_value} 
def test_set_many(self): - """Test setting many fields that are scoped to Scope.user_state """ + "Test setting many fields that are scoped to Scope.user_state" kv_dict = self.construct_kv_dict() self.kvs.set_many(kv_dict) @@ -157,7 +168,7 @@ class TestStudentModuleStorage(TestCase): self.assertEquals(self.kvs.get(key), kv_dict[key]) def test_set_many_failure(self): - """Test failures when setting many fields that are scoped to Scope.user_state """ + "Test failures when setting many fields that are scoped to Scope.user_state" kv_dict = self.construct_kv_dict() # because we're patching the underlying save, we need to ensure the # fields are in the cache @@ -211,6 +222,10 @@ class StorageTestBase(object): A base class for that gets subclassed when testing each of the scopes. """ + # Disable pylint warnings that arise because of the way the child classes call + # this base class -- pylint's static analysis can't keep up with it. + # pylint: disable=E1101, E1102 + factory = None scope = None key_factory = None @@ -273,6 +288,7 @@ class StorageTestBase(object): self.assertFalse(self.kvs.has(self.key_factory('missing_field'))) def construct_kv_dict(self): + """Construct a kv_dict that can be passed to set_many""" key1 = self.key_factory('existing_field') key2 = self.key_factory('other_existing_field') new_value = 'new value' diff --git a/lms/djangoapps/courseware/tests/test_module_render.py b/lms/djangoapps/courseware/tests/test_module_render.py index a9060b57e9..732758be2c 100644 --- a/lms/djangoapps/courseware/tests/test_module_render.py +++ b/lms/djangoapps/courseware/tests/test_module_render.py @@ -1,4 +1,7 @@ -from mock import MagicMock +""" +Test for lms courseware app, module render unit +""" +from mock import MagicMock, patch import json from django.http import Http404, HttpResponse @@ -28,6 +31,20 @@ class ModuleRenderTestCase(LoginEnrollmentTestCase): self.location = ['i4x', 'edX', 'toy', 'chapter', 'Overview'] self.course_id = 'edX/toy/2012_Fall' 
self.toy_course = modulestore().get_course(self.course_id) + self.mock_user = UserFactory() + self.mock_user.id = 1 + self.request_factory = RequestFactory() + + # Construct a mock module for the modulestore to return + self.mock_module = MagicMock() + self.mock_module.id = 1 + self.dispatch = 'score_update' + + # Construct a 'standard' xqueue_callback url + self.callback_url = reverse('xqueue_callback', kwargs=dict(course_id=self.course_id, + userid=str(self.mock_user.id), + mod_id=self.mock_module.id, + dispatch=self.dispatch)) def test_get_module(self): self.assertIsNone(render.get_module('dummyuser', None, @@ -56,7 +73,7 @@ class ModuleRenderTestCase(LoginEnrollmentTestCase): mock_request_3 = MagicMock() mock_request_3.POST.copy.return_value = {'position': 1} mock_request_3.FILES = False - mock_request_3.user = UserFactory() + mock_request_3.user = self.mock_user inputfile_2 = Stub() inputfile_2.size = 1 inputfile_2.name = 'name' @@ -87,6 +104,46 @@ class ModuleRenderTestCase(LoginEnrollmentTestCase): self.course_id ) + def test_xqueue_callback_success(self): + """ + Test for happy-path xqueue_callback + """ + fake_key = 'fake key' + xqueue_header = json.dumps({'lms_key': fake_key}) + data = { + 'xqueue_header': xqueue_header, + 'xqueue_body': 'hello world', + } + + # Patch getmodule to return our mock module + with patch('courseware.module_render.find_target_student_module') as get_fake_module: + get_fake_module.return_value = self.mock_module + # call xqueue_callback with our mocked information + request = self.request_factory.post(self.callback_url, data) + render.xqueue_callback(request, self.course_id, self.mock_user.id, self.mock_module.id, self.dispatch) + + # Verify that handle ajax is called with the correct data + request.POST['queuekey'] = fake_key + self.mock_module.handle_ajax.assert_called_once_with(self.dispatch, request.POST) + + def test_xqueue_callback_missing_header_info(self): + data = { + 'xqueue_header': '{}', + 'xqueue_body': 'hello 
world', + } + + with patch('courseware.module_render.find_target_student_module') as get_fake_module: + get_fake_module.return_value = self.mock_module + # Test with missing xqueue data + with self.assertRaises(Http404): + request = self.request_factory.post(self.callback_url, {}) + render.xqueue_callback(request, self.course_id, self.mock_user.id, self.mock_module.id, self.dispatch) + + # Test with missing xqueue_header + with self.assertRaises(Http404): + request = self.request_factory.post(self.callback_url, data) + render.xqueue_callback(request, self.course_id, self.mock_user.id, self.mock_module.id, self.dispatch) + def test_get_score_bucket(self): self.assertEquals(render.get_score_bucket(0, 10), 'incorrect') self.assertEquals(render.get_score_bucket(1, 10), 'partial') diff --git a/lms/djangoapps/courseware/views.py b/lms/djangoapps/courseware/views.py index 9c5a665754..e6e062c494 100644 --- a/lms/djangoapps/courseware/views.py +++ b/lms/djangoapps/courseware/views.py @@ -167,6 +167,8 @@ def save_child_position(seq_module, child_name): # Only save if position changed if position != seq_module.position: seq_module.position = position + # Save this new position to the underlying KeyValueStore + seq_module.save() def check_for_active_timelimit_module(request, course_id, course): diff --git a/requirements/edx/github.txt b/requirements/edx/github.txt index 62a44f1307..5d1a505ace 100644 --- a/requirements/edx/github.txt +++ b/requirements/edx/github.txt @@ -8,6 +8,6 @@ -e git://github.com/eventbrite/zendesk.git@d53fe0e81b623f084e91776bcf6369f8b7b63879#egg=zendesk # Our libraries: --e git+https://github.com/edx/XBlock.git@0b71db6ee6f9b216d0dd85cd76b55f2354b93dff#egg=XBlock +-e git+https://github.com/edx/XBlock.git@3974e999fe853a37dfa6fadf0611289434349409#egg=XBlock -e git+https://github.com/edx/codejail.git@0a1b468#egg=codejail -e git+https://github.com/edx/diff-cover.git@v0.1.3#egg=diff_cover From 025a95885ad345fb3775e58a7e5e360d6a92f8c0 Mon Sep 17 00:00:00 2001 
From: Don Mitchell Date: Wed, 17 Jul 2013 15:02:53 -0400 Subject: [PATCH 24/31] Generate TypeError if from or to_json fail. --- common/lib/xmodule/xmodule/fields.py | 5 +++-- common/lib/xmodule/xmodule/tests/test_fields.py | 5 ++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/common/lib/xmodule/xmodule/fields.py b/common/lib/xmodule/xmodule/fields.py index a9b4be4fcd..465993a51f 100644 --- a/common/lib/xmodule/xmodule/fields.py +++ b/common/lib/xmodule/xmodule/fields.py @@ -58,8 +58,7 @@ class Date(ModelType): else: msg = "Field {0} has bad value '{1}'".format( self._name, field) - log.warning(msg) - return None + raise TypeError(msg) def to_json(self, value): """ @@ -76,6 +75,8 @@ class Date(ModelType): return value.strftime('%Y-%m-%dT%H:%M:%SZ') else: return value.isoformat() + else: + raise TypeError("Cannot convert {} to json".format(value)) TIMEDELTA_REGEX = re.compile(r'^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) second(?:s)?)?$') diff --git a/common/lib/xmodule/xmodule/tests/test_fields.py b/common/lib/xmodule/xmodule/tests/test_fields.py index f0eb082dcf..8453adaa20 100644 --- a/common/lib/xmodule/xmodule/tests/test_fields.py +++ b/common/lib/xmodule/xmodule/tests/test_fields.py @@ -44,7 +44,8 @@ class DateTest(unittest.TestCase): def test_return_None(self): self.assertIsNone(DateTest.date.from_json("")) self.assertIsNone(DateTest.date.from_json(None)) - self.assertIsNone(DateTest.date.from_json(['unknown value'])) + with self.assertRaises(TypeError): + DateTest.date.from_json(['unknown value']) def test_old_due_date_format(self): current = datetime.datetime.today() @@ -83,6 +84,8 @@ class DateTest(unittest.TestCase): DateTest.date.to_json( DateTest.date.from_json("2012-12-31T23:00:01-01:00")), "2012-12-31T23:00:01-01:00") + with self.assertRaises(TypeError): + DateTest.date.to_json('2012-12-31T23:00:01-01:00') class TimedeltaTest(unittest.TestCase): From 
badf86f71b7e4f65f07cdbcf43b6b4ebcf0464b2 Mon Sep 17 00:00:00 2001 From: Don Mitchell Date: Wed, 17 Jul 2013 15:05:19 -0400 Subject: [PATCH 25/31] Have save_item parse json formatted field values by type including allowing javascript to serialize date/time natively. --- cms/djangoapps/contentstore/views/item.py | 37 +++++++++++++++++++++-- cms/static/js/base.js | 8 ++--- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/cms/djangoapps/contentstore/views/item.py b/cms/djangoapps/contentstore/views/item.py index 9a5e40e967..efebded9b9 100644 --- a/cms/djangoapps/contentstore/views/item.py +++ b/cms/djangoapps/contentstore/views/item.py @@ -59,17 +59,21 @@ def save_item(request): # 'apply' the submitted metadata, so we don't end up deleting system metadata existing_item = modulestore().get_item(item_location) for metadata_key in request.POST.get('nullout', []): - setattr(existing_item, metadata_key, None) + # [dhm] see comment on _get_xblock_field + _get_xblock_field(existing_item, metadata_key).write_to(existing_item, None) # update existing metadata with submitted metadata (which can be partial) # IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. If # the intent is to make it None, use the nullout field for metadata_key, value in request.POST.get('metadata', {}).items(): + # [dhm] see comment on _get_xblock_field + field = _get_xblock_field(existing_item, metadata_key) if value is None: - delattr(existing_item, metadata_key) + field.delete_from(existing_item) else: - setattr(existing_item, metadata_key, value) + value = field.from_json(value) + field.write_to(existing_item, value) # Save the data that we've just changed to the underlying # MongoKeyValueStore before we update the mongo datastore. existing_item.save() @@ -79,6 +83,33 @@ def save_item(request): return HttpResponse() +# [DHM] A hack until we implement a permanent soln. 
Proposed perm solution is to make namespace fields also top level +# fields in xblocks rather than requiring dereference through namespace but we'll need to consider whether there are +# plausible use cases for distinct fields w/ same name in different namespaces on the same blocks. +# The idea is that consumers of the xblock, and particularly the web client, shouldn't know about our internal +# representation (namespaces as means of decorating all modules). +# Given top-level access, the calls can simply be setattr(existing_item, field, value) ... +# Really, this method should be elsewhere (e.g., xblock). We also need methods for has_value (v is_default)... +def _get_xblock_field(xblock, field_name): + """ + A temporary function to get the xblock field either from the xblock or one of its namespaces by name. + :param xblock: + :param field_name: + """ + def find_field(fields): + for field in fields: + if field.name == field_name: + return field + + found = find_field(xblock.fields) + if found: + return found + for namespace in xblock.namespaces: + found = find_field(getattr(xblock, namespace).fields) + if found: + return found + + @login_required @expect_json def create_item(request): diff --git a/cms/static/js/base.js b/cms/static/js/base.js index 3d8cd7684e..329624ef46 100644 --- a/cms/static/js/base.js +++ b/cms/static/js/base.js @@ -253,17 +253,13 @@ function syncReleaseDate(e) { } function getEdxTimeFromDateTimeVals(date_val, time_val) { - var edxTimeStr = null; - if (date_val != '') { if (time_val == '') time_val = '00:00'; - // Note, we are using date.js utility which has better parsing abilities than the built in JS date parsing - var date = Date.parse(date_val + " " + time_val); - edxTimeStr = date.toString('yyyy-MM-ddTHH:mm'); + return new Date(date_val + " " + time_val + "Z"); } - return edxTimeStr; + else return null; } function getEdxTimeFromDateTimeInputs(date_id, time_id) { From 0aa9c6c1aefe90e8fa11f857cb37cf7ff862764d Mon Sep 17 00:00:00 2001 From: 
Don Mitchell Date: Wed, 17 Jul 2013 15:38:44 -0400 Subject: [PATCH 26/31] json field value conversion test --- .../contentstore/tests/test_item.py | 37 +++++++++++++++++-- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/cms/djangoapps/contentstore/tests/test_item.py b/cms/djangoapps/contentstore/tests/test_item.py index cd203e6af7..578b82b3cf 100644 --- a/cms/djangoapps/contentstore/tests/test_item.py +++ b/cms/djangoapps/contentstore/tests/test_item.py @@ -4,6 +4,8 @@ from django.core.urlresolvers import reverse from xmodule.capa_module import CapaDescriptor import json from xmodule.modulestore.django import modulestore +import datetime +from pytz import UTC class DeleteItem(CourseTestCase): @@ -151,16 +153,16 @@ class TestEditItem(CourseTestCase): reverse('create_item'), json.dumps( {'parent_location': chap_location, - 'category': 'vertical' + 'category': 'sequential' }), content_type="application/json" ) - vert_location = self.response_id(resp) + self.seq_location = self.response_id(resp) # create problem w/ boilerplate template_id = 'multiplechoice.yaml' resp = self.client.post( reverse('create_item'), - json.dumps({'parent_location': vert_location, + json.dumps({'parent_location': self.seq_location, 'category': 'problem', 'boilerplate': template_id }), @@ -210,3 +212,32 @@ class TestEditItem(CourseTestCase): ) problem = modulestore('draft').get_item(self.problems[0]) self.assertIsNone(problem.markdown) + + def test_date_fields(self): + """ + Test setting due & start dates on sequential + """ + sequential = modulestore().get_item(self.seq_location) + self.assertIsNone(sequential.lms.due) + self.client.post( + reverse('save_item'), + json.dumps({ + 'id': self.seq_location, + 'metadata': {'due': '2010-11-22T04:00Z'} + }), + content_type="application/json" + ) + sequential = modulestore().get_item(self.seq_location) + self.assertEqual(sequential.lms.due, datetime.datetime(2010, 11, 22, 4, 0, tzinfo=UTC)) + self.client.post( + reverse('save_item'), + 
json.dumps({ + 'id': self.seq_location, + 'metadata': {'start': '2010-09-12T14:00Z'} + }), + content_type="application/json" + ) + sequential = modulestore().get_item(self.seq_location) + self.assertEqual(sequential.lms.due, datetime.datetime(2010, 11, 22, 4, 0, tzinfo=UTC)) + self.assertEqual(sequential.lms.start, datetime.datetime(2010, 9, 12, 14, 0, tzinfo=UTC)) + From a478fa9ff752bd5296aa1fd359442dd9c0791937 Mon Sep 17 00:00:00 2001 From: Vik Paruchuri Date: Thu, 18 Jul 2013 16:56:38 -0400 Subject: [PATCH 27/31] Change error class --- common/lib/xmodule/xmodule/peer_grading_module.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index ff5d41dd8a..09cac9a6b4 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -9,6 +9,7 @@ from .capa_module import ComplexEncoder from .x_module import XModule from xmodule.raw_module import RawDescriptor from xmodule.modulestore.django import modulestore +from xmodule.modulestore.exceptions import ItemNotFoundError from .timeinfo import TimeInfo from xblock.core import Dict, String, Scope, Boolean, Integer, Float from xmodule.fields import Date @@ -100,7 +101,7 @@ class PeerGradingModule(PeerGradingFields, XModule): if self.use_for_single_location: try: self.linked_problem = modulestore().get_instance(self.system.course_id, self.link_to_location) - except Exception: + except ItemNotFoundError: log.error("Linked location {0} for peer grading module {1} does not exist".format( self.link_to_location, self.location)) raise From 89c48f4a3047ca6cf0985b92e004fb584dd22a30 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Thu, 18 Jul 2013 08:49:05 -0400 Subject: [PATCH 28/31] Use CodeJail with FSIZE support, and default to 50k --- lms/envs/common.py | 2 ++ requirements/edx/github.txt | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git 
a/lms/envs/common.py b/lms/envs/common.py index 8b2a1f28cf..0859dbc3f2 100644 --- a/lms/envs/common.py +++ b/lms/envs/common.py @@ -308,6 +308,8 @@ CODE_JAIL = { 'limits': { # How many CPU seconds can jailed code use? 'CPU': 1, + # How large a file can jailed code write? + 'FSIZE': 50000, }, } diff --git a/requirements/edx/github.txt b/requirements/edx/github.txt index 5d1a505ace..64231bc6b7 100644 --- a/requirements/edx/github.txt +++ b/requirements/edx/github.txt @@ -9,5 +9,5 @@ # Our libraries: -e git+https://github.com/edx/XBlock.git@3974e999fe853a37dfa6fadf0611289434349409#egg=XBlock --e git+https://github.com/edx/codejail.git@0a1b468#egg=codejail +-e git+https://github.com/edx/codejail.git@c08967fb44d1bcdb259d3ec58812e3ac592539c2#egg=codejail -e git+https://github.com/edx/diff-cover.git@v0.1.3#egg=diff_cover From 95302b02a8269781c0311ba5ab615bc73c1f5789 Mon Sep 17 00:00:00 2001 From: Chris Dodge Date: Thu, 18 Jul 2013 21:12:49 -0400 Subject: [PATCH 29/31] I18N work seems to have a very prominent space in the landing page --- cms/templates/howitworks.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cms/templates/howitworks.html b/cms/templates/howitworks.html index a791f5d1fa..8de260a69b 100644 --- a/cms/templates/howitworks.html +++ b/cms/templates/howitworks.html @@ -11,7 +11,7 @@
## "edX Studio" should not be translated -

${_('Welcome to')}

+

${_('Welcome to')}

${_("Studio helps manage your courses online, so you can focus on teaching them")}

From 67f6d6c4d1b9bddf7117f42971dfbddad7862246 Mon Sep 17 00:00:00 2001 From: Alexander Kryklia Date: Fri, 19 Jul 2013 12:05:17 +0300 Subject: [PATCH 30/31] Fixes code_jail imports and draganddrop yaml --- .../templates/problem/drag_and_drop.yaml | 32 +++++++++---------- requirements/edx/local.txt | 1 + 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/common/lib/xmodule/xmodule/templates/problem/drag_and_drop.yaml b/common/lib/xmodule/xmodule/templates/problem/drag_and_drop.yaml index 4b77c9a249..678b75716b 100644 --- a/common/lib/xmodule/xmodule/templates/problem/drag_and_drop.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/drag_and_drop.yaml @@ -20,22 +20,22 @@ data: | - correct_answer = { - '1': [[70, 150], 121], - '6': [[190, 150], 121], - '8': [[190, 150], 121], - '2': [[310, 150], 121], - '9': [[310, 150], 121], - '11': [[310, 150], 121], - '4': [[420, 150], 121], - '7': [[420, 150], 121], - '3': [[550, 150], 121], - '5': [[550, 150], 121], - '10': [[550, 150], 121]} - if draganddrop.grade(submission[0], correct_answer): - correct = ['correct'] - else: - correct = ['incorrect'] + correct_answer = { + '1': [[70, 150], 121], + '6': [[190, 150], 121], + '8': [[190, 150], 121], + '2': [[310, 150], 121], + '9': [[310, 150], 121], + '11': [[310, 150], 121], + '4': [[420, 150], 121], + '7': [[420, 150], 121], + '3': [[550, 150], 121], + '5': [[550, 150], 121], + '10': [[550, 150], 121]} + if draganddrop.grade(submission[0], correct_answer): + correct = ['correct'] + else: + correct = ['incorrect'] diff --git a/requirements/edx/local.txt b/requirements/edx/local.txt index f5ba60e21b..04a1f7f2c6 100644 --- a/requirements/edx/local.txt +++ b/requirements/edx/local.txt @@ -2,6 +2,7 @@ -e common/lib/calc -e common/lib/capa -e common/lib/chem +-e common/lib/sandbox-packages -e common/lib/symmath -e common/lib/xmodule -e . 
From eee1d6f593fc798bdbd97b65c2dc02853bfc1771 Mon Sep 17 00:00:00 2001 From: Don Mitchell Date: Fri, 19 Jul 2013 09:15:29 -0400 Subject: [PATCH 31/31] Bug report: remove tag https://edx-wiki.atlassian.net/browse/STUD-428 --- .../lib/xmodule/xmodule/templates/problem/problem_with_hint.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/common/lib/xmodule/xmodule/templates/problem/problem_with_hint.yaml b/common/lib/xmodule/xmodule/templates/problem/problem_with_hint.yaml index 0d93cd3c5e..aa1000a93a 100644 --- a/common/lib/xmodule/xmodule/templates/problem/problem_with_hint.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/problem_with_hint.yaml @@ -48,7 +48,6 @@ metadata: \edXabox{type="custom" cfn='test_str' expect='python' hintfn='hint_fn'} markdown: !!null data: | -