From 7df9ab9c149ba1d70f29ea800f7a851fc39be42f Mon Sep 17 00:00:00 2001 From: Diana Huang Date: Fri, 17 Apr 2015 13:09:34 -0400 Subject: [PATCH 1/7] Extend the Score namedtuple to know about module locations. --- common/lib/xmodule/xmodule/graders.py | 24 +++++++---- .../lib/xmodule/xmodule/tests/test_graders.py | 42 +++++++++---------- lms/djangoapps/courseware/grades.py | 16 +++++-- 3 files changed, 49 insertions(+), 33 deletions(-) diff --git a/common/lib/xmodule/xmodule/graders.py b/common/lib/xmodule/xmodule/graders.py index b5e0e1ba9f..8d73738b28 100644 --- a/common/lib/xmodule/xmodule/graders.py +++ b/common/lib/xmodule/xmodule/graders.py @@ -10,7 +10,7 @@ log = logging.getLogger("edx.courseware") # This is a tuple for holding scores, either from problems or sections. # Section either indicates the name of the problem or the name of the section -Score = namedtuple("Score", "earned possible graded section") +Score = namedtuple("Score", "earned possible graded section module_id") def aggregate_scores(scores, section_name="summary"): @@ -27,15 +27,21 @@ def aggregate_scores(scores, section_name="summary"): total_possible = sum(score.possible for score in scores) #regardless of whether or not it is graded - all_total = Score(total_correct, - total_possible, - False, - section_name) + all_total = Score( + total_correct, + total_possible, + False, + section_name, + None + ) #selecting only graded things - graded_total = Score(total_correct_graded, - total_possible_graded, - True, - section_name) + graded_total = Score( + total_correct_graded, + total_possible_graded, + True, + section_name, + None + ) return all_total, graded_total diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py b/common/lib/xmodule/xmodule/tests/test_graders.py index 1a9ba50dc4..cc56173aa5 100644 --- a/common/lib/xmodule/xmodule/tests/test_graders.py +++ b/common/lib/xmodule/xmodule/tests/test_graders.py @@ -13,23 +13,23 @@ class GradesheetTest(unittest.TestCase): Score.__sub__ = 
lambda me, other: (me.earned - other.earned) + (me.possible - other.possible) all_total, graded_total = aggregate_scores(scores) - self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary")) - self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary")) + self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary", module_id=None)) + self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary", module_id=None)) - scores.append(Score(earned=0, possible=5, graded=False, section="summary")) + scores.append(Score(earned=0, possible=5, graded=False, section="summary", module_id=None)) all_total, graded_total = aggregate_scores(scores) - self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary")) - self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary")) + self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary", module_id=None)) + self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary", module_id=None)) - scores.append(Score(earned=3, possible=5, graded=True, section="summary")) + scores.append(Score(earned=3, possible=5, graded=True, section="summary", module_id=None)) all_total, graded_total = aggregate_scores(scores) - self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary")) - self.assertAlmostEqual(graded_total, Score(earned=3, possible=5, graded=True, section="summary")) + self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary", module_id=None)) + self.assertAlmostEqual(graded_total, Score(earned=3, possible=5, graded=True, section="summary", module_id=None)) - scores.append(Score(earned=2, possible=5, graded=True, section="summary")) + scores.append(Score(earned=2, possible=5, graded=True, section="summary", module_id=None)) all_total, 
graded_total = aggregate_scores(scores) - self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary")) - self.assertAlmostEqual(graded_total, Score(earned=5, possible=10, graded=True, section="summary")) + self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary", module_id=None)) + self.assertAlmostEqual(graded_total, Score(earned=5, possible=10, graded=True, section="summary", module_id=None)) class GraderTest(unittest.TestCase): @@ -45,19 +45,19 @@ class GraderTest(unittest.TestCase): } test_gradesheet = { - 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'), - Score(earned=16, possible=16.0, graded=True, section='hw2')], + 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1', module_id=None), + Score(earned=16, possible=16.0, graded=True, section='hw2', module_id=None)], # The dropped scores should be from the assignments that don't exist yet - 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped - Score(earned=1, possible=1.0, graded=True, section='lab2'), - Score(earned=1, possible=1.0, graded=True, section='lab3'), - Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped - Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped - Score(earned=6, possible=7.0, graded=True, section='lab6'), - Score(earned=5, possible=6.0, graded=True, section='lab7')], + 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1', module_id=None), # Dropped + Score(earned=1, possible=1.0, graded=True, section='lab2', module_id=None), + Score(earned=1, possible=1.0, graded=True, section='lab3', module_id=None), + Score(earned=5, possible=25.0, graded=True, section='lab4', module_id=None), # Dropped + Score(earned=3, possible=4.0, graded=True, section='lab5', module_id=None), # Dropped + Score(earned=6, possible=7.0, graded=True, section='lab6', module_id=None), + Score(earned=5, possible=6.0, graded=True, 
section='lab7', module_id=None)], - 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ], + 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam", module_id=None), ], } def test_single_section_grader(self): diff --git a/lms/djangoapps/courseware/grades.py b/lms/djangoapps/courseware/grades.py index a1444be4c8..79caf5b765 100644 --- a/lms/djangoapps/courseware/grades.py +++ b/lms/djangoapps/courseware/grades.py @@ -228,13 +228,15 @@ def _grade(student, request, course, keep_raw_scores): #We simply cannot grade a problem that is 12/0, because we might need it as a percentage graded = False - scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default)) + scores.append( + Score(correct, total, graded, module_descriptor.display_name_with_default, module_descriptor.location) + ) _, graded_total = graders.aggregate_scores(scores, section_name) if keep_raw_scores: raw_scores += scores else: - graded_total = Score(0.0, 1.0, True, section_name) + graded_total = Score(0.0, 1.0, True, section_name, None) #Add the graded total to totaled_scores if graded_total.possible > 0: @@ -364,7 +366,15 @@ def _progress_summary(student, request, course): if correct is None and total is None: continue - scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default)) + scores.append( + Score( + correct, + total, + graded, + module_descriptor.display_name_with_default, + module_descriptor.location + ) + ) scores.reverse() section_total, _ = graders.aggregate_scores( From 9c32b1e878f804b5eae86d28841f728aa082fba1 Mon Sep 17 00:00:00 2001 From: Diana Huang Date: Tue, 21 Apr 2015 14:57:56 -0400 Subject: [PATCH 2/7] Refactor course_structure_api to have separate API Layer. 
--- lms/djangoapps/course_structure_api/v0/api.py | 94 +++++++++++++++++++ .../course_structure_api/v0/errors.py | 9 ++ .../course_structure_api/v0/views.py | 59 +++++++----- 3 files changed, 140 insertions(+), 22 deletions(-) create mode 100644 lms/djangoapps/course_structure_api/v0/api.py create mode 100644 lms/djangoapps/course_structure_api/v0/errors.py diff --git a/lms/djangoapps/course_structure_api/v0/api.py b/lms/djangoapps/course_structure_api/v0/api.py new file mode 100644 index 0000000000..5a0523f6b9 --- /dev/null +++ b/lms/djangoapps/course_structure_api/v0/api.py @@ -0,0 +1,94 @@ +""" +API implementation of the Course Structure API for Python code. + +Note: The course list and course detail functionality isn't currently supported here because of the tricky interactions between DRF and the code. +Most of that information is available by accessing the course objects directly. +""" + +from course_structure_api.v0 import serializers +from course_structure_api.v0.errors import CourseNotFoundError, CourseStructureNotAvailableError +from openedx.core.djangoapps.content.course_structures import models, tasks +from courseware import courses + + +def _retrieve_course(course_key): + """Retrieves the course for the given course key. + + Args: + course_key: The CourseKey for the course we'd like to retrieve. + Returns: + the course that matches the CourseKey + Raises: + CourseNotFoundError + + """ + try: + course = courses.get_course(course_key) + return course + except ValueError: + raise CourseNotFoundError + + +def course_structure(course_key): + """ + Retrieves the entire course structure, including information about all the blocks used in the course. + + Args: + course_key: the CourseKey of the course we'd like to retrieve. + Returns: + The serialized output of the course structure: + * root: The ID of the root node of the course structure. + + * blocks: A dictionary that maps block IDs to a collection of + information about each block. 
Each block contains the following + fields. + + * id: The ID of the block. + + * type: The type of block. Possible values include sequential, + vertical, html, problem, video, and discussion. The type can also be + the name of a custom type of block used for the course. + + * display_name: The display name configured for the block. + + * graded: Whether or not the sequential or problem is graded. The + value is true or false. + + * format: The assignment type. + + * children: If the block has child blocks, a list of IDs of the child + blocks. + Raises: + CourseStructureNotAvailableError, CourseNotFoundError + """ + course = _retrieve_course(course_key) + try: + course_structure = models.CourseStructure.objects.get(course_id=course.id) + return serializers.CourseStructureSerializer(course_structure.structure).data + except models.CourseStructure.DoesNotExist: + # If we don't have data stored, generate it and return an error. + tasks.update_course_structure.delay(unicode(course_key)) + raise CourseStructureNotAvailableError + + +def course_grading_policy(course_key): + """ + Retrieves the course grading policy. + + Args: + course_key: CourseKey the corresponds to the course we'd like to know grading policy information about. + Returns: + The serialized version of the course grading policy containing the following information: + * assignment_type: The type of the assignment, as configured by course + staff. For example, course staff might make the assignment types Homework, + Quiz, and Exam. + + * count: The number of assignments of the type. + + * dropped: Number of assignments of the type that are dropped. + + * weight: The weight, or effect, of the assignment type on the learner's + final grade. 
+ """ + course = _retrieve_course(course_key) + return serializers.GradingPolicySerializer(course.raw_grader).data diff --git a/lms/djangoapps/course_structure_api/v0/errors.py b/lms/djangoapps/course_structure_api/v0/errors.py new file mode 100644 index 0000000000..91a3f24f4d --- /dev/null +++ b/lms/djangoapps/course_structure_api/v0/errors.py @@ -0,0 +1,9 @@ + +class CourseNotFoundError(Exception): + """ The course was not found. """ + pass + + +class CourseStructureNotAvailableError(Exception): + """ The course structure still needs to be generated. """ + pass diff --git a/lms/djangoapps/course_structure_api/v0/views.py b/lms/djangoapps/course_structure_api/v0/views.py index eb6625fdc1..fe9156c6ae 100644 --- a/lms/djangoapps/course_structure_api/v0/views.py +++ b/lms/djangoapps/course_structure_api/v0/views.py @@ -11,7 +11,8 @@ from rest_framework.response import Response from xmodule.modulestore.django import modulestore from opaque_keys.edx.keys import CourseKey -from course_structure_api.v0 import serializers +from course_structure_api.v0 import api, serializers +from course_structure_api.v0.errors import CourseNotFoundError, CourseStructureNotAvailableError from courseware import courses from courseware.access import has_access from openedx.core.djangoapps.content.course_structures import models, tasks @@ -40,13 +41,37 @@ class CourseViewMixin(object): course_id = self.kwargs.get('course_id') course_key = CourseKey.from_string(course_id) course = courses.get_course(course_key) - - self.check_course_permissions(self.request.user, course) + self.check_course_permissions(self.request.user, course_key) return course except ValueError: raise Http404 + @staticmethod + def course_check(func): + """Decorator responsible for catching errors finding and returning a 404 if the user does not have access + to the API function. 
+ + :param func: function to be wrapped + :returns: the wrapped function + """ + def func_wrapper(self, *args, **kwargs): + """Wrapper function for this decorator. + + :param *args: the arguments passed into the function + :param **kwargs: the keyword arguments passed into the function + :returns: the result of the wrapped function + """ + try: + course_id = self.kwargs.get('course_id') + self.course_key = CourseKey.from_string(course_id) + self.check_course_permissions(self.request.user, self.course_key) + return func(self, *args, **kwargs) + except CourseNotFoundError: + raise Http404 + + return func_wrapper + def user_can_access_course(self, user, course): """ Determines if the user is staff or an instructor for the course. @@ -185,7 +210,6 @@ class CourseDetail(CourseViewMixin, RetrieveAPIView): * end: The course end date. If course end date is not specified, the value is null. """ - serializer_class = serializers.CourseSerializer def get_object(self, queryset=None): @@ -227,22 +251,16 @@ class CourseStructure(CourseViewMixin, RetrieveAPIView): * children: If the block has child blocks, a list of IDs of the child blocks. """ - serializer_class = serializers.CourseStructureSerializer - course = None - def retrieve(self, request, *args, **kwargs): + @CourseViewMixin.course_check + def get(self, request, course_id=None): try: - return super(CourseStructure, self).retrieve(request, *args, **kwargs) - except models.CourseStructure.DoesNotExist: - # If we don't have data stored, generate it and return a 503. - tasks.update_course_structure.delay(unicode(self.course.id)) + return Response(api.course_structure(self.course_key)) + except CourseStructureNotAvailableError: + # If we don't have data stored, we will try to regenerate it, so + # return a 503 and as them to retry in 2 minutes. return Response(status=503, headers={'Retry-After': '120'}) - def get_object(self, queryset=None): - # Make sure the course exists and the user has permissions to view it. 
- self.course = self.get_course_or_404() - course_structure = models.CourseStructure.objects.get(course_id=self.course.id) - return course_structure.structure class CourseGradingPolicy(CourseViewMixin, ListAPIView): @@ -269,11 +287,8 @@ class CourseGradingPolicy(CourseViewMixin, ListAPIView): final grade. """ - serializer_class = serializers.GradingPolicySerializer allow_empty = False - def get_queryset(self): - course = self.get_course_or_404() - - # Return the raw data. The serializer will handle the field mappings. - return course.raw_grader + @CourseViewMixin.course_check + def get(self, request, course_id=None): + return Response(api.course_grading_policy(self.course_key)) From 9269ec3b00f67e7ddf332fa96ed89efb9c7eba0d Mon Sep 17 00:00:00 2001 From: Daniel Friedman Date: Wed, 22 Apr 2015 16:06:32 -0400 Subject: [PATCH 3/7] Add new instructor task for weighted problems --- .../course_structure_api/v0/views.py | 1 - lms/djangoapps/courseware/grades.py | 6 +- .../courseware/tests/test_grades.py | 6 +- .../django_comment_client/tests/utils.py | 9 + lms/djangoapps/instructor/views/api.py | 28 +++ lms/djangoapps/instructor/views/api_urls.py | 2 + .../instructor/views/instructor_dashboard.py | 1 + lms/djangoapps/instructor_task/api.py | 13 ++ lms/djangoapps/instructor_task/tasks.py | 20 +++ .../instructor_task/tasks_helper.py | 103 +++++++++++ .../instructor_task/tests/test_base.py | 23 ++- .../instructor_task/tests/test_integration.py | 2 +- .../tests/test_tasks_helper.py | 166 ++++++++++++++++++ .../instructor_dashboard/data_download.coffee | 11 +- .../instructor_dashboard_2/data_download.html | 2 + .../content/course_structures/models.py | 24 +++ .../content/course_structures/tests.py | 34 ++++ 17 files changed, 433 insertions(+), 18 deletions(-) diff --git a/lms/djangoapps/course_structure_api/v0/views.py b/lms/djangoapps/course_structure_api/v0/views.py index fe9156c6ae..d88d4138b1 100644 --- a/lms/djangoapps/course_structure_api/v0/views.py +++ 
b/lms/djangoapps/course_structure_api/v0/views.py @@ -262,7 +262,6 @@ class CourseStructure(CourseViewMixin, RetrieveAPIView): return Response(status=503, headers={'Retry-After': '120'}) - class CourseGradingPolicy(CourseViewMixin, ListAPIView): """ **Use Case** diff --git a/lms/djangoapps/courseware/grades.py b/lms/djangoapps/courseware/grades.py index 79caf5b765..02d608040d 100644 --- a/lms/djangoapps/courseware/grades.py +++ b/lms/djangoapps/courseware/grades.py @@ -225,7 +225,7 @@ def _grade(student, request, course, keep_raw_scores): graded = module_descriptor.graded if not total > 0: - #We simply cannot grade a problem that is 12/0, because we might need it as a percentage + # We simply cannot grade a problem that is 12/0, because we might need it as a percentage graded = False scores.append( @@ -494,7 +494,7 @@ def manual_transaction(): transaction.commit() -def iterate_grades_for(course_or_id, students): +def iterate_grades_for(course_or_id, students, keep_raw_scores=False): """Given a course_id and an iterable of students (User), yield a tuple of: (student, gradeset, err_msg) for every student enrolled in the course. @@ -531,7 +531,7 @@ def iterate_grades_for(course_or_id, students): # It's not pretty, but untangling that is currently beyond the # scope of this feature. 
request.session = {} - gradeset = grade(student, request, course) + gradeset = grade(student, request, course, keep_raw_scores) yield student, gradeset, "" except Exception as exc: # pylint: disable=broad-except # Keep marching on even if this student couldn't be graded for diff --git a/lms/djangoapps/courseware/tests/test_grades.py b/lms/djangoapps/courseware/tests/test_grades.py index c6825c99be..f0d46c5d02 100644 --- a/lms/djangoapps/courseware/tests/test_grades.py +++ b/lms/djangoapps/courseware/tests/test_grades.py @@ -68,7 +68,7 @@ class TestGradeIteration(ModuleStoreTestCase): def test_all_empty_grades(self): """No students have grade entries""" - all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students) + all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students, keep_raw_scores=True) self.assertEqual(len(all_errors), 0) for gradeset in all_gradesets.values(): self.assertIsNone(gradeset['grade']) @@ -107,7 +107,7 @@ class TestGradeIteration(ModuleStoreTestCase): self.assertTrue(all_gradesets[student5]) ################################# Helpers ################################# - def _gradesets_and_errors_for(self, course_id, students): + def _gradesets_and_errors_for(self, course_id, students, keep_raw_scores=False): """Simple helper method to iterate through student grades and give us two dictionaries -- one that has all students and their respective gradesets, and one that has only students that could not be graded and @@ -115,7 +115,7 @@ class TestGradeIteration(ModuleStoreTestCase): students_to_gradesets = {} students_to_errors = {} - for student, gradeset, err_msg in iterate_grades_for(course_id, students): + for student, gradeset, err_msg in iterate_grades_for(course_id, students, keep_raw_scores): students_to_gradesets[student] = gradeset if err_msg: students_to_errors[student] = err_msg diff --git a/lms/djangoapps/django_comment_client/tests/utils.py 
b/lms/djangoapps/django_comment_client/tests/utils.py index 084c78cbfd..9b73ff4dd4 100644 --- a/lms/djangoapps/django_comment_client/tests/utils.py +++ b/lms/djangoapps/django_comment_client/tests/utils.py @@ -76,6 +76,15 @@ class ContentGroupTestCase(ModuleStoreTestCase): scheme_id='cohort' ) ], + grading_policy={ + "GRADER": [{ + "type": "Homework", + "min_count": 1, + "drop_count": 0, + "short_label": "HW", + "weight": 1.0 + }] + }, cohort_config={'cohorted': True}, discussion_topics={} ) diff --git a/lms/djangoapps/instructor/views/api.py b/lms/djangoapps/instructor/views/api.py index 628ed638ee..78fd631cde 100644 --- a/lms/djangoapps/instructor/views/api.py +++ b/lms/djangoapps/instructor/views/api.py @@ -1951,6 +1951,34 @@ def calculate_grades_csv(request, course_id): }) +@ensure_csrf_cookie +@cache_control(no_cache=True, no_store=True, must_revalidate=True) +@require_level('staff') +def problem_grade_report(request, course_id): + """ + Request a CSV showing students' weighted grades for all problems in the + course. + + AlreadyRunningError is raised if the course's grades are already being + updated. + """ + course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) + try: + instructor_task.api.submit_problem_grade_report(request, course_key) + # TODO: verify copy with documentation team + success_status = _("Your weighted problem report is being generated! " + "You can view the status of the generation task in the 'Pending Instructor Tasks' section.") + return JsonResponse({"status": success_status}) + except AlreadyRunningError: + # TODO: verify copy with documentation team + already_running_status = _("A weighted problem generation task is already in progress. " + "Check the 'Pending Instructor Tasks' table for the status of the task. 
" + "When completed, the report will be available for download in the table below.") + return JsonResponse({ + "status": already_running_status + }) + + @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) @require_level('staff') diff --git a/lms/djangoapps/instructor/views/api_urls.py b/lms/djangoapps/instructor/views/api_urls.py index fc6a05aaef..0991108ba2 100644 --- a/lms/djangoapps/instructor/views/api_urls.py +++ b/lms/djangoapps/instructor/views/api_urls.py @@ -87,6 +87,8 @@ urlpatterns = patterns( 'instructor.views.api.list_report_downloads', name="list_report_downloads"), url(r'calculate_grades_csv$', 'instructor.views.api.calculate_grades_csv', name="calculate_grades_csv"), + url(r'problem_grade_report$', + 'instructor.views.api.problem_grade_report', name="problem_grade_report"), # Registration Codes.. url(r'get_registration_codes$', diff --git a/lms/djangoapps/instructor/views/instructor_dashboard.py b/lms/djangoapps/instructor/views/instructor_dashboard.py index 5b44f8ace2..c91a8d3855 100644 --- a/lms/djangoapps/instructor/views/instructor_dashboard.py +++ b/lms/djangoapps/instructor/views/instructor_dashboard.py @@ -420,6 +420,7 @@ def _section_data_download(course, access): 'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}), 'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}), 'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}), + 'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}), } return section_data diff --git a/lms/djangoapps/instructor_task/api.py b/lms/djangoapps/instructor_task/api.py index 51aa63c8e6..8c16cef652 100644 --- a/lms/djangoapps/instructor_task/api.py +++ b/lms/djangoapps/instructor_task/api.py @@ -19,6 +19,7 @@ from instructor_task.tasks import ( delete_problem_state, 
send_bulk_course_email, calculate_grades_csv, + calculate_problem_grade_report, calculate_students_features_csv, cohort_students, ) @@ -334,6 +335,18 @@ def submit_calculate_grades_csv(request, course_key): return submit_task(request, task_type, task_class, course_key, task_input, task_key) +def submit_problem_grade_report(request, course_key): + """ + Submits a task to generate a CSV grade report containing weighted problem + values. + """ + task_type = 'grade_problems' + task_class = calculate_problem_grade_report + task_input = {} + task_key = "" + return submit_task(request, task_type, task_class, course_key, task_input, task_key) + + def submit_calculate_students_features_csv(request, course_key, features): """ Submits a task to generate a CSV containing student profile info. diff --git a/lms/djangoapps/instructor_task/tasks.py b/lms/djangoapps/instructor_task/tasks.py index 9e3ae7d6ae..16c1021e20 100644 --- a/lms/djangoapps/instructor_task/tasks.py +++ b/lms/djangoapps/instructor_task/tasks.py @@ -35,6 +35,7 @@ from instructor_task.tasks_helper import ( reset_attempts_module_state, delete_problem_module_state, upload_grades_csv, + upload_problem_grade_report, upload_students_csv, cohort_students_and_upload ) @@ -155,6 +156,25 @@ def calculate_grades_csv(entry_id, xmodule_instance_args): return run_main_task(entry_id, task_fn, action_name) +# TODO: GRADES_DOWNLOAD_ROUTING_KEY is the high mem queue. Do we know we need it? +@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable +def calculate_problem_grade_report(entry_id, xmodule_instance_args): + """ + Generate a CSV for a course containing all students' weighted problem + grades and push the results to an S3 bucket for download. + """ + # Translators: This is a past-tense verb that is inserted into task progress messages as {action}. + # TODO: can this be the same as the `calculate_grades_csv` action_name? 
+ action_name = ugettext_noop('graded') + TASK_LOG.info( + u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution", + xmodule_instance_args.get('task_id'), entry_id, action_name + ) + + task_fn = partial(upload_problem_grade_report, xmodule_instance_args) + return run_main_task(entry_id, task_fn, action_name) + + @task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable def calculate_students_features_csv(entry_id, xmodule_instance_args): """ diff --git a/lms/djangoapps/instructor_task/tasks_helper.py b/lms/djangoapps/instructor_task/tasks_helper.py index 7dcc967394..eb5d99a814 100644 --- a/lms/djangoapps/instructor_task/tasks_helper.py +++ b/lms/djangoapps/instructor_task/tasks_helper.py @@ -4,7 +4,10 @@ running state of a course. """ import json +from collections import OrderedDict from datetime import datetime +from eventtracking import tracker +from itertools import chain from time import time import unicodecsv import logging @@ -34,6 +37,7 @@ from instructor_task.models import ReportStore, InstructorTask, PROGRESS from lms.djangoapps.lms_xblock.runtime import LmsPartitionService from openedx.core.djangoapps.course_groups.cohorts import get_cohort from openedx.core.djangoapps.course_groups.models import CourseUserGroup +from openedx.core.djangoapps.content.course_structures.models import CourseStructure from opaque_keys.edx.keys import UsageKey from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted from student.models import CourseEnrollment @@ -705,6 +709,105 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, return task_progress.update_task_state(extra_meta=current_step) +def _order_problems(blocks): + """ + Sort the problems by the assignment type and assignment that it belongs to. 
+ """ + problems = OrderedDict() + assignments = dict() + # First, sort out all the blocks into their correct assignments and all the + # assignments into their correct types. + for block in blocks: + # Put the assignments in order into the assignments list. + if blocks[block]['block_type'] == 'sequential': + block_format = blocks[block]['format'] + if block_format not in assignments: + assignments[block_format] = OrderedDict() + assignments[block_format][block] = list() + + # Put the problems into the correct order within their assignment. + if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True: + current = blocks[block]['parent'] + # crawl up the tree for the sequential block + while blocks[current]['block_type'] != 'sequential': + current = blocks[current]['parent'] + + current_format = blocks[current]['format'] + assignments[current_format][current].append(block) + + # Now that we have a sorting and an order for the assignments and problems, + # iterate through them in order to generate the header row. + for assignment_type in assignments: + for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1): + for problem in assignments[assignment_type][assignment]: + header_name = "{assignment_type} {assignment_index}: {assignment_name} - {block}".format( + block=blocks[problem]['display_name'], + assignment_type=assignment_type, + assignment_index=assignment_index, + assignment_name=blocks[assignment]['display_name'] + ) + problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"] + + return problems + + +def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): + """ + Generate a CSV containing all students' problem grades within a given + `course_id`. 
+ """ + start_time = time() + start_date = datetime.now(UTC) + status_interval = 100 + enrolled_students = CourseEnrollment.users_enrolled_in(course_id) + task_progress = TaskProgress(action_name, enrolled_students.count(), start_time) + + # This struct encapsulates both the display names of each static item in + # the header row as values as well as the django User field names of those + # items as the keys. It is structured in this way to keep the values + # related. + header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')]) + + try: + course_structure = CourseStructure.objects.get(course_id=course_id) + blocks = course_structure.ordered_blocks + problems = _order_problems(blocks) + except CourseStructure.DoesNotExist: + return task_progress.update_task_state(extra_meta={'step': 'Generating course structure. Please refresh and try again.'}) + + # Just generate the static fields for now. + rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))] + current_step = {'step': 'Calculating Grades'} + + for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True): + student_fields = [getattr(student, field_name) for field_name in header_row] + final_grade = gradeset['percent'] + # Only consider graded problems + problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded} + earned_possible_values = list() + for problem_id in problems: + try: + problem_score = problem_scores[problem_id] + earned_possible_values.append([problem_score.earned, problem_score.possible]) + except KeyError: + # The student has not been graded on this problem. For example, + # iterate_grades_for skips problems that students have never + # seen in order to speed up report generation. It could also be + # the case that the student does not have access to it (e.g. A/B + # test or cohorted courseware). 
+ earned_possible_values.append(['N/A', 'N/A']) + rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values))) + + task_progress.attempted += 1 + task_progress.succeeded += 1 + if task_progress.attempted % status_interval == 0: + task_progress.update_task_state(extra_meta=current_step) + + # Perform the upload + upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date) + return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'}) + + def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name): """ For a given `course_id`, generate a CSV file containing profile diff --git a/lms/djangoapps/instructor_task/tests/test_base.py b/lms/djangoapps/instructor_task/tests/test_base.py index f83d79a7c3..d8a905ba2a 100644 --- a/lms/djangoapps/instructor_task/tests/test_base.py +++ b/lms/djangoapps/instructor_task/tests/test_base.py @@ -127,7 +127,9 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase) if course_factory_kwargs is not None: course_args.update(course_factory_kwargs) self.course = CourseFactory.create(**course_args) + self.add_course_content() + def add_course_content(self): # Add a chapter to the course chapter = ItemFactory.create(parent_location=self.course.location, display_name=TEST_SECTION_NAME) @@ -141,12 +143,13 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase) @staticmethod def get_user_email(username): """Generate email address based on username""" - return '{0}@test.com'.format(username) + return u'{0}@test.com'.format(username) def login_username(self, username): """Login the user, given the `username`.""" if self.current_user != username: - self.login(InstructorTaskCourseTestCase.get_user_email(username), "test") + user_email = User.objects.get(username=username).email + self.login(user_email, "test") self.current_user = username def _create_user(self, username, email=None, 
is_staff=False, mode='honor'): @@ -190,16 +193,18 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): the setup of a course and problem in order to access StudentModule state. """ @staticmethod - def problem_location(problem_url_name): + def problem_location(problem_url_name, course_key=None): """ Create an internal location for a test problem. """ if "i4x:" in problem_url_name: return Location.from_deprecated_string(problem_url_name) + elif course_key: + return course_key.make_usage_key('problem', problem_url_name) else: return TEST_COURSE_KEY.make_usage_key('problem', problem_url_name) - def define_option_problem(self, problem_url_name, parent=None): + def define_option_problem(self, problem_url_name, parent=None, **kwargs): """Create the problem definition so the answer is Option 1""" if parent is None: parent = self.problem_section @@ -213,7 +218,8 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): parent=parent, category="problem", display_name=str(problem_url_name), - data=problem_xml) + data=problem_xml, + **kwargs) def redefine_option_problem(self, problem_url_name): """Change the problem definition so the answer is Option 2""" @@ -249,8 +255,9 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): # Note that this is a capa-specific convention. The form is a version of the problem's # URL, modified so that it can be easily stored in html, prepended with "input-" and # appended with a sequence identifier for the particular response the input goes to. 
- return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(), - TEST_COURSE_NUMBER.replace('.', '_'), + course_key = self.course.id + return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(course_key.org, + course_key.course.replace('.', '_'), problem_url_name, response_id) # make sure that the requested user is logged in, so that the ajax call works @@ -260,7 +267,7 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): modx_url = reverse('xblock_handler', kwargs={ 'course_id': self.course.id.to_deprecated_string(), 'usage_id': quote_slashes( - InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string() + InstructorTaskModuleTestCase.problem_location(problem_url_name, self.course.id).to_deprecated_string() ), 'handler': 'xmodule_handler', 'suffix': 'problem_check', diff --git a/lms/djangoapps/instructor_task/tests/test_integration.py b/lms/djangoapps/instructor_task/tests/test_integration.py index d6cb4cfc0e..deaebe7904 100644 --- a/lms/djangoapps/instructor_task/tests/test_integration.py +++ b/lms/djangoapps/instructor_task/tests/test_integration.py @@ -28,7 +28,7 @@ from instructor_task.api import (submit_rescore_problem_for_all_students, submit_reset_problem_attempts_for_all_students, submit_delete_problem_state_for_all_students) from instructor_task.models import InstructorTask -from instructor_task.tasks_helper import upload_grades_csv +from instructor_task.tasks_helper import upload_grades_csv, upload_problem_grade_report from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TestReportMixin, TEST_COURSE_ORG, TEST_COURSE_NUMBER, OPTION_1, OPTION_2) from capa.responsetypes import StudentInputError diff --git a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py index 1ec9d1a7ec..6afaccdb81 100644 --- a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py +++ b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py @@ 
-19,6 +19,7 @@ from instructor_task.tasks_helper import cohort_students_and_upload, upload_grad from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin, InstructorTaskModuleTestCase from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory +from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme from student.tests.factories import UserFactory @@ -26,6 +27,13 @@ from student.models import CourseEnrollment from verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.partitions.partitions import Group, UserPartition +from instructor_task.models import ReportStore +from instructor_task.tasks_helper import ( + cohort_students_and_upload, upload_grades_csv, upload_problem_grade_report, upload_students_csv +) +from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin +from instructor_task.tests.test_integration import TestGradeReportConditionalContent +from django_comment_client.tests.utils import ContentGroupTestCase @ddt.ddt @@ -261,6 +269,164 @@ class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase): self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result) +class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase): + """ + Test that the weighted problem CSV generation works. + """ + def setUp(self): + super(TestProblemGradeReport, self).setUp() + self.maxDiff = None + self.initialize_course() + # Add unicode data to CSV even though unicode usernames aren't + # technically possible in openedx. 
+ self.student_1 = self.create_student(u'üser_1') + self.student_2 = self.create_student(u'üser_2') + self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] + + @patch('instructor_task.tasks_helper._get_current_task') + def test_no_problems(self, _get_current_task): + """ + Verify that we see no grade information for a course with no graded + problems. + """ + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result) + self.verify_rows_in_csv([ + dict(zip(self.csv_header_row, [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.0'])), + dict(zip(self.csv_header_row, [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0'])) + ]) + + @patch('instructor_task.tasks_helper._get_current_task') + def test_single_problem(self, _get_current_task): + vertical = ItemFactory.create( + parent_location=self.problem_section.location, + category='vertical', + metadata={'graded': True}, + display_name='Problem Vertical' + ) + self.define_option_problem('Problem1', parent=vertical) + # generate the course structure + + self.submit_student_answer(self.student_1.username, 'Problem1', ['Option 1']) + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result) + problem_name = 'Homework 1: Problem - Problem1' + header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)'] + self.verify_rows_in_csv([ + dict(zip( + header_row, + [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.01', '1.0', '2.0'] + )), + dict(zip( + header_row, + [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0', 'N/A', 'N/A'] + )) + ]) + + +class 
TestProblemReportSplitTestContent(TestGradeReportConditionalContent): + OPTION_1 = 'Option 1' + OPTION_2 = 'Option 2' + + def setUp(self): + super(TestProblemReportSplitTestContent, self).setUp() + self.problem_a_url = 'problem_a_url' + self.problem_b_url = 'problem_b_url' + self.define_option_problem(self.problem_a_url, parent=self.vertical_a) + self.define_option_problem(self.problem_b_url, parent=self.vertical_b) + + def test_problem_grade_report(self): + """ + Test problems that exist in a problem grade report. + """ + # student A will get 100%, student B will get 50% because + # OPTION_1 is the correct option, and OPTION_2 is the + # incorrect option + self.submit_student_answer(self.student_a.username, self.problem_a_url, [self.OPTION_1, self.OPTION_1]) + self.submit_student_answer(self.student_a.username, self.problem_b_url, [self.OPTION_1, self.OPTION_1]) + + self.submit_student_answer(self.student_b.username, self.problem_a_url, [self.OPTION_1, self.OPTION_2]) + self.submit_student_answer(self.student_b.username, self.problem_b_url, [self.OPTION_1, self.OPTION_2]) + + with patch('instructor_task.tasks_helper._get_current_task'): + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.verify_csv_task_success(result) + + problem_names = ['Homework 1: Problem - problem_a_url', 'Homework 1: Problem - problem_b_url'] + header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] + for problem in problem_names: + header_row += [problem + ' (Earned)', problem + ' (Possible)'] + + self.verify_rows_in_csv([ + dict(zip( + header_row, + [unicode(self.student_a.id), self.student_a.email, self.student_a.username, u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'] + )), + dict(zip( + header_row, + [unicode(self.student_b.id), self.student_b.email, self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'] + )) + ]) + + +class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, InstructorTaskModuleTestCase): + def 
setUp(self): + super(TestProblemReportCohortedContent, self).setUp() + # construct cohorted problems to work on. + self.add_course_content() + vertical = ItemFactory.create( + parent_location=self.problem_section.location, + category='vertical', + metadata={'graded': True}, + display_name='Problem Vertical' + ) + print self.course.user_partitions + self.define_option_problem( + "Problem0", + parent=vertical, + group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]} + ) + self.define_option_problem( + "Problem1", + parent=vertical, + group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]} + ) + + self.submit_student_answer(self.alpha_user.username, 'Problem0', ['Option 1', 'Option 1']) + self.submit_student_answer(self.alpha_user.username, 'Problem1', ['Option 1', 'Option 1']) + self.submit_student_answer(self.beta_user.username, 'Problem0', ['Option 1', 'Option 2']) + self.submit_student_answer(self.beta_user.username, 'Problem1', ['Option 1', 'Option 2']) + + def test_cohort_content(self): + with patch('instructor_task.tasks_helper._get_current_task'): + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result) + + problem_names = ['Homework 1: Problem - Problem0', 'Homework 1: Problem - Problem1'] + header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] + for problem in problem_names: + header_row += [problem + ' (Earned)', problem + ' (Possible)'] + + self.verify_rows_in_csv([ + dict(zip( + header_row, + [unicode(self.staff_user.id), self.staff_user.email, self.staff_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'] + )), + dict(zip( + header_row, + [unicode(self.alpha_user.id), self.alpha_user.email, self.alpha_user.username, u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'] + )), + dict(zip( + header_row, + [unicode(self.beta_user.id), 
self.beta_user.email, self.beta_user.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'] + )), + dict(zip( + header_row, + [unicode(self.non_cohorted_user.id), self.non_cohorted_user.email, self.non_cohorted_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'] + )), + ]) + + @ddt.ddt class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase): """ diff --git a/lms/static/coffee/src/instructor_dashboard/data_download.coffee b/lms/static/coffee/src/instructor_dashboard/data_download.coffee index acc0cd9929..ad9de769f9 100644 --- a/lms/static/coffee/src/instructor_dashboard/data_download.coffee +++ b/lms/static/coffee/src/instructor_dashboard/data_download.coffee @@ -22,6 +22,7 @@ class DataDownload @$list_anon_btn = @$section.find("input[name='list-anon-ids']'") @$grade_config_btn = @$section.find("input[name='dump-gradeconf']'") @$calculate_grades_csv_btn = @$section.find("input[name='calculate-grades-csv']'") + @$problem_grade_report_csv_btn = @$section.find("input[name='problem-grade-report']'") # response areas @$download = @$section.find '.data-download-container' @@ -108,16 +109,22 @@ class DataDownload @$download_display_text.html data['grading_config_summary'] @$calculate_grades_csv_btn.click (e) => + @onClickGradeDownload @$calculate_grades_csv_btn, "Error generating grades. Please try again." + + @$problem_grade_report_csv_btn.click (e) => + @onClickGradeDownload @$problem_grade_report_csv_btn, "Error generating weighted problem report. Please try again." + + onClickGradeDownload: (button, errorMessage) -> # Clear any CSS styling from the request-response areas #$(".msg-confirm").css({"display":"none"}) #$(".msg-error").css({"display":"none"}) @clear_display() - url = @$calculate_grades_csv_btn.data 'endpoint' + url = button.data 'endpoint' $.ajax dataType: 'json' url: url error: (std_ajax_err) => - @$reports_request_response_error.text gettext("Error generating grades. 
Please try again.") + @$reports_request_response_error.text gettext(errorMessage) $(".msg-error").css({"display":"block"}) success: (data) => @$reports_request_response.text data['status'] diff --git a/lms/templates/instructor/instructor_dashboard_2/data_download.html b/lms/templates/instructor/instructor_dashboard_2/data_download.html index 9ca6717337..28923c01e2 100644 --- a/lms/templates/instructor/instructor_dashboard_2/data_download.html +++ b/lms/templates/instructor/instructor_dashboard_2/data_download.html @@ -41,6 +41,8 @@

${_("Click to generate a CSV grade report for all currently enrolled students.")}

+ +

%endif
diff --git a/openedx/core/djangoapps/content/course_structures/models.py b/openedx/core/djangoapps/content/course_structures/models.py index 2dcbccbc1a..29b6697e9c 100644 --- a/openedx/core/djangoapps/content/course_structures/models.py +++ b/openedx/core/djangoapps/content/course_structures/models.py @@ -1,6 +1,7 @@ import json import logging +from collections import OrderedDict from model_utils.models import TimeStampedModel from util.models import CompressedTextField @@ -26,6 +27,29 @@ class CourseStructure(TimeStampedModel): return json.loads(self.structure_json) return None + @property + def ordered_blocks(self): + if self.structure: + ordered_blocks = OrderedDict() + self._traverse_tree(self.structure['root'], self.structure['blocks'], ordered_blocks) + return ordered_blocks + + def _traverse_tree(self, block, unordered_structure, ordered_blocks, parent=None): + """ + Traverses the tree and fills in the ordered_blocks OrderedDict with the blocks in + the order that they appear in the course. + """ + # find the dictionary entry for the current node + cur_block = unordered_structure[block] + + if parent: + cur_block['parent'] = parent + + ordered_blocks[block] = cur_block + + for child_node in cur_block['children']: + self._traverse_tree(child_node, unordered_structure, ordered_blocks, parent=block) + # Signals must be imported in a file that is automatically loaded at app startup (e.g. models.py). We import them # at the end of this file to avoid circular dependencies. 
import signals # pylint: disable=unused-import diff --git a/openedx/core/djangoapps/content/course_structures/tests.py b/openedx/core/djangoapps/content/course_structures/tests.py index 5c645049bf..3b82f9a2b0 100644 --- a/openedx/core/djangoapps/content/course_structures/tests.py +++ b/openedx/core/djangoapps/content/course_structures/tests.py @@ -91,6 +91,40 @@ class CourseStructureTaskTests(ModuleStoreTestCase): cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) self.assertDictEqual(cs.structure, structure) + + def test_ordered_blocks(self): + structure = { + 'root': 'a/b/c', + 'blocks': { + 'a/b/c': { + 'id': 'a/b/c', + 'children': [ + 'g/h/i' + ] + }, + 'd/e/f': { + 'id': 'd/e/f', + 'children': [] + }, + 'g/h/i': { + 'id': 'h/j/k', + 'children': [ + 'j/k/l', + 'd/e/f' + ] + }, + 'j/k/l': { + 'id': 'j/k/l', + 'children': [] + } + } + } + in_order_blocks = ['a/b/c', 'g/h/i', 'j/k/l', 'd/e/f'] + structure_json = json.dumps(structure) + cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) + + self.assertEqual(cs.ordered_blocks.keys(), in_order_blocks) + def test_block_with_missing_fields(self): """ The generator should continue to operate on blocks/XModule that do not have graded or format fields. 
From 2fafaec053fa01e32b7607f276a338d78e5e6b95 Mon Sep 17 00:00:00 2001 From: Andy Armstrong Date: Fri, 24 Apr 2015 17:06:45 -0400 Subject: [PATCH 4/7] Implement grade report analytics TNL-1988 --- common/djangoapps/util/model_utils.py | 2 - .../pages/lms/instructor_dashboard.py | 39 ++++- .../discussion/test_cohort_management.py | 5 +- .../lms/test_lms_instructor_dashboard.py | 145 ++++++++++++++++-- lms/djangoapps/course_structure_api/v0/api.py | 3 +- lms/djangoapps/instructor_task/tasks.py | 1 - .../instructor_task/tasks_helper.py | 9 ++ .../instructor_dashboard/data_download.coffee | 15 +- 8 files changed, 189 insertions(+), 30 deletions(-) diff --git a/common/djangoapps/util/model_utils.py b/common/djangoapps/util/model_utils.py index 4c9e7b9c77..cbd67a5204 100644 --- a/common/djangoapps/util/model_utils.py +++ b/common/djangoapps/util/model_utils.py @@ -4,8 +4,6 @@ Utilities for django models. from eventtracking import tracker from django.conf import settings -from django.core.exceptions import ObjectDoesNotExist -from django.db.models.fields.related import RelatedField from django_countries.fields import Country diff --git a/common/test/acceptance/pages/lms/instructor_dashboard.py b/common/test/acceptance/pages/lms/instructor_dashboard.py index c1d1c4c043..525b5cd88e 100644 --- a/common/test/acceptance/pages/lms/instructor_dashboard.py +++ b/common/test/acceptance/pages/lms/instructor_dashboard.py @@ -702,12 +702,47 @@ class DataDownloadPage(PageObject): def is_browser_on_page(self): return self.q(css='a[data-section=data_download].active-section').present + @property + def generate_student_profile_report_button(self): + """ + Returns the "Download profile information as a CSV" button. + """ + return self.q(css='input[name=list-profiles-csv]') + + @property + def generate_grade_report_button(self): + """ + Returns the "Generate Grade Report" button. 
+ """ + return self.q(css='input[name=calculate-grades-csv]') + + @property + def generate_weighted_problem_grade_report_button(self): + """ + Returns the "Generate Weighted Problem Grade Report" button. + """ + return self.q(css='input[name=problem-grade-report]') + + @property + def report_download_links(self): + """ + Returns the download links for the current page. + """ + return self.q(css="#report-downloads-table .file-download-link>a") + + def wait_for_available_report(self): + """ + Waits for a downloadable report to be available. + """ + EmptyPromise( + lambda: len(self.report_download_links) >= 1, 'Waiting for downloadable report' + ).fulfill() + def get_available_reports_for_download(self): """ Returns a list of all the available reports for download. """ - reports = self.q(css="#report-downloads-table .file-download-link>a").map(lambda el: el.text) - return reports.results + return self.report_download_links.map(lambda el: el.text) class StudentAdminPage(PageObject): diff --git a/common/test/acceptance/tests/discussion/test_cohort_management.py b/common/test/acceptance/tests/discussion/test_cohort_management.py index ea9bd93176..921f471dbf 100644 --- a/common/test/acceptance/tests/discussion/test_cohort_management.py +++ b/common/test/acceptance/tests/discussion/test_cohort_management.py @@ -14,7 +14,6 @@ from xmodule.partitions.partitions import Group from ...fixtures.course import CourseFixture, XBlockFixtureDesc from ...pages.lms.auto_auth import AutoAuthPage from ...pages.lms.instructor_dashboard import InstructorDashboardPage, DataDownloadPage -from ...pages.studio.settings_advanced import AdvancedSettingsPage from ...pages.studio.settings_group_configurations import GroupConfigurationsPage import uuid @@ -555,9 +554,7 @@ class CohortConfigurationTest(EventsTestMixin, UniqueCourseTest, CohortTestMixin # Verify the results can be downloaded. 
data_download = self.instructor_dashboard_page.select_data_download() - EmptyPromise( - lambda: 1 == len(data_download.get_available_reports_for_download()), 'Waiting for downloadable report' - ).fulfill() + data_download.wait_for_available_report() report = data_download.get_available_reports_for_download()[0] base_file_name = "cohort_results_" self.assertIn("{}_{}".format( diff --git a/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py b/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py index fc78048288..b58299859f 100644 --- a/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py +++ b/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py @@ -5,15 +5,36 @@ End-to-end tests for the LMS Instructor Dashboard. from nose.plugins.attrib import attr -from ..helpers import UniqueCourseTest, get_modal_alert +from ..helpers import UniqueCourseTest, get_modal_alert, EventsTestMixin from ...pages.common.logout import LogoutPage from ...pages.lms.auto_auth import AutoAuthPage from ...pages.lms.instructor_dashboard import InstructorDashboardPage from ...fixtures.course import CourseFixture +class BaseInstructorDashboardTest(EventsTestMixin, UniqueCourseTest): + """ + Mixin class for testing the instructor dashboard. + """ + def log_in_as_instructor(self): + """ + Logs in as an instructor and returns the id. + """ + username = "test_instructor_{uuid}".format(uuid=self.unique_id[0:6]) + auto_auth_page = AutoAuthPage(self.browser, username=username, course_id=self.course_id, staff=True) + return username, auto_auth_page.visit().get_user_id() + + def visit_instructor_dashboard(self): + """ + Visits the instructor dashboard. 
+ """ + instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id) + instructor_dashboard_page.visit() + return instructor_dashboard_page + + @attr('shard_5') -class AutoEnrollmentWithCSVTest(UniqueCourseTest): +class AutoEnrollmentWithCSVTest(BaseInstructorDashboardTest): """ End-to-end tests for Auto-Registration and enrollment functionality via CSV file. """ @@ -21,13 +42,8 @@ class AutoEnrollmentWithCSVTest(UniqueCourseTest): def setUp(self): super(AutoEnrollmentWithCSVTest, self).setUp() self.course_fixture = CourseFixture(**self.course_info).install() - - # login as an instructor - AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit() - - # go to the membership page on the instructor dashboard - instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id) - instructor_dashboard_page.visit() + self.log_in_as_instructor() + instructor_dashboard_page = self.visit_instructor_dashboard() self.auto_enroll_section = instructor_dashboard_page.select_membership().select_auto_enroll_section() def test_browse_and_upload_buttons_are_visible(self): @@ -91,7 +107,7 @@ class AutoEnrollmentWithCSVTest(UniqueCourseTest): @attr('shard_5') -class EntranceExamGradeTest(UniqueCourseTest): +class EntranceExamGradeTest(BaseInstructorDashboardTest): """ Tests for Entrance exam specific student grading tasks. 
""" @@ -112,13 +128,9 @@ class EntranceExamGradeTest(UniqueCourseTest): LogoutPage(self.browser).visit() - # login as an instructor - AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit() - # go to the student admin page on the instructor dashboard - instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id) - instructor_dashboard_page.visit() - self.student_admin_section = instructor_dashboard_page.select_student_admin() + self.log_in_as_instructor() + self.student_admin_section = self.visit_instructor_dashboard().select_student_admin() def test_input_text_and_buttons_are_visible(self): """ @@ -291,3 +303,104 @@ class EntranceExamGradeTest(UniqueCourseTest): self.student_admin_section.set_student_email(self.student_identifier) self.student_admin_section.click_task_history_button() self.assertTrue(self.student_admin_section.is_background_task_history_table_visible()) + + +class DataDownloadsTest(BaseInstructorDashboardTest): + """ + Bok Choy tests for the "Data Downloads" tab. + """ + def setUp(self): + super(DataDownloadsTest, self).setUp() + self.course_fixture = CourseFixture(**self.course_info).install() + self.instructor_username, self.instructor_id = self.log_in_as_instructor() + instructor_dashboard_page = self.visit_instructor_dashboard() + self.data_download_section = instructor_dashboard_page.select_data_download() + + def verify_report_requested_event(self, report_type): + """ + Verifies that the correct event is emitted when a report is requested. + """ + self.verify_events_of_type( + self.instructor_username, + u"edx.instructor.report.requested", + [{ + u"report_type": report_type + }] + ) + + def verify_report_downloaded_event(self, report_url): + """ + Verifies that the correct event is emitted when a report is downloaded. 
+ """ + self.verify_events_of_type( + self.instructor_username, + u"edx.instructor.report.downloaded", + [{ + u"report_url": report_url + }] + ) + + def verify_report_download(self, report_name): + """ + Verifies that a report can be downloaded and an event fired. + """ + download_links = self.data_download_section.report_download_links + self.assertEquals(len(download_links), 1) + download_links[0].click() + expected_url = download_links.attrs('href')[0] + self.assertIn(report_name, expected_url) + self.verify_report_downloaded_event(expected_url) + + def test_student_profiles_report_download(self): + """ + Scenario: Verify that an instructor can download a student profiles report + + Given that I am an instructor + And I visit the instructor dashboard's "Data Downloads" tab + And I click on the "Download profile information as a CSV" button + Then a report should be generated + And a report requested event should be emitted + When I click on the report + Then a report downloaded event should be emitted + """ + report_name = u"student_profile_info" + self.data_download_section.generate_student_profile_report_button.click() + self.data_download_section.wait_for_available_report() + self.verify_report_requested_event(report_name) + self.verify_report_download(report_name) + + def test_grade_report_download(self): + """ + Scenario: Verify that an instructor can download a grade report + + Given that I am an instructor + And I visit the instructor dashboard's "Data Downloads" tab + And I click on the "Generate Grade Report" button + Then a report should be generated + And a report requested event should be emitted + When I click on the report + Then a report downloaded event should be emitted + """ + report_name = u"grade_report" + self.data_download_section.generate_grade_report_button.click() + self.data_download_section.wait_for_available_report() + self.verify_report_requested_event(report_name) + self.verify_report_download(report_name) + + def 
test_weighted_problem_grade_report_download(self): + """ + Scenario: Verify that an instructor can download a weighted problem grade report + + Given that I am an instructor + And I visit the instructor dashboard's "Data Downloads" tab + And I click on the "Generate Weighted Problem Grade Report" button + Then a report should be generated + And a report requested event should be emitted + When I click on the report + Then a report downloaded event should be emitted + """ + report_name = u"problem_grade_report" + self.data_download_section.generate_weighted_problem_grade_report_button.click() + self.data_download_section.wait_for_available_report() + self.verify_report_requested_event(report_name) + self.verify_report_download(report_name) diff --git a/lms/djangoapps/course_structure_api/v0/api.py b/lms/djangoapps/course_structure_api/v0/api.py index 5a0523f6b9..c59fbea080 100644 --- a/lms/djangoapps/course_structure_api/v0/api.py +++ b/lms/djangoapps/course_structure_api/v0/api.py @@ -23,8 +23,7 @@ def _retrieve_course(course_key): """ try: - course = courses.get_course(course_key) - return course + return courses.get_course(course_key) except ValueError: raise CourseNotFoundError diff --git a/lms/djangoapps/instructor_task/tasks.py b/lms/djangoapps/instructor_task/tasks.py index 16c1021e20..88048f93e4 100644 --- a/lms/djangoapps/instructor_task/tasks.py +++ b/lms/djangoapps/instructor_task/tasks.py @@ -156,7 +156,6 @@ def calculate_grades_csv(entry_id, xmodule_instance_args): return run_main_task(entry_id, task_fn, action_name) -# TODO: GRADES_DOWNLOAD_ROUTING_KEY is the high mem queue. Do we know we need it? 
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY)  # pylint: disable=not-callable
def calculate_problem_grade_report(entry_id, xmodule_instance_args):
    """
diff --git a/lms/djangoapps/instructor_task/tasks_helper.py b/lms/djangoapps/instructor_task/tasks_helper.py
index eb5d99a814..2479420eb6 100644
--- a/lms/djangoapps/instructor_task/tasks_helper.py
+++ b/lms/djangoapps/instructor_task/tasks_helper.py
@@ -55,6 +55,9 @@ UPDATE_STATUS_SUCCEEDED = 'succeeded'
 UPDATE_STATUS_FAILED = 'failed'
 UPDATE_STATUS_SKIPPED = 'skipped'
 
+# The name of the event emitted when an instructor report is requested.
+REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
+
 
 class BaseInstructorTask(Task):
     """
@@ -553,6 +556,12 @@ def upload_csv_to_report_store(rows, csv_name, course_id, timestamp):
         ),
         rows
     )
+    tracker.emit(
+        REPORT_REQUESTED_EVENT_NAME,
+        {
+            "report_type": csv_name,
+        }
+    )
 
 
 def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
diff --git a/lms/static/coffee/src/instructor_dashboard/data_download.coffee b/lms/static/coffee/src/instructor_dashboard/data_download.coffee
index ad9de769f9..dcd2e7ba91 100644
--- a/lms/static/coffee/src/instructor_dashboard/data_download.coffee
+++ b/lms/static/coffee/src/instructor_dashboard/data_download.coffee
@@ -109,10 +109,10 @@ class DataDownload
         @$download_display_text.html data['grading_config_summary']
 
     @$calculate_grades_csv_btn.click (e) =>
-      @onClickGradeDownload @$calculate_grades_csv_btn, "Error generating grades. Please try again."
+      @onClickGradeDownload @$calculate_grades_csv_btn, gettext("Error generating grades. Please try again.")
 
     @$problem_grade_report_csv_btn.click (e) =>
-      @onClickGradeDownload @$problem_grade_report_csv_btn, "Error generating weighted problem report. Please try again."
+ @onClickGradeDownload @$problem_grade_report_csv_btn, gettext("Error generating weighted problem report. Please try again.") onClickGradeDownload: (button, errorMessage) -> # Clear any CSS styling from the request-response areas @@ -124,7 +124,7 @@ class DataDownload dataType: 'json' url: url error: (std_ajax_err) => - @$reports_request_response_error.text gettext(errorMessage) + @$reports_request_response_error.text errorMessage $(".msg-error").css({"display":"block"}) success: (data) => @$reports_request_response.text data['status'] @@ -201,6 +201,15 @@ class ReportDownloads $table_placeholder = $ '
', class: 'slickgrid' @$report_downloads_table.append $table_placeholder grid = new Slick.Grid($table_placeholder, report_downloads_data, columns, options) + grid.onClick.subscribe( + (event) => + report_url = event.target.href + if report_url + # Record that the user requested to download a report + Logger.log('edx.instructor.report.downloaded', { + report_url: report_url + }) + ) grid.autosizeColumns() From 67fdca18475619c7657eaa80e3ef47c216eef0ea Mon Sep 17 00:00:00 2001 From: Daniel Friedman Date: Tue, 28 Apr 2015 15:55:05 +0000 Subject: [PATCH 5/7] Monitor performance through datadog --- lms/djangoapps/instructor_task/tasks.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lms/djangoapps/instructor_task/tasks.py b/lms/djangoapps/instructor_task/tasks.py index 88048f93e4..6ee49b4b53 100644 --- a/lms/djangoapps/instructor_task/tasks.py +++ b/lms/djangoapps/instructor_task/tasks.py @@ -162,9 +162,8 @@ def calculate_problem_grade_report(entry_id, xmodule_instance_args): Generate a CSV for a course containing all students' weighted problem grades and push the results to an S3 bucket for download. """ - # Translators: This is a past-tense verb that is inserted into task progress messages as {action}. - # TODO: can this be the same as the `calculate_grades_csv` action_name? - action_name = ugettext_noop('graded') + # Translators: This is a past-tense phrase that is inserted into task progress messages as {action}. 
+ action_name = ugettext_noop('problem distribution graded') TASK_LOG.info( u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution", xmodule_instance_args.get('task_id'), entry_id, action_name From 84f3c33df70dfce525bba574dc3e0d86329b8963 Mon Sep 17 00:00:00 2001 From: Daniel Friedman Date: Mon, 11 May 2015 13:31:47 -0400 Subject: [PATCH 6/7] Address doc review --- common/test/acceptance/pages/lms/instructor_dashboard.py | 4 ++-- .../acceptance/tests/lms/test_lms_instructor_dashboard.py | 8 ++++---- lms/djangoapps/instructor/views/api.py | 8 +++----- lms/djangoapps/instructor_task/api.py | 2 +- lms/djangoapps/instructor_task/tasks.py | 2 +- lms/djangoapps/instructor_task/tests/test_tasks_helper.py | 2 +- .../coffee/src/instructor_dashboard/data_download.coffee | 2 +- 7 files changed, 13 insertions(+), 15 deletions(-) diff --git a/common/test/acceptance/pages/lms/instructor_dashboard.py b/common/test/acceptance/pages/lms/instructor_dashboard.py index 525b5cd88e..e54f4d76f3 100644 --- a/common/test/acceptance/pages/lms/instructor_dashboard.py +++ b/common/test/acceptance/pages/lms/instructor_dashboard.py @@ -717,9 +717,9 @@ class DataDownloadPage(PageObject): return self.q(css='input[name=calculate-grades-csv]') @property - def generate_weighted_problem_grade_report_button(self): + def generate_problem_grade_report_button(self): """ - Returns the "Generate Weighted Problem Grade Report" button. + Returns the "Generate Problem Grade Report" button. 
""" return self.q(css='input[name=problem-grade-report]') diff --git a/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py b/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py index b58299859f..f06baa724c 100644 --- a/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py +++ b/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py @@ -387,20 +387,20 @@ class DataDownloadsTest(BaseInstructorDashboardTest): self.verify_report_requested_event(report_name) self.verify_report_download(report_name) - def test_weighted_problem_grade_report_download(self): + def test_problem_grade_report_download(self): """ - Scenario: Verify that an instructor can download a weighted problem grade report + Scenario: Verify that an instructor can download a problem grade report Given that I am an instructor And I visit the instructor dashboard's "Data Downloads" tab - And I click on the "Generate Weighted Problem Grade Report" button + And I click on the "Generate Problem Grade Report" button Then a report should be generated And a report requested event should be emitted When I click on the report Then a report downloaded event should be emitted """ report_name = u"problem_grade_report" - self.data_download_section.generate_weighted_problem_grade_report_button.click() + self.data_download_section.generate_problem_grade_report_button.click() self.data_download_section.wait_for_available_report() self.verify_report_requested_event(report_name) self.verify_report_download(report_name) diff --git a/lms/djangoapps/instructor/views/api.py b/lms/djangoapps/instructor/views/api.py index 78fd631cde..407d8bfcf7 100644 --- a/lms/djangoapps/instructor/views/api.py +++ b/lms/djangoapps/instructor/views/api.py @@ -1956,7 +1956,7 @@ def calculate_grades_csv(request, course_id): @require_level('staff') def problem_grade_report(request, course_id): """ - Request a CSV showing students' weighted grades for all problems in the + Request a CSV showing 
students' grades for all problems in the course. AlreadyRunningError is raised if the course's grades are already being @@ -1965,13 +1965,11 @@ def problem_grade_report(request, course_id): course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) try: instructor_task.api.submit_problem_grade_report(request, course_key) - # TODO: verify copy with documentation team - success_status = _("Your weighted problem report is being generated! " + success_status = _("Your problem grade report is being generated! " "You can view the status of the generation task in the 'Pending Instructor Tasks' section.") return JsonResponse({"status": success_status}) except AlreadyRunningError: - # TODO: verify copy with documentation team - already_running_status = _("A weighted problem generation task is already in progress. " + already_running_status = _("A problem grade report is already being generated. " "Check the 'Pending Instructor Tasks' table for the status of the task. " "When completed, the report will be available for download in the table below.") return JsonResponse({ diff --git a/lms/djangoapps/instructor_task/api.py b/lms/djangoapps/instructor_task/api.py index 8c16cef652..b02fc1470b 100644 --- a/lms/djangoapps/instructor_task/api.py +++ b/lms/djangoapps/instructor_task/api.py @@ -337,7 +337,7 @@ def submit_calculate_grades_csv(request, course_key): def submit_problem_grade_report(request, course_key): """ - Submits a task to generate a CSV grade report containing weighted problem + Submits a task to generate a CSV grade report containing problem values. 
""" task_type = 'grade_problems' diff --git a/lms/djangoapps/instructor_task/tasks.py b/lms/djangoapps/instructor_task/tasks.py index 6ee49b4b53..fa84cf5ca7 100644 --- a/lms/djangoapps/instructor_task/tasks.py +++ b/lms/djangoapps/instructor_task/tasks.py @@ -159,7 +159,7 @@ def calculate_grades_csv(entry_id, xmodule_instance_args): @task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable def calculate_problem_grade_report(entry_id, xmodule_instance_args): """ - Generate a CSV for a course containing all students' weighted problem + Generate a CSV for a course containing all students' problem grades and push the results to an S3 bucket for download. """ # Translators: This is a past-tense phrase that is inserted into task progress messages as {action}. diff --git a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py index 6afaccdb81..a81e52d8e7 100644 --- a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py +++ b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py @@ -271,7 +271,7 @@ class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase): class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase): """ - Test that the weighted problem CSV generation works. + Test that the problem CSV generation works. """ def setUp(self): super(TestProblemGradeReport, self).setUp() diff --git a/lms/static/coffee/src/instructor_dashboard/data_download.coffee b/lms/static/coffee/src/instructor_dashboard/data_download.coffee index dcd2e7ba91..ccca75ba83 100644 --- a/lms/static/coffee/src/instructor_dashboard/data_download.coffee +++ b/lms/static/coffee/src/instructor_dashboard/data_download.coffee @@ -112,7 +112,7 @@ class DataDownload @onClickGradeDownload @$calculate_grades_csv_btn, gettext("Error generating grades. 
Please try again.") @$problem_grade_report_csv_btn.click (e) => - @onClickGradeDownload @$problem_grade_report_csv_btn, gettext("Error generating weighted problem report. Please try again.") + @onClickGradeDownload @$problem_grade_report_csv_btn, gettext("Error generating problem grade report. Please try again.") onClickGradeDownload: (button, errorMessage) -> # Clear any CSS styling from the request-response areas From 3acd7a008c514bab4b7d5f0d286076405aff3409 Mon Sep 17 00:00:00 2001 From: Daniel Friedman Date: Tue, 5 May 2015 17:55:55 +0000 Subject: [PATCH 7/7] Refactor and add tests for new grade report. * Handle grading errors --- .../lib/xmodule/xmodule/tests/test_graders.py | 8 +- .../pages/lms/instructor_dashboard.py | 4 +- .../lms/test_lms_instructor_dashboard.py | 20 +- lms/djangoapps/course_structure_api/v0/api.py | 7 +- .../course_structure_api/v0/errors.py | 2 + .../course_structure_api/v0/views.py | 5 +- lms/djangoapps/courseware/grades.py | 8 +- .../courseware/tests/test_grades.py | 6 +- .../django_comment_client/forum/tests.py | 3 +- .../django_comment_client/tests/test_utils.py | 2 +- .../django_comment_client/tests/utils.py | 94 +------- .../instructor_task/tasks_helper.py | 47 ++-- .../instructor_task/tests/test_base.py | 23 +- .../instructor_task/tests/test_integration.py | 101 +-------- .../tests/test_tasks_helper.py | 158 +++++++++---- .../content/course_structures/models.py | 3 + .../content/course_structures/tests.py | 11 +- openedx/core/djangoapps/util/__init__.py | 0 openedx/core/djangoapps/util/testing.py | 209 ++++++++++++++++++ 19 files changed, 425 insertions(+), 286 deletions(-) create mode 100644 openedx/core/djangoapps/util/__init__.py create mode 100644 openedx/core/djangoapps/util/testing.py diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py b/common/lib/xmodule/xmodule/tests/test_graders.py index cc56173aa5..d3e73fcc54 100644 --- a/common/lib/xmodule/xmodule/tests/test_graders.py +++ 
b/common/lib/xmodule/xmodule/tests/test_graders.py @@ -24,12 +24,16 @@ class GradesheetTest(unittest.TestCase): scores.append(Score(earned=3, possible=5, graded=True, section="summary", module_id=None)) all_total, graded_total = aggregate_scores(scores) self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary", module_id=None)) - self.assertAlmostEqual(graded_total, Score(earned=3, possible=5, graded=True, section="summary", module_id=None)) + self.assertAlmostEqual( + graded_total, Score(earned=3, possible=5, graded=True, section="summary", module_id=None) + ) scores.append(Score(earned=2, possible=5, graded=True, section="summary", module_id=None)) all_total, graded_total = aggregate_scores(scores) self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary", module_id=None)) - self.assertAlmostEqual(graded_total, Score(earned=5, possible=10, graded=True, section="summary", module_id=None)) + self.assertAlmostEqual( + graded_total, Score(earned=5, possible=10, graded=True, section="summary", module_id=None) + ) class GraderTest(unittest.TestCase): diff --git a/common/test/acceptance/pages/lms/instructor_dashboard.py b/common/test/acceptance/pages/lms/instructor_dashboard.py index e54f4d76f3..728d5b86c0 100644 --- a/common/test/acceptance/pages/lms/instructor_dashboard.py +++ b/common/test/acceptance/pages/lms/instructor_dashboard.py @@ -703,7 +703,7 @@ class DataDownloadPage(PageObject): return self.q(css='a[data-section=data_download].active-section').present @property - def generate_student_profile_report_button(self): + def generate_student_report_button(self): """ Returns the "Download profile information as a CSV" button. 
""" @@ -717,7 +717,7 @@ class DataDownloadPage(PageObject): return self.q(css='input[name=calculate-grades-csv]') @property - def generate_problem_grade_report_button(self): + def generate_problem_report_button(self): """ Returns the "Generate Problem Grade Report" button. """ diff --git a/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py b/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py index f06baa724c..8fde51d0c0 100644 --- a/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py +++ b/common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py @@ -320,24 +320,16 @@ class DataDownloadsTest(BaseInstructorDashboardTest): """ Verifies that the correct event is emitted when a report is requested. """ - self.verify_events_of_type( - self.instructor_username, - u"edx.instructor.report.requested", - [{ - u"report_type": report_type - }] + self.assert_matching_events_were_emitted( + event_filter={'name': u'edx.instructor.report.requested', 'report_type': report_type} ) def verify_report_downloaded_event(self, report_url): """ Verifies that the correct event is emitted when a report is downloaded. 
""" - self.verify_events_of_type( - self.instructor_username, - u"edx.instructor.report.downloaded", - [{ - u"report_url": report_url - }] + self.assert_matching_events_were_emitted( + event_filter={'name': u'edx.instructor.report.downloaded', 'report_url': report_url} ) def verify_report_download(self, report_name): @@ -364,7 +356,7 @@ class DataDownloadsTest(BaseInstructorDashboardTest): Then a report downloaded event should be emitted """ report_name = u"student_profile_info" - self.data_download_section.generate_student_profile_report_button.click() + self.data_download_section.generate_student_report_button.click() self.data_download_section.wait_for_available_report() self.verify_report_requested_event(report_name) self.verify_report_download(report_name) @@ -400,7 +392,7 @@ class DataDownloadsTest(BaseInstructorDashboardTest): Then a report downloaded event should be emitted """ report_name = u"problem_grade_report" - self.data_download_section.generate_problem_grade_report_button.click() + self.data_download_section.generate_problem_report_button.click() self.data_download_section.wait_for_available_report() self.verify_report_requested_event(report_name) self.verify_report_download(report_name) diff --git a/lms/djangoapps/course_structure_api/v0/api.py b/lms/djangoapps/course_structure_api/v0/api.py index c59fbea080..55c884c384 100644 --- a/lms/djangoapps/course_structure_api/v0/api.py +++ b/lms/djangoapps/course_structure_api/v0/api.py @@ -1,7 +1,8 @@ """ API implementation of the Course Structure API for Python code. -Note: The course list and course detail functionality isn't currently supported here because of the tricky interactions between DRF and the code. +Note: The course list and course detail functionality isn't currently supported here because +of the tricky interactions between DRF and the code. Most of that information is available by accessing the course objects directly. 
""" @@ -62,8 +63,8 @@ def course_structure(course_key): """ course = _retrieve_course(course_key) try: - course_structure = models.CourseStructure.objects.get(course_id=course.id) - return serializers.CourseStructureSerializer(course_structure.structure).data + requested_course_structure = models.CourseStructure.objects.get(course_id=course.id) + return serializers.CourseStructureSerializer(requested_course_structure.structure).data except models.CourseStructure.DoesNotExist: # If we don't have data stored, generate it and return an error. tasks.update_course_structure.delay(unicode(course_key)) diff --git a/lms/djangoapps/course_structure_api/v0/errors.py b/lms/djangoapps/course_structure_api/v0/errors.py index 91a3f24f4d..e0413a3abb 100644 --- a/lms/djangoapps/course_structure_api/v0/errors.py +++ b/lms/djangoapps/course_structure_api/v0/errors.py @@ -1,3 +1,5 @@ +""" Errors used by the Course Structure API. """ + class CourseNotFoundError(Exception): """ The course was not found. """ diff --git a/lms/djangoapps/course_structure_api/v0/views.py b/lms/djangoapps/course_structure_api/v0/views.py index d88d4138b1..e182315aff 100644 --- a/lms/djangoapps/course_structure_api/v0/views.py +++ b/lms/djangoapps/course_structure_api/v0/views.py @@ -15,7 +15,6 @@ from course_structure_api.v0 import api, serializers from course_structure_api.v0.errors import CourseNotFoundError, CourseStructureNotAvailableError from courseware import courses from courseware.access import has_access -from openedx.core.djangoapps.content.course_structures import models, tasks from openedx.core.lib.api.permissions import IsAuthenticatedOrDebug from openedx.core.lib.api.serializers import PaginationSerializer from student.roles import CourseInstructorRole, CourseStaffRole @@ -253,7 +252,7 @@ class CourseStructure(CourseViewMixin, RetrieveAPIView): """ @CourseViewMixin.course_check - def get(self, request, course_id=None): + def get(self, request, **kwargs): try: return 
Response(api.course_structure(self.course_key)) except CourseStructureNotAvailableError: @@ -289,5 +288,5 @@ class CourseGradingPolicy(CourseViewMixin, ListAPIView): allow_empty = False @CourseViewMixin.course_check - def get(self, request, course_id=None): + def get(self, request, **kwargs): return Response(api.course_grading_policy(self.course_key)) diff --git a/lms/djangoapps/courseware/grades.py b/lms/djangoapps/courseware/grades.py index 02d608040d..aff9005bb7 100644 --- a/lms/djangoapps/courseware/grades.py +++ b/lms/djangoapps/courseware/grades.py @@ -229,7 +229,13 @@ def _grade(student, request, course, keep_raw_scores): graded = False scores.append( - Score(correct, total, graded, module_descriptor.display_name_with_default, module_descriptor.location) + Score( + correct, + total, + graded, + module_descriptor.display_name_with_default, + module_descriptor.location + ) ) _, graded_total = graders.aggregate_scores(scores, section_name) diff --git a/lms/djangoapps/courseware/tests/test_grades.py b/lms/djangoapps/courseware/tests/test_grades.py index f0d46c5d02..c6825c99be 100644 --- a/lms/djangoapps/courseware/tests/test_grades.py +++ b/lms/djangoapps/courseware/tests/test_grades.py @@ -68,7 +68,7 @@ class TestGradeIteration(ModuleStoreTestCase): def test_all_empty_grades(self): """No students have grade entries""" - all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students, keep_raw_scores=True) + all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students) self.assertEqual(len(all_errors), 0) for gradeset in all_gradesets.values(): self.assertIsNone(gradeset['grade']) @@ -107,7 +107,7 @@ class TestGradeIteration(ModuleStoreTestCase): self.assertTrue(all_gradesets[student5]) ################################# Helpers ################################# - def _gradesets_and_errors_for(self, course_id, students, keep_raw_scores=False): + def _gradesets_and_errors_for(self, course_id, students): 
"""Simple helper method to iterate through student grades and give us two dictionaries -- one that has all students and their respective gradesets, and one that has only students that could not be graded and @@ -115,7 +115,7 @@ class TestGradeIteration(ModuleStoreTestCase): students_to_gradesets = {} students_to_errors = {} - for student, gradeset, err_msg in iterate_grades_for(course_id, students, keep_raw_scores): + for student, gradeset, err_msg in iterate_grades_for(course_id, students): students_to_gradesets[student] = gradeset if err_msg: students_to_errors[student] = err_msg diff --git a/lms/djangoapps/django_comment_client/forum/tests.py b/lms/djangoapps/django_comment_client/forum/tests.py index bbeb7dba09..59dc5a2106 100644 --- a/lms/djangoapps/django_comment_client/forum/tests.py +++ b/lms/djangoapps/django_comment_client/forum/tests.py @@ -15,10 +15,11 @@ from django_comment_client.tests.group_id import ( NonCohortedTopicGroupIdTestMixin ) from django_comment_client.tests.unicode import UnicodeTestMixin -from django_comment_client.tests.utils import CohortedTestCase, ContentGroupTestCase +from django_comment_client.tests.utils import CohortedTestCase from django_comment_client.utils import strip_none from student.tests.factories import UserFactory, CourseEnrollmentFactory from util.testing import UrlResetMixin +from openedx.core.djangoapps.util.testing import ContentGroupTestCase from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.django_utils import ( diff --git a/lms/djangoapps/django_comment_client/tests/test_utils.py b/lms/djangoapps/django_comment_client/tests/test_utils.py index 76c03a3e4f..39ab33876b 100644 --- a/lms/djangoapps/django_comment_client/tests/test_utils.py +++ b/lms/djangoapps/django_comment_client/tests/test_utils.py @@ -15,12 +15,12 @@ from edxmako import add_lookup from django_comment_client.tests.factories import RoleFactory from 
django_comment_client.tests.unicode import UnicodeTestMixin -from django_comment_client.tests.utils import ContentGroupTestCase import django_comment_client.utils as utils from courseware.tests.factories import InstructorFactory from openedx.core.djangoapps.course_groups.cohorts import set_course_cohort_settings from student.tests.factories import UserFactory, CourseEnrollmentFactory +from openedx.core.djangoapps.util.testing import ContentGroupTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase diff --git a/lms/djangoapps/django_comment_client/tests/utils.py b/lms/djangoapps/django_comment_client/tests/utils.py index 9b73ff4dd4..6c06c5c7ab 100644 --- a/lms/djangoapps/django_comment_client/tests/utils.py +++ b/lms/djangoapps/django_comment_client/tests/utils.py @@ -1,18 +1,14 @@ """ Utilities for tests within the django_comment_client module. """ -from datetime import datetime from mock import patch -from pytz import UTC -from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory from django_comment_common.models import Role from django_comment_common.utils import seed_permissions_roles from student.tests.factories import CourseEnrollmentFactory, UserFactory -from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory +from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase -from xmodule.partitions.partitions import UserPartition, Group class CohortedTestCase(ModuleStoreTestCase): @@ -49,91 +45,3 @@ class CohortedTestCase(ModuleStoreTestCase): self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id)) self.student_cohort.users.add(self.student) self.moderator_cohort.users.add(self.moderator) - - -class 
ContentGroupTestCase(ModuleStoreTestCase): - """ - Sets up discussion modules visible to content groups 'Alpha' and - 'Beta', as well as a module visible to all students. Creates a - staff user, users with access to Alpha/Beta (by way of cohorts), - and a non-cohorted user with no special access. - """ - def setUp(self): - super(ContentGroupTestCase, self).setUp() - - self.course = CourseFactory.create( - org='org', number='number', run='run', - # This test needs to use a course that has already started -- - # discussion topics only show up if the course has already started, - # and the default start date for courses is Jan 1, 2030. - start=datetime(2012, 2, 3, tzinfo=UTC), - user_partitions=[ - UserPartition( - 0, - 'Content Group Configuration', - '', - [Group(1, 'Alpha'), Group(2, 'Beta')], - scheme_id='cohort' - ) - ], - grading_policy={ - "GRADER": [{ - "type": "Homework", - "min_count": 1, - "drop_count": 0, - "short_label": "HW", - "weight": 1.0 - }] - }, - cohort_config={'cohorted': True}, - discussion_topics={} - ) - - self.staff_user = UserFactory.create(is_staff=True) - self.alpha_user = UserFactory.create() - self.beta_user = UserFactory.create() - self.non_cohorted_user = UserFactory.create() - for user in [self.staff_user, self.alpha_user, self.beta_user, self.non_cohorted_user]: - CourseEnrollmentFactory.create(user=user, course_id=self.course.id) - - alpha_cohort = CohortFactory( - course_id=self.course.id, - name='Cohort Alpha', - users=[self.alpha_user] - ) - beta_cohort = CohortFactory( - course_id=self.course.id, - name='Cohort Beta', - users=[self.beta_user] - ) - CourseUserGroupPartitionGroup.objects.create( - course_user_group=alpha_cohort, - partition_id=self.course.user_partitions[0].id, - group_id=self.course.user_partitions[0].groups[0].id - ) - CourseUserGroupPartitionGroup.objects.create( - course_user_group=beta_cohort, - partition_id=self.course.user_partitions[0].id, - group_id=self.course.user_partitions[0].groups[1].id - ) - 
self.alpha_module = ItemFactory.create( - parent_location=self.course.location, - category='discussion', - discussion_id='alpha_group_discussion', - discussion_target='Visible to Alpha', - group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]} - ) - self.beta_module = ItemFactory.create( - parent_location=self.course.location, - category='discussion', - discussion_id='beta_group_discussion', - discussion_target='Visible to Beta', - group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]} - ) - self.global_module = ItemFactory.create( - parent_location=self.course.location, - category='discussion', - discussion_id='global_group_discussion', - discussion_target='Visible to Everyone' - ) - self.course = self.store.get_item(self.course.location) diff --git a/lms/djangoapps/instructor_task/tasks_helper.py b/lms/djangoapps/instructor_task/tasks_helper.py index 2479420eb6..6aa8b24075 100644 --- a/lms/djangoapps/instructor_task/tasks_helper.py +++ b/lms/djangoapps/instructor_task/tasks_helper.py @@ -556,12 +556,7 @@ def upload_csv_to_report_store(rows, csv_name, course_id, timestamp): ), rows ) - tracker.emit( - REPORT_REQUESTED_EVENT_NAME, - { - "report_type": csv_name, - } - ) + tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, }) def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements @@ -721,6 +716,14 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, def _order_problems(blocks): """ Sort the problems by the assignment type and assignment that it belongs to. + + Args: + blocks (OrderedDict) - A course structure containing blocks that have been ordered + (i.e. when we iterate over them, we will see them in the order + that they appear in the course). + + Returns: + an OrderedDict that maps a problem id to its headers in the final report. 
""" problems = OrderedDict() assignments = dict() @@ -749,7 +752,7 @@ def _order_problems(blocks): for assignment_type in assignments: for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1): for problem in assignments[assignment_type][assignment]: - header_name = "{assignment_type} {assignment_index}: {assignment_name} - {block}".format( + header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format( block=blocks[problem]['display_name'], assignment_type=assignment_type, assignment_index=assignment_index, @@ -771,10 +774,9 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _t enrolled_students = CourseEnrollment.users_enrolled_in(course_id) task_progress = TaskProgress(action_name, enrolled_students.count(), start_time) - # This struct encapsulates both the display names of each static item in - # the header row as values as well as the django User field names of those - # items as the keys. It is structured in this way to keep the values - # related. + # This struct encapsulates both the display names of each static item in the + # header row as values as well as the django User field names of those items + # as the keys. It is structured in this way to keep the values related. header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')]) try: @@ -782,14 +784,25 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _t blocks = course_structure.ordered_blocks problems = _order_problems(blocks) except CourseStructure.DoesNotExist: - return task_progress.update_task_state(extra_meta={'step': 'Generating course structure. Please refresh and try again.'}) + return task_progress.update_task_state( + extra_meta={'step': 'Generating course structure. Please refresh and try again.'} + ) # Just generate the static fields for now. 
rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))] + error_rows = [list(header_row.values()) + ['error_msg']] current_step = {'step': 'Calculating Grades'} for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True): student_fields = [getattr(student, field_name) for field_name in header_row] + task_progress.attempted += 1 + + if err_msg: + # There was an error grading this student. + error_rows.append(student_fields + [err_msg]) + task_progress.failed += 1 + continue + final_grade = gradeset['percent'] # Only consider graded problems problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded} @@ -807,13 +820,17 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _t earned_possible_values.append(['N/A', 'N/A']) rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values))) - task_progress.attempted += 1 task_progress.succeeded += 1 if task_progress.attempted % status_interval == 0: task_progress.update_task_state(extra_meta=current_step) - # Perform the upload - upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date) + # Perform the upload if any students have been successfully graded + if len(rows) > 1: + upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date) + # If there are any error rows, write them out as well + if len(error_rows) > 1: + upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date) + return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'}) diff --git a/lms/djangoapps/instructor_task/tests/test_base.py b/lms/djangoapps/instructor_task/tests/test_base.py index d8a905ba2a..e7f18ce01e 100644 --- a/lms/djangoapps/instructor_task/tests/test_base.py +++ b/lms/djangoapps/instructor_task/tests/test_base.py @@ -130,6 +130,9 @@ class 
InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase) self.add_course_content() def add_course_content(self): + """ + Add a chapter and a sequential to the current course. + """ # Add a chapter to the course chapter = ItemFactory.create(parent_location=self.course.location, display_name=TEST_SECTION_NAME) @@ -217,7 +220,7 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): ItemFactory.create(parent_location=parent.location, parent=parent, category="problem", - display_name=str(problem_url_name), + display_name=problem_url_name, data=problem_xml, **kwargs) @@ -256,9 +259,12 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): # URL, modified so that it can be easily stored in html, prepended with "input-" and # appended with a sequence identifier for the particular response the input goes to. course_key = self.course.id - return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(course_key.org, - course_key.course.replace('.', '_'), - problem_url_name, response_id) + return u'input_i4x-{0}-{1}-problem-{2}_{3}'.format( + course_key.org.replace(u'.', u'_'), + course_key.course.replace(u'.', u'_'), + problem_url_name, + response_id + ) # make sure that the requested user is logged in, so that the ajax call works # on the right problem: @@ -275,7 +281,7 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): # assign correct identifier to each response. 
resp = self.client.post(modx_url, { - get_input_id('{}_1').format(index): response for index, response in enumerate(responses, 2) + get_input_id(u'{}_1').format(index): response for index, response in enumerate(responses, 2) }) return resp @@ -289,7 +295,7 @@ class TestReportMixin(object): if os.path.exists(reports_download_path): shutil.rmtree(reports_download_path) - def verify_rows_in_csv(self, expected_rows, verify_order=True, ignore_other_columns=False): + def verify_rows_in_csv(self, expected_rows, file_index=0, verify_order=True, ignore_other_columns=False): """ Verify that the last ReportStore CSV contains the expected content. @@ -298,6 +304,9 @@ class TestReportMixin(object): where each dict represents a row of data in the last ReportStore CSV. Each dict maps keys from the CSV header to values in that row's corresponding cell. + file_index (int): Describes which report store file to + open. Files are ordered by last modified date, and 0 + corresponds to the most recently modified file. verify_order (boolean): When True, we verify that both the content and order of `expected_rows` matches the actual csv rows. When False (default), we only verify @@ -306,7 +315,7 @@ class TestReportMixin(object): contain data which is the subset of actual csv rows. 
""" report_store = ReportStore.from_config() - report_csv_filename = report_store.links_for(self.course.id)[0][0] + report_csv_filename = report_store.links_for(self.course.id)[file_index][0] with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file: # Expand the dict reader generator so we don't lose it's content csv_rows = [row for row in unicodecsv.DictReader(csv_file)] diff --git a/lms/djangoapps/instructor_task/tests/test_integration.py b/lms/djangoapps/instructor_task/tests/test_integration.py index deaebe7904..db3e4d5c9c 100644 --- a/lms/djangoapps/instructor_task/tests/test_integration.py +++ b/lms/djangoapps/instructor_task/tests/test_integration.py @@ -14,9 +14,9 @@ from celery.states import SUCCESS, FAILURE from django.contrib.auth.models import User from django.core.urlresolvers import reverse +from openedx.core.djangoapps.util.testing import TestConditionalContent from capa.tests.response_xml_factory import (CodeResponseXMLFactory, CustomResponseXMLFactory) -from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory from xmodule.modulestore.tests.factories import ItemFactory from xmodule.modulestore import ModuleStoreEnum from xmodule.partitions.partitions import Group, UserPartition @@ -28,7 +28,7 @@ from instructor_task.api import (submit_rescore_problem_for_all_students, submit_reset_problem_attempts_for_all_students, submit_delete_problem_state_for_all_students) from instructor_task.models import InstructorTask -from instructor_task.tasks_helper import upload_grades_csv, upload_problem_grade_report +from instructor_task.tasks_helper import upload_grades_csv from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TestReportMixin, TEST_COURSE_ORG, TEST_COURSE_NUMBER, OPTION_1, OPTION_2) from capa.responsetypes import StudentInputError @@ -465,103 +465,10 @@ class TestDeleteProblemTask(TestIntegrationTask): self.assertEqual(instructor_task.task_state, SUCCESS) -class 
TestGradeReportConditionalContent(TestReportMixin, TestIntegrationTask): +class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent, TestIntegrationTask): """ - Check that grade export works when graded content exists within - split modules. + Test grade report in cases where there are problems contained within split tests. """ - def setUp(self): - """ - Set up a course with graded problems within a split test. - - Course hierarchy is as follows (modeled after how split tests - are created in studio): - -> course - -> chapter - -> sequential (graded) - -> vertical - -> split_test - -> vertical (Group A) - -> problem - -> vertical (Group B) - -> problem - """ - super(TestGradeReportConditionalContent, self).setUp() - - # Create user partitions - self.user_partition_group_a = 0 - self.user_partition_group_b = 1 - self.partition = UserPartition( - 0, - 'first_partition', - 'First Partition', - [ - Group(self.user_partition_group_a, 'Group A'), - Group(self.user_partition_group_b, 'Group B') - ] - ) - - # Create course with group configurations and grading policy - self.initialize_course( - course_factory_kwargs={ - 'user_partitions': [self.partition], - 'grading_policy': { - "GRADER": [{ - "type": "Homework", - "min_count": 1, - "drop_count": 0, - "short_label": "HW", - "weight": 1.0 - }] - } - } - ) - - # Create users and partition them - self.student_a = self.create_student('student_a') - self.student_b = self.create_student('student_b') - UserCourseTagFactory( - user=self.student_a, - course_id=self.course.id, - key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member - value=str(self.user_partition_group_a) - ) - UserCourseTagFactory( - user=self.student_b, - course_id=self.course.id, - key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member - value=str(self.user_partition_group_b) - ) - - # Create a vertical to contain our split test - problem_vertical = 
ItemFactory.create( - parent_location=self.problem_section.location, - category='vertical', - display_name='Problem Unit' - ) - - # Create the split test and child vertical containers - vertical_a_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_a') - vertical_b_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_b') - self.split_test = ItemFactory.create( - parent_location=problem_vertical.location, - category='split_test', - display_name='Split Test', - user_partition_id=self.partition.id, # pylint: disable=no-member - group_id_to_child={str(index): url for index, url in enumerate([vertical_a_url, vertical_b_url])} - ) - self.vertical_a = ItemFactory.create( - parent_location=self.split_test.location, - category='vertical', - display_name='Group A problem container', - location=vertical_a_url - ) - self.vertical_b = ItemFactory.create( - parent_location=self.split_test.location, - category='vertical', - display_name='Group B problem container', - location=vertical_b_url - ) def verify_csv_task_success(self, task_result): """ diff --git a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py index a81e52d8e7..72b66354c7 100644 --- a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py +++ b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py @@ -14,12 +14,9 @@ import unicodecsv from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory from certificates.tests.factories import GeneratedCertificateFactory, CertificateWhitelistFactory from course_modes.models import CourseMode -from instructor_task.models import ReportStore -from instructor_task.tasks_helper import cohort_students_and_upload, upload_grades_csv, upload_students_csv from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin, InstructorTaskModuleTestCase from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup from 
openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory -from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme from student.tests.factories import UserFactory @@ -31,9 +28,7 @@ from instructor_task.models import ReportStore from instructor_task.tasks_helper import ( cohort_students_and_upload, upload_grades_csv, upload_problem_grade_report, upload_students_csv ) -from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin -from instructor_task.tests.test_integration import TestGradeReportConditionalContent -from django_comment_client.tests.utils import ContentGroupTestCase +from openedx.core.djangoapps.util.testing import ContentGroupTestCase, TestConditionalContent @ddt.ddt @@ -275,7 +270,6 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase): """ def setUp(self): super(TestProblemGradeReport, self).setUp() - self.maxDiff = None self.initialize_course() # Add unicode data to CSV even though unicode usernames aren't # technically possible in openedx. @@ -286,14 +280,20 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase): @patch('instructor_task.tasks_helper._get_current_task') def test_no_problems(self, _get_current_task): """ - Verify that we see no grade information for a course with no graded + Verify that we see no grade information for a course with no graded problems. 
""" result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result) self.verify_rows_in_csv([ - dict(zip(self.csv_header_row, [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.0'])), - dict(zip(self.csv_header_row, [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0'])) + dict(zip( + self.csv_header_row, + [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.0'] + )), + dict(zip( + self.csv_header_row, + [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0'] + )) ]) @patch('instructor_task.tasks_helper._get_current_task') @@ -304,40 +304,83 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase): metadata={'graded': True}, display_name='Problem Vertical' ) - self.define_option_problem('Problem1', parent=vertical) - # generate the course structure + self.define_option_problem(u'Pröblem1', parent=vertical) - self.submit_student_answer(self.student_1.username, 'Problem1', ['Option 1']) + self.submit_student_answer(self.student_1.username, u'Pröblem1', ['Option 1']) result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result) - problem_name = 'Homework 1: Problem - Problem1' + problem_name = u'Homework 1: Problem - Pröblem1' header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)'] self.verify_rows_in_csv([ dict(zip( header_row, - [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.01', '1.0', '2.0'] + [ + unicode(self.student_1.id), + self.student_1.email, + self.student_1.username, + '0.01', '1.0', '2.0'] )), dict(zip( header_row, - [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0', 'N/A', 
'N/A'] + [ + unicode(self.student_2.id), + self.student_2.email, + self.student_2.username, + '0.0', 'N/A', 'N/A' + ] )) ]) + @patch('instructor_task.tasks_helper._get_current_task') + @patch('instructor_task.tasks_helper.iterate_grades_for') + def test_grading_failure(self, mock_iterate_grades_for, _mock_current_task): + """ + Test that any grading errors are properly reported in the progress + dict and uploaded to the report store. + """ + # mock an error response from `iterate_grades_for` + student = self.create_student(u'username', u'student@example.com') + error_message = u'Cannöt grade student' + mock_iterate_grades_for.return_value = [ + (student, {}, error_message) + ] + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result) + + report_store = ReportStore.from_config() + self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id))) + self.verify_rows_in_csv([ + { + u'Student ID': unicode(student.id), + u'Email': student.email, + u'Username': student.username, + u'error_msg': error_message + } + ]) + + +class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase): + """ + Test the problem report on a course that has split tests. + """ -class TestProblemReportSplitTestContent(TestGradeReportConditionalContent): OPTION_1 = 'Option 1' OPTION_2 = 'Option 2' def setUp(self): super(TestProblemReportSplitTestContent, self).setUp() - self.problem_a_url = 'problem_a_url' - self.problem_b_url = 'problem_b_url' + self.problem_a_url = u'pröblem_a_url' + self.problem_b_url = u'pröblem_b_url' self.define_option_problem(self.problem_a_url, parent=self.vertical_a) self.define_option_problem(self.problem_b_url, parent=self.vertical_b) def test_problem_grade_report(self): """ - Test problems that exist in a problem grade report. 
+ Test that we generate the correct the correct grade report when dealing with A/B tests. + + In order to verify that the behavior of the grade report is correct, we submit answers for problems + that the student won't have access to. A/B tests won't restrict access to the problems, but it should + not show up in that student's course tree when generating the grade report, hence the N/A's in the grade report. """ # student A will get 100%, student B will get 50% because # OPTION_1 is the correct option, and OPTION_2 is the @@ -350,9 +393,11 @@ class TestProblemReportSplitTestContent(TestGradeReportConditionalContent): with patch('instructor_task.tasks_helper._get_current_task'): result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') - self.verify_csv_task_success(result) + self.assertDictContainsSubset( + {'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result + ) - problem_names = ['Homework 1: Problem - problem_a_url', 'Homework 1: Problem - problem_b_url'] + problem_names = [u'Homework 1: Problem - pröblem_a_url', u'Homework 1: Problem - pröblem_b_url'] header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] for problem in problem_names: header_row += [problem + ' (Earned)', problem + ' (Possible)'] @@ -360,16 +405,28 @@ class TestProblemReportSplitTestContent(TestGradeReportConditionalContent): self.verify_rows_in_csv([ dict(zip( header_row, - [unicode(self.student_a.id), self.student_a.email, self.student_a.username, u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'] + [ + unicode(self.student_a.id), + self.student_a.email, + self.student_a.username, + u'1.0', u'2.0', u'2.0', u'N/A', u'N/A' + ] )), dict(zip( header_row, - [unicode(self.student_b.id), self.student_b.email, self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'] + [ + unicode(self.student_b.id), + self.student_b.email, + self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0' + ] )) ]) class 
TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, InstructorTaskModuleTestCase): + """ + Test the problem report on a course that has cohorted content. + """ def setUp(self): super(TestProblemReportCohortedContent, self).setUp() # contstruct cohorted problems to work on. @@ -380,29 +437,33 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In metadata={'graded': True}, display_name='Problem Vertical' ) - print self.course.user_partitions self.define_option_problem( - "Problem0", + u"Pröblem0", parent=vertical, group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]} ) self.define_option_problem( - "Problem1", + u"Pröblem1", parent=vertical, group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]} ) - self.submit_student_answer(self.alpha_user.username, 'Problem0', ['Option 1', 'Option 1']) - self.submit_student_answer(self.alpha_user.username, 'Problem1', ['Option 1', 'Option 1']) - self.submit_student_answer(self.beta_user.username, 'Problem0', ['Option 1', 'Option 2']) - self.submit_student_answer(self.beta_user.username, 'Problem1', ['Option 1', 'Option 2']) - def test_cohort_content(self): + self.submit_student_answer(self.alpha_user.username, u'Pröblem0', ['Option 1', 'Option 1']) + resp = self.submit_student_answer(self.alpha_user.username, u'Pröblem1', ['Option 1', 'Option 1']) + self.assertEqual(resp.status_code, 404) + + resp = self.submit_student_answer(self.beta_user.username, u'Pröblem0', ['Option 1', 'Option 2']) + self.assertEqual(resp.status_code, 404) + self.submit_student_answer(self.beta_user.username, u'Pröblem1', ['Option 1', 'Option 2']) + with patch('instructor_task.tasks_helper._get_current_task'): result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') - self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result) + 
self.assertDictContainsSubset( + {'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result + ) - problem_names = ['Homework 1: Problem - Problem0', 'Homework 1: Problem - Problem1'] + problem_names = [u'Homework 1: Problem - Pröblem0', u'Homework 1: Problem - Pröblem1'] header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] for problem in problem_names: header_row += [problem + ' (Earned)', problem + ' (Possible)'] @@ -410,19 +471,38 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In self.verify_rows_in_csv([ dict(zip( header_row, - [unicode(self.staff_user.id), self.staff_user.email, self.staff_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'] + [ + unicode(self.staff_user.id), + self.staff_user.email, + self.staff_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A' + ] )), dict(zip( header_row, - [unicode(self.alpha_user.id), self.alpha_user.email, self.alpha_user.username, u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'] + [ + unicode(self.alpha_user.id), + self.alpha_user.email, + self.alpha_user.username, + u'1.0', u'2.0', u'2.0', u'N/A', u'N/A' + ] )), dict(zip( header_row, - [unicode(self.beta_user.id), self.beta_user.email, self.beta_user.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'] + [ + unicode(self.beta_user.id), + self.beta_user.email, + self.beta_user.username, + u'0.5', u'N/A', u'N/A', u'1.0', u'2.0' + ] )), dict(zip( header_row, - [unicode(self.non_cohorted_user.id), self.non_cohorted_user.email, self.non_cohorted_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'] + [ + unicode(self.non_cohorted_user.id), + self.non_cohorted_user.email, + self.non_cohorted_user.username, + u'0.0', u'N/A', u'N/A', u'N/A', u'N/A' + ] )), ]) @@ -468,7 +548,7 @@ class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase): with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task: mock_current_task.return_value = self.current_task result = upload_students_csv(None, 
None, self.course.id, task_input, 'calculated') - #This assertion simply confirms that the generation completed with no errors + # This assertion simply confirms that the generation completed with no errors num_students = len(students) self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result) diff --git a/openedx/core/djangoapps/content/course_structures/models.py b/openedx/core/djangoapps/content/course_structures/models.py index 29b6697e9c..e0445ecec9 100644 --- a/openedx/core/djangoapps/content/course_structures/models.py +++ b/openedx/core/djangoapps/content/course_structures/models.py @@ -29,6 +29,9 @@ class CourseStructure(TimeStampedModel): @property def ordered_blocks(self): + """ + Return the blocks in the order with which they're seen in the courseware. Parents are ordered before children. + """ if self.structure: ordered_blocks = OrderedDict() self._traverse_tree(self.structure['root'], self.structure['blocks'], ordered_blocks) diff --git a/openedx/core/djangoapps/content/course_structures/tests.py b/openedx/core/djangoapps/content/course_structures/tests.py index 3b82f9a2b0..c2f1b4f5cf 100644 --- a/openedx/core/djangoapps/content/course_structures/tests.py +++ b/openedx/core/djangoapps/content/course_structures/tests.py @@ -68,8 +68,8 @@ class CourseStructureTaskTests(ModuleStoreTestCase): } } structure_json = json.dumps(structure) - cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) - self.assertEqual(cs.structure_json, structure_json) + structure = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) + self.assertEqual(structure.structure_json, structure_json) # Reload the data to ensure the init signal is fired to decompress the data. 
cs = CourseStructure.objects.get(course_id=self.course.id) @@ -91,7 +91,6 @@ class CourseStructureTaskTests(ModuleStoreTestCase): cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) self.assertDictEqual(cs.structure, structure) - def test_ordered_blocks(self): structure = { 'root': 'a/b/c', @@ -121,9 +120,11 @@ class CourseStructureTaskTests(ModuleStoreTestCase): } in_order_blocks = ['a/b/c', 'g/h/i', 'j/k/l', 'd/e/f'] structure_json = json.dumps(structure) - cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) + retrieved_course_structure = CourseStructure.objects.create( + course_id=self.course.id, structure_json=structure_json + ) - self.assertEqual(cs.ordered_blocks.keys(), in_order_blocks) + self.assertEqual(retrieved_course_structure.ordered_blocks.keys(), in_order_blocks) def test_block_with_missing_fields(self): """ diff --git a/openedx/core/djangoapps/util/__init__.py b/openedx/core/djangoapps/util/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openedx/core/djangoapps/util/testing.py b/openedx/core/djangoapps/util/testing.py new file mode 100644 index 0000000000..21c1a209c2 --- /dev/null +++ b/openedx/core/djangoapps/util/testing.py @@ -0,0 +1,209 @@ +""" Mixins for setting up particular course structures (such as split tests or cohorted content) """ + +from datetime import datetime +from pytz import UTC + +from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup +from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory +from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory +from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory +from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase +from xmodule.partitions.partitions import UserPartition, Group +from student.tests.factories import CourseEnrollmentFactory, UserFactory + + +class 
ContentGroupTestCase(ModuleStoreTestCase): + """ + Sets up discussion modules visible to content groups 'Alpha' and + 'Beta', as well as a module visible to all students. Creates a + staff user, users with access to Alpha/Beta (by way of cohorts), + and a non-cohorted user with no special access. + """ + def setUp(self): + super(ContentGroupTestCase, self).setUp() + + self.course = CourseFactory.create( + org='org', number='number', run='run', + # This test needs to use a course that has already started -- + # discussion topics only show up if the course has already started, + # and the default start date for courses is Jan 1, 2030. + start=datetime(2012, 2, 3, tzinfo=UTC), + user_partitions=[ + UserPartition( + 0, + 'Content Group Configuration', + '', + [Group(1, 'Alpha'), Group(2, 'Beta')], + scheme_id='cohort' + ) + ], + grading_policy={ + "GRADER": [{ + "type": "Homework", + "min_count": 1, + "drop_count": 0, + "short_label": "HW", + "weight": 1.0 + }] + }, + cohort_config={'cohorted': True}, + discussion_topics={} + ) + + self.staff_user = UserFactory.create(is_staff=True) + self.alpha_user = UserFactory.create() + self.beta_user = UserFactory.create() + self.non_cohorted_user = UserFactory.create() + for user in [self.staff_user, self.alpha_user, self.beta_user, self.non_cohorted_user]: + CourseEnrollmentFactory.create(user=user, course_id=self.course.id) + + alpha_cohort = CohortFactory( + course_id=self.course.id, + name='Cohort Alpha', + users=[self.alpha_user] + ) + beta_cohort = CohortFactory( + course_id=self.course.id, + name='Cohort Beta', + users=[self.beta_user] + ) + CourseUserGroupPartitionGroup.objects.create( + course_user_group=alpha_cohort, + partition_id=self.course.user_partitions[0].id, + group_id=self.course.user_partitions[0].groups[0].id + ) + CourseUserGroupPartitionGroup.objects.create( + course_user_group=beta_cohort, + partition_id=self.course.user_partitions[0].id, + group_id=self.course.user_partitions[0].groups[1].id + ) + 
self.alpha_module = ItemFactory.create( + parent_location=self.course.location, + category='discussion', + discussion_id='alpha_group_discussion', + discussion_target='Visible to Alpha', + group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]} + ) + self.beta_module = ItemFactory.create( + parent_location=self.course.location, + category='discussion', + discussion_id='beta_group_discussion', + discussion_target='Visible to Beta', + group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]} + ) + self.global_module = ItemFactory.create( + parent_location=self.course.location, + category='discussion', + discussion_id='global_group_discussion', + discussion_target='Visible to Everyone' + ) + self.course = self.store.get_item(self.course.location) + + +class TestConditionalContent(ModuleStoreTestCase): + """ + Construct a course with graded problems that exist within a split test. + """ + TEST_SECTION_NAME = 'Problem' + + def setUp(self): + """ + Set up a course with graded problems within a split test. 
+ + Course hierarchy is as follows (modeled after how split tests + are created in studio): + -> course + -> chapter + -> sequential (graded) + -> vertical + -> split_test + -> vertical (Group A) + -> problem + -> vertical (Group B) + -> problem + """ + super(TestConditionalContent, self).setUp() + + # Create user partitions + self.user_partition_group_a = 0 + self.user_partition_group_b = 1 + self.partition = UserPartition( + 0, + 'first_partition', + 'First Partition', + [ + Group(self.user_partition_group_a, 'Group A'), + Group(self.user_partition_group_b, 'Group B') + ] + ) + + # Create course with group configurations and grading policy + self.course = CourseFactory.create( + user_partitions=[self.partition], + grading_policy={ + "GRADER": [{ + "type": "Homework", + "min_count": 1, + "drop_count": 0, + "short_label": "HW", + "weight": 1.0 + }] + } + ) + chapter = ItemFactory.create(parent_location=self.course.location, + display_name='Chapter') + + # add a sequence to the course to which the problems can be added + self.problem_section = ItemFactory.create(parent_location=chapter.location, + category='sequential', + metadata={'graded': True, 'format': 'Homework'}, + display_name=self.TEST_SECTION_NAME) + + # Create users and partition them + self.student_a = UserFactory.create(username='student_a', email='student_a@example.com') + CourseEnrollmentFactory.create(user=self.student_a, course_id=self.course.id) + self.student_b = UserFactory.create(username='student_b', email='student_b@example.com') + CourseEnrollmentFactory.create(user=self.student_b, course_id=self.course.id) + + UserCourseTagFactory( + user=self.student_a, + course_id=self.course.id, + key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member + value=str(self.user_partition_group_a) + ) + UserCourseTagFactory( + user=self.student_b, + course_id=self.course.id, + key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: 
disable=no-member + value=str(self.user_partition_group_b) + ) + + # Create a vertical to contain our split test + problem_vertical = ItemFactory.create( + parent_location=self.problem_section.location, + category='vertical', + display_name='Problem Unit' + ) + + # Create the split test and child vertical containers + vertical_a_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_a') + vertical_b_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_b') + self.split_test = ItemFactory.create( + parent_location=problem_vertical.location, + category='split_test', + display_name='Split Test', + user_partition_id=self.partition.id, # pylint: disable=no-member + group_id_to_child={str(index): url for index, url in enumerate([vertical_a_url, vertical_b_url])} + ) + self.vertical_a = ItemFactory.create( + parent_location=self.split_test.location, + category='vertical', + display_name='Group A problem container', + location=vertical_a_url + ) + self.vertical_b = ItemFactory.create( + parent_location=self.split_test.location, + category='vertical', + display_name='Group B problem container', + location=vertical_b_url + )