diff --git a/lms/djangoapps/course_structure_api/v0/views.py b/lms/djangoapps/course_structure_api/v0/views.py index fe9156c6ae..d88d4138b1 100644 --- a/lms/djangoapps/course_structure_api/v0/views.py +++ b/lms/djangoapps/course_structure_api/v0/views.py @@ -262,7 +262,6 @@ class CourseStructure(CourseViewMixin, RetrieveAPIView): return Response(status=503, headers={'Retry-After': '120'}) - class CourseGradingPolicy(CourseViewMixin, ListAPIView): """ **Use Case** diff --git a/lms/djangoapps/courseware/grades.py b/lms/djangoapps/courseware/grades.py index 79caf5b765..02d608040d 100644 --- a/lms/djangoapps/courseware/grades.py +++ b/lms/djangoapps/courseware/grades.py @@ -225,7 +225,7 @@ def _grade(student, request, course, keep_raw_scores): graded = module_descriptor.graded if not total > 0: - #We simply cannot grade a problem that is 12/0, because we might need it as a percentage + # We simply cannot grade a problem that is 12/0, because we might need it as a percentage graded = False scores.append( @@ -494,7 +494,7 @@ def manual_transaction(): transaction.commit() -def iterate_grades_for(course_or_id, students): +def iterate_grades_for(course_or_id, students, keep_raw_scores=False): """Given a course_id and an iterable of students (User), yield a tuple of: (student, gradeset, err_msg) for every student enrolled in the course. @@ -531,7 +531,7 @@ def iterate_grades_for(course_or_id, students): # It's not pretty, but untangling that is currently beyond the # scope of this feature. 
request.session = {} - gradeset = grade(student, request, course) + gradeset = grade(student, request, course, keep_raw_scores) yield student, gradeset, "" except Exception as exc: # pylint: disable=broad-except # Keep marching on even if this student couldn't be graded for diff --git a/lms/djangoapps/courseware/tests/test_grades.py b/lms/djangoapps/courseware/tests/test_grades.py index c6825c99be..f0d46c5d02 100644 --- a/lms/djangoapps/courseware/tests/test_grades.py +++ b/lms/djangoapps/courseware/tests/test_grades.py @@ -68,7 +68,7 @@ class TestGradeIteration(ModuleStoreTestCase): def test_all_empty_grades(self): """No students have grade entries""" - all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students) + all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students, keep_raw_scores=True) self.assertEqual(len(all_errors), 0) for gradeset in all_gradesets.values(): self.assertIsNone(gradeset['grade']) @@ -107,7 +107,7 @@ class TestGradeIteration(ModuleStoreTestCase): self.assertTrue(all_gradesets[student5]) ################################# Helpers ################################# - def _gradesets_and_errors_for(self, course_id, students): + def _gradesets_and_errors_for(self, course_id, students, keep_raw_scores=False): """Simple helper method to iterate through student grades and give us two dictionaries -- one that has all students and their respective gradesets, and one that has only students that could not be graded and @@ -115,7 +115,7 @@ class TestGradeIteration(ModuleStoreTestCase): students_to_gradesets = {} students_to_errors = {} - for student, gradeset, err_msg in iterate_grades_for(course_id, students): + for student, gradeset, err_msg in iterate_grades_for(course_id, students, keep_raw_scores): students_to_gradesets[student] = gradeset if err_msg: students_to_errors[student] = err_msg diff --git a/lms/djangoapps/django_comment_client/tests/utils.py 
b/lms/djangoapps/django_comment_client/tests/utils.py index 084c78cbfd..9b73ff4dd4 100644 --- a/lms/djangoapps/django_comment_client/tests/utils.py +++ b/lms/djangoapps/django_comment_client/tests/utils.py @@ -76,6 +76,15 @@ class ContentGroupTestCase(ModuleStoreTestCase): scheme_id='cohort' ) ], + grading_policy={ + "GRADER": [{ + "type": "Homework", + "min_count": 1, + "drop_count": 0, + "short_label": "HW", + "weight": 1.0 + }] + }, cohort_config={'cohorted': True}, discussion_topics={} ) diff --git a/lms/djangoapps/instructor/views/api.py b/lms/djangoapps/instructor/views/api.py index 628ed638ee..78fd631cde 100644 --- a/lms/djangoapps/instructor/views/api.py +++ b/lms/djangoapps/instructor/views/api.py @@ -1951,6 +1951,34 @@ def calculate_grades_csv(request, course_id): }) +@ensure_csrf_cookie +@cache_control(no_cache=True, no_store=True, must_revalidate=True) +@require_level('staff') +def problem_grade_report(request, course_id): + """ + Request a CSV showing students' weighted grades for all problems in the + course. + + AlreadyRunningError is raised if the course's grades are already being + updated. + """ + course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) + try: + instructor_task.api.submit_problem_grade_report(request, course_key) + # TODO: verify copy with documentation team + success_status = _("Your weighted problem report is being generated! " + "You can view the status of the generation task in the 'Pending Instructor Tasks' section.") + return JsonResponse({"status": success_status}) + except AlreadyRunningError: + # TODO: verify copy with documentation team + already_running_status = _("A weighted problem generation task is already in progress. " + "Check the 'Pending Instructor Tasks' table for the status of the task. 
" + "When completed, the report will be available for download in the table below.") + return JsonResponse({ + "status": already_running_status + }) + + @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) @require_level('staff') diff --git a/lms/djangoapps/instructor/views/api_urls.py b/lms/djangoapps/instructor/views/api_urls.py index fc6a05aaef..0991108ba2 100644 --- a/lms/djangoapps/instructor/views/api_urls.py +++ b/lms/djangoapps/instructor/views/api_urls.py @@ -87,6 +87,8 @@ urlpatterns = patterns( 'instructor.views.api.list_report_downloads', name="list_report_downloads"), url(r'calculate_grades_csv$', 'instructor.views.api.calculate_grades_csv', name="calculate_grades_csv"), + url(r'problem_grade_report$', + 'instructor.views.api.problem_grade_report', name="problem_grade_report"), # Registration Codes.. url(r'get_registration_codes$', diff --git a/lms/djangoapps/instructor/views/instructor_dashboard.py b/lms/djangoapps/instructor/views/instructor_dashboard.py index 5b44f8ace2..c91a8d3855 100644 --- a/lms/djangoapps/instructor/views/instructor_dashboard.py +++ b/lms/djangoapps/instructor/views/instructor_dashboard.py @@ -420,6 +420,7 @@ def _section_data_download(course, access): 'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}), 'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}), 'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}), + 'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}), } return section_data diff --git a/lms/djangoapps/instructor_task/api.py b/lms/djangoapps/instructor_task/api.py index 51aa63c8e6..8c16cef652 100644 --- a/lms/djangoapps/instructor_task/api.py +++ b/lms/djangoapps/instructor_task/api.py @@ -19,6 +19,7 @@ from instructor_task.tasks import ( delete_problem_state, 
send_bulk_course_email, calculate_grades_csv, + calculate_problem_grade_report, calculate_students_features_csv, cohort_students, ) @@ -334,6 +335,18 @@ def submit_calculate_grades_csv(request, course_key): return submit_task(request, task_type, task_class, course_key, task_input, task_key) +def submit_problem_grade_report(request, course_key): + """ + Submits a task to generate a CSV grade report containing weighted problem + values. + """ + task_type = 'grade_problems' + task_class = calculate_problem_grade_report + task_input = {} + task_key = "" + return submit_task(request, task_type, task_class, course_key, task_input, task_key) + + def submit_calculate_students_features_csv(request, course_key, features): """ Submits a task to generate a CSV containing student profile info. diff --git a/lms/djangoapps/instructor_task/tasks.py b/lms/djangoapps/instructor_task/tasks.py index 9e3ae7d6ae..16c1021e20 100644 --- a/lms/djangoapps/instructor_task/tasks.py +++ b/lms/djangoapps/instructor_task/tasks.py @@ -35,6 +35,7 @@ from instructor_task.tasks_helper import ( reset_attempts_module_state, delete_problem_module_state, upload_grades_csv, + upload_problem_grade_report, upload_students_csv, cohort_students_and_upload ) @@ -155,6 +156,25 @@ def calculate_grades_csv(entry_id, xmodule_instance_args): return run_main_task(entry_id, task_fn, action_name) +# TODO: GRADES_DOWNLOAD_ROUTING_KEY is the high mem queue. Do we know we need it? +@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable +def calculate_problem_grade_report(entry_id, xmodule_instance_args): + """ + Generate a CSV for a course containing all students' weighted problem + grades and push the results to an S3 bucket for download. + """ + # Translators: This is a past-tense verb that is inserted into task progress messages as {action}. + # TODO: can this be the same as the `calculate_grades_csv` action_name? 
+ action_name = ugettext_noop('graded') + TASK_LOG.info( + u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution", + xmodule_instance_args.get('task_id'), entry_id, action_name + ) + + task_fn = partial(upload_problem_grade_report, xmodule_instance_args) + return run_main_task(entry_id, task_fn, action_name) + + @task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable def calculate_students_features_csv(entry_id, xmodule_instance_args): """ diff --git a/lms/djangoapps/instructor_task/tasks_helper.py b/lms/djangoapps/instructor_task/tasks_helper.py index 7dcc967394..eb5d99a814 100644 --- a/lms/djangoapps/instructor_task/tasks_helper.py +++ b/lms/djangoapps/instructor_task/tasks_helper.py @@ -4,7 +4,10 @@ running state of a course. """ import json +from collections import OrderedDict from datetime import datetime +from eventtracking import tracker +from itertools import chain from time import time import unicodecsv import logging @@ -34,6 +37,7 @@ from instructor_task.models import ReportStore, InstructorTask, PROGRESS from lms.djangoapps.lms_xblock.runtime import LmsPartitionService from openedx.core.djangoapps.course_groups.cohorts import get_cohort from openedx.core.djangoapps.course_groups.models import CourseUserGroup +from openedx.core.djangoapps.content.course_structures.models import CourseStructure from opaque_keys.edx.keys import UsageKey from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted from student.models import CourseEnrollment @@ -705,6 +709,105 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, return task_progress.update_task_state(extra_meta=current_step) +def _order_problems(blocks): + """ + Sort the problems by the assignment type and assignment that it belongs to. 
+ """ + problems = OrderedDict() + assignments = dict() + # First, sort out all the blocks into their correct assignments and all the + # assignments into their correct types. + for block in blocks: + # Put the assignments in order into the assignments list. + if blocks[block]['block_type'] == 'sequential': + block_format = blocks[block]['format'] + if block_format not in assignments: + assignments[block_format] = OrderedDict() + assignments[block_format][block] = list() + + # Put the problems into the correct order within their assignment. + if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True: + current = blocks[block]['parent'] + # crawl up the tree for the sequential block + while blocks[current]['block_type'] != 'sequential': + current = blocks[current]['parent'] + + current_format = blocks[current]['format'] + assignments[current_format][current].append(block) + + # Now that we have a sorting and an order for the assignments and problems, + # iterate through them in order to generate the header row. + for assignment_type in assignments: + for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1): + for problem in assignments[assignment_type][assignment]: + header_name = "{assignment_type} {assignment_index}: {assignment_name} - {block}".format( + block=blocks[problem]['display_name'], + assignment_type=assignment_type, + assignment_index=assignment_index, + assignment_name=blocks[assignment]['display_name'] + ) + problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"] + + return problems + + +def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): + """ + Generate a CSV containing all students' problem grades within a given + `course_id`. 
+ """ + start_time = time() + start_date = datetime.now(UTC) + status_interval = 100 + enrolled_students = CourseEnrollment.users_enrolled_in(course_id) + task_progress = TaskProgress(action_name, enrolled_students.count(), start_time) + + # This struct encapsulates both the display names of each static item in + # the header row as values as well as the django User field names of those + # items as the keys. It is structured in this way to keep the values + # related. + header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')]) + + try: + course_structure = CourseStructure.objects.get(course_id=course_id) + blocks = course_structure.ordered_blocks + problems = _order_problems(blocks) + except CourseStructure.DoesNotExist: + return task_progress.update_task_state(extra_meta={'step': 'Generating course structure. Please refresh and try again.'}) + + # Just generate the static fields for now. + rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))] + current_step = {'step': 'Calculating Grades'} + + for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True): + student_fields = [getattr(student, field_name) for field_name in header_row] + final_grade = gradeset['percent'] + # Only consider graded problems + problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded} + earned_possible_values = list() + for problem_id in problems: + try: + problem_score = problem_scores[problem_id] + earned_possible_values.append([problem_score.earned, problem_score.possible]) + except KeyError: + # The student has not been graded on this problem. For example, + # iterate_grades_for skips problems that students have never + # seen in order to speed up report generation. It could also be + # the case that the student does not have access to it (e.g. A/B + # test or cohorted courseware). 
+ earned_possible_values.append(['N/A', 'N/A']) + rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values))) + + task_progress.attempted += 1 + task_progress.succeeded += 1 + if task_progress.attempted % status_interval == 0: + task_progress.update_task_state(extra_meta=current_step) + + # Perform the upload + upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date) + return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'}) + + def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name): """ For a given `course_id`, generate a CSV file containing profile diff --git a/lms/djangoapps/instructor_task/tests/test_base.py b/lms/djangoapps/instructor_task/tests/test_base.py index f83d79a7c3..d8a905ba2a 100644 --- a/lms/djangoapps/instructor_task/tests/test_base.py +++ b/lms/djangoapps/instructor_task/tests/test_base.py @@ -127,7 +127,9 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase) if course_factory_kwargs is not None: course_args.update(course_factory_kwargs) self.course = CourseFactory.create(**course_args) + self.add_course_content() + def add_course_content(self): # Add a chapter to the course chapter = ItemFactory.create(parent_location=self.course.location, display_name=TEST_SECTION_NAME) @@ -141,12 +143,13 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase) @staticmethod def get_user_email(username): """Generate email address based on username""" - return '{0}@test.com'.format(username) + return u'{0}@test.com'.format(username) def login_username(self, username): """Login the user, given the `username`.""" if self.current_user != username: - self.login(InstructorTaskCourseTestCase.get_user_email(username), "test") + user_email = User.objects.get(username=username).email + self.login(user_email, "test") self.current_user = username def _create_user(self, username, email=None, 
is_staff=False, mode='honor'): @@ -190,16 +193,18 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): the setup of a course and problem in order to access StudentModule state. """ @staticmethod - def problem_location(problem_url_name): + def problem_location(problem_url_name, course_key=None): """ Create an internal location for a test problem. """ if "i4x:" in problem_url_name: return Location.from_deprecated_string(problem_url_name) + elif course_key: + return course_key.make_usage_key('problem', problem_url_name) else: return TEST_COURSE_KEY.make_usage_key('problem', problem_url_name) - def define_option_problem(self, problem_url_name, parent=None): + def define_option_problem(self, problem_url_name, parent=None, **kwargs): """Create the problem definition so the answer is Option 1""" if parent is None: parent = self.problem_section @@ -213,7 +218,8 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): parent=parent, category="problem", display_name=str(problem_url_name), - data=problem_xml) + data=problem_xml, + **kwargs) def redefine_option_problem(self, problem_url_name): """Change the problem definition so the answer is Option 2""" @@ -249,8 +255,9 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): # Note that this is a capa-specific convention. The form is a version of the problem's # URL, modified so that it can be easily stored in html, prepended with "input-" and # appended with a sequence identifier for the particular response the input goes to. 
- return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(), - TEST_COURSE_NUMBER.replace('.', '_'), + course_key = self.course.id + return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(course_key.org, + course_key.course.replace('.', '_'), problem_url_name, response_id) # make sure that the requested user is logged in, so that the ajax call works @@ -260,7 +267,7 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase): modx_url = reverse('xblock_handler', kwargs={ 'course_id': self.course.id.to_deprecated_string(), 'usage_id': quote_slashes( - InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string() + InstructorTaskModuleTestCase.problem_location(problem_url_name, self.course.id).to_deprecated_string() ), 'handler': 'xmodule_handler', 'suffix': 'problem_check', diff --git a/lms/djangoapps/instructor_task/tests/test_integration.py b/lms/djangoapps/instructor_task/tests/test_integration.py index d6cb4cfc0e..deaebe7904 100644 --- a/lms/djangoapps/instructor_task/tests/test_integration.py +++ b/lms/djangoapps/instructor_task/tests/test_integration.py @@ -28,7 +28,7 @@ from instructor_task.api import (submit_rescore_problem_for_all_students, submit_reset_problem_attempts_for_all_students, submit_delete_problem_state_for_all_students) from instructor_task.models import InstructorTask -from instructor_task.tasks_helper import upload_grades_csv +from instructor_task.tasks_helper import upload_grades_csv, upload_problem_grade_report from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TestReportMixin, TEST_COURSE_ORG, TEST_COURSE_NUMBER, OPTION_1, OPTION_2) from capa.responsetypes import StudentInputError diff --git a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py index 1ec9d1a7ec..6afaccdb81 100644 --- a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py +++ b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py @@ 
-19,6 +19,7 @@ from instructor_task.tasks_helper import cohort_students_and_upload, upload_grad from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin, InstructorTaskModuleTestCase from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory +from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme from student.tests.factories import UserFactory @@ -26,6 +27,13 @@ from student.models import CourseEnrollment from verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.partitions.partitions import Group, UserPartition +from instructor_task.models import ReportStore +from instructor_task.tasks_helper import ( + cohort_students_and_upload, upload_grades_csv, upload_problem_grade_report, upload_students_csv +) +from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin +from instructor_task.tests.test_integration import TestGradeReportConditionalContent +from django_comment_client.tests.utils import ContentGroupTestCase @ddt.ddt @@ -261,6 +269,164 @@ class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase): self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result) +class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase): + """ + Test that the weighted problem CSV generation works. + """ + def setUp(self): + super(TestProblemGradeReport, self).setUp() + self.maxDiff = None + self.initialize_course() + # Add unicode data to CSV even though unicode usernames aren't + # technically possible in openedx. 
+ self.student_1 = self.create_student(u'üser_1') + self.student_2 = self.create_student(u'üser_2') + self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] + + @patch('instructor_task.tasks_helper._get_current_task') + def test_no_problems(self, _get_current_task): + """ + Verify that we see no grade information for a course with no graded + problems. + """ + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result) + self.verify_rows_in_csv([ + dict(zip(self.csv_header_row, [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.0'])), + dict(zip(self.csv_header_row, [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0'])) + ]) + + @patch('instructor_task.tasks_helper._get_current_task') + def test_single_problem(self, _get_current_task): + vertical = ItemFactory.create( + parent_location=self.problem_section.location, + category='vertical', + metadata={'graded': True}, + display_name='Problem Vertical' + ) + self.define_option_problem('Problem1', parent=vertical) + # generate the course structure + + self.submit_student_answer(self.student_1.username, 'Problem1', ['Option 1']) + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result) + problem_name = 'Homework 1: Problem - Problem1' + header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)'] + self.verify_rows_in_csv([ + dict(zip( + header_row, + [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.01', '1.0', '2.0'] + )), + dict(zip( + header_row, + [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0', 'N/A', 'N/A'] + )) + ]) + + +class 
TestProblemReportSplitTestContent(TestGradeReportConditionalContent): + OPTION_1 = 'Option 1' + OPTION_2 = 'Option 2' + + def setUp(self): + super(TestProblemReportSplitTestContent, self).setUp() + self.problem_a_url = 'problem_a_url' + self.problem_b_url = 'problem_b_url' + self.define_option_problem(self.problem_a_url, parent=self.vertical_a) + self.define_option_problem(self.problem_b_url, parent=self.vertical_b) + + def test_problem_grade_report(self): + """ + Test problems that exist in a problem grade report. + """ + # student A will get 100%, student B will get 50% because + # OPTION_1 is the correct option, and OPTION_2 is the + # incorrect option + self.submit_student_answer(self.student_a.username, self.problem_a_url, [self.OPTION_1, self.OPTION_1]) + self.submit_student_answer(self.student_a.username, self.problem_b_url, [self.OPTION_1, self.OPTION_1]) + + self.submit_student_answer(self.student_b.username, self.problem_a_url, [self.OPTION_1, self.OPTION_2]) + self.submit_student_answer(self.student_b.username, self.problem_b_url, [self.OPTION_1, self.OPTION_2]) + + with patch('instructor_task.tasks_helper._get_current_task'): + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.verify_csv_task_success(result) + + problem_names = ['Homework 1: Problem - problem_a_url', 'Homework 1: Problem - problem_b_url'] + header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] + for problem in problem_names: + header_row += [problem + ' (Earned)', problem + ' (Possible)'] + + self.verify_rows_in_csv([ + dict(zip( + header_row, + [unicode(self.student_a.id), self.student_a.email, self.student_a.username, u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'] + )), + dict(zip( + header_row, + [unicode(self.student_b.id), self.student_b.email, self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'] + )) + ]) + + +class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, InstructorTaskModuleTestCase): + def 
setUp(self): + super(TestProblemReportCohortedContent, self).setUp() + # construct cohorted problems to work on. + self.add_course_content() + vertical = ItemFactory.create( + parent_location=self.problem_section.location, + category='vertical', + metadata={'graded': True}, + display_name='Problem Vertical' + ) + print self.course.user_partitions + self.define_option_problem( + "Problem0", + parent=vertical, + group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]} + ) + self.define_option_problem( + "Problem1", + parent=vertical, + group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]} + ) + + self.submit_student_answer(self.alpha_user.username, 'Problem0', ['Option 1', 'Option 1']) + self.submit_student_answer(self.alpha_user.username, 'Problem1', ['Option 1', 'Option 1']) + self.submit_student_answer(self.beta_user.username, 'Problem0', ['Option 1', 'Option 2']) + self.submit_student_answer(self.beta_user.username, 'Problem1', ['Option 1', 'Option 2']) + + def test_cohort_content(self): + with patch('instructor_task.tasks_helper._get_current_task'): + result = upload_problem_grade_report(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result) + + problem_names = ['Homework 1: Problem - Problem0', 'Homework 1: Problem - Problem1'] + header_row = [u'Student ID', u'Email', u'Username', u'Final Grade'] + for problem in problem_names: + header_row += [problem + ' (Earned)', problem + ' (Possible)'] + + self.verify_rows_in_csv([ + dict(zip( + header_row, + [unicode(self.staff_user.id), self.staff_user.email, self.staff_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'] + )), + dict(zip( + header_row, + [unicode(self.alpha_user.id), self.alpha_user.email, self.alpha_user.username, u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'] + )), + dict(zip( + header_row, + [unicode(self.beta_user.id),
self.beta_user.email, self.beta_user.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'] + )), + dict(zip( + header_row, + [unicode(self.non_cohorted_user.id), self.non_cohorted_user.email, self.non_cohorted_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'] + )), + ]) + + @ddt.ddt class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase): """ diff --git a/lms/static/coffee/src/instructor_dashboard/data_download.coffee b/lms/static/coffee/src/instructor_dashboard/data_download.coffee index acc0cd9929..ad9de769f9 100644 --- a/lms/static/coffee/src/instructor_dashboard/data_download.coffee +++ b/lms/static/coffee/src/instructor_dashboard/data_download.coffee @@ -22,6 +22,7 @@ class DataDownload @$list_anon_btn = @$section.find("input[name='list-anon-ids']'") @$grade_config_btn = @$section.find("input[name='dump-gradeconf']'") @$calculate_grades_csv_btn = @$section.find("input[name='calculate-grades-csv']'") + @$problem_grade_report_csv_btn = @$section.find("input[name='problem-grade-report']'") # response areas @$download = @$section.find '.data-download-container' @@ -108,16 +109,22 @@ class DataDownload @$download_display_text.html data['grading_config_summary'] @$calculate_grades_csv_btn.click (e) => + @onClickGradeDownload @$calculate_grades_csv_btn, "Error generating grades. Please try again." + + @$problem_grade_report_csv_btn.click (e) => + @onClickGradeDownload @$problem_grade_report_csv_btn, "Error generating weighted problem report. Please try again." + + onClickGradeDownload: (button, errorMessage) -> # Clear any CSS styling from the request-response areas #$(".msg-confirm").css({"display":"none"}) #$(".msg-error").css({"display":"none"}) @clear_display() - url = @$calculate_grades_csv_btn.data 'endpoint' + url = button.data 'endpoint' $.ajax dataType: 'json' url: url error: (std_ajax_err) => - @$reports_request_response_error.text gettext("Error generating grades. 
Please try again.") + @$reports_request_response_error.text gettext(errorMessage) $(".msg-error").css({"display":"block"}) success: (data) => @$reports_request_response.text data['status'] diff --git a/lms/templates/instructor/instructor_dashboard_2/data_download.html b/lms/templates/instructor/instructor_dashboard_2/data_download.html index 9ca6717337..28923c01e2 100644 --- a/lms/templates/instructor/instructor_dashboard_2/data_download.html +++ b/lms/templates/instructor/instructor_dashboard_2/data_download.html @@ -41,6 +41,8 @@

${_("Click to generate a CSV grade report for all currently enrolled students.")}

+ +

%endif
diff --git a/openedx/core/djangoapps/content/course_structures/models.py b/openedx/core/djangoapps/content/course_structures/models.py index 2dcbccbc1a..29b6697e9c 100644 --- a/openedx/core/djangoapps/content/course_structures/models.py +++ b/openedx/core/djangoapps/content/course_structures/models.py @@ -1,6 +1,7 @@ import json import logging +from collections import OrderedDict from model_utils.models import TimeStampedModel from util.models import CompressedTextField @@ -26,6 +27,29 @@ class CourseStructure(TimeStampedModel): return json.loads(self.structure_json) return None + @property + def ordered_blocks(self): + if self.structure: + ordered_blocks = OrderedDict() + self._traverse_tree(self.structure['root'], self.structure['blocks'], ordered_blocks) + return ordered_blocks + + def _traverse_tree(self, block, unordered_structure, ordered_blocks, parent=None): + """ + Traverses the tree and fills in the ordered_blocks OrderedDict with the blocks in + the order that they appear in the course. + """ + # find the dictionary entry for the current node + cur_block = unordered_structure[block] + + if parent: + cur_block['parent'] = parent + + ordered_blocks[block] = cur_block + + for child_node in cur_block['children']: + self._traverse_tree(child_node, unordered_structure, ordered_blocks, parent=block) + # Signals must be imported in a file that is automatically loaded at app startup (e.g. models.py). We import them # at the end of this file to avoid circular dependencies. 
import signals # pylint: disable=unused-import diff --git a/openedx/core/djangoapps/content/course_structures/tests.py b/openedx/core/djangoapps/content/course_structures/tests.py index 5c645049bf..3b82f9a2b0 100644 --- a/openedx/core/djangoapps/content/course_structures/tests.py +++ b/openedx/core/djangoapps/content/course_structures/tests.py @@ -91,6 +91,40 @@ class CourseStructureTaskTests(ModuleStoreTestCase): cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) self.assertDictEqual(cs.structure, structure) + + def test_ordered_blocks(self): + structure = { + 'root': 'a/b/c', + 'blocks': { + 'a/b/c': { + 'id': 'a/b/c', + 'children': [ + 'g/h/i' + ] + }, + 'd/e/f': { + 'id': 'd/e/f', + 'children': [] + }, + 'g/h/i': { + 'id': 'h/j/k', + 'children': [ + 'j/k/l', + 'd/e/f' + ] + }, + 'j/k/l': { + 'id': 'j/k/l', + 'children': [] + } + } + } + in_order_blocks = ['a/b/c', 'g/h/i', 'j/k/l', 'd/e/f'] + structure_json = json.dumps(structure) + cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json) + + self.assertEqual(cs.ordered_blocks.keys(), in_order_blocks) + def test_block_with_missing_fields(self): """ The generator should continue to operate on blocks/XModule that do not have graded or format fields.