diff --git a/common/lib/capa/capa/correctmap.py b/common/lib/capa/capa/correctmap.py
index 26a6b60fba..6adf47832c 100644
--- a/common/lib/capa/capa/correctmap.py
+++ b/common/lib/capa/capa/correctmap.py
@@ -10,7 +10,7 @@ class CorrectMap(object):
     in a capa problem. The response evaluation result for each answer_id includes
     (correctness, npoints, msg, hint, hintmode).
 
-    - correctness : 'correct', 'incorrect', or 'partially-correct'
+    - correctness : 'correct', 'incorrect', 'partially-correct', or 'incomplete'
     - npoints : None, or integer specifying number of points awarded for this answer_id
     - msg : string (may have HTML) giving extra message response
             (displayed below textline or textbox)
diff --git a/common/lib/xmodule/xmodule/graders.py b/common/lib/xmodule/xmodule/graders.py
index 2b3ff833c3..6a8e5d013b 100644
--- a/common/lib/xmodule/xmodule/graders.py
+++ b/common/lib/xmodule/xmodule/graders.py
@@ -5,6 +5,7 @@ Code used to calculate learner grades.
 from __future__ import division
 
 import abc
+from collections import OrderedDict
 import inspect
 import logging
 import random
@@ -174,10 +175,10 @@ class CourseGrader(object):
         - section_breakdown: This is a list of dictionaries which provide details on sections
         that were graded. These are used for display in a graph or chart. The format for a
         section_breakdown dictionary is explained below.
-        - grade_breakdown: This is a list of dictionaries which provide details on the contributions
-        of the final percentage grade. This is a higher level breakdown, for when the grade is constructed
-        of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final). The format for
-        a grade_breakdown is explained below. This section is optional.
+        - grade_breakdown: This is a dict of dictionaries, keyed by category, which provide details on
+        the contributions of the final percentage grade. This is a higher level breakdown, for when the
+        grade is constructed of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final).
+        The format for a grade_breakdown is explained below. This section is optional.
 
         A dictionary in the section_breakdown list has the following keys:
         percent: A float percentage for the section.
@@ -188,7 +189,7 @@ class CourseGrader(object):
         prominent: A boolean value indicating that this section should be displayed as more prominent
         than other items.
 
-        A dictionary in the grade_breakdown list has the following keys:
+        A dictionary in the grade_breakdown dict has the following keys:
         percent: A float percentage in the breakdown. All percents should add up to the final percentage.
         detail: A string explanation of this breakdown. E.g. "Homework - 10% of a possible 15%"
         category: A string identifying the category. Items with the same category are grouped together
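Since `grade_breakdown` changes shape here (from a list of dicts to a dict keyed by category), a minimal sketch of the before/after access pattern may help reviewers. The category names and numbers below are invented for illustration, not taken from this patch:

```python
from collections import OrderedDict

# Illustrative only: the grader now returns grade_breakdown keyed by category.
summary = {
    'percent': 0.61,
    'section_breakdown': [],  # unchanged: still a list of per-section dicts
    'grade_breakdown': OrderedDict([
        ('Homework', {'percent': 0.25, 'detail': 'Homework = 25.00% of a possible 50.00%', 'category': 'Homework'}),
        ('Final', {'percent': 0.36, 'detail': 'Final = 36.00% of a possible 50.00%', 'category': 'Final'}),
    ]),
}

# Old access pattern: summary['grade_breakdown'][0]['percent']
# New access pattern: summary['grade_breakdown']['Homework']['percent']
```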
""" - def __init__(self, sections): - self.sections = sections + def __init__(self, subgraders): + self.subgraders = subgraders def grade(self, grade_sheet, generate_random_scores=False): total_percent = 0.0 section_breakdown = [] - grade_breakdown = [] + grade_breakdown = OrderedDict() - for subgrader, category, weight in self.sections: + for subgrader, assignment_type, weight in self.subgraders: subgrade_result = subgrader.grade(grade_sheet, generate_random_scores) weighted_percent = subgrade_result['percent'] * weight - section_detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(category, weighted_percent, weight) + section_detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(assignment_type, weighted_percent, weight) total_percent += weighted_percent section_breakdown += subgrade_result['section_breakdown'] - grade_breakdown.append({'percent': weighted_percent, 'detail': section_detail, 'category': category}) + grade_breakdown[assignment_type] = { + 'percent': weighted_percent, + 'detail': section_detail, + 'category': assignment_type, + } - return {'percent': total_percent, - 'section_breakdown': section_breakdown, - 'grade_breakdown': grade_breakdown} + return { + 'percent': total_percent, + 'section_breakdown': section_breakdown, + 'grade_breakdown': grade_breakdown + } class AssignmentFormatGrader(CourseGrader): @@ -302,9 +309,12 @@ class AssignmentFormatGrader(CourseGrader): def grade(self, grade_sheet, generate_random_scores=False): def total_with_drops(breakdown, drop_count): - '''calculates total score for a section while dropping lowest scores''' - #create an array of tuples with (index, mark), sorted by mark['percent'] descending + """ + Calculates total score for a section while dropping lowest scores + """ + # Create an array of tuples with (index, mark), sorted by mark['percent'] descending sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent']) + # A list of the indices of the dropped scores dropped_indices = [] if drop_count > 0: @@ -319,8 +329,7 @@ class AssignmentFormatGrader(CourseGrader): return aggregate_score, dropped_indices - #Figure the homework scores - scores = grade_sheet.get(self.type, []) + scores = grade_sheet.get(self.type, {}).values() breakdown = [] for i in range(max(self.min_count, len(scores))): if i < len(scores) or generate_random_scores: diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py b/common/lib/xmodule/xmodule/tests/test_graders.py index 5dfa6327af..dc99f38818 100644 --- a/common/lib/xmodule/xmodule/tests/test_graders.py +++ b/common/lib/xmodule/xmodule/tests/test_graders.py @@ -76,9 +76,9 @@ class GraderTest(unittest.TestCase): } incomplete_gradesheet = { - 'Homework': [], - 'Lab': [], - 'Midterm': [], + 'Homework': {}, + 'Lab': {}, + 'Midterm': {}, } class MockGrade(object): @@ -91,25 +91,31 @@ class GraderTest(unittest.TestCase): common_fields = dict(graded=True, attempted=True) test_gradesheet = { - 'Homework': [ - MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'), - MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'), - ], + 'Homework': { + 'hw1': MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'), + 'hw2': MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'), + }, # The dropped scores should be from the assignments that don't exist yet - 'Lab': [ - MockGrade(AggregatedScore(tw_earned=1, tw_possible=2.0, **common_fields), 
diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py b/common/lib/xmodule/xmodule/tests/test_graders.py
index 5dfa6327af..dc99f38818 100644
--- a/common/lib/xmodule/xmodule/tests/test_graders.py
+++ b/common/lib/xmodule/xmodule/tests/test_graders.py
@@ -76,9 +76,9 @@ class GraderTest(unittest.TestCase):
     }
 
     incomplete_gradesheet = {
-        'Homework': [],
-        'Lab': [],
-        'Midterm': [],
+        'Homework': {},
+        'Lab': {},
+        'Midterm': {},
     }
 
     class MockGrade(object):
@@ -91,25 +91,31 @@ class GraderTest(unittest.TestCase):
     common_fields = dict(graded=True, attempted=True)
     test_gradesheet = {
-        'Homework': [
-            MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'),
-            MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'),
-        ],
+        'Homework': {
+            'hw1': MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'),
+            'hw2': MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'),
+        },
 
         # The dropped scores should be from the assignments that don't exist yet
-        'Lab': [
-            MockGrade(AggregatedScore(tw_earned=1, tw_possible=2.0, **common_fields), display_name='lab1'),  # Dropped
-            MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab2'),
-            MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab3'),
-            MockGrade(AggregatedScore(tw_earned=5, tw_possible=25.0, **common_fields), display_name='lab4'),  # Dropped
-            MockGrade(AggregatedScore(tw_earned=3, tw_possible=4.0, **common_fields), display_name='lab5'),  # Dropped
-            MockGrade(AggregatedScore(tw_earned=6, tw_possible=7.0, **common_fields), display_name='lab6'),
-            MockGrade(AggregatedScore(tw_earned=5, tw_possible=6.0, **common_fields), display_name='lab7'),
-        ],
+        'Lab': {
+            # Dropped
+            'lab1': MockGrade(AggregatedScore(tw_earned=1, tw_possible=2.0, **common_fields), display_name='lab1'),
+            'lab2': MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab2'),
+            'lab3': MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab3'),
+            # Dropped
+            'lab4': MockGrade(AggregatedScore(tw_earned=5, tw_possible=25.0, **common_fields), display_name='lab4'),
+            # Dropped
+            'lab5': MockGrade(AggregatedScore(tw_earned=3, tw_possible=4.0, **common_fields), display_name='lab5'),
+            'lab6': MockGrade(AggregatedScore(tw_earned=6, tw_possible=7.0, **common_fields), display_name='lab6'),
+            'lab7': MockGrade(AggregatedScore(tw_earned=5, tw_possible=6.0, **common_fields), display_name='lab7'),
+        },
 
-        'Midterm': [
-            MockGrade(AggregatedScore(tw_earned=50.5, tw_possible=100, **common_fields), display_name="Midterm Exam"),
-        ],
+        'Midterm': {
+            'midterm': MockGrade(
+                AggregatedScore(tw_earned=50.5, tw_possible=100, **common_fields),
+                display_name="Midterm Exam",
+            ),
+        },
     }
 
     def test_assignment_format_grader(self):
diff --git a/lms/djangoapps/ccx/tests/test_views.py b/lms/djangoapps/ccx/tests/test_views.py
index 41436ecdb0..5ce7a24e24 100644
--- a/lms/djangoapps/ccx/tests/test_views.py
+++ b/lms/djangoapps/ccx/tests/test_views.py
@@ -1174,11 +1174,8 @@ class TestCCXGrades(FieldOverrideTestMixin, SharedModuleStoreTestCase, LoginEnrollmentTestCase):
         self.assertEqual(len(response.mako_context['students']), 1)  # pylint: disable=no-member
         student_info = response.mako_context['students'][0]  # pylint: disable=no-member
         self.assertEqual(student_info['grade_summary']['percent'], 0.5)
-        self.assertEqual(
-            student_info['grade_summary']['grade_breakdown'][0]['percent'],
-            0.5)
-        self.assertEqual(
-            len(student_info['grade_summary']['section_breakdown']), 4)
+        self.assertEqual(student_info['grade_summary']['grade_breakdown'].values()[0]['percent'], 0.5)
+        self.assertEqual(len(student_info['grade_summary']['section_breakdown']), 4)
 
     def test_grades_csv(self):
         self.course.enable_ccx = True
@@ -1223,7 +1220,7 @@ class TestCCXGrades(FieldOverrideTestMixin, SharedModuleStoreTestCase, LoginEnrollmentTestCase):
         self.assertEqual(response.status_code, 200)
         grades = response.mako_context['grade_summary']  # pylint: disable=no-member
         self.assertEqual(grades['percent'], 0.5)
-        self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
+        self.assertEqual(grades['grade_breakdown'].values()[0]['percent'], 0.5)
         self.assertEqual(len(grades['section_breakdown']), 4)
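The `.values()[0]` assertions above rely on two things: `grade_breakdown` is now an `OrderedDict`, so the first value is deterministically the first assignment type configured in the grading policy, and this code runs on Python 2, where `values()` returns a list (Python 3 would need `list(...)` around the view). A quick illustration:

```python
from collections import OrderedDict

grade_breakdown = OrderedDict()
grade_breakdown['Homework'] = {'percent': 0.5, 'category': 'Homework'}
grade_breakdown['Final'] = {'percent': 0.0, 'category': 'Final'}

# Insertion order is preserved, so the first value is the first
# configured assignment type; a plain dict would make this flaky.
assert grade_breakdown.values()[0]['percent'] == 0.5  # Python 2 semantics
```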
diff --git a/lms/djangoapps/courseware/tests/test_submitting_problems.py b/lms/djangoapps/courseware/tests/test_submitting_problems.py
index d8482f04e2..9c7ffc3a6b 100644
--- a/lms/djangoapps/courseware/tests/test_submitting_problems.py
+++ b/lms/djangoapps/courseware/tests/test_submitting_problems.py
@@ -288,7 +288,9 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, ProblemSubmissionTestMixin):
 
         Returns list of scores: [<hw1 score>, <hw2 score>, ..., <hwN score>]
         """
-        return [s.graded_total.earned for s in self.get_course_grade().graded_subsections_by_format['Homework']]
+        return [
+            s.graded_total.earned for s in self.get_course_grade().graded_subsections_by_format['Homework'].itervalues()
+        ]
 
     def hw_grade(self, hw_url_name):
         """
diff --git a/lms/djangoapps/courseware/tests/test_views.py b/lms/djangoapps/courseware/tests/test_views.py
index 53f3ca0ce5..0fde44ae70 100644
--- a/lms/djangoapps/courseware/tests/test_views.py
+++ b/lms/djangoapps/courseware/tests/test_views.py
@@ -1235,7 +1235,7 @@ class ProgressPageTests(ModuleStoreTestCase):
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []}),
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}}),
     )
     def test_view_certificate_link(self):
         """
@@ -1294,7 +1294,7 @@ class ProgressPageTests(ModuleStoreTestCase):
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     def test_view_certificate_link_hidden(self):
         """
@@ -1341,7 +1341,7 @@ class ProgressPageTests(ModuleStoreTestCase):
 
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     @ddt.data(
         *itertools.product(
@@ -1381,7 +1381,7 @@ class ProgressPageTests(ModuleStoreTestCase):
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
    )
     def test_page_with_invalidated_certificate_with_html_view(self):
         """
@@ -1415,7 +1415,7 @@ class ProgressPageTests(ModuleStoreTestCase):
 
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     def test_page_with_invalidated_certificate_with_pdf(self):
         """
@@ -1432,7 +1432,7 @@ class ProgressPageTests(ModuleStoreTestCase):
 
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     def test_message_for_audit_mode(self):
         """ Verify that message appears on progress page, if learner is enrolled
diff --git a/lms/djangoapps/grades/context.py b/lms/djangoapps/grades/context.py
index d821fc6962..fce7342bc5 100644
--- a/lms/djangoapps/grades/context.py
+++ b/lms/djangoapps/grades/context.py
@@ -18,7 +18,7 @@ def grading_context_for_course(course_key):
 def grading_context(course_structure):
     """
     This returns a dictionary with keys necessary for quickly grading
-    a student. They are used by grades.grade()
+    a student.
 
     The grading context has two keys:
     all_graded_subsections_by_type - This contains all subsections that are
@@ -27,7 +27,7 @@ def grading_context(course_structure):
         The values are arrays of dictionaries containing
             "subsection_block" : The subsection block
             "scored_descendants" : An array of usage keys for blocks
-                could possibly be in the subsection, for any student
+                that could possibly be in the subsection, for any student
 
     all_graded_blocks - This contains a list of all blocks that can
         affect grading a student. This is used to efficiently fetch
diff --git a/lms/djangoapps/grades/new/course_grade.py b/lms/djangoapps/grades/new/course_grade.py
index 5688e4d3ec..01af70e70a 100644
--- a/lms/djangoapps/grades/new/course_grade.py
+++ b/lms/djangoapps/grades/new/course_grade.py
@@ -2,8 +2,7 @@
 CourseGrade Class
 """
-from collections import defaultdict
-from collections import namedtuple
+from collections import defaultdict, namedtuple, OrderedDict
 from logging import getLogger
 
 from django.conf import settings
@@ -44,13 +43,13 @@ class CourseGrade(object):
         Returns grades for the subsections in the course in a dict keyed
         by subsection format types.
         """
-        subsections_by_format = defaultdict(list)
+        subsections_by_format = defaultdict(OrderedDict)
         for chapter in self.chapter_grades:
             for subsection_grade in chapter['sections']:
                 if subsection_grade.graded:
                     graded_total = subsection_grade.graded_total
                     if graded_total.possible > 0:
-                        subsections_by_format[subsection_grade.format].append(subsection_grade)
+                        subsections_by_format[subsection_grade.format][subsection_grade.location] = subsection_grade
         return subsections_by_format
 
     @lazy
diff --git a/lms/djangoapps/instructor_analytics/basic.py b/lms/djangoapps/instructor_analytics/basic.py
index 4543fed179..91068aef91 100644
--- a/lms/djangoapps/instructor_analytics/basic.py
+++ b/lms/djangoapps/instructor_analytics/basic.py
@@ -483,7 +483,7 @@ def dump_grading_context(course):
     if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):
         msg += '\n'
         msg += "Graded sections:\n"
-        for subgrader, category, weight in course.grader.sections:
+        for subgrader, category, weight in course.grader.subgraders:
             msg += "  subgrader=%s, type=%s, category=%s, weight=%s\n"\
                 % (subgrader.__class__, subgrader.type, category, weight)
             subgrader.index = 1
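With `graded_subsections_by_format` now a `defaultdict(OrderedDict)`, callers can look a subsection grade up by its usage key instead of its list position, while iteration still follows course order. A sketch of the resulting shape (the keys shown are illustrative strings; the real keys are `UsageKey` objects and the values are `SubsectionGrade` instances):

```python
from collections import OrderedDict, defaultdict

graded_subsections_by_format = defaultdict(OrderedDict)
graded_subsections_by_format['Homework']['<hw1 usage key>'] = '<SubsectionGrade hw1>'
graded_subsections_by_format['Homework']['<hw2 usage key>'] = '<SubsectionGrade hw2>'

# New: direct lookup by location (used by the grade report below).
hw1 = graded_subsections_by_format['Homework']['<hw1 usage key>']
# Iteration still yields subsections in course order (Python 2 idiom).
ordered = list(graded_subsections_by_format['Homework'].itervalues())
```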
diff --git a/lms/djangoapps/instructor_task/tasks_helper.py b/lms/djangoapps/instructor_task/tasks_helper.py
index 87ff578c42..3fa5a6d048 100644
--- a/lms/djangoapps/instructor_task/tasks_helper.py
+++ b/lms/djangoapps/instructor_task/tasks_helper.py
@@ -696,7 +696,6 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
     whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
 
     # Loop over all our students and build our CSV lists in memory
-    header = None
     rows = []
     err_rows = [["id", "username", "error_msg"]]
     current_step = {'step': 'Calculating Grades'}
@@ -709,7 +708,24 @@
         current_step,
         total_enrolled_students,
     )
-    # NAA TODO - Get the list of ALL subsections to put in the header
+
+    graded_assignments = _graded_assignments(course_id)
+    grade_header = []
+    for assignment_info in graded_assignments.itervalues():
+        if assignment_info['use_subsection_headers']:
+            grade_header.extend(assignment_info['subsection_headers'].itervalues())
+        grade_header.append(assignment_info['average_header'])
+
+    rows.append(
+        ["Student ID", "Email", "Username", "Grade"] +
+        grade_header +
+        cohorts_header +
+        group_configs_header +
+        teams_header +
+        ['Enrollment Track', 'Verification Status'] +
+        certificate_info_header
+    )
+
     for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
         # Periodically update task status (this is a cache write)
         if task_progress.attempted % status_interval == 0:
@@ -728,70 +744,71 @@
             total_enrolled_students
         )
 
-        if course_grade:
-            # We were able to successfully grade this student for this course.
-            task_progress.succeeded += 1
-            if not header:
-                header = [section['label'] for section in course_grade.summary[u'section_breakdown']]
-                rows.append(
-                    ["id", "email", "username", "grade"] + header + cohorts_header +
-                    group_configs_header + teams_header +
-                    ['Enrollment Track', 'Verification Status'] + certificate_info_header
-                )
-
-            percents = {
-                section['label']: section.get('percent', 0.0)
-                for section in course_grade.summary[u'section_breakdown']
-                if 'label' in section
-            }
-
-            cohorts_group_name = []
-            if course_is_cohorted:
-                group = get_cohort(student, course_id, assign=False)
-                cohorts_group_name.append(group.name if group else '')
-
-            group_configs_group_names = []
-            for partition in experiment_partitions:
-                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
-                group_configs_group_names.append(group.name if group else '')
-
-            team_name = []
-            if teams_enabled:
-                try:
-                    membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
-                    team_name.append(membership.team.name)
-                except CourseTeamMembership.DoesNotExist:
-                    team_name.append('')
-
-            enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
-            verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
-                student,
-                course_id,
-                enrollment_mode
-            )
-            certificate_info = certificate_info_for_user(
-                student,
-                course_id,
-                course_grade.letter_grade,
-                student.id in whitelisted_user_ids
-            )
-
-            # Not everybody has the same gradable items. If the item is not
-            # found in the user's gradeset, just assume it's a 0. The aggregated
-            # grades for their sections and overall course will be calculated
-            # without regard for the item they didn't have access to, so it's
-            # possible for a student to have a 0.0 show up in their row but
-            # still have 100% for the course.
-            row_percents = [percents.get(label, 0.0) for label in header]
-            rows.append(
-                [student.id, student.email, student.username, course_grade.percent] +
-                row_percents + cohorts_group_name + group_configs_group_names + team_name +
-                [enrollment_mode] + [verification_status] + certificate_info
-            )
-        else:
+        if not course_grade:
             # An empty gradeset means we failed to grade a student.
             task_progress.failed += 1
             err_rows.append([student.id, student.username, err_msg])
+            continue
+
+        # We were able to successfully grade this student for this course.
+        task_progress.succeeded += 1
+
+        cohorts_group_name = []
+        if course_is_cohorted:
+            group = get_cohort(student, course_id, assign=False)
+            cohorts_group_name.append(group.name if group else '')
+
+        group_configs_group_names = []
+        for partition in experiment_partitions:
+            group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
+            group_configs_group_names.append(group.name if group else '')
+
+        team_name = []
+        if teams_enabled:
+            try:
+                membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
+                team_name.append(membership.team.name)
+            except CourseTeamMembership.DoesNotExist:
+                team_name.append('')
+
+        enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
+        verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
+            student,
+            course_id,
+            enrollment_mode
+        )
+        certificate_info = certificate_info_for_user(
+            student,
+            course_id,
+            course_grade.letter_grade,
+            student.id in whitelisted_user_ids
+        )
+
+        grade_results = []
+        for assignment_type, assignment_info in graded_assignments.iteritems():
+            for subsection_location in assignment_info['subsection_headers']:
+                try:
+                    subsection_grade = course_grade.graded_subsections_by_format[assignment_type][subsection_location]
+                except KeyError:
+                    grade_results.append([u'Not Accessible'])
+                else:
+                    if subsection_grade.graded_total.attempted:
+                        grade_results.append(
+                            [subsection_grade.graded_total.earned / subsection_grade.graded_total.possible]
+                        )
+                    else:
+                        grade_results.append([u'Not Attempted'])
+            if assignment_info['use_subsection_headers']:
+                assignment_average = course_grade.grade_value['grade_breakdown'].get(assignment_type, {}).get('percent')
+                grade_results.append([assignment_average])
+
+        grade_results = list(chain.from_iterable(grade_results))
+
+        rows.append(
+            [student.id, student.email, student.username, course_grade.percent] +
+            grade_results + cohorts_group_name + group_configs_group_names + team_name +
+            [enrollment_mode] + [verification_status] + certificate_info
+        )
 
     TASK_LOG.info(
         u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
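Taken together with the header construction earlier in the hunk, every assignment type now contributes one column per subsection plus an average column when it has more than one subsection, and a cell degrades to 'Not Accessible' or 'Not Attempted' instead of a misleading 0.0. A toy illustration of one resulting row (all values invented, not output from this patch):

```python
# Invented data illustrating the new report layout for a 'Homework'
# assignment type with two subsections.
header = ['Student ID', 'Email', 'Username', 'Grade',
          'Homework 1: Intro', 'Homework 2: Loops', 'Homework (Avg)']
row = [42, 'learner@example.com', 'learner', 0.13,
       0.5,                # attempted: graded_total.earned / graded_total.possible
       u'Not Attempted',   # present in the grading context but never attempted
       0.125]              # grade_breakdown['Homework']['percent']
# A subsection absent from graded_subsections_by_format entirely
# (e.g. hidden from this learner) would render as u'Not Accessible'.
```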
@@ -819,33 +836,61 @@
     return task_progress.update_task_state(extra_meta=current_step)
 
 
-def _order_graded_scorable_blocks(course_key):
+def _graded_assignments(course_key):
     """
-    Sort each graded scorable block by the assignment type and
-    subsection that it belongs to.
+    Returns an OrderedDict that maps an assignment type to a dict of subsection-headers and average-header.
+    """
+    grading_context = grading_context_for_course(course_key)
+    graded_assignments_map = OrderedDict()
+    for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
+        graded_subsections_map = OrderedDict()
 
-    Returns:
-        an OrderedDict that maps a scorable block's id to its
-        headers in the final report.
+        for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
+            subsection = subsection_info['subsection_block']
+            header_name = u"{assignment_type} {subsection_index}: {subsection_name}".format(
+                assignment_type=assignment_type_name,
+                subsection_index=subsection_index,
+                subsection_name=subsection.display_name,
+            )
+            graded_subsections_map[subsection.location] = header_name
+
+        average_header = u"{assignment_type}".format(assignment_type=assignment_type_name)
+
+        # Use separate subsection and average columns only if
+        # there's more than one subsection.
+        use_subsection_headers = len(subsection_infos) > 1
+        if use_subsection_headers:
+            average_header += u" (Avg)"
+
+        graded_assignments_map[assignment_type_name] = {
+            'subsection_headers': graded_subsections_map,
+            'average_header': average_header,
+            'use_subsection_headers': use_subsection_headers
+        }
+    return graded_assignments_map
+
+
+def _graded_scorable_blocks_to_header(course_key):
     """
-    scorable_blocks = OrderedDict()
+    Returns an OrderedDict that maps a scorable block's id to its
+    headers in the final report.
+    """
+    scorable_blocks_map = OrderedDict()
     grading_context = grading_context_for_course(course_key)
     for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
         for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
-            subsection = subsection_info['subsection_block']
-            if subsection.graded:
-                for scorable_block in subsection_info['scored_descendants']:
-                    header_name = (
-                        u"{assignment_type} {subsection_index}: "
-                        u"{subsection_name} - {scorable_block_name}"
-                    ).format(
-                        scorable_block_name=scorable_block.display_name,
-                        assignment_type=assignment_type_name,
-                        subsection_index=subsection_index,
-                        subsection_name=subsection.display_name,
-                    )
-                    scorable_blocks[scorable_block.location] = [header_name + " (Earned)", header_name + " (Possible)"]
-    return scorable_blocks
+            for scorable_block in subsection_info['scored_descendants']:
+                header_name = (
+                    u"{assignment_type} {subsection_index}: "
+                    u"{subsection_name} - {scorable_block_name}"
+                ).format(
+                    scorable_block_name=scorable_block.display_name,
+                    assignment_type=assignment_type_name,
+                    subsection_index=subsection_index,
+                    subsection_name=subsection_info['subsection_block'].display_name,
+                )
+                scorable_blocks_map[scorable_block.location] = [header_name + " (Earned)", header_name + " (Possible)"]
+    return scorable_blocks_map
 
 
 def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
@@ -898,10 +943,10 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
     # as the keys. It is structured in this way to keep the values related.
     header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
 
-    graded_scorable_blocks = _order_graded_scorable_blocks(course_id)
+    graded_scorable_blocks = _graded_scorable_blocks_to_header(course_id)
 
     # Just generate the static fields for now.
-    rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(graded_scorable_blocks.values()))]
+    rows = [list(header_row.values()) + ['Grade'] + list(chain.from_iterable(graded_scorable_blocks.values()))]
     error_rows = [list(header_row.values()) + ['error_msg']]
 
     current_step = {'step': 'Calculating Grades'}
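To make the two helpers above concrete: for a hypothetical course with two graded Homework subsections and a single Exam, `_graded_assignments` would return something like the following (usage keys abbreviated to strings for readability):

```python
from collections import OrderedDict

graded_assignments = OrderedDict([
    ('Homework', {
        'subsection_headers': OrderedDict([
            ('<hw1 usage key>', u'Homework 1: Intro'),
            ('<hw2 usage key>', u'Homework 2: Loops'),
        ]),
        'average_header': u'Homework (Avg)',
        'use_subsection_headers': True,   # more than one subsection
    }),
    ('Exam', {
        'subsection_headers': OrderedDict([('<exam usage key>', u'Exam 1: Final')]),
        'average_header': u'Exam',        # single subsection: one combined column
        'use_subsection_headers': False,
    }),
])
```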
@@ -913,22 +958,22 @@
         if not course_grade:
             # There was an error grading this student.
             if not err_msg:
-                err_msg = 'Unknown error'
+                err_msg = u'Unknown error'
             error_rows.append(student_fields + [err_msg])
             task_progress.failed += 1
             continue
 
-        earned_possible_values = list()
+        earned_possible_values = []
         for block_location in graded_scorable_blocks:
             try:
                 problem_score = course_grade.locations_to_scores[block_location]
             except KeyError:
-                earned_possible_values.append(['Not Accessible', 'Not Accessible'])
+                earned_possible_values.append([u'Not Accessible', u'Not Accessible'])
             else:
                 if problem_score.attempted:
                     earned_possible_values.append([problem_score.earned, problem_score.possible])
                 else:
-                    earned_possible_values.append(['Not Attempted', problem_score.possible])
+                    earned_possible_values.append([u'Not Attempted', problem_score.possible])
 
         rows.append(student_fields + [course_grade.percent] + list(chain.from_iterable(earned_possible_values)))
diff --git a/lms/djangoapps/instructor_task/tests/test_base.py b/lms/djangoapps/instructor_task/tests/test_base.py
index c2ea743da1..7192c67cf5 100644
--- a/lms/djangoapps/instructor_task/tests/test_base.py
+++ b/lms/djangoapps/instructor_task/tests/test_base.py
@@ -2,6 +2,7 @@
 Base test classes for LMS instructor-initiated background tasks
 
 """
+# pylint: disable=attribute-defined-outside-init
 import os
 import json
 from mock import Mock, patch
@@ -12,7 +13,6 @@ from uuid import uuid4
 from celery.states import SUCCESS, FAILURE
 
 from django.core.urlresolvers import reverse
-from django.test.testcases import TestCase
 from django.contrib.auth.models import User
 
 from capa.tests.response_xml_factory import OptionResponseXMLFactory
@@ -37,7 +37,8 @@
 TEST_COURSE_ORG = 'edx'
 TEST_COURSE_NAME = 'test_course'
 TEST_COURSE_NUMBER = '1.23x'
 TEST_COURSE_KEY = SlashSeparatedCourseKey(TEST_COURSE_ORG, TEST_COURSE_NUMBER, TEST_COURSE_NAME)
-TEST_SECTION_NAME = "Problem"
+TEST_CHAPTER_NAME = "Section"
+TEST_SECTION_NAME = "Subsection"
 
 TEST_FAILURE_MESSAGE = 'task failed horribly'
 TEST_FAILURE_EXCEPTION = 'RandomCauseError'
@@ -135,14 +136,18 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
         Add a chapter and a sequential to the current course.
         """
         # Add a chapter to the course
-        chapter = ItemFactory.create(parent_location=self.course.location,
-                                     display_name=TEST_SECTION_NAME)
+        self.chapter = ItemFactory.create(
+            parent_location=self.course.location,
+            display_name=TEST_CHAPTER_NAME,
+        )
 
         # add a sequence to the course to which the problems can be added
-        self.problem_section = ItemFactory.create(parent_location=chapter.location,
-                                                  category='sequential',
-                                                  metadata={'graded': True, 'format': 'Homework'},
-                                                  display_name=TEST_SECTION_NAME)
+        self.problem_section = ItemFactory.create(
+            parent_location=self.chapter.location,
+            category='sequential',
+            metadata={'graded': True, 'format': 'Homework'},
+            display_name=TEST_SECTION_NAME,
+        )
 
     @staticmethod
     def get_user_email(username):
@@ -335,10 +340,10 @@ class TestReportMixin(object):
             file_index (int): Describes which report store file to
                 open. Files are ordered by last modified date, and 0
                 corresponds to the most recently modified file.
-            verify_order (boolean): When True, we verify that both the
-                content and order of `expected_rows` matches the
-                actual csv rows. When False (default), we only verify
-                that the content matches.
+            verify_order (boolean): When True (default), we verify that
+                both the content and order of `expected_rows` matches
+                the actual csv rows. When False, we only verify that
+                the content matches.
             ignore_other_columns (boolean): When True, we verify that
                 `expected_rows` contain data which is the subset of actual csv rows.
         """
diff --git a/lms/djangoapps/instructor_task/tests/test_integration.py b/lms/djangoapps/instructor_task/tests/test_integration.py
index e64a23abc7..9feff9dd3a 100644
--- a/lms/djangoapps/instructor_task/tests/test_integration.py
+++ b/lms/djangoapps/instructor_task/tests/test_integration.py
@@ -136,7 +136,7 @@ class TestRescoringTask(TestIntegrationTask):
 
         course_grade = CourseGradeFactory().create(user, self.course)
         self.assertEquals(
-            course_grade.graded_subsections_by_format['Homework'][0].graded_total.earned,
+            course_grade.graded_subsections_by_format['Homework'][self.problem_section.location].graded_total.earned,
             expected_subsection_grade,
         )
 
@@ -574,13 +574,13 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
         self.verify_rows_in_csv(
             [
                 merge_dicts(
-                    {'id': str(student.id), 'username': student.username, 'email': student.email},
+                    {'Student ID': str(student.id), 'Username': student.username, 'Email': student.email},
                     grades,
                     user_partition_group(student)
                 )
                 for student_grades in students_grades for student, grades in student_grades.iteritems()
             ],
-            ignore_other_columns=ignore_other_columns
+            ignore_other_columns=ignore_other_columns,
         )
 
     def test_both_groups_problems(self):
@@ -604,10 +604,20 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
             self.verify_csv_task_success(result)
             self.verify_grades_in_csv(
                 [
-                    {self.student_a: {'grade': '1.0', 'HW': '1.0'}},
-                    {self.student_b: {'grade': '0.5', 'HW': '0.5'}}
+                    {
+                        self.student_a: {
+                            u'Grade': '1.0',
+                            u'Homework': '1.0',
+                        }
+                    },
+                    {
+                        self.student_b: {
+                            u'Grade': '0.5',
+                            u'Homework': '0.5',
+                        }
+                    },
                 ],
-                ignore_other_columns=True
+                ignore_other_columns=True,
             )
 
     def test_one_group_problem(self):
@@ -627,8 +637,18 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
             self.verify_csv_task_success(result)
             self.verify_grades_in_csv(
                 [
-                    {self.student_a: {'grade': '1.0', 'HW': '1.0'}},
-                    {self.student_b: {'grade': '0.0', 'HW': '0.0'}}
+                    {
+                        self.student_a: {
+                            u'Grade': '1.0',
+                            u'Homework': '1.0',
+                        },
+                    },
+                    {
+                        self.student_b: {
+                            u'Grade': '0.0',
+                            u'Homework': u'Not Accessible',
+                        }
+                    },
                 ],
                 ignore_other_columns=True
             )
diff --git a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py
index 5cd6a0a087..13fe952571 100644
--- a/lms/djangoapps/instructor_task/tests/test_tasks_helper.py
+++ b/lms/djangoapps/instructor_task/tests/test_tasks_helper.py
@@ -590,7 +590,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
         # technically possible in openedx.
         self.student_1 = self.create_student(u'üser_1')
         self.student_2 = self.create_student(u'üser_2')
-        self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
+        self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Grade']
 
     @patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
     def test_no_problems(self, _get_current_task):
@@ -624,7 +624,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
         self.submit_student_answer(self.student_1.username, u'Problem1', ['Option 1'])
         result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
         self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
-        problem_name = u'Homework 1: Problem - Problem1'
+        problem_name = u'Homework 1: Subsection - Problem1'
         header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
         self.verify_rows_in_csv([
             dict(zip(
@@ -642,7 +642,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
                 unicode(self.student_2.id),
                 self.student_2.email,
                 self.student_2.username,
-                '0.0', 'Not Attempted', '2.0',
+                '0.0', u'Not Attempted', '2.0',
             ]
         ))
     ])
@@ -714,8 +714,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
             {'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
         )
-        problem_names = [u'Homework 1: Problem - problem_a_url', u'Homework 1: Problem - problem_b_url']
-        header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
+        problem_names = [u'Homework 1: Subsection - problem_a_url', u'Homework 1: Subsection - problem_b_url']
+        header_row = [u'Student ID', u'Email', u'Username', u'Grade']
         for problem in problem_names:
             header_row += [problem + ' (Earned)', problem + ' (Possible)']
 
@@ -796,7 +796,7 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
             title = 'Homework %d 1: Problem section %d - %s' % (i, i, problem_url)
             problem_names.append(title)
 
-        header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
+        header_row = [u'Student ID', u'Email', u'Username', u'Grade']
         for problem in problem_names:
             header_row += [problem + ' (Earned)', problem + ' (Possible)']
 
@@ -861,8 +861,8 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, InstructorTaskModuleTestCase):
         self.assertDictContainsSubset(
             {'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
         )
-        problem_names = [u'Homework 1: Problem - Problem0', u'Homework 1: Problem - Problem1']
-        header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
+        problem_names = [u'Homework 1: Subsection - Problem0', u'Homework 1: Subsection - Problem1']
+        header_row = [u'Student ID', u'Email', u'Username', u'Grade']
         for problem in problem_names:
             header_row += [problem + ' (Earned)', problem + ' (Possible)']
 
@@ -1507,6 +1507,90 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
         )
 
 
+@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
+class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
+    """
+    Test that grade report has correct grade values.
+ """ + def setUp(self): + super(TestGradeReport, self).setUp() + self.create_course() + self.student = self.create_student(u'üser_1') + + def create_course(self): + """ + Creates a course with various subsections for testing + """ + self.course = CourseFactory.create( + grading_policy={ + "GRADER": [ + { + "type": "Homework", + "min_count": 4, + "drop_count": 0, + "weight": 1.0 + }, + ], + }, + ) + self.chapter = ItemFactory.create(parent=self.course, category='chapter') + + self.problem_section = ItemFactory.create( + parent=self.chapter, + category='sequential', + metadata={'graded': True, 'format': 'Homework'}, + display_name='Subsection' + ) + self.define_option_problem(u'Problem1', parent=self.problem_section, num_responses=1) + self.hidden_section = ItemFactory.create( + parent=self.chapter, + category='sequential', + metadata={'graded': True, 'format': 'Homework'}, + visible_to_staff_only=True, + display_name='Hidden', + ) + self.define_option_problem(u'Problem2', parent=self.hidden_section) + self.unattempted_section = ItemFactory.create( + parent=self.chapter, + category='sequential', + metadata={'graded': True, 'format': 'Homework'}, + display_name='Unattempted', + ) + self.define_option_problem(u'Problem3', parent=self.unattempted_section) + self.empty_section = ItemFactory.create( + parent=self.chapter, + category='sequential', + metadata={'graded': True, 'format': 'Homework'}, + display_name='Empty', + ) + + def test_grade_report(self): + self.submit_student_answer(self.student.username, u'Problem1', ['Option 1']) + + with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'): + result = upload_grades_csv(None, None, self.course.id, None, 'graded') + self.assertDictContainsSubset( + {'action_name': 'graded', 'attempted': 1, 'succeeded': 1, 'failed': 0}, + result, + ) + self.verify_rows_in_csv( + [ + { + u'Student ID': unicode(self.student.id), + u'Email': self.student.email, + u'Username': self.student.username, + u'Grade': '0.13', + u'Homework 1: Subsection': '0.5', + u'Homework 2: Hidden': u'Not Accessible', + u'Homework 3: Unattempted': u'Not Attempted', + u'Homework 4: Empty': u'Not Accessible', + u'Homework (Avg)': '0.125', + }, + ], + ignore_other_columns=True, + ) + + @ddt.ddt @patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage) class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTaskModuleTestCase): diff --git a/lms/templates/courseware/progress_graph.js b/lms/templates/courseware/progress_graph.js index 10605eec21..d8609f6b8d 100644 --- a/lms/templates/courseware/progress_graph.js +++ b/lms/templates/courseware/progress_graph.js @@ -100,7 +100,7 @@ $(function () { extraColorIndex = len(categories) #Keeping track of the next color to use for categories not in categories[] if show_grade_breakdown: - for section in grade_summary['grade_breakdown']: + for section in grade_summary['grade_breakdown'].itervalues(): if section['percent'] > 0: if section['category'] in categories: color = categories[ section['category'] ]['color'] diff --git a/openedx/core/djangoapps/util/testing.py b/openedx/core/djangoapps/util/testing.py index 9fcf5b1a3a..380c8cdfcf 100644 --- a/openedx/core/djangoapps/util/testing.py +++ b/openedx/core/djangoapps/util/testing.py @@ -104,7 +104,7 @@ class TestConditionalContent(ModuleStoreTestCase): """ Construct a course with graded problems that exist within a split test. """ - TEST_SECTION_NAME = 'Problem' + TEST_SECTION_NAME = 'Subsection' def setUp(self): """