Merge pull request #14032 from edx/beryl/unattempted-grade-report
Update grade report to distinguish between Not Attempted and 0
@@ -404,7 +404,7 @@ def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disa
     )

     if status in {'generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'}:
-        persisted_grade = CourseGradeFactory(user).get_persisted(course_overview)
+        persisted_grade = CourseGradeFactory().get_persisted(user, course_overview)
         if persisted_grade is not None:
             status_dict['grade'] = unicode(persisted_grade.percent)
         elif 'grade' in cert_status:
@@ -10,7 +10,7 @@ class CorrectMap(object):
     in a capa problem. The response evaluation result for each answer_id includes
     (correctness, npoints, msg, hint, hintmode).

-    - correctness : 'correct', 'incorrect', or 'partially-correct'
+    - correctness : 'correct', 'incorrect', 'partially-correct', or 'incomplete'
    - npoints : None, or integer specifying number of points awarded for this answer_id
    - msg : string (may have HTML) giving extra message response
            (displayed below textline or textbox)
@@ -5,6 +5,7 @@ Code used to calculate learner grades.
 from __future__ import division

 import abc
+from collections import OrderedDict
 import inspect
 import logging
 import random
@@ -18,17 +19,13 @@ class ScoreBase(object):
     """
     Abstract base class for encapsulating fields of score values.
     Fields common to all scores include:
-        display_name (string) - the display name of the module
-        module_id (UsageKey) - the location of the module
         graded (boolean) - whether or not this module is graded
         attempted (boolean) - whether the module was attempted
     """
     __metaclass__ = abc.ABCMeta

-    def __init__(self, graded, display_name, module_id, attempted):
+    def __init__(self, graded, attempted):
         self.graded = graded
-        self.display_name = display_name
-        self.module_id = module_id
         self.attempted = attempted

     def __eq__(self, other):
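Reviewer note: the `attempted` flag carried by `ScoreBase` is what lets the grade report distinguish a learner who earned 0 from one who never attempted anything. A minimal sketch of that distinction, assuming only the fields shown in this hunk (`format_grade_cell` is a hypothetical helper, not part of this change):

```python
class Score(object):
    """Simplified stand-in for ScoreBase, with the attempted flag."""
    def __init__(self, graded, attempted, earned=0.0, possible=0.0):
        self.graded = graded
        self.attempted = attempted
        self.earned = earned
        self.possible = possible


def format_grade_cell(score):
    # A zero is reported as a number only if the learner actually
    # attempted the work; otherwise the report cell reads "Not Attempted".
    if not score.attempted:
        return "Not Attempted"
    percent = score.earned / score.possible if score.possible else 0.0
    return "{0:.2f}".format(percent)


print(format_grade_cell(Score(graded=True, attempted=False)))                       # Not Attempted
print(format_grade_cell(Score(graded=True, attempted=True, earned=0, possible=5)))  # 0.00
```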
@@ -55,10 +52,10 @@ class ProblemScore(ScoreBase):
     """
     def __init__(self, raw_earned, raw_possible, weighted_earned, weighted_possible, weight, *args, **kwargs):
         super(ProblemScore, self).__init__(*args, **kwargs)
-        self.raw_earned = raw_earned
-        self.raw_possible = raw_possible
-        self.earned = weighted_earned
-        self.possible = weighted_possible
+        self.raw_earned = float(raw_earned) if raw_earned is not None else None
+        self.raw_possible = float(raw_possible) if raw_possible is not None else None
+        self.earned = float(weighted_earned) if weighted_earned is not None else None
+        self.possible = float(weighted_possible) if weighted_possible is not None else None
         self.weight = weight
@@ -71,8 +68,8 @@ class AggregatedScore(ScoreBase):
     """
     def __init__(self, tw_earned, tw_possible, *args, **kwargs):
         super(AggregatedScore, self).__init__(*args, **kwargs)
-        self.earned = tw_earned
-        self.possible = tw_possible
+        self.earned = float(tw_earned) if tw_earned is not None else None
+        self.possible = float(tw_possible) if tw_possible is not None else None


 def float_sum(iterable):
@@ -82,11 +79,9 @@ def float_sum(iterable):
     return float(sum(iterable))


-def aggregate_scores(scores, display_name="summary", location=None):
+def aggregate_scores(scores):
     """
     scores: A list of ScoreBase objects
-    display_name: The display name for the score object
-    location: The location under which all objects in scores are located
     returns: A tuple (all_total, graded_total).
         all_total: A ScoreBase representing the total score summed over all input scores
         graded_total: A ScoreBase representing the score summed over all graded input scores
@@ -100,11 +95,11 @@ def aggregate_scores(scores):
     any_attempted = any(score.attempted for score in scores)

     # regardless of whether it is graded
-    all_total = AggregatedScore(total_correct, total_possible, False, display_name, location, any_attempted)
+    all_total = AggregatedScore(total_correct, total_possible, False, any_attempted)

     # selecting only graded things
     graded_total = AggregatedScore(
-        total_correct_graded, total_possible_graded, True, display_name, location, any_attempted_graded,
+        total_correct_graded, total_possible_graded, True, any_attempted_graded,
     )

     return all_total, graded_total
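To make the slimmed-down signature concrete, here is an illustrative standalone reimplementation of what `aggregate_scores(scores)` now computes; the real function builds `AggregatedScore` objects as shown above, but the arithmetic is the same:

```python
def aggregate_scores_sketch(scores):
    """Return (all_total, graded_total) as (earned, possible, attempted) tuples."""
    graded = [score for score in scores if score.graded]

    total_earned = float(sum(score.earned for score in scores))
    total_possible = float(sum(score.possible for score in scores))
    graded_earned = float(sum(score.earned for score in graded))
    graded_possible = float(sum(score.possible for score in graded))

    # A total counts as attempted if any contributing score was attempted.
    any_attempted = any(score.attempted for score in scores)
    any_attempted_graded = any(score.attempted for score in graded)

    return (
        (total_earned, total_possible, any_attempted),
        (graded_earned, graded_possible, any_attempted_graded),
    )
```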
@@ -126,9 +121,8 @@ def grader_from_conf(conf):
     This creates a CourseGrader from a configuration (such as in course_settings.py).
     The conf can simply be an instance of CourseGrader, in which case no work is done.
     More commonly, the conf is a list of dictionaries. A WeightedSubsectionsGrader
-    with AssignmentFormatGrader's or SingleSectionGrader's as subsections will be
-    generated. Every dictionary should contain the parameters for making either a
-    AssignmentFormatGrader or SingleSectionGrader, in addition to a 'weight' key.
+    with AssignmentFormatGraders will be generated. Every dictionary should contain
+    the parameters for making an AssignmentFormatGrader, in addition to a 'weight' key.
     """
     if isinstance(conf, CourseGrader):
         return conf
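For reference, a grading configuration that satisfies the updated contract — every entry needs `min_count`, `drop_count`, and `weight`, so each one maps to an AssignmentFormatGrader (the numbers below are illustrative, not from this commit):

```python
GRADER_CONF = [
    {'type': 'Homework', 'min_count': 12, 'drop_count': 2, 'short_label': 'HW', 'weight': 0.15},
    {'type': 'Lab', 'min_count': 7, 'drop_count': 3, 'weight': 0.15},
    {'type': 'Midterm', 'min_count': 1, 'drop_count': 0, 'weight': 0.30},
    {'type': 'Final', 'min_count': 1, 'drop_count': 0, 'weight': 0.40},
]

# grader = grader_from_conf(GRADER_CONF)  # -> WeightedSubsectionsGrader of AssignmentFormatGraders
```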
@@ -137,27 +131,14 @@ def grader_from_conf(conf):
     for subgraderconf in conf:
         subgraderconf = subgraderconf.copy()
         weight = subgraderconf.pop("weight", 0)
-        # NOTE: 'name' used to exist in SingleSectionGrader. We are deprecating SingleSectionGrader
-        # and converting everything into an AssignmentFormatGrader by adding 'min_count' and
-        # 'drop_count'. AssignmentFormatGrader does not expect 'name', so if it appears
-        # in bad_args, go ahead remove it (this causes no errors). Eventually, SingleSectionGrader
-        # should be completely removed.
-        name = 'name'
         try:
             if 'min_count' in subgraderconf:
                 #This is an AssignmentFormatGrader
                 subgrader_class = AssignmentFormatGrader
-            elif name in subgraderconf:
-                #This is an SingleSectionGrader
-                subgrader_class = SingleSectionGrader
             else:
                 raise ValueError("Configuration has no appropriate grader class.")

             bad_args = invalid_args(subgrader_class.__init__, subgraderconf)
-            # See note above concerning 'name'.
-            if bad_args.issuperset({name}):
-                bad_args = bad_args - {name}
-                del subgraderconf[name]
             if len(bad_args) > 0:
                 log.warning("Invalid arguments for a subgrader: %s", bad_args)
                 for key in bad_args:
@@ -194,10 +175,10 @@ class CourseGrader(object):
     - section_breakdown: This is a list of dictionaries which provide details on sections
         that were graded. These are used for display in a graph or chart. The format for a
         section_breakdown dictionary is explained below.
-    - grade_breakdown: This is a list of dictionaries which provide details on the contributions
-        of the final percentage grade. This is a higher level breakdown, for when the grade is constructed
-        of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final). The format for
-        a grade_breakdown is explained below. This section is optional.
+    - grade_breakdown: This is a dict of dictionaries, keyed by category, which provide details on
+        the contributions of the final percentage grade. This is a higher level breakdown, for when the
+        grade is constructed of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final).
+        The format for a grade_breakdown is explained below. This section is optional.

     A dictionary in the section_breakdown list has the following keys:
     percent: A float percentage for the section.
@@ -208,7 +189,7 @@ class CourseGrader(object):
     prominent: A boolean value indicating that this section should be displayed as more prominent
         than other items.

-    A dictionary in the grade_breakdown list has the following keys:
+    A dictionary in the grade_breakdown dict has the following keys:
     percent: A float percentage in the breakdown. All percents should add up to the final percentage.
     detail: A string explanation of this breakdown. E.g. "Homework - 10% of a possible 15%"
     category: A string identifying the category. Items with the same category are grouped together
@@ -241,77 +222,32 @@ class WeightedSubsectionsGrader(CourseGrader):
     a value > 1, the student may end up with a percent > 100%. This allows for sections that
     are extra credit.
     """
-    def __init__(self, sections):
-        self.sections = sections
+    def __init__(self, subgraders):
+        self.subgraders = subgraders

     def grade(self, grade_sheet, generate_random_scores=False):
         total_percent = 0.0
         section_breakdown = []
-        grade_breakdown = []
+        grade_breakdown = OrderedDict()

-        for subgrader, category, weight in self.sections:
+        for subgrader, assignment_type, weight in self.subgraders:
             subgrade_result = subgrader.grade(grade_sheet, generate_random_scores)

             weighted_percent = subgrade_result['percent'] * weight
-            section_detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(category, weighted_percent, weight)
+            section_detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(assignment_type, weighted_percent, weight)

             total_percent += weighted_percent
             section_breakdown += subgrade_result['section_breakdown']
-            grade_breakdown.append({'percent': weighted_percent, 'detail': section_detail, 'category': category})
-
-        return {'percent': total_percent,
-                'section_breakdown': section_breakdown,
-                'grade_breakdown': grade_breakdown}
-
-
-class SingleSectionGrader(CourseGrader):
-    """
-    This grades a single section with the format 'type' and the name 'name'.
-
-    If the name is not appropriate for the short short_label or category, they each may
-    be specified individually.
-    """
-    def __init__(self, type, name, short_label=None, category=None):  # pylint: disable=redefined-builtin
-        self.type = type
-        self.name = name
-        self.short_label = short_label or name
-        self.category = category or name
-
-    def grade(self, grade_sheet, generate_random_scores=False):
-        found_score = None
-        if self.type in grade_sheet:
-            for score in grade_sheet[self.type]:
-                if score.display_name == self.name:
-                    found_score = score
-                    break
-
-        if found_score or generate_random_scores:
-            if generate_random_scores:  # for debugging!
-                earned = random.randint(2, 15)
-                possible = random.randint(earned, 15)
-            else:  # We found the score
-                earned = found_score.earned
-                possible = found_score.possible
-
-            percent = earned / possible
-            detail = u"{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(
-                name=self.name,
-                percent=percent,
-                earned=float(earned),
-                possible=float(possible)
-            )
-
-        else:
-            percent = 0.0
-            detail = u"{name} - 0% (?/?)".format(name=self.name)
-
-        breakdown = [{'percent': percent, 'label': self.short_label,
-                      'detail': detail, 'category': self.category, 'prominent': True}]
-
-        return {
-            'percent': percent,
-            'section_breakdown': breakdown,
-            #No grade_breakdown here
-        }
+            grade_breakdown[assignment_type] = {
+                'percent': weighted_percent,
+                'detail': section_detail,
+                'category': assignment_type,
+            }
+
+        return {
+            'percent': total_percent,
+            'section_breakdown': section_breakdown,
+            'grade_breakdown': grade_breakdown
+        }
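The observable effect of this hunk: `grade()` now returns `grade_breakdown` as an OrderedDict keyed by assignment type rather than a list, and SingleSectionGrader is gone entirely. A sketch of the new shape and how lookups change (values illustrative):

```python
from collections import OrderedDict

grade_breakdown = OrderedDict()
grade_breakdown['Homework'] = {
    'percent': 0.075,
    'detail': u'Homework = 7.50% of a possible 15.00%',
    'category': 'Homework',
}

# Old callers indexed positionally:   grade_breakdown[0]['percent']
# New callers key by assignment type, or use .values() when order matters:
print(grade_breakdown['Homework']['percent'])
print(list(grade_breakdown.values())[0]['percent'])
```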
@@ -332,9 +268,9 @@ class AssignmentFormatGrader(CourseGrader):
     hide_average is to suppress the display of the total score in this grader and instead
     only show each assignment in this grader in the breakdown.

-    If there is only a single assignment in this grader, then it acts like a SingleSectionGrader
-    and returns only one entry for the grader. Since the assignment and the total are the same,
-    the total is returned but is not labeled as an average.
+    If there is only a single assignment in this grader, then it returns only one entry for the
+    grader. Since the assignment and the total are the same, the total is returned but is not
+    labeled as an average.

     category should be presentable to the user, but may not appear. When the grade breakdown is
     displayed, scores from the same category will be similar (for example, by color).
@@ -373,9 +309,12 @@ class AssignmentFormatGrader(CourseGrader):

     def grade(self, grade_sheet, generate_random_scores=False):
         def total_with_drops(breakdown, drop_count):
-            '''calculates total score for a section while dropping lowest scores'''
-            #create an array of tuples with (index, mark), sorted by mark['percent'] descending
+            """
+            Calculates total score for a section while dropping lowest scores
+            """
+            # Create an array of tuples with (index, mark), sorted by mark['percent'] descending
             sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent'])

             # A list of the indices of the dropped scores
             dropped_indices = []
             if drop_count > 0:
@@ -390,8 +329,7 @@ class AssignmentFormatGrader(CourseGrader):

             return aggregate_score, dropped_indices

-        #Figure the homework scores
-        scores = grade_sheet.get(self.type, [])
+        scores = grade_sheet.get(self.type, {}).values()
         breakdown = []
         for i in range(max(self.min_count, len(scores))):
             if i < len(scores) or generate_random_scores:
@@ -401,8 +339,8 @@ class AssignmentFormatGrader(CourseGrader):
                 section_name = "Generated"

             else:
-                earned = scores[i].earned
-                possible = scores[i].possible
+                earned = scores[i].graded_total.earned
+                possible = scores[i].graded_total.possible
                 section_name = scores[i].display_name

             percentage = earned / possible
@@ -442,8 +380,7 @@ class AssignmentFormatGrader(CourseGrader):

         if len(breakdown) == 1:
             # if there is only one entry in a section, suppress the existing individual entry and the average,
-            # and just display a single entry for the section. That way it acts automatically like a
-            # SingleSectionGrader.
+            # and just display a single entry for the section.
             total_detail = u"{section_type} = {percent:.0%}".format(
                 percent=total_percent,
                 section_type=self.section_type,
@@ -1,4 +1,5 @@
 """Grading tests"""
+import ddt
 import unittest

 from xmodule import graders
@@ -12,13 +13,11 @@ class GradesheetTest(unittest.TestCase):

     def test_weighted_grading(self):
         scores = []
-        agg_fields = dict(display_name="aggregated_score", module_id=None, attempted=False)
-        prob_fields = dict(
-            display_name="problem_score", module_id=None, raw_earned=0, raw_possible=0, weight=0, attempted=False,
-        )
+        agg_fields = dict(attempted=False)
+        prob_fields = dict(raw_earned=0, raw_possible=0, weight=0, attempted=False)

         # No scores
-        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
+        all_total, graded_total = aggregate_scores(scores)
         self.assertEqual(
             all_total,
             AggregatedScore(tw_earned=0, tw_possible=0, graded=False, **agg_fields),
@@ -30,7 +29,7 @@ class GradesheetTest(unittest.TestCase):

         # (0/5 non-graded)
         scores.append(ProblemScore(weighted_earned=0, weighted_possible=5, graded=False, **prob_fields))
-        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
+        all_total, graded_total = aggregate_scores(scores)
         self.assertEqual(
             all_total,
             AggregatedScore(tw_earned=0, tw_possible=5, graded=False, **agg_fields),
@@ -44,7 +43,7 @@ class GradesheetTest(unittest.TestCase):
         prob_fields['attempted'] = True
         agg_fields['attempted'] = True
         scores.append(ProblemScore(weighted_earned=3, weighted_possible=5, graded=True, **prob_fields))
-        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
+        all_total, graded_total = aggregate_scores(scores)
         self.assertAlmostEqual(
             all_total,
             AggregatedScore(tw_earned=3, tw_possible=10, graded=False, **agg_fields),
@@ -56,7 +55,7 @@ class GradesheetTest(unittest.TestCase):

         # (0/5 non-graded) + (3/5 graded) + (2/5 graded) = 5/15 total, 5/10 graded
         scores.append(ProblemScore(weighted_earned=2, weighted_possible=5, graded=True, **prob_fields))
-        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
+        all_total, graded_total = aggregate_scores(scores)
         self.assertAlmostEqual(
             all_total,
             AggregatedScore(tw_earned=5, tw_possible=15, graded=False, **agg_fields),
@@ -67,6 +66,7 @@ class GradesheetTest(unittest.TestCase):
         )


+@ddt.ddt
 class GraderTest(unittest.TestCase):
     """
     Tests grader implementations
@@ -76,55 +76,48 @@ class GraderTest(unittest.TestCase):
     }

     incomplete_gradesheet = {
-        'Homework': [],
-        'Lab': [],
-        'Midterm': [],
+        'Homework': {},
+        'Lab': {},
+        'Midterm': {},
     }

-    common_fields = dict(graded=True, module_id=None, attempted=True)
+    class MockGrade(object):
+        """
+        Mock class for SubsectionGrade object.
+        """
+        def __init__(self, graded_total, display_name):
+            self.graded_total = graded_total
+            self.display_name = display_name
+
+    common_fields = dict(graded=True, attempted=True)
     test_gradesheet = {
-        'Homework': [
-            AggregatedScore(tw_earned=2, tw_possible=20.0, display_name='hw1', **common_fields),
-            AggregatedScore(tw_earned=16, tw_possible=16.0, display_name='hw2', **common_fields),
-        ],
+        'Homework': {
+            'hw1': MockGrade(AggregatedScore(tw_earned=2, tw_possible=20.0, **common_fields), display_name='hw1'),
+            'hw2': MockGrade(AggregatedScore(tw_earned=16, tw_possible=16.0, **common_fields), display_name='hw2'),
+        },

         # The dropped scores should be from the assignments that don't exist yet
-        'Lab': [
-            AggregatedScore(tw_earned=1, tw_possible=2.0, display_name='lab1', **common_fields),  # Dropped
-            AggregatedScore(tw_earned=1, tw_possible=1.0, display_name='lab2', **common_fields),
-            AggregatedScore(tw_earned=1, tw_possible=1.0, display_name='lab3', **common_fields),
-            AggregatedScore(tw_earned=5, tw_possible=25.0, display_name='lab4', **common_fields),  # Dropped
-            AggregatedScore(tw_earned=3, tw_possible=4.0, display_name='lab5', **common_fields),  # Dropped
-            AggregatedScore(tw_earned=6, tw_possible=7.0, display_name='lab6', **common_fields),
-            AggregatedScore(tw_earned=5, tw_possible=6.0, display_name='lab7', **common_fields),
-        ],
+        'Lab': {
+            # Dropped
+            'lab1': MockGrade(AggregatedScore(tw_earned=1, tw_possible=2.0, **common_fields), display_name='lab1'),
+            'lab2': MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab2'),
+            'lab3': MockGrade(AggregatedScore(tw_earned=1, tw_possible=1.0, **common_fields), display_name='lab3'),
+            # Dropped
+            'lab4': MockGrade(AggregatedScore(tw_earned=5, tw_possible=25.0, **common_fields), display_name='lab4'),
+            # Dropped
+            'lab5': MockGrade(AggregatedScore(tw_earned=3, tw_possible=4.0, **common_fields), display_name='lab5'),
+            'lab6': MockGrade(AggregatedScore(tw_earned=6, tw_possible=7.0, **common_fields), display_name='lab6'),
+            'lab7': MockGrade(AggregatedScore(tw_earned=5, tw_possible=6.0, **common_fields), display_name='lab7'),
+        },

-        'Midterm': [
-            AggregatedScore(tw_earned=50.5, tw_possible=100, display_name="Midterm Exam", **common_fields),
-        ],
+        'Midterm': {
+            'midterm': MockGrade(
+                AggregatedScore(tw_earned=50.5, tw_possible=100, **common_fields),
+                display_name="Midterm Exam",
+            ),
+        },
     }

-    def test_single_section_grader(self):
-        midterm_grader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
-        lab4_grader = graders.SingleSectionGrader("Lab", "lab4")
-        bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42")
-
-        for graded in [
-                midterm_grader.grade(self.empty_gradesheet),
-                midterm_grader.grade(self.incomplete_gradesheet),
-                bad_lab_grader.grade(self.test_gradesheet),
-        ]:
-            self.assertEqual(len(graded['section_breakdown']), 1)
-            self.assertEqual(graded['percent'], 0.0)
-
-        graded = midterm_grader.grade(self.test_gradesheet)
-        self.assertAlmostEqual(graded['percent'], 0.505)
-        self.assertEqual(len(graded['section_breakdown']), 1)
-
-        graded = lab4_grader.grade(self.test_gradesheet)
-        self.assertAlmostEqual(graded['percent'], 0.2)
-        self.assertEqual(len(graded['section_breakdown']), 1)
-
     def test_assignment_format_grader(self):
         homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
         no_drop_grader = graders.AssignmentFormatGrader("Homework", 12, 0)
@@ -179,8 +172,6 @@ class GraderTest(unittest.TestCase):
         # First, a few sub graders
         homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
         lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
-        # phasing out the use of SingleSectionGraders, and instead using AssignmentFormatGraders that
-        # will act like SingleSectionGraders on single sections.
        midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)

        weighted_grader = graders.WeightedSubsectionsGrader([
@@ -268,6 +259,8 @@ class GraderTest(unittest.TestCase):
             },
             {
                 'type': "Midterm",
+                'min_count': 0,
+                'drop_count': 0,
                 'name': "Midterm Exam",
                 'short_label': "Midterm",
                 'weight': 0.5,
@@ -294,5 +287,25 @@ class GraderTest(unittest.TestCase):
         self.assertAlmostEqual(graded['percent'], 0.11)
         self.assertEqual(len(graded['section_breakdown']), 12 + 1)

-    # TODO: How do we test failure cases? The parser only logs an error when
-    # it can't parse something. Maybe it should throw exceptions?
+    @ddt.data(
+        (
+            # empty
+            {},
+            u"Configuration has no appropriate grader class."
+        ),
+        (
+            # no min_count
+            {'type': "Homework", 'drop_count': 0},
+            u"Configuration has no appropriate grader class."
+        ),
+        (
+            # no drop_count
+            {'type': "Homework", 'min_count': 0},
+            u"__init__() takes at least 4 arguments (3 given)"
+        ),
+    )
+    @ddt.unpack
+    def test_grader_with_invalid_conf(self, invalid_conf, expected_error_message):
+        with self.assertRaises(ValueError) as error:
+            graders.grader_from_conf([invalid_conf])
+        self.assertIn(expected_error_message, error.exception.message)
@@ -1174,11 +1174,8 @@ class TestCCXGrades(FieldOverrideTestMixin, SharedModuleStoreTestCase, LoginEnro
         self.assertEqual(len(response.mako_context['students']), 1)  # pylint: disable=no-member
         student_info = response.mako_context['students'][0]  # pylint: disable=no-member
         self.assertEqual(student_info['grade_summary']['percent'], 0.5)
-        self.assertEqual(
-            student_info['grade_summary']['grade_breakdown'][0]['percent'],
-            0.5)
-        self.assertEqual(
-            len(student_info['grade_summary']['section_breakdown']), 4)
+        self.assertEqual(student_info['grade_summary']['grade_breakdown'].values()[0]['percent'], 0.5)
+        self.assertEqual(len(student_info['grade_summary']['section_breakdown']), 4)

     def test_grades_csv(self):
         self.course.enable_ccx = True
@@ -1223,7 +1220,7 @@ class TestCCXGrades(FieldOverrideTestMixin, SharedModuleStoreTestCase, LoginEnro
         self.assertEqual(response.status_code, 200)
         grades = response.mako_context['grade_summary']  # pylint: disable=no-member
         self.assertEqual(grades['percent'], 0.5)
-        self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
+        self.assertEqual(grades['grade_breakdown'].values()[0]['percent'], 0.5)
         self.assertEqual(len(grades['section_breakdown']), 4)
@@ -33,7 +33,7 @@ from courseware.field_overrides import disable_overrides
 from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, assign_role
 from django_comment_common.utils import seed_permissions_roles
 from edxmako.shortcuts import render_to_response
-from lms.djangoapps.grades.course_grades import iterate_grades_for
+from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
 from opaque_keys.edx.keys import CourseKey
 from ccx_keys.locator import CCXLocator
 from student.roles import CourseCcxCoachRole
@@ -564,30 +564,30 @@ def ccx_grades_csv(request, course, ccx=None):
         courseenrollment__course_id=ccx_key,
         courseenrollment__is_active=1
     ).order_by('username').select_related("profile")
-    grades = iterate_grades_for(course, enrolled_students)
+    grades = CourseGradeFactory().iter(course, enrolled_students)

     header = None
     rows = []
-    for student, gradeset, __ in grades:
-        if gradeset:
+    for student, course_grade, __ in grades:
+        if course_grade:
             # We were able to successfully grade this student for this
             # course.
             if not header:
                 # Encode the header row in utf-8 encoding in case there are
                 # unicode characters
                 header = [section['label'].encode('utf-8')
-                          for section in gradeset[u'section_breakdown']]
+                          for section in course_grade.summary[u'section_breakdown']]
                 rows.append(["id", "email", "username", "grade"] + header)

             percents = {
                 section['label']: section.get('percent', 0.0)
-                for section in gradeset[u'section_breakdown']
+                for section in course_grade.summary[u'section_breakdown']
                 if 'label' in section
             }

             row_percents = [percents.get(label, 0.0) for label in header]
             rows.append([student.id, student.email, student.username,
-                         gradeset['percent']] + row_percents)
+                         course_grade.percent] + row_percents)

     buf = StringIO()
     writer = csv.writer(buf)
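A sketch of consuming the new `CourseGradeFactory().iter(...)` generator, mirroring the loop above (the passing cutoff is illustrative):

```python
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory


def passing_usernames(course, students, cutoff=0.5):
    """Yield usernames of students whose course grade meets the cutoff."""
    for student, course_grade, err_msg in CourseGradeFactory().iter(course, students):
        if course_grade is None:
            # err_msg carries the exception message; the student was skipped.
            continue
        if course_grade.percent >= cutoff:
            yield student.username
```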
@@ -1,14 +1,16 @@
 """
 Management command which fixes ungraded certificates for students
 """
-from django.core.management.base import BaseCommand
 import logging
-from optparse import make_option

 from certificates.models import GeneratedCertificate
 from courseware import courses
-from lms.djangoapps.grades import course_grades
-from django.test.client import RequestFactory
+from django.core.management.base import BaseCommand
+from optparse import make_option
+from lms.djangoapps.grades.new.course_grade import CourseGradeFactory


 log = logging.getLogger(__name__)


 class Command(BaseCommand):
@@ -42,18 +44,15 @@ class Command(BaseCommand):

     def handle(self, *args, **options):
         course_id = options['course']
-        print "Fetching ungraded students for {0}".format(course_id)
+        log.info('Fetching ungraded students for %s.', course_id)
         ungraded = GeneratedCertificate.objects.filter(  # pylint: disable=no-member
             course_id__exact=course_id
         ).filter(grade__exact='')
         course = courses.get_course_by_id(course_id)
-        factory = RequestFactory()
-        request = factory.get('/')

         for cert in ungraded:
             # grade the student
-            grade = course_grades.summary(cert.user, course)
-            print "grading {0} - {1}".format(cert.user, grade['percent'])
-            cert.grade = grade['percent']
+            grade = CourseGradeFactory().create(cert.user, course)
+            log.info('grading %s - %s', cert.user, grade.percent)
+            cert.grade = grade.percent
             if not options['noop']:
                 cert.save()
@@ -11,7 +11,7 @@ from django.conf import settings
 from django.core.urlresolvers import reverse
 from requests.auth import HTTPBasicAuth

-from lms.djangoapps.grades import course_grades
+from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
 from xmodule.modulestore.django import modulestore
 from capa.xqueue_interface import XQueueInterface
 from capa.xqueue_interface import make_xheader, make_hashkey
@@ -271,7 +271,7 @@ class XQueueCertInterface(object):
             self.request.session = {}

         is_whitelisted = self.whitelist.filter(user=student, course_id=course_id, whitelist=True).exists()
-        grade = course_grades.summary(student, course)
+        grade = CourseGradeFactory().create(student, course).summary
         enrollment_mode, __ = CourseEnrollment.enrollment_mode_for_user(student, course_id)
         mode_is_verified = enrollment_mode in GeneratedCertificate.VERIFIED_CERTS_MODES
         user_is_verified = SoftwareSecurePhotoVerification.user_is_verified(student)
@@ -22,7 +22,7 @@ from capa.tests.response_xml_factory import (
 from course_modes.models import CourseMode
 from courseware.models import StudentModule, BaseStudentModuleHistory
 from courseware.tests.helpers import LoginEnrollmentTestCase
-from lms.djangoapps.grades import course_grades, progress
+from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
 from openedx.core.djangoapps.credit.api import (
     set_credit_requirements, get_credit_requirement_status
 )
@@ -270,39 +270,17 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
         self.update_course(self.course, self.student_user.id)
         self.refresh_course()

-    def get_grade_summary(self):
+    def get_course_grade(self):
         """
-        calls course_grades.summary for current user and course.
-
-        the keywords for the returned object are
-        - grade : A final letter grade.
-        - percent : The final percent for the class (rounded up).
-        - section_breakdown : A breakdown of each section that makes
-          up the grade. (For display)
-        - grade_breakdown : A breakdown of the major components that
-          make up the final grade. (For display)
+        Return CourseGrade for current user and course.
         """
-        return course_grades.summary(self.student_user, self.course)
-
-    def get_progress_summary(self):
-        """
-        Return progress summary structure for current user and course.
-
-        Returns
-        - courseware_summary is a summary of all sections with problems in the course.
-          It is organized as an array of chapters, each containing an array of sections,
-          each containing an array of scores. This contains information for graded and
-          ungraded problems, and is good for displaying a course summary with due dates,
-          etc.
-        """
-        return progress.summary(self.student_user, self.course).chapter_grades
+        return CourseGradeFactory().create(self.student_user, self.course)

     def check_grade_percent(self, percent):
         """
         Assert that percent grade is as expected.
         """
-        grade_summary = self.get_grade_summary()
-        self.assertEqual(grade_summary['percent'], percent)
+        self.assertEqual(self.get_course_grade().percent, percent)

     def earned_hw_scores(self):
         """
@@ -310,7 +288,9 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl

         Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
         """
-        return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
+        return [
+            s.graded_total.earned for s in self.get_course_grade().graded_subsections_by_format['Homework'].itervalues()
+        ]

     def hw_grade(self, hw_url_name):
         """
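This works because `graded_subsections_by_format` (see the CourseGrade hunk later in this diff) now maps assignment type to an OrderedDict of subsection location to SubsectionGrade, so earned points sit behind `.graded_total`. A sketch of the traversal, assuming only the API visible in this diff:

```python
def earned_points_for(course_grade, assignment_type='Homework'):
    """Collect earned points for each graded subsection of one assignment type."""
    subsections = course_grade.graded_subsections_by_format.get(assignment_type, {})
    # Keys are subsection locations (UsageKeys); values are SubsectionGrade objects.
    return [subsection.graded_total.earned for subsection in subsections.itervalues()]
```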
@@ -318,7 +298,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
         """
         # list of grade summaries for each section
         sections_list = []
-        for chapter in self.get_progress_summary():
+        for chapter in self.get_course_grade().chapter_grades:
             sections_list.extend(chapter['sections'])

         # get the first section that matches the url (there should only be one)
@@ -431,8 +411,11 @@ class TestCourseGrader(TestSubmittingProblems):
                 "drop_count": 0,
                 "short_label": "HW",
                 "weight": hw_weight
-            }, {
+            },
+            {
                 "type": "Final",
+                "min_count": 0,
+                "drop_count": 0,
                 "name": "Final Section",
                 "short_label": "Final",
                 "weight": final_weight
@@ -558,7 +541,7 @@ class TestCourseGrader(TestSubmittingProblems):
         """
         self.basic_setup()
         self.check_grade_percent(0)
-        self.assertEqual(self.get_grade_summary()['grade'], None)
+        self.assertEqual(self.get_course_grade().letter_grade, None)

     def test_b_grade_exact(self):
         """
@@ -567,7 +550,7 @@ class TestCourseGrader(TestSubmittingProblems):
         self.basic_setup()
         self.submit_question_answer('p1', {'2_1': 'Correct'})
         self.check_grade_percent(0.33)
-        self.assertEqual(self.get_grade_summary()['grade'], 'B')
+        self.assertEqual(self.get_course_grade().letter_grade, 'B')

     def test_b_grade_above(self):
         """
@@ -577,7 +560,7 @@ class TestCourseGrader(TestSubmittingProblems):
         self.submit_question_answer('p1', {'2_1': 'Correct'})
         self.submit_question_answer('p2', {'2_1': 'Correct'})
         self.check_grade_percent(0.67)
-        self.assertEqual(self.get_grade_summary()['grade'], 'B')
+        self.assertEqual(self.get_course_grade().letter_grade, 'B')

     def test_a_grade(self):
         """
@@ -588,7 +571,7 @@ class TestCourseGrader(TestSubmittingProblems):
         self.submit_question_answer('p2', {'2_1': 'Correct'})
         self.submit_question_answer('p3', {'2_1': 'Correct'})
         self.check_grade_percent(1.0)
-        self.assertEqual(self.get_grade_summary()['grade'], 'A')
+        self.assertEqual(self.get_course_grade().letter_grade, 'A')

     def test_wrong_answers(self):
         """
@@ -599,7 +582,7 @@ class TestCourseGrader(TestSubmittingProblems):
         self.submit_question_answer('p2', {'2_1': 'Correct'})
         self.submit_question_answer('p3', {'2_1': 'Incorrect'})
         self.check_grade_percent(0.67)
-        self.assertEqual(self.get_grade_summary()['grade'], 'B')
+        self.assertEqual(self.get_course_grade().letter_grade, 'B')

     def test_submissions_api_overrides_scores(self):
         """
@@ -610,7 +593,7 @@ class TestCourseGrader(TestSubmittingProblems):
         self.submit_question_answer('p2', {'2_1': 'Correct'})
         self.submit_question_answer('p3', {'2_1': 'Incorrect'})
         self.check_grade_percent(0.67)
-        self.assertEqual(self.get_grade_summary()['grade'], 'B')
+        self.assertEqual(self.get_course_grade().letter_grade, 'B')

         # But now, set the score with the submissions API and watch
         # as it overrides the score read from StudentModule and our
@@ -625,7 +608,7 @@ class TestCourseGrader(TestSubmittingProblems):
         submission = submissions_api.create_submission(student_item, 'any answer')
         submissions_api.set_score(submission['uuid'], 1, 1)
         self.check_grade_percent(1.0)
-        self.assertEqual(self.get_grade_summary()['grade'], 'A')
+        self.assertEqual(self.get_course_grade().letter_grade, 'A')

     def test_submissions_api_anonymous_student_id(self):
         """
@@ -640,7 +623,7 @@ class TestCourseGrader(TestSubmittingProblems):
             mock_get_scores.return_value = {
                 self.problem_location('p3').to_deprecated_string(): (1, 1)
             }
-            self.get_grade_summary()
+            self.get_course_grade()

         # Verify that the submissions API was sent an anonymized student ID
         mock_get_scores.assert_called_with(
@@ -752,9 +735,6 @@ class TestCourseGrader(TestSubmittingProblems):
         # the Django student views, and does not update enrollment if it already exists.
         CourseEnrollment.enroll(self.student_user, self.course.id, mode)

-        self.submit_question_answer('p1', {'2_1': 'Correct'})
-        self.submit_question_answer('p2', {'2_1': 'Correct'})
-
         # Enable the course for credit
         CreditCourse.objects.create(course_key=self.course.id, enabled=True)
@@ -774,7 +754,15 @@ class TestCourseGrader(TestSubmittingProblems):
         # Add a single credit requirement (final grade)
         set_credit_requirements(self.course.id, requirements)

-        self.get_grade_summary()
+        # Credit requirement is not satisfied before passing grade
+        req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
+        self.assertEqual(req_status[0]["status"], None)
+
+        self._stop_signal_patch()
+        self.submit_question_answer('p1', {'2_1': 'Correct'})
+        self.submit_question_answer('p2', {'2_1': 'Correct'})

+        # Credit requirement is now satisfied after passing grade
         req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
         self.assertEqual(req_status[0]["status"], 'satisfied')
@@ -1235,7 +1235,7 @@ class ProgressPageTests(ModuleStoreTestCase):
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []}),
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}}),
     )
     def test_view_certificate_link(self):
         """
@@ -1294,7 +1294,7 @@ class ProgressPageTests(ModuleStoreTestCase):
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     def test_view_certificate_link_hidden(self):
         """
@@ -1341,7 +1341,7 @@ class ProgressPageTests(ModuleStoreTestCase):

     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     @ddt.data(
         *itertools.product(
@@ -1381,7 +1381,7 @@ class ProgressPageTests(ModuleStoreTestCase):
     @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     def test_page_with_invalidated_certificate_with_html_view(self):
         """
@@ -1415,7 +1415,7 @@ class ProgressPageTests(ModuleStoreTestCase):

     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     def test_page_with_invalidated_certificate_with_pdf(self):
         """
@@ -1432,7 +1432,7 @@ class ProgressPageTests(ModuleStoreTestCase):

     @patch(
         'lms.djangoapps.grades.new.course_grade.CourseGrade.summary',
-        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
+        PropertyMock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': {}})
     )
     def test_message_for_audit_mode(self):
         """ Verify that message appears on progress page, if learner is enrolled
@@ -723,7 +723,7 @@ def _progress(request, course_key, student_id):
     # additional DB lookup (this kills the Progress page in particular).
     student = User.objects.prefetch_related("groups").get(id=student.id)

-    course_grade = CourseGradeFactory(student).create(course)
+    course_grade = CourseGradeFactory().create(student, course)
     courseware_summary = course_grade.chapter_grades
     grade_summary = course_grade.summary
@@ -1127,7 +1127,7 @@ def is_course_passed(course, grade_summary=None, student=None, request=None):
     success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None

     if grade_summary is None:
-        grade_summary = CourseGradeFactory(student).create(course).summary
+        grade_summary = CourseGradeFactory().create(student, course).summary

     return success_cutoff and grade_summary['percent'] >= success_cutoff
@@ -148,7 +148,7 @@ class UserGradeView(GradeViewMixin, GenericAPIView):
             return course

         prep_course_for_grading(course, request)
-        course_grade = CourseGradeFactory(request.user).create(course)
+        course_grade = CourseGradeFactory().create(request.user, course)

         return Response([{
             'username': username,
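The recurring change in these call sites: CourseGradeFactory no longer binds a student at construction time, so one stateless factory instance can serve any number of users. Before and after, per this diff:

```python
# Before: the factory was constructed around a single student.
#   course_grade = CourseGradeFactory(request.user).create(course)

# After: a stateless factory; the student travels with each call.
factory = CourseGradeFactory()
course_grade = factory.create(request.user, course)
persisted = factory.get_persisted(request.user, course)  # may be None
```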
@@ -1,36 +1,33 @@
 """
 Grading Context
 """
-from collections import defaultdict
+from collections import OrderedDict
 from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache

 from .scores import possibly_scored


-def grading_context_for_course(course):
+def grading_context_for_course(course_key):
     """
     Same as grading_context, but takes in a course object.
     """
-    course_structure = get_course_in_cache(course.id)
+    course_structure = get_course_in_cache(course_key)
     return grading_context(course_structure)


 def grading_context(course_structure):
     """
     This returns a dictionary with keys necessary for quickly grading
-    a student. They are used by grades.grade()
+    a student.

     The grading context has two keys:
-        graded_sections - This contains the sections that are graded, as
-            well as all possible children modules that can affect the
-            grading. This allows some sections to be skipped if the student
-            hasn't seen any part of it.
+        all_graded_subsections_by_type - This contains all subsections that are
+            graded, keyed by subsection format (assignment type).

-            The format is a dictionary keyed by section-type. The values are
-            arrays of dictionaries containing
-                "section_block" : The section block
-                "scored_descendant_keys" : An array of usage keys for blocks
-                    could possibly be in the section, for any student
+            The values are arrays of dictionaries containing
+                "subsection_block" : The subsection block
+                "scored_descendants" : An array of usage keys for blocks
+                    that could possibly be in the subsection, for any student

         all_graded_blocks - This contains a list of all blocks that can
             affect grading a student. This is used to efficiently fetch
@@ -39,34 +36,36 @@ def grading_context(course_structure):

     """
     all_graded_blocks = []
-    all_graded_sections = defaultdict(list)
+    all_graded_subsections_by_type = OrderedDict()

     for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
-        for section_key in course_structure.get_children(chapter_key):
-            section = course_structure[section_key]
-            scored_descendants_of_section = [section]
-            if section.graded:
+        for subsection_key in course_structure.get_children(chapter_key):
+            subsection = course_structure[subsection_key]
+            scored_descendants_of_subsection = []
+            if subsection.graded:
                 for descendant_key in course_structure.post_order_traversal(
                         filter_func=possibly_scored,
-                        start_node=section_key,
+                        start_node=subsection_key,
                 ):
-                    scored_descendants_of_section.append(
+                    scored_descendants_of_subsection.append(
                         course_structure[descendant_key],
                     )

                 # include only those blocks that have scores, not if they are just a parent
-                section_info = {
-                    'section_block': section,
+                subsection_info = {
+                    'subsection_block': subsection,
                     'scored_descendants': [
-                        child for child in scored_descendants_of_section
+                        child for child in scored_descendants_of_subsection
                         if getattr(child, 'has_score', None)
                     ]
                 }
-                section_format = getattr(section, 'format', '')
-                all_graded_sections[section_format].append(section_info)
-                all_graded_blocks.extend(scored_descendants_of_section)
+                subsection_format = getattr(subsection, 'format', '')
+                if subsection_format not in all_graded_subsections_by_type:
+                    all_graded_subsections_by_type[subsection_format] = []
+                all_graded_subsections_by_type[subsection_format].append(subsection_info)
+                all_graded_blocks.extend(scored_descendants_of_subsection)

     return {
-        'all_graded_sections': all_graded_sections,
+        'all_graded_subsections_by_type': all_graded_subsections_by_type,
         'all_graded_blocks': all_graded_blocks,
     }
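Putting the docstring and the loop together, a sketch of the structure `grading_context(course_structure)` now returns (block objects elided as strings):

```python
grading_context = {
    'all_graded_subsections_by_type': {
        # subsection format (assignment type) -> subsection infos, in
        # course traversal order (hence the OrderedDict at the top level)
        'Homework': [
            {
                'subsection_block': '<subsection block>',
                'scored_descendants': ['<problem block>', '...'],
            },
        ],
    },
    'all_graded_blocks': ['<every block that can affect grading>'],
}
```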
@@ -1,69 +0,0 @@
-"""
-Functionality for course-level grades.
-"""
-from collections import namedtuple
-from logging import getLogger
-
-import dogstats_wrapper as dog_stats_api
-
-from opaque_keys.edx.keys import CourseKey
-from courseware.courses import get_course_by_id
-from .new.course_grade import CourseGradeFactory
-
-
-log = getLogger(__name__)
-
-
-GradeResult = namedtuple('GradeResult', ['student', 'gradeset', 'err_msg'])
-
-
-def iterate_grades_for(course_or_id, students):
-    """
-    Given a course_id and an iterable of students (User), yield a GradeResult
-    for every student enrolled in the course. GradeResult is a named tuple of:
-
-    (student, gradeset, err_msg)
-
-    If an error occurred, gradeset will be an empty dict and err_msg will be an
-    exception message. If there was no error, err_msg is an empty string.
-
-    The gradeset is a dictionary with the following fields:
-
-    - grade : A final letter grade.
-    - percent : The final percent for the class (rounded up).
-    - section_breakdown : A breakdown of each section that makes
-      up the grade. (For display)
-    - grade_breakdown : A breakdown of the major components that
-      make up the final grade. (For display)
-    - raw_scores: contains scores for every graded module
-    """
-    if isinstance(course_or_id, (basestring, CourseKey)):
-        course = get_course_by_id(course_or_id)
-    else:
-        course = course_or_id
-
-    for student in students:
-        with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
-            try:
-                gradeset = summary(student, course)
-                yield GradeResult(student, gradeset, "")
-            except Exception as exc:  # pylint: disable=broad-except
-                # Keep marching on even if this student couldn't be graded for
-                # some reason, but log it for future reference.
-                log.exception(
-                    'Cannot grade student %s (%s) in course %s because of exception: %s',
-                    student.username,
-                    student.id,
-                    course.id,
-                    exc.message
-                )
-                yield GradeResult(student, {}, exc.message)
-
-
-def summary(student, course):
-    """
-    Returns the grade summary of the student for the given course.
-
-    Also sends a signal to update the minimum grade requirement status.
-    """
-    return CourseGradeFactory(student).create(course).summary
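With `course_grades.py` deleted, its two entry points map onto the factory API introduced below; these are the equivalences applied throughout this diff (`student`, `course`, and `students` assumed in scope):

```python
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory

# course_grades.summary(student, course) becomes:
summary = CourseGradeFactory().create(student, course).summary

# iterate_grades_for(course, students) becomes:
for student, course_grade, err_msg in CourseGradeFactory().iter(course, students):
    pass  # course_grade (or None) replaces the old gradeset dict
```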
@@ -7,7 +7,7 @@ from django.core.management.base import BaseCommand, CommandError
 import os
 from lms.djangoapps.courseware import courses
 from lms.djangoapps.certificates.models import GeneratedCertificate
-from lms.djangoapps.grades import course_grades
+from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
 from opaque_keys import InvalidKeyError
 from opaque_keys.edx.keys import CourseKey
 from opaque_keys.edx.locations import SlashSeparatedCourseKey
@@ -124,18 +124,18 @@ class Command(BaseCommand):
                               count, total, hours, minutes)
                 start = datetime.datetime.now()
             request.user = student
-            grade = course_grades.summary(student, course)
+            grade = CourseGradeFactory().create(student, course)
             if not header:
-                header = [section['label'] for section in grade[u'section_breakdown']]
+                header = [section['label'] for section in grade.summary[u'section_breakdown']]
                 rows.append(["email", "username", "certificate-grade", "grade"] + header)
-            percents = {section['label']: section['percent'] for section in grade[u'section_breakdown']}
+            percents = {section['label']: section['percent'] for section in grade.summary[u'section_breakdown']}
             row_percents = [percents[label] for label in header]
             if student.username in cert_grades:
                 rows.append(
-                    [student.email, student.username, cert_grades[student.username], grade['percent']] + row_percents,
+                    [student.email, student.username, cert_grades[student.username], grade.percent] + row_percents,
                 )
             else:
-                rows.append([student.email, student.username, "N/A", grade['percent']] + row_percents)
+                rows.append([student.email, student.username, "N/A", grade.percent] + row_percents)
         with open(options['output'], 'wb') as f:
             writer = csv.writer(f)
             writer.writerows(rows)
@@ -2,16 +2,17 @@
 CourseGrade Class
 """

-from collections import defaultdict
+from collections import defaultdict, namedtuple, OrderedDict
 from logging import getLogger

 from django.conf import settings
 from django.core.exceptions import PermissionDenied
+import dogstats_wrapper as dog_stats_api
 from lazy import lazy

 from lms.djangoapps.course_blocks.api import get_course_blocks
 from lms.djangoapps.grades.config.models import PersistentGradesEnabledFlag
-from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
+from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED
 from xmodule import block_metadata_utils

 from ..models import PersistentCourseGrade
@@ -37,18 +38,18 @@ class CourseGrade(object):
         self._subsection_grade_factory = SubsectionGradeFactory(self.student, self.course, self.course_structure)

     @lazy
-    def subsection_grade_totals_by_format(self):
+    def graded_subsections_by_format(self):
         """
         Returns grades for the subsections in the course in
         a dict keyed by subsection format types.
         """
-        subsections_by_format = defaultdict(list)
+        subsections_by_format = defaultdict(OrderedDict)
         for chapter in self.chapter_grades:
             for subsection_grade in chapter['sections']:
                 if subsection_grade.graded:
                     graded_total = subsection_grade.graded_total
                     if graded_total.possible > 0:
-                        subsections_by_format[subsection_grade.format].append(graded_total)
+                        subsections_by_format[subsection_grade.format][subsection_grade.location] = subsection_grade
         return subsections_by_format

     @lazy
@@ -70,7 +71,7 @@ class CourseGrade(object):
         # Grading policy might be overriden by a CCX, need to reset it
         self.course.set_grading_policy(self.course.grading_policy)
         grade_value = self.course.grader.grade(
-            self.subsection_grade_totals_by_format,
+            self.graded_subsections_by_format,
             generate_random_scores=settings.GENERATE_PROFILE_SCORES
         )
         # can't use the existing properties due to recursion issues caused by referencing self.grade_value
@@ -137,8 +138,6 @@ class CourseGrade(object):
         grade_summary = self.grade_value
         grade_summary['percent'] = self.percent
         grade_summary['grade'] = self.letter_grade
-        grade_summary['totaled_scores'] = self.subsection_grade_totals_by_format
-        grade_summary['raw_scores'] = list(self.locations_to_scores.itervalues())

         return grade_summary
@@ -150,7 +149,7 @@ class CourseGrade(object):
         """
         subsections_total = sum(len(chapter['sections']) for chapter in self.chapter_grades)

-        total_graded_subsections = sum(len(x) for x in self.subsection_grade_totals_by_format.itervalues())
+        total_graded_subsections = sum(len(x) for x in self.graded_subsections_by_format.itervalues())
         subsections_created = len(self._subsection_grade_factory._unsaved_subsection_grades)  # pylint: disable=protected-access
         subsections_read = subsections_total - subsections_created
         blocks_total = len(self.locations_to_scores)
@@ -295,10 +294,10 @@ class CourseGrade(object):
         """
         Signal all listeners when grades are computed.
         """
-        responses = GRADES_UPDATED.send_robust(
+        responses = COURSE_GRADE_CHANGED.send_robust(
             sender=None,
             user=self.student,
-            grade_summary=self.summary,
+            course_grade=self,
             course_key=self.course.id,
             deadline=self.course.end
         )
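Receivers of the renamed signal now get the whole `course_grade` object instead of a `grade_summary` dict. A sketch of an updated receiver, using only the keyword arguments visible in `send_robust` above (the handler name is hypothetical):

```python
from django.dispatch import receiver
from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED


@receiver(COURSE_GRADE_CHANGED)
def handle_course_grade_changed(sender, user, course_grade, course_key, deadline, **kwargs):
    # course_grade.percent and course_grade.summary replace the old grade_summary dict.
    print(u"{0} now has {1:.0%} in {2}".format(user.username, course_grade.percent, course_key))
```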
@@ -324,32 +323,60 @@ class CourseGradeFactory(object):
     """
     Factory class to create Course Grade objects
     """
-    def __init__(self, student):
-        self.student = student
-
-    def create(self, course, read_only=True):
+    def create(self, student, course, read_only=True):
         """
         Returns the CourseGrade object for the given student and course.

         If read_only is True, doesn't save any updates to the grades.
         Raises a PermissionDenied if the user does not have course access.
         """
-        course_structure = get_course_blocks(self.student, course.location)
+        course_structure = get_course_blocks(student, course.location)
         # if user does not have access to this course, throw an exception
         if not self._user_has_access_to_course(course_structure):
             raise PermissionDenied("User does not have access to this course")
         return (
-            self._get_saved_grade(course, course_structure) or
-            self._compute_and_update_grade(course, course_structure, read_only)
+            self._get_saved_grade(student, course, course_structure) or
+            self._compute_and_update_grade(student, course, course_structure, read_only)
         )

-    def update(self, course, course_structure):
+    GradeResult = namedtuple('GradeResult', ['student', 'course_grade', 'err_msg'])
+
+    def iter(self, course, students):
+        """
+        Given a course and an iterable of students (User), yield a GradeResult
+        for every student enrolled in the course. GradeResult is a named tuple of:
+
+        (student, course_grade, err_msg)
+
+        If an error occurred, course_grade will be None and err_msg will be an
+        exception message. If there was no error, err_msg is an empty string.
+        """
+        for student in students:
+            with dog_stats_api.timer('lms.grades.CourseGradeFactory.iter', tags=[u'action:{}'.format(course.id)]):
+                try:
+                    course_grade = CourseGradeFactory().create(student, course)
+                    yield self.GradeResult(student, course_grade, "")
+                except Exception as exc:  # pylint: disable=broad-except
+                    # Keep marching on even if this student couldn't be graded for
+                    # some reason, but log it for future reference.
+                    log.exception(
+                        'Cannot grade student %s (%s) in course %s because of exception: %s',
+                        student.username,
+                        student.id,
+                        course.id,
+                        exc.message
+                    )
+                    yield self.GradeResult(student, None, exc.message)
+
+    def update(self, student, course, course_structure):
         """
         Updates the CourseGrade for this Factory's student.
         """
-        self._compute_and_update_grade(course, course_structure)
+        self._compute_and_update_grade(student, course, course_structure)

-    def get_persisted(self, course):
+    def get_persisted(self, student, course):
         """
         Returns the saved grade for the given course and student,
         irrespective of whether the saved grade is up-to-date.
@@ -357,9 +384,9 @@ class CourseGradeFactory(object):
|
||||
if not PersistentGradesEnabledFlag.feature_enabled(course.id):
|
||||
return None
|
||||
|
||||
return CourseGrade.get_persisted_grade(self.student, course)
|
||||
return CourseGrade.get_persisted_grade(student, course)
|
||||
|
||||
def _get_saved_grade(self, course, course_structure):
|
||||
def _get_saved_grade(self, student, course, course_structure):
|
||||
"""
|
||||
Returns the saved grade for the given course and student.
|
||||
"""
|
||||
@@ -367,18 +394,18 @@ class CourseGradeFactory(object):
|
||||
return None
|
||||
|
||||
return CourseGrade.load_persisted_grade(
|
||||
self.student,
|
||||
student,
|
||||
course,
|
||||
course_structure
|
||||
)
|
||||
|
||||
def _compute_and_update_grade(self, course, course_structure, read_only=False):
|
||||
def _compute_and_update_grade(self, student, course, course_structure, read_only=False):
|
||||
"""
|
||||
Freshly computes and updates the grade for the student and course.
|
||||
|
||||
If read_only is True, doesn't save any updates to the grades.
|
||||
"""
|
||||
course_grade = CourseGrade(self.student, course, course_structure)
|
||||
course_grade = CourseGrade(student, course, course_structure)
|
||||
course_grade.compute_and_update(read_only)
|
||||
return course_grade
|
||||
|
||||
|
||||
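The net effect of this hunk is that CourseGradeFactory becomes stateless: the student moves from the constructor into each method call, and a new iter() method replaces the module-level iterate_grades_for. A minimal before/after sketch of the call pattern (user, course, and enrolled_students are assumed to be already-loaded objects, not names from this diff):

    from lms.djangoapps.grades.new.course_grade import CourseGradeFactory

    # Before: the factory was bound to a single student.
    # course_grade = CourseGradeFactory(user).create(course)

    # After: one factory instance serves any number of students.
    factory = CourseGradeFactory()
    course_grade = factory.create(user, course, read_only=True)

    # Batch grading; err_msg is '' on success and course_grade is None on failure.
    for student, grade, err_msg in factory.iter(course, enrolled_students):
        if grade is None:
            print('failed to grade %s: %s' % (student.username, err_msg))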
@@ -68,7 +68,7 @@ class SubsectionGrade(object):
):
self._compute_block_score(descendant_key, course_structure, submissions_scores, csm_scores)

self.all_total, self.graded_total = graders.aggregate_scores(self.scores, self.display_name, self.location)
self.all_total, self.graded_total = graders.aggregate_scores(self.scores)
self._log_event(log.debug, u"init_from_structure", student)
return self

@@ -83,16 +83,12 @@ class SubsectionGrade(object):
tw_earned=model.earned_graded,
tw_possible=model.possible_graded,
graded=True,
display_name=self.display_name,
module_id=self.location,
attempted=model.first_attempted is not None,
)
self.all_total = AggregatedScore(
tw_earned=model.earned_all,
tw_possible=model.possible_all,
graded=False,
display_name=self.display_name,
module_id=self.location,
attempted=model.first_attempted is not None,
)
self._log_event(log.debug, u"init_from_model", student)

@@ -1,11 +0,0 @@
"""
Progress Summary of a learner's course grades.
"""
from .new.course_grade import CourseGradeFactory


def summary(student, course):
"""
Returns the CourseGrade for the given course and student.
"""
return CourseGradeFactory(student).create(course)
@@ -122,8 +122,6 @@ def get_score(submissions_scores, csm_scores, persisted_block, block):
weighted_possible,
weight,
graded,
display_name=display_name_with_default_escaped(block),
module_id=block.location,
attempted=attempted,
)


@@ -182,4 +182,4 @@ def recalculate_course_grade(sender, course, course_structure, user, **kwargs):
"""
Updates a saved course grade.
"""
CourseGradeFactory(user).update(course, course_structure)
CourseGradeFactory().update(user, course, course_structure)

@@ -3,11 +3,9 @@ Test grade calculation.
"""

import ddt
from django.http import Http404
import itertools
from mock import patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey

from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.model_data import set_score
@@ -17,49 +15,21 @@ from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangolib.testing.utils import get_mock_request
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from xmodule.block_metadata_utils import display_name_with_default_escaped
from xmodule.graders import ProblemScore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase

from .utils import answer_problem
from .. import course_grades
from ..course_grades import summary as grades_summary
from ..module_grades import get_module_score
from ..new.course_grade import CourseGradeFactory
from ..new.subsection_grade import SubsectionGradeFactory


def _grade_with_errors(student, course):
"""This fake grade method will throw exceptions for student3 and
student4, but allow any other students to go through normal grading.

It's meant to simulate when something goes really wrong while trying to
grade a particular student, so we can test that we won't kill the entire
course grading run.
"""
if student.username in ['student3', 'student4']:
raise Exception("I don't like {}".format(student.username))

return grades_summary(student, course)


def _create_problem_xml():
"""
Creates and returns XML for a multiple choice response problem
"""
return MultipleChoiceResponseXMLFactory().build_xml(
question_text='The correct answer is Choice 3',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
)


@attr(shard=1)
class TestGradeIteration(SharedModuleStoreTestCase):
"""
Test iteration through student gradesets.
Test iteration through student course grades.
"""
COURSE_NUM = "1000"
COURSE_NAME = "grading_test_course"
@@ -87,29 +57,25 @@ class TestGradeIteration(SharedModuleStoreTestCase):
]

def test_empty_student_list(self):
"""If we don't pass in any students, it should return a zero-length
iterator, but it shouldn't error."""
gradeset_results = list(course_grades.iterate_grades_for(self.course.id, []))
self.assertEqual(gradeset_results, [])

def test_nonexistent_course(self):
"""If the course we want to get grades for does not exist, a `Http404`
should be raised. This is a horrible crossing of abstraction boundaries
and should be fixed, but for now we're just testing the behavior. :-("""
with self.assertRaises(Http404):
gradeset_results = course_grades.iterate_grades_for(SlashSeparatedCourseKey("I", "dont", "exist"), [])
gradeset_results.next()
"""
If we don't pass in any students, it should return a zero-length
iterator, but it shouldn't error.
"""
grade_results = list(CourseGradeFactory().iter(self.course, []))
self.assertEqual(grade_results, [])

def test_all_empty_grades(self):
"""No students have grade entries"""
all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)
"""
No students have grade entries.
"""
all_course_grades, all_errors = self._course_grades_and_errors_for(self.course, self.students)
self.assertEqual(len(all_errors), 0)
for gradeset in all_gradesets.values():
self.assertIsNone(gradeset['grade'])
self.assertEqual(gradeset['percent'], 0.0)
for course_grade in all_course_grades.values():
self.assertIsNone(course_grade.letter_grade)
self.assertEqual(course_grade.percent, 0.0)

@patch('lms.djangoapps.grades.course_grades.summary', _grade_with_errors)
def test_grading_exception(self):
@patch('lms.djangoapps.grades.new.course_grade.CourseGradeFactory.create')
def test_grading_exception(self, mock_course_grade):
"""Test that we correctly capture exception messages that bubble up from
grading. Note that we only see errors at this level if the grading
process for this student fails entirely due to an unexpected event --
@@ -118,43 +84,51 @@ class TestGradeIteration(SharedModuleStoreTestCase):
We patch the grade() method with our own, which will generate the errors
for student3 and student4.
"""
all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)

student1, student2, student3, student4, student5 = self.students
mock_course_grade.side_effect = [
Exception("Error for {}.".format(student.username))
if student.username in ['student3', 'student4']
else mock_course_grade.return_value
for student in self.students
]
all_course_grades, all_errors = self._course_grades_and_errors_for(self.course, self.students)
self.assertEqual(
all_errors,
{
student3: "I don't like student3",
student4: "I don't like student4"
student3: "Error for student3.",
student4: "Error for student4.",
}
)

# But we should still have five gradesets
self.assertEqual(len(all_gradesets), 5)
self.assertEqual(len(all_course_grades), 5)

# Even though two will simply be empty
self.assertFalse(all_gradesets[student3])
self.assertFalse(all_gradesets[student4])
self.assertIsNone(all_course_grades[student3])
self.assertIsNone(all_course_grades[student4])

# The rest will have grade information in them
self.assertTrue(all_gradesets[student1])
self.assertTrue(all_gradesets[student2])
self.assertTrue(all_gradesets[student5])
self.assertIsNotNone(all_course_grades[student1])
self.assertIsNotNone(all_course_grades[student2])
self.assertIsNotNone(all_course_grades[student5])

################################# Helpers #################################
def _gradesets_and_errors_for(self, course_id, students):
"""Simple helper method to iterate through student grades and give us
def _course_grades_and_errors_for(self, course, students):
"""
Simple helper method to iterate through student grades and give us
two dictionaries -- one that has all students and their respective
gradesets, and one that has only students that could not be graded and
their respective error messages."""
students_to_gradesets = {}
course grades, and one that has only students that could not be graded
and their respective error messages.
"""
students_to_course_grades = {}
students_to_errors = {}

for student, gradeset, err_msg in course_grades.iterate_grades_for(course_id, students):
students_to_gradesets[student] = gradeset
for student, course_grade, err_msg in CourseGradeFactory().iter(course, students):
students_to_course_grades[student] = course_grade
if err_msg:
students_to_errors[student] = err_msg

return students_to_gradesets, students_to_errors
return students_to_course_grades, students_to_errors


@ddt.ddt
@@ -169,7 +143,7 @@ class TestWeightedProblems(SharedModuleStoreTestCase):
cls.chapter = ItemFactory.create(parent=cls.course, category="chapter", display_name="chapter")
cls.sequential = ItemFactory.create(parent=cls.chapter, category="sequential", display_name="sequential")
cls.vertical = ItemFactory.create(parent=cls.sequential, category="vertical", display_name="vertical1")
problem_xml = _create_problem_xml()
problem_xml = cls._create_problem_xml()
cls.problems = []
for i in range(2):
cls.problems.append(
@@ -186,6 +160,17 @@ class TestWeightedProblems(SharedModuleStoreTestCase):
self.user = UserFactory()
self.request = get_mock_request(self.user)

@classmethod
def _create_problem_xml(cls):
"""
Creates and returns XML for a multiple choice response problem
"""
return MultipleChoiceResponseXMLFactory().build_xml(
question_text='The correct answer is Choice 3',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
)

def _verify_grades(self, raw_earned, raw_possible, weight, expected_score):
"""
Verifies the computed grades are as expected.
@@ -211,8 +196,6 @@ class TestWeightedProblems(SharedModuleStoreTestCase):
# verify all problem grades
for problem in self.problems:
problem_score = subsection_grade.locations_to_scores[problem.location]
expected_score.display_name = display_name_with_default_escaped(problem)
expected_score.module_id = problem.location
self.assertEquals(problem_score, expected_score)

# verify subsection grades
@@ -246,8 +229,6 @@ class TestWeightedProblems(SharedModuleStoreTestCase):
weighted_possible=expected_w_possible,
weight=weight,
graded=expected_graded,
display_name=None,  # problem-specific, filled in by _verify_grades
module_id=None,  # problem-specific, filled in by _verify_grades
attempted=True,
)
self._verify_grades(raw_earned, raw_possible, weight, expected_score)
@@ -296,7 +277,7 @@ class TestScoreForModule(SharedModuleStoreTestCase):
answer_problem(cls.course, cls.request, cls.l, score=1, max_value=3)
answer_problem(cls.course, cls.request, cls.n, score=3, max_value=10)

cls.course_grade = CourseGradeFactory(cls.request.user).create(cls.course)
cls.course_grade = CourseGradeFactory().create(cls.request.user, cls.course)

def test_score_chapter(self):
earned, possible = self.course_grade.score_for_module(self.a.location)

@@ -111,7 +111,7 @@ class TestCourseGradeFactory(GradeTestBase):
def test_course_grade_feature_gating(self, feature_flag, course_setting):
# Grades are only saved if the feature flag and the advanced setting are
# both set to True.
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with persistent_grades_feature_flags(
global_flag=feature_flag,
enabled_for_all_courses=False,
@@ -119,32 +119,32 @@ class TestCourseGradeFactory(GradeTestBase):
enabled_for_course=course_setting
):
with patch('lms.djangoapps.grades.new.course_grade.CourseGrade.load_persisted_grade') as mock_save_grades:
grade_factory.create(self.course)
grade_factory.create(self.request.user, self.course)
self.assertEqual(mock_save_grades.called, feature_flag and course_setting)

def test_course_grade_creation(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with mock_get_score(1, 2):
course_grade = grade_factory.create(self.course)
course_grade = grade_factory.create(self.request.user, self.course)
self.assertEqual(course_grade.letter_grade, u'Pass')
self.assertEqual(course_grade.percent, 0.5)

def test_zero_course_grade(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with mock_get_score(0, 2):
course_grade = grade_factory.create(self.course)
course_grade = grade_factory.create(self.request.user, self.course)
self.assertIsNone(course_grade.letter_grade)
self.assertEqual(course_grade.percent, 0.0)

def test_get_persisted(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
# first, create a grade in the database
with mock_get_score(1, 2):
grade_factory.create(self.course, read_only=False)
grade_factory.create(self.request.user, self.course, read_only=False)

# retrieve the grade, ensuring it is as expected and takes just one query
with self.assertNumQueries(1):
course_grade = grade_factory.get_persisted(self.course)
course_grade = grade_factory.get_persisted(self.request.user, self.course)
self.assertEqual(course_grade.letter_grade, u'Pass')
self.assertEqual(course_grade.percent, 0.5)

@@ -168,7 +168,7 @@ class TestCourseGradeFactory(GradeTestBase):
# ensure the grade can still be retrieved via get_persisted
# despite its outdated grading policy
with self.assertNumQueries(1):
course_grade = grade_factory.get_persisted(self.course)
course_grade = grade_factory.get_persisted(self.request.user, self.course)
self.assertEqual(course_grade.letter_grade, u'Pass')
self.assertEqual(course_grade.percent, 0.5)

@@ -587,7 +587,7 @@ class TestCourseGradeLogging(ProblemSubmissionTestMixin, SharedModuleStoreTestCa
Creates a course grade and asserts that the associated logging
matches the expected totals passed in to the function.
"""
factory.create(self.course, read_only=False)
factory.create(self.request.user, self.course, read_only=False)
log_mock.assert_called_with(
u"Persistent Grades: CourseGrade.{0}, course: {1}, user: {2}".format(
log_statement,
@@ -597,7 +597,7 @@ class TestCourseGradeLogging(ProblemSubmissionTestMixin, SharedModuleStoreTestCa
)

def test_course_grade_logging(self):
grade_factory = CourseGradeFactory(self.request.user)
grade_factory = CourseGradeFactory()
with persistent_grades_feature_flags(
global_flag=True,
enabled_for_all_courses=False,

@@ -175,9 +175,7 @@ class TestGetScore(TestCase):
self._create_persisted_block(persisted_block_value),
self._create_block(block_value),
)
expected_score = ProblemScore(
display_name=self.display_name, module_id=self.location, **expected_result._asdict()
)
expected_score = ProblemScore(**expected_result._asdict())
self.assertEquals(score, expected_score)


@@ -9,13 +9,15 @@ from xmodule.graders import ProblemScore


@contextmanager
def mock_passing_grade(grade_pass='Pass', percent=0.75):
def mock_passing_grade(grade_pass='Pass', percent=0.75, ):
"""
Mock the grading function to always return a passing grade.
"""
with patch('lms.djangoapps.grades.course_grades.summary') as mock_grade:
mock_grade.return_value = {'grade': grade_pass, 'percent': percent}
yield
with patch('lms.djangoapps.grades.new.course_grade.CourseGrade._compute_letter_grade') as mock_letter_grade:
with patch('lms.djangoapps.grades.new.course_grade.CourseGrade._calc_percent') as mock_percent_grade:
mock_letter_grade.return_value = grade_pass
mock_percent_grade.return_value = percent
yield

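The rewritten context manager patches the two CourseGrade helpers instead of the deleted course_grades.summary. A hypothetical test usage, assuming letter_grade and percent are derived from the patched helpers (the test method and import path below are illustrative, not part of this diff):

    # with mock_passing_grade(grade_pass='A', percent=0.9):
    #     course_grade = CourseGradeFactory().create(self.user, self.course)
    #     self.assertEqual(course_grade.letter_grade, 'A')
    #     self.assertEqual(course_grade.percent, 0.9)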
@contextmanager
@@ -31,8 +33,6 @@ def mock_get_score(earned=0, possible=1):
weighted_possible=possible,
weight=1,
graded=True,
display_name=None,
module_id=None,
attempted=True,
)
yield mock_score

@@ -14,7 +14,7 @@ from opaque_keys.edx.keys import CourseKey
from edxmako.shortcuts import render_to_response
from courseware.courses import get_course_with_access
from lms.djangoapps.instructor.views.api import require_level
from lms.djangoapps.grades import course_grades
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from xmodule.modulestore.django import modulestore


@@ -91,7 +91,7 @@ def get_grade_book_page(request, course, course_key):
'username': student.username,
'id': student.id,
'email': student.email,
'grade_summary': course_grades.summary(student, course)
'grade_summary': CourseGradeFactory().create(student, course).summary
}
for student in enrolled_students
]

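The gradebook page now derives its summary dict from a CourseGrade object rather than the deleted course_grades.summary helper. A sketch of the equivalence, with key names taken from the summary-building hunk earlier in this diff:

    course_grade = CourseGradeFactory().create(student, course)
    summary = course_grade.summary
    # summary carries 'percent', 'grade', and 'totaled_scores' keys, mirroring
    # what course_grades.summary(student, course) used to return.
    print(summary['percent'], summary['grade'])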
@@ -483,7 +483,7 @@ def dump_grading_context(course):
if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):
msg += '\n'
msg += "Graded sections:\n"
for subgrader, category, weight in course.grader.sections:
for subgrader, category, weight in course.grader.subgraders:
msg += "  subgrader=%s, type=%s, category=%s, weight=%s\n"\
% (subgrader.__class__, subgrader.type, category, weight)
subgrader.index = 1
@@ -491,14 +491,14 @@ def dump_grading_context(course):
msg += hbar
msg += "Listing grading context for course %s\n" % course.id.to_deprecated_string()

gcontext = grading_context_for_course(course)
gcontext = grading_context_for_course(course.id)
msg += "graded sections:\n"

msg += '%s\n' % gcontext['all_graded_sections'].keys()
for (gsomething, gsvals) in gcontext['all_graded_sections'].items():
msg += '%s\n' % gcontext['all_graded_subsections_by_type'].keys()
for (gsomething, gsvals) in gcontext['all_graded_subsections_by_type'].items():
msg += "--> Section %s:\n" % (gsomething)
for sec in gsvals:
sdesc = sec['section_block']
sdesc = sec['subsection_block']
frmat = getattr(sdesc, 'format', None)
aname = ''
if frmat in graders:

@@ -46,7 +46,8 @@ from certificates.models import (
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from lms.djangoapps.grades.course_grades import iterate_grades_for
from lms.djangoapps.grades.context import grading_context_for_course
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
@@ -62,7 +63,6 @@ from lms.djangoapps.instructor_task.models import ReportStore, InstructorTask, P
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
@@ -670,7 +670,8 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input,
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
total_enrolled_students = enrolled_students.count()
task_progress = TaskProgress(action_name, total_enrolled_students, start_time)

fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
@@ -695,22 +696,37 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input,
whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

# Loop over all our students and build our CSV lists in memory
header = None
rows = []
err_rows = [["id", "username", "error_msg"]]
current_step = {'step': 'Calculating Grades'}

total_enrolled_students = enrolled_students.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
task_info_string,
action_name,
current_step,
total_enrolled_students
total_enrolled_students,
)
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):

graded_assignments = _graded_assignments(course_id)
grade_header = []
for assignment_info in graded_assignments.itervalues():
if assignment_info['use_subsection_headers']:
grade_header.extend(assignment_info['subsection_headers'].itervalues())
grade_header.append(assignment_info['average_header'])

rows.append(
["Student ID", "Email", "Username", "Grade"] +
grade_header +
cohorts_header +
group_configs_header +
teams_header +
['Enrollment Track', 'Verification Status'] +
certificate_info_header
)

for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
@@ -728,70 +744,71 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input,
total_enrolled_students
)

if gradeset:
# We were able to successfully grade this student for this course.
task_progress.succeeded += 1
if not header:
header = [section['label'] for section in gradeset[u'section_breakdown']]
rows.append(
["id", "email", "username", "grade"] + header + cohorts_header +
group_configs_header + teams_header +
['Enrollment Track', 'Verification Status'] + certificate_info_header
)

percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}

cohorts_group_name = []
if course_is_cohorted:
group = get_cohort(student, course_id, assign=False)
cohorts_group_name.append(group.name if group else '')

group_configs_group_names = []
for partition in experiment_partitions:
group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
group_configs_group_names.append(group.name if group else '')

team_name = []
if teams_enabled:
try:
membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
team_name.append(membership.team.name)
except CourseTeamMembership.DoesNotExist:
team_name.append('')

enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
student,
course_id,
enrollment_mode
)
certificate_info = certificate_info_for_user(
student,
course_id,
gradeset['grade'],
student.id in whitelisted_user_ids
)

# Not everybody has the same gradable items. If the item is not
# found in the user's gradeset, just assume it's a 0. The aggregated
# grades for their sections and overall course will be calculated
# without regard for the item they didn't have access to, so it's
# possible for a student to have a 0.0 show up in their row but
# still have 100% for the course.
row_percents = [percents.get(label, 0.0) for label in header]
rows.append(
[student.id, student.email, student.username, gradeset['percent']] +
row_percents + cohorts_group_name + group_configs_group_names + team_name +
[enrollment_mode] + [verification_status] + certificate_info
)
else:
if not course_grade:
# An empty gradeset means we failed to grade a student.
task_progress.failed += 1
err_rows.append([student.id, student.username, err_msg])
continue

# We were able to successfully grade this student for this course.
task_progress.succeeded += 1

cohorts_group_name = []
if course_is_cohorted:
group = get_cohort(student, course_id, assign=False)
cohorts_group_name.append(group.name if group else '')

group_configs_group_names = []
for partition in experiment_partitions:
group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
group_configs_group_names.append(group.name if group else '')

team_name = []
if teams_enabled:
try:
membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
team_name.append(membership.team.name)
except CourseTeamMembership.DoesNotExist:
team_name.append('')

enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
student,
course_id,
enrollment_mode
)
certificate_info = certificate_info_for_user(
student,
course_id,
course_grade.letter_grade,
student.id in whitelisted_user_ids
)

grade_results = []
for assignment_type, assignment_info in graded_assignments.iteritems():
for subsection_location in assignment_info['subsection_headers']:
try:
subsection_grade = course_grade.graded_subsections_by_format[assignment_type][subsection_location]
except KeyError:
grade_results.append([u'Not Accessible'])
else:
if subsection_grade.graded_total.attempted:
grade_results.append(
[subsection_grade.graded_total.earned / subsection_grade.graded_total.possible]
)
else:
grade_results.append([u'Not Attempted'])
if assignment_info['use_subsection_headers']:
assignment_average = course_grade.grade_value['grade_breakdown'].get(assignment_type, {}).get('percent')
grade_results.append([assignment_average])

grade_results = list(chain.from_iterable(grade_results))

rows.append(
[student.id, student.email, student.username, course_grade.percent] +
grade_results + cohorts_group_name + group_configs_group_names + team_name +
[enrollment_mode] + [verification_status] + certificate_info
)

TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
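The per-subsection cell logic above reduces to three states, which is the core of this PR: a subsection missing from the student's course structure reads "Not Accessible", a visible-but-untouched subsection reads "Not Attempted", and only attempted work yields a numeric ratio. A distilled, hypothetical helper (not in the diff) that mirrors that loop:

    def subsection_cell(graded_subsections, assignment_type, location):
        # Missing key: the subsection is not in this student's course
        # structure (e.g. A/B test or cohorted content).
        try:
            grade = graded_subsections[assignment_type][location]
        except KeyError:
            return u'Not Accessible'
        # Present but never attempted is now distinct from a genuine 0.
        if grade.graded_total.attempted:
            return grade.graded_total.earned / grade.graded_total.possible
        return u'Not Attempted'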
@@ -819,54 +836,61 @@ def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input,
return task_progress.update_task_state(extra_meta=current_step)


def _order_problems(blocks):
def _graded_assignments(course_key):
"""
Sort the problems by the assignment type and assignment that it belongs to.

Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).

Returns:
an OrderedDict that maps a problem id to its headers in the final report.
Returns an OrderedDict that maps an assignment type to a dict of subsection-headers and average-header.
"""
problems = OrderedDict()
assignments = OrderedDict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
grading_context = grading_context_for_course(course_key)
graded_assignments_map = OrderedDict()
for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
graded_subsections_map = OrderedDict()

# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
subsection = subsection_info['subsection_block']
header_name = u"{assignment_type} {subsection_index}: {subsection_name}".format(
assignment_type=assignment_type_name,
subsection_index=subsection_index,
subsection_name=subsection.display_name,
)
graded_subsections_map[subsection.location] = header_name

current_format = blocks[current]['format']
assignments[current_format][current].append(block)
average_header = u"{assignment_type}".format(assignment_type=assignment_type_name)

# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
# Use separate subsection and average columns only if
# there's more than one subsection.
use_subsection_headers = len(subsection_infos) > 1
if use_subsection_headers:
average_header += u" (Avg)"

graded_assignments_map[assignment_type_name] = {
'subsection_headers': graded_subsections_map,
'average_header': average_header,
'use_subsection_headers': use_subsection_headers
}
return graded_assignments_map


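For a hypothetical course with two graded Homework subsections and a single Exam, the mapping built above would look roughly like this (the *_location names are placeholder usage keys, not values from this diff):

    graded_assignments = OrderedDict([
        ('Homework', {
            'subsection_headers': OrderedDict([
                (hw1_location, u'Homework 1: Subsection One'),
                (hw2_location, u'Homework 2: Subsection Two'),
            ]),
            'average_header': u'Homework (Avg)',  # "(Avg)" appended: >1 subsection
            'use_subsection_headers': True,
        }),
        ('Exam', {
            'subsection_headers': OrderedDict([(exam_location, u'Exam 1: Final')]),
            'average_header': u'Exam',  # single subsection: no separate columns
            'use_subsection_headers': False,
        }),
    ])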
def _graded_scorable_blocks_to_header(course_key):
"""
Returns an OrderedDict that maps a scorable block's id to its
headers in the final report.
"""
scorable_blocks_map = OrderedDict()
grading_context = grading_context_for_course(course_key)
for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
for scorable_block in subsection_info['scored_descendants']:
header_name = (
u"{assignment_type} {subsection_index}: "
u"{subsection_name} - {scorable_block_name}"
).format(
scorable_block_name=scorable_block.display_name,
assignment_type=assignment_type_name,
subsection_index=subsection_index,
subsection_name=subsection_info['subsection_block'].display_name,
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]

return problems
scorable_blocks_map[scorable_block.location] = [header_name + " (Earned)", header_name + " (Possible)"]
return scorable_blocks_map


def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
@@ -919,49 +943,39 @@ def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _t
# as the keys. It is structured in this way to keep the values related.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])

try:
course_structure = CourseStructure.objects.get(course_id=course_id)
blocks = course_structure.ordered_blocks
problems = _order_problems(blocks)
except CourseStructure.DoesNotExist:
return task_progress.update_task_state(
extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
)
graded_scorable_blocks = _graded_scorable_blocks_to_header(course_id)

# Just generate the static fields for now.
rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
rows = [list(header_row.values()) + ['Grade'] + list(chain.from_iterable(graded_scorable_blocks.values()))]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}

for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
course = get_course_by_id(course_id)
for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1

if 'percent' not in gradeset or 'raw_scores' not in gradeset:
if not course_grade:
# There was an error grading this student.
# Generally there will be a non-empty err_msg, but that is not always the case.
if not err_msg:
err_msg = u"Unknown error"
err_msg = u'Unknown error'
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue

final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
earned_possible_values = []
for block_location in graded_scorable_blocks:
try:
problem_score = problem_scores[problem_id]
earned_possible_values.append([problem_score.earned, problem_score.possible])
problem_score = course_grade.locations_to_scores[block_location]
except KeyError:
# The student has not been graded on this problem. For example,
# iterate_grades_for skips problems that students have never
# seen in order to speed up report generation. It could also be
# the case that the student does not have access to it (e.g. A/B
# test or cohorted courseware).
earned_possible_values.append(['N/A', 'N/A'])
rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
earned_possible_values.append([u'Not Accessible', u'Not Accessible'])
else:
if problem_score.attempted:
earned_possible_values.append([problem_score.earned, problem_score.possible])
else:
earned_possible_values.append([u'Not Attempted', problem_score.possible])

rows.append(student_fields + [course_grade.percent] + list(chain.from_iterable(earned_possible_values)))

task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
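In the problem grade report, each scorable block contributes an (Earned, Possible) pair with the same three-way distinction. For a hypothetical student who answered one problem, skipped a second, and could not see a third, a row would read roughly (the values below are illustrative):

    # Student ID, Email, Username, Grade, then one (Earned, Possible) pair per block:
    row = [42, 'jane@example.com', 'jane', 0.13,
           1.0, 2.0,                              # attempted: earned / possible
           u'Not Attempted', 2.0,                 # seen but never attempted
           u'Not Accessible', u'Not Accessible']  # not in the student's structure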
@@ -2,6 +2,7 @@
Base test classes for LMS instructor-initiated background tasks

"""
# pylint: disable=attribute-defined-outside-init
import os
import json
from mock import Mock, patch
@@ -12,7 +13,6 @@ from uuid import uuid4

from celery.states import SUCCESS, FAILURE
from django.core.urlresolvers import reverse
from django.test.testcases import TestCase
from django.contrib.auth.models import User

from capa.tests.response_xml_factory import OptionResponseXMLFactory
@@ -37,7 +37,8 @@ TEST_COURSE_ORG = 'edx'
TEST_COURSE_NAME = 'test_course'
TEST_COURSE_NUMBER = '1.23x'
TEST_COURSE_KEY = SlashSeparatedCourseKey(TEST_COURSE_ORG, TEST_COURSE_NUMBER, TEST_COURSE_NAME)
TEST_SECTION_NAME = "Problem"
TEST_CHAPTER_NAME = "Section"
TEST_SECTION_NAME = "Subsection"

TEST_FAILURE_MESSAGE = 'task failed horribly'
TEST_FAILURE_EXCEPTION = 'RandomCauseError'
@@ -135,14 +136,18 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase)
Add a chapter and a sequential to the current course.
"""
# Add a chapter to the course
chapter = ItemFactory.create(parent_location=self.course.location,
display_name=TEST_SECTION_NAME)
self.chapter = ItemFactory.create(
parent_location=self.course.location,
display_name=TEST_CHAPTER_NAME,
)

# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(parent_location=chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=TEST_SECTION_NAME)
self.problem_section = ItemFactory.create(
parent_location=self.chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=TEST_SECTION_NAME,
)

@staticmethod
def get_user_email(username):
@@ -335,10 +340,10 @@ class TestReportMixin(object):
file_index (int): Describes which report store file to
open. Files are ordered by last modified date, and 0
corresponds to the most recently modified file.
verify_order (boolean): When True, we verify that both the
content and order of `expected_rows` matches the
actual csv rows. When False (default), we only verify
that the content matches.
verify_order (boolean): When True (default), we verify that
both the content and order of `expected_rows` matches
the actual csv rows. When False, we only verify that
the content matches.
ignore_other_columns (boolean): When True, we verify that `expected_rows`
contain data which is the subset of actual csv rows.
"""

@@ -134,9 +134,9 @@ class TestRescoringTask(TestIntegrationTask):
# are in sync.
expected_subsection_grade = expected_score

course_grade = CourseGradeFactory(user).create(self.course)
course_grade = CourseGradeFactory().create(user, self.course)
self.assertEquals(
course_grade.subsection_grade_totals_by_format['Homework'][0].earned,
course_grade.graded_subsections_by_format['Homework'][self.problem_section.location].graded_total.earned,
expected_subsection_grade,
)

@@ -574,13 +574,13 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
self.verify_rows_in_csv(
[
merge_dicts(
{'id': str(student.id), 'username': student.username, 'email': student.email},
{'Student ID': str(student.id), 'Username': student.username, 'Email': student.email},
grades,
user_partition_group(student)
)
for student_grades in students_grades for student, grades in student_grades.iteritems()
],
ignore_other_columns=ignore_other_columns
ignore_other_columns=ignore_other_columns,
)

def test_both_groups_problems(self):
@@ -604,10 +604,20 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
self.verify_csv_task_success(result)
self.verify_grades_in_csv(
[
{self.student_a: {'grade': '1.0', 'HW': '1.0'}},
{self.student_b: {'grade': '0.5', 'HW': '0.5'}}
{
self.student_a: {
u'Grade': '1.0',
u'Homework': '1.0',
}
},
{
self.student_b: {
u'Grade': '0.5',
u'Homework': '0.5',
}
},
],
ignore_other_columns=True
ignore_other_columns=True,
)

def test_one_group_problem(self):
@@ -627,8 +637,18 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
self.verify_csv_task_success(result)
self.verify_grades_in_csv(
[
{self.student_a: {'grade': '1.0', 'HW': '1.0'}},
{self.student_b: {'grade': '0.0', 'HW': '0.0'}}
{
self.student_a: {
u'Grade': '1.0',
u'Homework': '1.0',
},
},
{
self.student_b: {
u'Grade': '0.0',
u'Homework': u'Not Accessible',
}
},
],
ignore_other_columns=True
)

@@ -15,7 +15,7 @@ import urllib

import ddt
from freezegun import freeze_time
from mock import Mock, patch
from mock import Mock, patch, MagicMock
from nose.plugins.attrib import attr
import tempfile
import unicodecsv
@@ -115,15 +115,14 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)

@patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
@patch('lms.djangoapps.instructor_task.tasks_helper.iterate_grades_for')
def test_grading_failure(self, mock_iterate_grades_for, _mock_current_task):
@patch('lms.djangoapps.grades.new.course_grade.CourseGradeFactory.iter')
def test_grading_failure(self, mock_grades_iter, _mock_current_task):
"""
Test that any grading errors are properly reported in the
progress dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
(self.create_student('username', 'student@example.com'), {}, 'Cannot grade student')
mock_grades_iter.return_value = [
(self.create_student('username', 'student@example.com'), None, 'Cannot grade student')
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
@@ -293,17 +292,20 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
)

@patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
@patch('lms.djangoapps.instructor_task.tasks_helper.iterate_grades_for')
def test_unicode_in_csv_header(self, mock_iterate_grades_for, _mock_current_task):
@patch('lms.djangoapps.grades.new.course_grade.CourseGradeFactory.iter')
def test_unicode_in_csv_header(self, mock_grades_iter, _mock_current_task):
"""
Tests that CSV grade report works if unicode in headers.
"""
# mock a response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
mock_course_grade = MagicMock()
mock_course_grade.summary = {'section_breakdown': [{'label': u'\u8282\u540e\u9898 01'}]}
mock_course_grade.letter_grade = None
mock_course_grade.percent = 0
mock_grades_iter.return_value = [
(
self.create_student('username', 'student@example.com'),
{'section_breakdown': [{'label': u'\u8282\u540e\u9898 01'}], 'percent': 0, 'grade': None},
'Cannot grade student'
mock_course_grade,
'',
)
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
@@ -588,7 +590,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
# technically possible in openedx.
self.student_1 = self.create_student(u'üser_1')
self.student_2 = self.create_student(u'üser_2')
self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Grade']

@patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
def test_no_problems(self, _get_current_task):
@@ -622,7 +624,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
self.submit_student_answer(self.student_1.username, u'Problem1', ['Option 1'])
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
problem_name = u'Homework 1: Problem - Problem1'
problem_name = u'Homework 1: Subsection - Problem1'
header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
self.verify_rows_in_csv([
dict(zip(
@@ -631,7 +633,8 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
unicode(self.student_1.id),
self.student_1.email,
self.student_1.username,
'0.01', '1.0', '2.0']
'0.01', '1.0', '2.0',
]
)),
dict(zip(
header_row,
@@ -639,23 +642,22 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
unicode(self.student_2.id),
self.student_2.email,
self.student_2.username,
'0.0', '0.0', '2'
'0.0', u'Not Attempted', '2.0',
]
))
])

@patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
@patch('lms.djangoapps.instructor_task.tasks_helper.iterate_grades_for')
@patch('lms.djangoapps.grades.new.course_grade.CourseGradeFactory.iter')
@ddt.data(u'Cannot grade student', '')
def test_grading_failure(self, error_message, mock_iterate_grades_for, _mock_current_task):
def test_grading_failure(self, error_message, mock_grades_iter, _mock_current_task):
"""
Test that any grading errors are properly reported in the progress
dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
student = self.create_student(u'username', u'student@example.com')
mock_iterate_grades_for.return_value = [
(student, {}, error_message)
mock_grades_iter.return_value = [
(student, None, error_message)
]
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
@@ -694,7 +696,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,

In order to verify that the behavior of the grade report is correct, we submit answers for problems
that the student won't have access to. A/B tests won't restrict access to the problems, but it should
not show up in that student's course tree when generating the grade report, hence the N/A's in the grade report.
not show up in that student's course tree when generating the grade report, hence the Not Accessible's
in the grade report.
"""
# student A will get 100%, student B will get 50% because
# OPTION_1 is the correct option, and OPTION_2 is the
@@ -711,8 +714,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
{'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
)

problem_names = [u'Homework 1: Problem - problem_a_url', u'Homework 1: Problem - problem_b_url']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
problem_names = [u'Homework 1: Subsection - problem_a_url', u'Homework 1: Subsection - problem_b_url']
header_row = [u'Student ID', u'Email', u'Username', u'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']

@@ -723,7 +726,7 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
unicode(self.student_a.id),
self.student_a.email,
self.student_a.username,
u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'
u'1.0', u'2.0', u'2.0', u'Not Accessible', u'Not Accessible'
]
)),
dict(zip(
@@ -731,7 +734,7 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
[
unicode(self.student_b.id),
self.student_b.email,
self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'
self.student_b.username, u'0.5', u'Not Accessible', u'Not Accessible', u'1.0', u'2.0'
]
))
])
@@ -793,7 +796,7 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
title = 'Homework %d 1: Problem section %d - %s' % (i, i, problem_url)
problem_names.append(title)

header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
header_row = [u'Student ID', u'Email', u'Username', u'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']

@@ -858,16 +861,28 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - Problem0', u'Homework 1: Problem - Problem1']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
problem_names = [u'Homework 1: Subsection - Problem0', u'Homework 1: Subsection - Problem1']
header_row = [u'Student ID', u'Email', u'Username', u'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']

user_grades = [
{'user': self.staff_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
{'user': self.alpha_user, 'grade': [u'1.0', u'2.0', u'2.0', u'N/A', u'N/A']},
{'user': self.beta_user, 'grade': [u'0.5', u'N/A', u'N/A', u'1.0', u'2.0']},
{'user': self.non_cohorted_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
{
'user': self.staff_user,
'grade': [u'0.0', u'Not Accessible', u'Not Accessible', u'Not Accessible', u'Not Accessible'],
},
{
'user': self.alpha_user,
'grade': [u'1.0', u'2.0', u'2.0', u'Not Accessible', u'Not Accessible'],
},
{
'user': self.beta_user,
'grade': [u'0.5', u'Not Accessible', u'Not Accessible', u'1.0', u'2.0'],
},
{
'user': self.non_cohorted_user,
'grade': [u'0.0', u'Not Accessible', u'Not Accessible', u'Not Accessible', u'Not Accessible'],
},
]

# Verify generated grades and expected grades match
@@ -1492,6 +1507,90 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
)


@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
"""
Test that grade report has correct grade values.
"""
def setUp(self):
super(TestGradeReport, self).setUp()
self.create_course()
self.student = self.create_student(u'üser_1')

def create_course(self):
"""
Creates a course with various subsections for testing
"""
self.course = CourseFactory.create(
grading_policy={
"GRADER": [
{
"type": "Homework",
"min_count": 4,
"drop_count": 0,
"weight": 1.0
},
],
},
)
self.chapter = ItemFactory.create(parent=self.course, category='chapter')

self.problem_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name='Subsection'
)
self.define_option_problem(u'Problem1', parent=self.problem_section, num_responses=1)
self.hidden_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
visible_to_staff_only=True,
display_name='Hidden',
)
self.define_option_problem(u'Problem2', parent=self.hidden_section)
self.unattempted_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name='Unattempted',
)
self.define_option_problem(u'Problem3', parent=self.unattempted_section)
self.empty_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name='Empty',
)

def test_grade_report(self):
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])

with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 1, 'succeeded': 1, 'failed': 0},
result,
)
self.verify_rows_in_csv(
[
{
u'Student ID': unicode(self.student.id),
u'Email': self.student.email,
u'Username': self.student.username,
u'Grade': '0.13',
u'Homework 1: Subsection': '0.5',
u'Homework 2: Hidden': u'Not Accessible',
u'Homework 3: Unattempted': u'Not Attempted',
u'Homework 4: Empty': u'Not Accessible',
u'Homework (Avg)': '0.125',
},
],
ignore_other_columns=True,
)

@ddt.ddt
|
||||
@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
|
||||
class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTaskModuleTestCase):
|
||||
|
||||
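
The test above exercises three distinct cell states: a subsection the learner cannot access, one that is accessible but never attempted, and a genuine score (including zero). A minimal sketch of that mapping, assuming a grade object that exposes the attempted, earned, and possible fields used in this change; the helper itself is hypothetical, not code from the commit:

    def report_cell(subsection_grade):
        # Hypothetical helper, for illustration only.
        if subsection_grade is None:
            # Hidden, cohorted-away, or empty subsection: no grade to show.
            return u'Not Accessible'
        if not subsection_grade.attempted:
            # Visible and graded, but no problem in it was ever attempted.
            return u'Not Attempted'
        # Attempted: report the earned/possible ratio, even when it is zero.
        return unicode(subsection_grade.earned / subsection_grade.possible)
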
@@ -7,7 +7,7 @@ from django.contrib.auth.models import User
from django.dispatch import receiver
import logging

from lms.djangoapps.grades import progress
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from lms.djangoapps.grades.signals.signals import PROBLEM_WEIGHTED_SCORE_CHANGED
from lms import CELERY_APP
from lti_provider.models import GradedAssignment
@@ -109,8 +109,8 @@ def send_composite_outcome(user_id, course_id, assignment_id, version):
mapped_usage_key = assignment.usage_key.map_into_course(course_key)
user = User.objects.get(id=user_id)
course = modulestore().get_course(course_key, depth=0)
progress_summary = progress.summary(user, course)
earned, possible = progress_summary.score_for_module(mapped_usage_key)
course_grade = CourseGradeFactory().create(user, course)
earned, possible = course_grade.score_for_module(mapped_usage_key)
if possible == 0:
weighted_score = 0
else:
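
With this hunk, the LTI outcome task reads module scores from a freshly computed course grade rather than the old progress summary. A condensed sketch of the new call pattern; the hunk ends at the else branch, so the final ratio below is an assumption rather than text from the commit:

    course_grade = CourseGradeFactory().create(user, course)
    earned, possible = course_grade.score_for_module(mapped_usage_key)
    # Guard against empty assignments, then scale into the 0..1 range LTI expects.
    weighted_score = 0 if possible == 0 else float(earned) / possible
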
@@ -99,9 +99,9 @@ class SendCompositeOutcomeTest(BaseOutcomeTest):
block_type='problem',
block_id='problem',
)
self.weighted_scores = MagicMock()
self.weighted_scores_mock = self.setup_patch(
'lti_provider.tasks.progress.summary', self.weighted_scores
self.course_grade = MagicMock()
self.course_grade_mock = self.setup_patch(
'lti_provider.tasks.CourseGradeFactory.create', self.course_grade
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
@@ -117,7 +117,7 @@ class SendCompositeOutcomeTest(BaseOutcomeTest):
)
@ddt.unpack
def test_outcome_with_score_score(self, earned, possible, expected):
self.weighted_scores.score_for_module = MagicMock(return_value=(earned, possible))
self.course_grade.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
@@ -129,4 +129,4 @@ class SendCompositeOutcomeTest(BaseOutcomeTest):
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
self.assertEqual(self.weighted_scores_mock.call_count, 0)
self.assertEqual(self.course_grade_mock.call_count, 0)

@@ -100,7 +100,7 @@ $(function () {
extraColorIndex = len(categories) #Keeping track of the next color to use for categories not in categories[]

if show_grade_breakdown:
for section in grade_summary['grade_breakdown']:
for section in grade_summary['grade_breakdown'].itervalues():
if section['percent'] > 0:
if section['category'] in categories:
color = categories[ section['category'] ]['color']
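
The one-line template change above implies grade_breakdown changed shape from a list of section dicts to an ordered mapping keyed by grading category, hence the switch to itervalues(). A small sketch of the assumed shapes, with illustrative data:

    from collections import OrderedDict

    # Old shape (assumed): a plain list of section dicts.
    old_breakdown = [{'category': 'Homework', 'percent': 0.125}]

    # New shape (assumed): an ordered mapping keyed by category.
    new_breakdown = OrderedDict()
    new_breakdown['Homework'] = {'category': 'Homework', 'percent': 0.125}

    for section in new_breakdown.itervalues():  # Python 2 idiom, as in the template
        if section['percent'] > 0:
            print section['category']
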
@@ -10,7 +10,7 @@ from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import SignalHandler

from openedx.core.djangoapps.credit.verification_access import update_verification_partitions
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED

log = logging.getLogger(__name__)

@@ -52,14 +52,14 @@ def on_pre_publish(sender, course_key, **kwargs):  # pylint: disable=unused-argu
log.info(u"Finished updating in-course reverification access rules")


@receiver(GRADES_UPDATED)
def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadline, **kwargs):  # pylint: disable=unused-argument
@receiver(COURSE_GRADE_CHANGED)
def listen_for_grade_calculation(sender, user, course_grade, course_key, deadline, **kwargs):  # pylint: disable=unused-argument
"""Receive 'MIN_GRADE_REQUIREMENT_STATUS' signal and update minimum grade requirement status.

Args:
sender: None
user(User): User Model object
grade_summary(dict): Dict containing output from the course grader
course_grade(CourseGrade): CourseGrade object
course_key(CourseKey): The key for the course
deadline(datetime): Course end date or None

@@ -78,7 +78,7 @@ def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadli
criteria = requirements[0].get('criteria')
if criteria:
min_grade = criteria.get('min_grade')
passing_grade = grade_summary['percent'] >= min_grade
passing_grade = course_grade.percent >= min_grade
now = timezone.now()
status = None
reason = None
@@ -89,7 +89,7 @@ def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadli
if passing_grade:
# Student received a passing grade
status = 'satisfied'
reason = {'final_grade': grade_summary['percent']}
reason = {'final_grade': course_grade.percent}
else:
# Submission after deadline

@@ -104,7 +104,7 @@ def listen_for_grade_calculation(sender, user, grade_summary, course_key, deadli
# Student failed to receive minimum grade
status = 'failed'
reason = {
'final_grade': grade_summary['percent'],
'final_grade': course_grade.percent,
'minimum_grade': min_grade
}
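
With the rename, callers pass the CourseGrade object itself rather than a summary dict. A hedged sender-side sketch; the payload class below is a stand-in carrying only the field this handler reads, and the surrounding names (user, course_key, deadline) are assumed to be in scope:

    class FakeCourseGrade(object):
        # Stand-in for a real CourseGrade; only .percent is consulted here.
        percent = 0.8

    COURSE_GRADE_CHANGED.send(
        sender=None,
        user=user,
        course_grade=FakeCourseGrade(),
        course_key=course_key,
        deadline=deadline,
    )
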
@@ -5,6 +5,7 @@ Tests for minimum grade requirement status
import ddt
import pytz
from datetime import timedelta, datetime
from mock import MagicMock
from unittest import skipUnless

from django.conf import settings
@@ -73,7 +74,9 @@ class TestMinGradedRequirementStatus(ModuleStoreTestCase):

def assert_requirement_status(self, grade, due_date, expected_status):
""" Verify the user's credit requirement status is as expected after simulating a grading calculation. """
listen_for_grade_calculation(None, self.user, {'percent': grade}, self.course.id, due_date)
course_grade = MagicMock()
course_grade.percent = grade
listen_for_grade_calculation(None, self.user, course_grade, self.course.id, due_date)
req_status = get_credit_requirement_status(self.course.id, self.request.user.username, 'grade', 'grade')

self.assertEqual(req_status[0]['status'], expected_status)

@@ -5,8 +5,8 @@ This module contains all signals.
from django.dispatch import Signal


# Signal that fires when a user is graded (in lms/grades/course_grades.py)
GRADES_UPDATED = Signal(providing_args=["user", "grade_summary", "course_key", "deadline"])
# Signal that fires when a user is graded
COURSE_GRADE_CHANGED = Signal(providing_args=["user", "course_grade", "course_key", "deadline"])

# Signal that fires when a user is awarded a certificate in a course (in the certificates django app)
# TODO: runtime coupling between apps will be reduced if this event is changed to carry a username
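
For completeness, a minimal receiver wired to the renamed signal; the listener below is hypothetical and only illustrates the new argument list:

    import logging

    from django.dispatch import receiver

    log = logging.getLogger(__name__)

    @receiver(COURSE_GRADE_CHANGED)
    def log_course_grade(sender, user, course_grade, course_key, deadline, **kwargs):
        # Hypothetical listener: handlers now read course_grade.percent
        # directly instead of indexing into a grade_summary dict.
        log.info(u'%s scored %s in %s', user.username, course_grade.percent, course_key)
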
@@ -104,7 +104,7 @@ class TestConditionalContent(ModuleStoreTestCase):
"""
Construct a course with graded problems that exist within a split test.
"""
TEST_SECTION_NAME = 'Problem'
TEST_SECTION_NAME = 'Subsection'

def setUp(self):
"""