So making this code change took a few hours. But then deciding that it
was the right one of the many options available took the next 3 days.
When changing to the new rounding function, we had a test that started
failing. It appears as if our new rounding function is not the same in
some way as the one built into python 2.
```
>>> round(.0045*100 + .05)/100
0.0
>>> round_away_from_zero(.0045*100 + .05)/100
0.01
```
Doing the math by hand we see that the new function is actually correct
but the old one is clearly rounding incorrectly in this case. Looking
closer at this I discovered that it was due to a floating point issue
where .0045*100 is represented as 0.44999999999999996 so when we add
0.05 to this number we get 0.49999999999999994. This is all because of
the limitations of floating point arithmetic.
See https://docs.python.org/3/tutorial/floatingpoint.html#tut-fp-issues
for more on that.
Because Python does its rounding at the bit level in C code, it treats
the .4999... as below the .5 cutoff and rounds down. Our code, by
contrast, does simpler arithmetic which causes the number to correct
itself before we round, and so it correctly rounds up to 0.01.
The result of this change is that previously, the rounding threshold used to
be that any number > .0045 would round to 0.01 and now any number that
is >= .0045 rounds to 0.01
Note that if we only care about the two most significant digits of
number between 0 and 1, this error only manifests itself in such a way
that other than the case of going from 0.00 to 0.01 eg. from 0% to 1%
none of the other cases where we would now round up cause the 2 most
significant digits to change. Given this level of impact, we're fine
with this change.
In our tests we see this for one case, where an incomplete turns into an
F in a test. I'm updating the test here to be more correct.
As we were looking at it we speculated as to why we were adding the .05
to the number. Could it be to counteract this floating point issue? It
turns out not.
Looking at this commit(a1286b1c7d) we see that it
looks like this was intended to always round up to the nearest
percentage point. However, there's a typo here. If you wanted to
ensure that we always rounded up to the nearest percentage point you
would have the math be `round(final_grade_percent*100 + 0.5)/ 100` or a
simpler way to do this would be
`math.ceil(final_grade_percent*100)/100`. However, that is not what
happened and 7 years later, there have been a lot of people graded with
the wrong rounding where essentially anyone with a grade of 89.45 gets a
90 when the intended impact was supposed to be that anyone with a grade
above an 89.0 would get a grade of 90.
Changing it now requires a lot of conversation and would have a large
impact on existing learners. So we are not going to change it as a part
of the python 2 -> python 3 upgrade. I have created
https://openedx.atlassian.net/browse/TNL-6972 to capture this issue if
we want to address it in the future.
174 lines
6.3 KiB
Python
174 lines
6.3 KiB
Python
"""
|
|
Tests of the instructor dashboard spoc gradebook
|
|
"""
|
|
|
|
from __future__ import absolute_import
|
|
|
|
from django.urls import reverse
|
|
from six import text_type
|
|
from six.moves import range
|
|
|
|
from capa.tests.response_xml_factory import StringResponseXMLFactory
|
|
from lms.djangoapps.courseware.tests.factories import StudentModuleFactory
|
|
from lms.djangoapps.grades.api import task_compute_all_grades_for_course
|
|
from student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory
|
|
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
|
|
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
|
|
|
|
# Number of learners enrolled in the test course. One more than the number of
# problems created in setUpClass, so scores span the full range 0..USER_COUNT-1.
USER_COUNT = 11
|
|
|
|
|
|
class TestGradebook(SharedModuleStoreTestCase):
    """
    Test functionality of the spoc gradebook. Sets up a course with assignments and
    students who've scored various scores on these assignments. Base class for further
    gradebook tests.
    """
    # Subclasses set this to exercise a non-default grading policy.
    grading_policy = None

    @classmethod
    def setUpClass(cls):
        super(TestGradebook, cls).setUpClass()

        # Create the course, passing through the grading policy when one is set.
        course_kwargs = {}
        if cls.grading_policy is not None:
            course_kwargs['grading_policy'] = cls.grading_policy
        cls.course = CourseFactory.create(**course_kwargs)

        # Populate the course: one container sequential holding one graded
        # Homework section, which in turn holds USER_COUNT - 1 string-response
        # problems.
        with cls.store.bulk_operations(cls.course.id, emit_signals=False):
            chapter = ItemFactory.create(
                parent_location=cls.course.location,
                category="sequential",
            )
            section = ItemFactory.create(
                parent_location=chapter.location,
                category="sequential",
                metadata={'graded': True, 'format': 'Homework'}
            )
            cls.items = [
                ItemFactory.create(
                    parent_location=section.location,
                    category="problem",
                    data=StringResponseXMLFactory().build_xml(answer='foo'),
                    metadata={'rerandomize': 'always'}
                )
                for __ in range(USER_COUNT - 1)
            ]

    def setUp(self):
        super(TestGradebook, self).setUp()

        # Log in as an instructor so the gradebook view is accessible.
        instructor = AdminFactory.create()
        self.client.login(username=instructor.username, password='test')
        self.users = [UserFactory.create() for _ in range(USER_COUNT)]

        for learner in self.users:
            CourseEnrollmentFactory.create(user=learner, course_id=self.course.id)

        # Learner j answers the first j problems correctly and the rest
        # incorrectly, producing a spread of scores across the roster.
        for problem_index, item in enumerate(self.items):
            for learner_index, learner in enumerate(self.users):
                StudentModuleFactory.create(
                    grade=1 if problem_index < learner_index else 0,
                    max_grade=1,
                    student=learner,
                    course_id=self.course.id,
                    module_state_key=item.location
                )
        task_compute_all_grades_for_course.apply_async(
            kwargs={'course_key': text_type(self.course.id)}
        )

        # Fetch the rendered gradebook once; subclasses assert against it.
        self.response = self.client.get(reverse(
            'spoc_gradebook',
            args=(text_type(self.course.id),)
        ))

        self.assertEqual(self.response.status_code, 200)
|
|
|
|
|
|
class TestDefaultGradingPolicy(TestGradebook):
    """
    Tests that the grading policy is properly applied for all users in the course
    Uses the default policy (50% passing rate)
    """
    def test_all_users_listed(self):
        # Every enrolled user should appear somewhere in the rendered page.
        page = text_type(self.response.content, 'utf-8')
        for user in self.users:
            self.assertIn(user.username, page)

    def test_default_policy(self):
        # Expected occurrences of each grade marker in the rendered page:
        #
        # grade_Pass: default >= 50% passes, so Users 5-10 pass Homework 1 [6]
        #             plus one use at the top of the page [1]
        # grade_F:    Users 1-5 attempted Homework 1 and get Fs [4],
        #             Users 1-10 attempted any homework and get Fs [10],
        #             Users 4-10 scored enough to not round to 0 for the
        #             class and get Fs [7], plus one use at the top [1]
        # grade_None: all other grades are None
        #             [29 categories * 11 users - 27 non-empty grades = 292]
        #             plus one use at the top of the page [1]
        expected_counts = (
            (7, b'grade_Pass'),
            (23, b'grade_F'),
            (292, b'grade_None'),
        )
        for expected, marker in expected_counts:
            self.assertEqual(expected, self.response.content.count(marker))
|
|
|
|
|
|
class TestLetterCutoffPolicy(TestGradebook):
    """
    Tests advanced grading policy (with letter grade cutoffs). Includes tests of
    UX display (color, etc).
    """
    grading_policy = {
        "GRADER": [
            {
                "type": "Homework",
                "min_count": 1,
                "drop_count": 0,
                "short_label": "HW",
                "weight": 1
            },
        ],
        "GRADE_CUTOFFS": {
            'A': .9,
            'B': .8,
            'C': .7,
            'D': .6,
        }
    }

    def test_styles(self):
        # Each letter grade should get a CSS color rule in the rendered page.
        self.assertContains(self.response, u"grade_A {color:green;}")
        self.assertContains(self.response, u"grade_B {color:Chocolate;}")
        self.assertContains(self.response, u"grade_C {color:DarkSlateGray;}")
        self.assertContains(self.response, u"grade_D {color:DarkSlateGray;}")

    def test_assigned_grades(self):
        # Users 9-10 have >= 90% on Homeworks [2]
        # Users 9-10 have >= 90% on the class [2]
        # One use at the top of the page [1]
        self.assertEqual(5, self.response.content.count(b'grade_A'))

        # User 8 has 80 <= Homeworks < 90 [1]
        # User 8 has 80 <= class < 90 [1]
        # One use at the top of the page [1]
        self.assertEqual(3, self.response.content.count(b'grade_B'))

        # User 7 has 70 <= Homeworks < 80 [1]
        # User 7 has 70 <= class < 80 [1]
        # One use at the top of the page [1]
        self.assertEqual(3, self.response.content.count(b'grade_C'))

        # User 6 has 60 <= Homeworks < 70 [1]
        # User 6 has 60 <= class < 70 [1]
        # One use at the top of the page [1]
        # BUG FIX: this assertion previously counted b'grade_C' a second time
        # (a copy-paste of the assertion above), so the D bucket described by
        # these comments was never actually checked.
        self.assertEqual(3, self.response.content.count(b'grade_D'))

        # Users 1-5 have 60% > grades > 0 on Homeworks [5]
        # Users 1-5 have 60% > grades > 0 on the class [5]
        # One use at top of the page [1]
        self.assertEqual(11, self.response.content.count(b'grade_F'))

        # User 0 has 0 on Homeworks [1]
        # User 0 has 0 on the class [1]
        # One use at the top of the page [1]
        self.assertEqual(3, self.response.content.count(b'grade_None'))
|