Merge pull request #22149 from edx/feanil/update_rounding

BOM-738 - Review and update rounding for python 3
This commit is contained in:
Feanil Patel
2019-10-31 14:58:07 -04:00
committed by GitHub
7 changed files with 54 additions and 12 deletions

View File

@@ -48,6 +48,7 @@ import capa.safe_exec as safe_exec
import capa.xqueue_interface as xqueue_interface
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib import edx_six
from openedx.core.lib.grade_utils import round_away_from_zero
from . import correctmap
from .registry import TagRegistry
@@ -727,7 +728,7 @@ class ChoiceResponse(LoncapaResponse):
good_non_answers = sum([1 for blank in student_non_answers if blank in self.incorrect_choices])
edc_current_grade = good_answers + good_non_answers
return_grade = round(self.get_max_score() * float(edc_current_grade) / float(edc_max_grade), 2)
return_grade = round_away_from_zero(self.get_max_score() * float(edc_current_grade) / float(edc_max_grade), 2)
if edc_current_grade == edc_max_grade:
return CorrectMap(self.answer_id, correctness='correct')
@@ -764,10 +765,10 @@ class ChoiceResponse(LoncapaResponse):
return_grade = self.get_max_score()
return CorrectMap(self.answer_id, correctness='correct', npoints=return_grade)
elif halves_error_count == 1 and len(all_choices) > 2:
return_grade = round(self.get_max_score() / 2.0, 2)
return_grade = round_away_from_zero(self.get_max_score() / 2.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
elif halves_error_count == 2 and len(all_choices) > 4:
return_grade = round(self.get_max_score() / 4.0, 2)
return_grade = round_away_from_zero(self.get_max_score() / 4.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
else:
return CorrectMap(self.answer_id, 'incorrect')

View File

@@ -18,6 +18,7 @@ from oauthlib.oauth1 import Client
from six import text_type
from webob import Response
from xblock.core import XBlock
from openedx.core.lib.grade_utils import round_away_from_zero
log = logging.getLogger(__name__)
@@ -177,7 +178,7 @@ class LTI20ModuleMixin(object):
return Response(json.dumps(base_json_obj).encode('utf-8'), content_type=LTI_2_0_JSON_CONTENT_TYPE)
# Fall through to returning grade and comment
base_json_obj['resultScore'] = round(self.module_score, 2)
base_json_obj['resultScore'] = round_away_from_zero(self.module_score, 2)
base_json_obj['comment'] = self.score_comment
return Response(json.dumps(base_json_obj).encode('utf-8'), content_type=LTI_2_0_JSON_CONTENT_TYPE)

View File

@@ -16,6 +16,7 @@ from lms.djangoapps.instructor_analytics.csvs import create_csv_response
from util.json_request import JsonResponse
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
from openedx.core.lib.grade_utils import round_away_from_zero
# Used to limit the length of list displayed to the screen.
MAX_SCREEN_LIST_LENGTH = 250
@@ -193,7 +194,7 @@ def get_d3_problem_grade_distrib(course_id):
for (grade, count_grade) in problem_info['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = round((grade * 100.0) / max_grade, 1)
percent = round_away_from_zero((grade * 100.0) / max_grade, 1)
# Compute percent of students with this grade
student_count_percent = 0
@@ -352,7 +353,7 @@ def get_d3_section_grade_distrib(course_id, section):
for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = round((grade * 100.0) / max_grade, 1)
percent = round_away_from_zero((grade * 100.0) / max_grade, 1)
# Construct tooltip for problem in grade distribution view
tooltip = {
@@ -513,7 +514,7 @@ def get_students_problem_grades(request, csv=False):
student_dict['percent'] = 0
if student['max_grade'] > 0:
student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])
student_dict['percent'] = round_away_from_zero(student['grade'] * 100 / student['max_grade'])
results.append(student_dict)
max_exceeded = False
@@ -535,7 +536,7 @@ def get_students_problem_grades(request, csv=False):
for student in students:
percent = 0
if student['max_grade'] > 0:
percent = round(decimal.Decimal(student['grade'] * 100 / student['max_grade']), 1)
percent = round_away_from_zero((student['grade'] * 100 / student['max_grade']), 1)
results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])
response = create_csv_response(filename, header, results)

View File

@@ -12,6 +12,7 @@ from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from lazy import lazy
from openedx.core.lib.grade_utils import round_away_from_zero
from xmodule import block_metadata_utils
from .config import assume_zero_if_absent
@@ -296,7 +297,9 @@ class CourseGrade(CourseGradeBase):
Computes and returns the grade percentage from the given
result from the grader.
"""
return round(grader_result['percent'] * 100 + 0.05) / 100
# Confused about the addition of .05 here? See https://openedx.atlassian.net/browse/TNL-6972
return round_away_from_zero(grader_result['percent'] * 100 + 0.05) / 100
@staticmethod
def _compute_letter_grade(grade_cutoffs, percent):

View File

@@ -104,11 +104,11 @@ class TestDefaultGradingPolicy(TestGradebook):
# Users 1-10 attempted any homework (and get Fs) [10]
# Users 4-10 scored enough to not get rounded to 0 for the class (and get Fs) [7]
# One use at top of the page [1]
self.assertEqual(22, self.response.content.count(b'grade_F'))
self.assertEqual(23, self.response.content.count(b'grade_F'))
# All other grades are None [29 categories * 11 users - 27 non-empty grades = 292]
# One use at the top of the page [1]
self.assertEqual(293, self.response.content.count(b'grade_None'))
self.assertEqual(292, self.response.content.count(b'grade_None'))
class TestLetterCutoffPolicy(TestGradebook):

View File

@@ -1,6 +1,7 @@
"""
Helper functions for grades and scores.
"""
import math
def compare_scores(earned1, possible1, earned2, possible2, treat_undefined_as_zero=False):
@@ -42,3 +43,26 @@ def is_score_higher_or_equal(earned1, possible1, earned2, possible2, treat_undef
"""
is_higher_or_equal, _, _ = compare_scores(earned1, possible1, earned2, possible2, treat_undefined_as_zero)
return is_higher_or_equal
def round_away_from_zero(number, digits=0):
    """
    Round ``number`` to ``digits`` decimal places, breaking .5 ties away from zero.

    Python 3's built-in ``round`` uses banker's rounding (ties go to the
    nearest even value), so 0.5 rounds to 0 while 1.5 rounds to 2. Python 2
    instead rounded halfway cases away from zero (0.5 -> 1, -0.5 -> -1).
    This helper reproduces the Python 2 behavior so that student grades stay
    consistent and don't suddenly change across the Python 3 upgrade.

    See https://en.wikipedia.org/wiki/IEEE_754#Rounding_rules for more on
    floating point rounding strategies.
    """
    scale = 10.0 ** digits
    shifted = number * scale
    # Nudge half a unit away from zero, then truncate back toward zero so
    # that exact .5 ties always move away from zero.
    if number >= 0:
        nudged = math.floor(shifted + 0.5)
    else:
        nudged = math.ceil(shifted - 0.5)
    return float(nudged) / scale

View File

@@ -7,7 +7,7 @@ from unittest import TestCase
import ddt
from ..grade_utils import compare_scores
from ..grade_utils import compare_scores, round_away_from_zero
@ddt.ddt
@@ -45,3 +45,15 @@ class TestGradeUtils(TestCase):
assert is_higher is True
assert 0 == percentage_1
assert 0 == percentage_2
@ddt.data(
    (0.5, 1),
    (1.45, 1.5, 1),
    (-0.5, -1.0),
    (-0.1, -0.0),
    (0.1, 0.0),
    (0.0, 0.0)
)
@ddt.unpack
def test_round_away_from_zero(self, precise, expected_rounded_number, rounding_precision=0):
    """
    Verify that round_away_from_zero breaks .5 ties away from zero
    (unlike Python 3's built-in round, which rounds ties toward even).

    Each data tuple is (input, expected result[, digits]); digits
    defaults to 0 when the tuple omits a third element.
    """
    assert round_away_from_zero(precise, rounding_precision) == expected_rounded_number