feat: add grading method support for problems with multiple attempts (#33911)

Adds a new field to the Problem settings for choosing a grading method. Previously, the only grading behavior was to use the last score. With the feature flag turned on, the following grading methods become available for configuration in Studio:
- Last Score (default): the most recently submitted score is used for grading.
- First Score: the first submitted score is used for grading.
- Highest Score: the highest submitted score is used for grading.
- Average Score: the average of all submitted scores is used for grading.
This commit is contained in:
Bryann Valderrama
2024-04-04 13:04:25 -05:00
committed by GitHub
parent 24db4dfb53
commit 85620ec73c
10 changed files with 1171 additions and 14 deletions

View File

@@ -570,6 +570,15 @@ FEATURES = {
# .. toggle_creation_date: 2024-03-14
# .. toggle_tickets: https://github.com/openedx/edx-platform/pull/34173
'ENABLE_HOME_PAGE_COURSE_API_V2': True,
# .. toggle_name: FEATURES['ENABLE_GRADING_METHOD_IN_PROBLEMS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Enables the grading method feature in capa problems.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2024-03-22
# .. toggle_tickets: https://github.com/openedx/edx-platform/pull/33911
'ENABLE_GRADING_METHOD_IN_PROBLEMS': False,
}
# .. toggle_name: ENABLE_COPPA_COMPLIANCE

View File

@@ -391,6 +391,9 @@ def _reset_module_attempts(studentmodule):
problem_state = json.loads(studentmodule.state)
# old_number_of_attempts = problem_state["attempts"]
problem_state["attempts"] = 0
problem_state["score_history"] = []
problem_state["correct_map_history"] = []
problem_state["student_answers_history"] = []
# save
studentmodule.state = json.dumps(problem_state)

View File

@@ -543,7 +543,10 @@ class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
'attempts': 1,
'saved_files_descriptions': ['summary', 'proposal', 'diagrams'],
'saved_files_sizes': [1364677, 958418],
'saved_files_names': ['case_study_abstract.txt', 'design_prop.pdf', 'diagram1.png']
'saved_files_names': ['case_study_abstract.txt', 'design_prop.pdf', 'diagram1.png'],
'score_history': [],
'correct_map_history': [],
'student_answers_history': [],
}
team_state = json.dumps(self.team_state_dict)

View File

@@ -1049,7 +1049,16 @@ FEATURES = {
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2023-10-10
# .. toggle_tickets: https://github.com/openedx/openedx-events/issues/210
'SEND_LEARNING_CERTIFICATE_LIFECYCLE_EVENTS_TO_BUS': False
'SEND_LEARNING_CERTIFICATE_LIFECYCLE_EVENTS_TO_BUS': False,
# .. toggle_name: FEATURES['ENABLE_GRADING_METHOD_IN_PROBLEMS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Enables the grading method feature in capa problems.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2024-03-22
# .. toggle_tickets: https://github.com/openedx/edx-platform/pull/33911
'ENABLE_GRADING_METHOD_IN_PROBLEMS': False,
}
# Specifies extra XBlock fields that should available when requested via the Course Blocks API

View File

@@ -1,7 +1,7 @@
<%page expression_filter="h"/>
<%!
from django.utils.translation import ngettext, gettext as _
from openedx.core.djangolib.markup import HTML
from openedx.core.djangolib.markup import HTML, Text
%>
<%namespace name='static' file='static_content.html'/>
@@ -90,6 +90,9 @@ from openedx.core.djangolib.markup import HTML
% if attempts_allowed and (not submit_disabled_cta or attempts_used == 0):
${ngettext("You have used {num_used} of {num_total} attempt", "You have used {num_used} of {num_total} attempts", attempts_allowed).format(num_used=attempts_used, num_total=attempts_allowed)}
% endif
% if grading_method:
<div>${Text(_("Grading method: {grading_method}")).format(grading_method=grading_method)}</div>
% endif
<span class="sr">${_("Some problems have options such as save, reset, hints, or show answer. These options follow the Submit button.")}</span>
</div>
</div>

View File

@@ -20,6 +20,7 @@ import re
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
from typing import Optional
from xml.sax.saxutils import unescape
from django.conf import settings
@@ -172,6 +173,12 @@ class LoncapaProblem(object):
self.has_saved_answers = state.get('has_saved_answers', False)
if 'correct_map' in state:
self.correct_map.set_dict(state['correct_map'])
self.correct_map_history = []
for cmap in state.get('correct_map_history', []):
correct_map = CorrectMap()
correct_map.set_dict(cmap)
self.correct_map_history.append(correct_map)
self.done = state.get('done', False)
self.input_state = state.get('input_state', {})
@@ -232,6 +239,15 @@ class LoncapaProblem(object):
if extract_tree:
self.extracted_tree = self._extract_html(self.tree)
@property
def is_grading_method_enabled(self) -> bool:
    """
    Returns whether the grading method feature is enabled. If the
    feature is not enabled, the grading method field will not be shown in
    Studio settings and the default grading method will be used.

    NOTE(review): the same ``ENABLE_GRADING_METHOD_IN_PROBLEMS`` flag is
    read by the ProblemBlock; the two must stay in agreement.
    """
    return settings.FEATURES.get('ENABLE_GRADING_METHOD_IN_PROBLEMS', False)
def make_xml_compatible(self, tree):
"""
Adjust tree xml in-place for compatibility before creating
@@ -299,8 +315,10 @@ class LoncapaProblem(object):
Reset internal state to unfinished, with no answers
"""
self.student_answers = {}
self.student_answers_history = []
self.has_saved_answers = False
self.correct_map = CorrectMap()
self.correct_map_history = []
self.done = False
def set_initial_display(self):
@@ -328,6 +346,7 @@ class LoncapaProblem(object):
'student_answers': self.student_answers,
'has_saved_answers': self.has_saved_answers,
'correct_map': self.correct_map.get_dict(),
'correct_map_history': [cmap.get_dict() for cmap in self.correct_map_history],
'input_state': self.input_state,
'done': self.done}
@@ -434,6 +453,7 @@ class LoncapaProblem(object):
self.student_answers = convert_files_to_filenames(answers)
new_cmap = self.get_grade_from_current_answers(answers)
self.correct_map = new_cmap # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.correct_map_history.append(deepcopy(new_cmap))
return self.correct_map
def supports_rescoring(self):
@@ -455,7 +475,7 @@ class LoncapaProblem(object):
"""
return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
def get_grade_from_current_answers(self, student_answers):
def get_grade_from_current_answers(self, student_answers, correct_map: Optional[CorrectMap] = None):
"""
Gets the grade for the currently-saved problem state, but does not save it
to the block.
@@ -468,9 +488,14 @@ class LoncapaProblem(object):
For rescoring, `student_answers` is None.
Calls the Response for each question in this problem, to do the actual grading.
When the grading method is enabled, this method is used for rescore. In this case,
the `correct_map` and the `student_answers` passed as arguments will be used,
corresponding to each pair in the fields that store the history (correct_map_history
and student_answers_history). The correct map will always be updated, depending on
the student answers. The student answers will always remain the same over time.
"""
# old CorrectMap
oldcmap = self.correct_map
oldcmap = correct_map if self.is_grading_method_enabled else self.correct_map
# start new with empty CorrectMap
newcmap = CorrectMap()
@@ -487,7 +512,12 @@ class LoncapaProblem(object):
# use 'student_answers' only if it is provided, and if it might contain a file
# submission that would not exist in the persisted "student_answers".
if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
# If grading method is enabled, we need to pass each student answers and the
# correct map in the history fields.
if (
"filesubmission" in responder.allowed_inputfields
and student_answers is not None
) or self.is_grading_method_enabled:
results = responder.evaluate_answers(student_answers, oldcmap)
else:
results = responder.evaluate_answers(self.student_answers, oldcmap)

View File

@@ -4,17 +4,24 @@ Test capa problem.
import textwrap
import unittest
from django.conf import settings
from django.test import override_settings
import pytest
import ddt
from lxml import etree
from markupsafe import Markup
from mock import patch
from mock import patch, MagicMock
from xmodule.capa.correctmap import CorrectMap
from xmodule.capa.responsetypes import LoncapaProblemError
from xmodule.capa.tests.helpers import new_loncapa_problem
from openedx.core.djangolib.markup import HTML
FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS = settings.FEATURES.copy()
FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS['ENABLE_GRADING_METHOD_IN_PROBLEMS'] = True
@ddt.ddt
class CAPAProblemTest(unittest.TestCase):
""" CAPA problem related tests"""
@@ -732,3 +739,104 @@ class CAPAProblemReportHelpersTest(unittest.TestCase):
# Ensure that the answer is a string so that the dict returned from this
# function can eventualy be serialized to json without issues.
assert isinstance(problem.get_question_answers()['1_solution_1'], str)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_get_grade_from_current_answers(self):
    """
    Verify that `responder.evaluate_answers` is called with the `student_answers`
    and `correct_map` passed to `get_grade_from_current_answers`.

    When both arguments are provided, the problem is being rescored (the
    grading-method code path forwards both through to the responder).
    """
    student_answers = {'1_2_1': 'over-suspicious'}
    correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=1)
    problem = new_loncapa_problem(
        """
        <problem>
        <multiplechoiceresponse>
        <choicegroup>
        <choice correct="true">Answer1</choice>
        <choice correct="false">Answer2</choice>
        <choice correct="false">Answer3</choice>
        <choice correct="false">Answer4</choice>
        </choicegroup>
        </multiplechoiceresponse>
        </problem>
        """
    )
    # Replace the real responder so we can observe exactly what it is called with.
    responder_mock = MagicMock()

    with patch.object(problem, 'responders', {'responder1': responder_mock}):
        responder_mock.allowed_inputfields = ['choicegroup']
        responder_mock.evaluate_answers.return_value = correct_map

        result = problem.get_grade_from_current_answers(student_answers, correct_map)

        self.assertDictEqual(result.get_dict(), correct_map.get_dict())
        responder_mock.evaluate_answers.assert_called_once_with(student_answers, correct_map)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_get_grade_from_current_answers_without_student_answers(self):
    """
    Verify that `responder.evaluate_answers` is called with appropriate arguments.

    With the grading-method feature enabled, the `student_answers` argument is
    forwarded as-is — so when it is None, `evaluate_answers` receives None
    together with the provided `correct_map`.
    """
    correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=1)
    problem = new_loncapa_problem(
        """
        <problem>
        <multiplechoiceresponse>
        <choicegroup>
        <choice correct="true">Answer1</choice>
        <choice correct="false">Answer2</choice>
        <choice correct="false">Answer3</choice>
        <choice correct="false">Answer4</choice>
        </choicegroup>
        </multiplechoiceresponse>
        </problem>
        """
    )
    # Replace the real responder so we can observe exactly what it is called with.
    responder_mock = MagicMock()

    with patch.object(problem, 'responders', {'responder1': responder_mock}):
        problem.responders['responder1'].allowed_inputfields = ['choicegroup']
        problem.responders['responder1'].evaluate_answers.return_value = correct_map

        result = problem.get_grade_from_current_answers(None, correct_map)

        self.assertDictEqual(result.get_dict(), correct_map.get_dict())
        responder_mock.evaluate_answers.assert_called_once_with(None, correct_map)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_get_grade_from_current_answers_with_filesubmission(self):
    """
    Verify that an exception is raised when `responder.evaluate_answers` is called
    with `student_answers` as None and `correct_map` sent to `get_grade_from_current_answers`.

    This ensures that rescore is not allowed if the problem has a filesubmission.
    """
    correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=1)
    problem = new_loncapa_problem(
        """
        <problem>
        <multiplechoiceresponse>
        <choicegroup>
        <choice correct="true">Answer1</choice>
        <choice correct="false">Answer2</choice>
        <choice correct="false">Answer3</choice>
        <choice correct="false">Answer4</choice>
        </choicegroup>
        </multiplechoiceresponse>
        </problem>
        """
    )
    # Responder advertising a filesubmission input, which blocks rescoring.
    responder_mock = MagicMock()

    with patch.object(problem, 'responders', {'responder1': responder_mock}):
        responder_mock.allowed_inputfields = ['filesubmission']
        responder_mock.evaluate_answers.return_value = correct_map

        with self.assertRaises(Exception):
            problem.get_grade_from_current_answers(None, correct_map)
        # The responder must never be invoked when rescoring is disallowed.
        responder_mock.evaluate_answers.assert_not_called()

View File

@@ -1,6 +1,7 @@
"""
Implements the Problem XBlock, which is built on top of the CAPA subsystem.
"""
from __future__ import annotations
import copy
import datetime
@@ -22,7 +23,7 @@ from lxml import etree
from pytz import utc
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import Boolean, Dict, Float, Integer, Scope, String, XMLString
from xblock.fields import Boolean, Dict, Float, Integer, Scope, String, XMLString, List
from xblock.scorable import ScorableXBlockMixin, Score
from xmodule.capa import responsetypes
@@ -52,7 +53,7 @@ from common.djangoapps.xblock_django.constants import (
from openedx.core.djangolib.markup import HTML, Text
from .capa.xqueue_interface import XQueueService
from .fields import Date, ScoreField, Timedelta
from .fields import Date, ListScoreField, ScoreField, Timedelta
from .progress import Progress
log = logging.getLogger("edx.courseware")
@@ -92,6 +93,16 @@ class SHOWANSWER:
ATTEMPTED_NO_PAST_DUE = "attempted_no_past_due"
class GRADING_METHOD:
    """
    Constants for grading method options.

    These values are stored in the ``grading_method`` settings field of a
    problem and are selectable in Studio when the
    ``ENABLE_GRADING_METHOD_IN_PROBLEMS`` feature flag is on.
    """
    # Use the score of the most recent submission (the default behavior).
    LAST_SCORE = "last_score"
    # Use the score of the first submission.
    FIRST_SCORE = "first_score"
    # Use the highest score across all submissions.
    HIGHEST_SCORE = "highest_score"
    # Use the average of all submission scores.
    AVERAGE_SCORE = "average_score"
class RANDOMIZATION:
"""
Constants for problem randomization
@@ -181,6 +192,21 @@ class ProblemBlock(
"If the value is not set, infinite attempts are allowed."),
values={"min": 0}, scope=Scope.settings
)
grading_method = String(
display_name=_("Grading Method"),
help=_(
"Define the grading method for this problem. By default, "
"it's the score of the last submission made by the student."
),
scope=Scope.settings,
default=GRADING_METHOD.LAST_SCORE,
values=[
{"display_name": _("Last Score"), "value": GRADING_METHOD.LAST_SCORE},
{"display_name": _("First Score"), "value": GRADING_METHOD.FIRST_SCORE},
{"display_name": _("Highest Score"), "value": GRADING_METHOD.HIGHEST_SCORE},
{"display_name": _("Average Score"), "value": GRADING_METHOD.AVERAGE_SCORE},
],
)
due = Date(help=_("Date that this problem is due by"), scope=Scope.settings)
graceperiod = Timedelta(
help=_("Amount of time after the due date that submissions will be accepted"),
@@ -263,11 +289,20 @@ class ProblemBlock(
)
correct_map = Dict(help=_("Dictionary with the correctness of current student answers"),
scope=Scope.user_state, default={})
correct_map_history = List(
help=_("List of correctness maps for each attempt"), scope=Scope.user_state, default=[]
)
input_state = Dict(help=_("Dictionary for maintaining the state of inputtypes"), scope=Scope.user_state)
student_answers = Dict(help=_("Dictionary with the current student responses"), scope=Scope.user_state)
student_answers_history = List(
help=_("List of student answers for each attempt"), scope=Scope.user_state, default=[]
)
# enforce_type is set to False here because this field is saved as a dict in the database.
score = ScoreField(help=_("Dictionary with the current student score"), scope=Scope.user_state, enforce_type=False)
score_history = ListScoreField(
help=_("List of scores for each attempt"), scope=Scope.user_state, default=[], enforce_type=False
)
has_saved_answers = Boolean(help=_("Whether or not the answers have been saved since last submit"),
scope=Scope.user_state, default=False)
done = Boolean(help=_("Whether the student has answered the problem"), scope=Scope.user_state, default=False)
@@ -456,6 +491,31 @@ class ProblemBlock(
return self.display_name
def grading_method_display_name(self) -> str | None:
    """
    Return the translated display name for this problem's grading method.

    Returns ``None`` when the ``ENABLE_GRADING_METHOD_IN_PROBLEMS``
    feature flag is disabled.
    """
    if not self.is_grading_method_enabled:
        return None
    _ = self.runtime.service(self, "i18n").gettext
    labels = {
        GRADING_METHOD.LAST_SCORE: _("Last Score"),
        GRADING_METHOD.FIRST_SCORE: _("First Score"),
        GRADING_METHOD.HIGHEST_SCORE: _("Highest Score"),
        GRADING_METHOD.AVERAGE_SCORE: _("Average Score"),
    }
    return labels[self.grading_method]
@property
def is_grading_method_enabled(self) -> bool:
    """
    Whether the grading method feature flag is turned on.

    When the flag is off, the grading method field is not shown in
    Studio settings and the default grading method applies.
    """
    features = settings.FEATURES
    return features.get('ENABLE_GRADING_METHOD_IN_PROBLEMS', False)
@property
def debug(self):
"""
@@ -510,6 +570,8 @@ class ProblemBlock(
# https://github.com/openedx/public-engineering/issues/192
ProblemBlock.matlab_api_key,
])
if not self.is_grading_method_enabled:
non_editable_fields.append(ProblemBlock.grading_method)
return non_editable_fields
@property
@@ -832,6 +894,7 @@ class ProblemBlock(
return {
'done': self.done,
'correct_map': self.correct_map,
'correct_map_history': self.correct_map_history,
'student_answers': self.student_answers,
'has_saved_answers': self.has_saved_answers,
'input_state': self.input_state,
@@ -845,6 +908,7 @@ class ProblemBlock(
lcp_state = self.lcp.get_state()
self.done = lcp_state['done']
self.correct_map = lcp_state['correct_map']
self.correct_map_history = lcp_state['correct_map_history']
self.input_state = lcp_state['input_state']
self.student_answers = lcp_state['student_answers']
self.has_saved_answers = lcp_state['has_saved_answers']
@@ -1241,6 +1305,7 @@ class ProblemBlock(
'reset_button': self.should_show_reset_button(),
'save_button': self.should_show_save_button(),
'answer_available': self.answer_available(),
'grading_method': self.grading_method_display_name(),
'attempts_used': self.attempts,
'attempts_allowed': self.max_attempts,
'demand_hint_possible': demand_hint_possible,
@@ -1687,6 +1752,7 @@ class ProblemBlock(
self.lcp.has_saved_answers = False
answers = self.make_dict_of_responses(data)
answers_without_files = convert_files_to_filenames(answers)
self.student_answers_history.append(answers_without_files)
event_info['answers'] = answers_without_files
metric_name = 'xmodule.capa.check_problem.{}'.format # lint-amnesty, pylint: disable=unused-variable
@@ -1753,7 +1819,12 @@ class ProblemBlock(
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.set_score(self.score_from_lcp(self.lcp))
current_score = self.score_from_lcp(self.lcp)
self.score_history.append(current_score)
if self.is_grading_method_enabled:
current_score = self.get_score_with_grading_method(current_score)
self.set_score(current_score)
self.set_last_submission_time()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
@@ -1827,6 +1898,28 @@ class ProblemBlock(
}
# pylint: enable=too-many-statements
def get_score_with_grading_method(self, current_score: Score) -> Score:
    """
    Calculate and return the final score based on the grading method.

    The caller (``submit_problem``) has already appended ``current_score``
    to ``self.score_history``; this method only delegates the selection of
    the final score (last/first/highest/average) to ``GradingMethodHandler``.

    Args:
        current_score (Score): The score of the latest submission.

    Returns:
        Score: The score selected according to the grading method.
    """
    grading_method_handler = GradingMethodHandler(
        current_score,
        self.grading_method,
        self.score_history,
        self.max_score(),
    )
    return grading_method_handler.get_score()
def publish_unmasked(self, title, event_info):
"""
All calls to runtime.publish route through here so that the
@@ -2144,7 +2237,6 @@ class ProblemBlock(
event_info['orig_score'] = orig_score.raw_earned
event_info['orig_total'] = orig_score.raw_possible
try:
self.update_correctness()
calculated_score = self.calculate_score()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst: # lint-amnesty, pylint: disable=unused-variable
log.warning("Input error in capa_block:problem_rescore", exc_info=True)
@@ -2178,6 +2270,28 @@ class ProblemBlock(
event_info['attempts'] = self.attempts
self.publish_unmasked('problem_rescore', event_info)
def get_rescore_with_grading_method(self) -> Score:
    """
    Calculate and return the rescored score based on the grading method.

    In this method:
    - The correctness-map history is regraded from the stored answer history.
    - The score history is rebuilt from the regraded correctness maps.
    - The final score is selected from the rebuilt history according to
      the configured grading method.

    Returns:
        Score: The score calculated based on the grading method.
    """
    self.update_correctness_list()
    # Rebuild per-attempt scores from the freshly regraded correctness maps.
    self.score_history = self.calculate_score_list()
    grading_method_handler = GradingMethodHandler(
        self.score,
        self.grading_method,
        self.score_history,
        self.max_score(),
    )
    return grading_method_handler.get_score()
def has_submitted_answer(self):
return self.done
@@ -2206,13 +2320,47 @@ class ProblemBlock(
new_correct_map = self.lcp.get_grade_from_current_answers(None)
self.lcp.correct_map.update(new_correct_map)
def update_correctness_list(self):
    """
    Regrade every stored attempt and refresh the LCP's correctness history.

    Re-evaluates each (student answers, correct map) pair from the history
    fields, replaces ``self.lcp.correct_map_history`` with the results, and
    syncs the LCP's current correct map to the most recent attempt.
    """
    # Keep the attempt number at least 1 for grading purposes, even if the
    # number of attempts was reset before this problem is regraded.
    self.lcp.context['attempt'] = max(self.attempts, 1)
    regraded = [
        self.lcp.get_grade_from_current_answers(answers, cmap)
        for answers, cmap in zip(self.student_answers_history, self.correct_map_history)
    ]
    self.lcp.correct_map_history = regraded
    if regraded:
        self.lcp.correct_map.update(regraded[-1])
def calculate_score(self):
    """
    Return the score computed from the current problem state.

    When the grading method feature is enabled, the score is derived from
    the full attempt history according to the configured grading method;
    otherwise it reflects only the current correctness map.
    """
    if not self.is_grading_method_enabled:
        self.update_correctness()
        raw = self.lcp.calculate_score()
        return Score(raw_earned=raw['score'], raw_possible=raw['total'])
    return self.get_rescore_with_grading_method()
def calculate_score_list(self):
    """
    Return one Score per stored attempt, computed from the LCP's
    correctness-map history.
    """
    return [
        Score(raw_earned=graded['score'], raw_possible=graded['total'])
        for graded in map(self.lcp.calculate_score, self.lcp.correct_map_history)
    ]
def score_from_lcp(self, lcp):
"""
Returns the score associated with the correctness map
@@ -2222,6 +2370,102 @@ class ProblemBlock(
return Score(raw_earned=lcp_score['score'], raw_possible=lcp_score['total'])
class GradingMethodHandler:
    """
    Selects the final score from an attempt history according to a
    grading method.

    Supported methods (see ``GRADING_METHOD``): last score, first score,
    highest score, and average score.

    Attributes:
    - score (Score): The current score.
    - grading_method (str): The chosen grading method.
    - score_history (list[Score]): The history of scores, one per attempt.
    - max_score (int): The maximum possible score; used as ``raw_possible``
      for the average-score method.
    - mapping_method (dict): Maps each grading method to its handler.
    """

    def __init__(
        self,
        score: Score,
        grading_method: str,
        score_history: list[Score],
        max_score: int,
    ):
        self.score = score
        self.grading_method = grading_method
        self.score_history = score_history
        if not self.score_history:
            # NOTE: deliberately appends to the caller's list so that an
            # empty history always contains at least the current score.
            self.score_history.append(score)
        self.max_score = max_score
        # Dispatch table: grading method -> handler callable.
        self.mapping_method = {
            GRADING_METHOD.LAST_SCORE: self.handle_last_score,
            GRADING_METHOD.FIRST_SCORE: self.handle_first_score,
            GRADING_METHOD.HIGHEST_SCORE: self.handle_highest_score,
            GRADING_METHOD.AVERAGE_SCORE: self.handle_average_score,
        }

    def get_score(self) -> Score:
        """
        Retrieve the updated score based on the grading method.

        Returns:
        - Score: The updated score based on the chosen grading method.

        Raises:
        - KeyError: If ``grading_method`` is not a recognized method.
        """
        return self.mapping_method[self.grading_method]()

    def handle_last_score(self) -> Score:
        """
        Return the most recent score in the score history.
        """
        return self.score_history[-1]

    def handle_first_score(self) -> Score:
        """
        Return the first (earliest) score in the score history.
        """
        return self.score_history[0]

    def handle_highest_score(self) -> Score:
        """
        Return the score with the highest earned points.

        Compares by ``raw_earned`` explicitly: a plain ``max()`` over
        Score namedtuples would also compare ``raw_possible`` as a
        tie-breaker, which has no grading meaning.
        """
        return max(self.score_history, key=lambda score: score.raw_earned)

    def handle_average_score(self) -> Score:
        """
        Return the average earned points across all attempts, rounded to
        two decimal places, out of ``max_score``.
        """
        total = sum(score.raw_earned for score in self.score_history)
        average_score = round(total / len(self.score_history), 2)
        return Score(raw_earned=average_score, raw_possible=self.max_score)
class ComplexEncoder(json.JSONEncoder):
"""
Extend the JSON encoder to correctly handle complex numbers

View File

@@ -7,7 +7,7 @@ import time
import dateutil.parser
from pytz import UTC
from xblock.fields import JSONField
from xblock.fields import JSONField, List
from xblock.scorable import Score
log = logging.getLogger(__name__)
@@ -300,3 +300,26 @@ class ScoreField(JSONField):
return Score(raw_earned, raw_possible)
enforce_type = from_json
class ListScoreField(ScoreField, List):
    """
    Field for blocks that need to store a list of Scores.
    """
    MUTABLE = True
    _default = []

    def from_json(self, value):
        """Deserialize a JSON list into a list of Scores; ``None`` passes through."""
        if value is None:
            return value
        if not isinstance(value, list):
            raise TypeError("Value must be a list of Scores. Got {}".format(type(value)))
        # Two-argument super() is required here: the zero-argument form is
        # not available inside a comprehension's scope.
        return [super(ListScoreField, self).from_json(item) for item in value]

    enforce_type = from_json

View File

@@ -6,6 +6,7 @@ Tests of the Capa XModule
import datetime
import json
import mock
import os
import random
import textwrap
@@ -17,6 +18,7 @@ import ddt
import requests
import webob
from codejail.safe_exec import SafeExecException
from django.conf import settings
from django.test import override_settings
from django.utils.encoding import smart_str
from lms.djangoapps.courseware.user_state_client import XBlockUserState
@@ -41,6 +43,10 @@ from ..capa_block import RANDOMIZATION, SHOWANSWER
from . import get_test_system
FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS = settings.FEATURES.copy()
FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS['ENABLE_GRADING_METHOD_IN_PROBLEMS'] = True
class CapaFactory:
"""
A helper class to create problem blocks with various parameters for testing.
@@ -725,6 +731,364 @@ class ProblemBlockTest(unittest.TestCase): # lint-amnesty, pylint: disable=miss
# and that this was considered attempt number 2 for grading purposes
assert block.lcp.context['attempt'] == 2
@patch('xmodule.capa_block.ProblemBlock.get_score_with_grading_method')
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_with_grading_method_disable(
    self, mock_html: Mock, mock_is_correct: Mock, mock_get_score: Mock
):
    """
    Test that the grading method feature is disabled by default: the
    `get_score_with_grading_method` method is never called, and the last
    attempt is always used as the final score.
    """
    block = CapaFactory.create(attempts=0, max_attempts=3)
    mock_html.return_value = "Test HTML"

    # First attempt: correct, so the score is full credit.
    mock_is_correct.return_value = True
    get_request_dict = {CapaFactory.input_key(): '3.14'}
    block.submit_problem(get_request_dict)

    assert block.attempts == 1
    assert block.lcp.context['attempt'] == 1
    assert block.score == Score(raw_earned=1, raw_possible=1)
    mock_get_score.assert_not_called()

    # Second attempt: incorrect — with last-score behavior the score drops.
    mock_is_correct.return_value = False
    get_request_dict = {CapaFactory.input_key(): '3.50'}
    block.submit_problem(get_request_dict)

    assert block.attempts == 2
    assert block.lcp.context['attempt'] == 2
    assert block.score == Score(raw_earned=0, raw_possible=1)
    mock_get_score.assert_not_called()

    # Third attempt: correct again — the last score is full credit again.
    mock_is_correct.return_value = True
    get_request_dict = {CapaFactory.input_key(): '3.14'}
    block.submit_problem(get_request_dict)

    assert block.attempts == 3
    assert block.lcp.context['attempt'] == 3
    assert block.score == Score(raw_earned=1, raw_possible=1)
    mock_get_score.assert_not_called()
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_with_grading_method_enable(
    self, mock_html: Mock, mock_is_correct: Mock
):
    """
    Test that when the grading method feature is enabled and a problem is
    submitted, the `get_score_with_grading_method` method is called.
    """
    block = CapaFactory.create(attempts=0)
    mock_html.return_value = "Test HTML"
    mock_is_correct.return_value = True

    # Wrap the real implementation so the score is still computed normally
    # while recording that the grading-method code path was taken.
    with patch.object(
        ProblemBlock, 'get_score_with_grading_method', wraps=block.get_score_with_grading_method
    ) as mock_get_score:
        get_request_dict = {CapaFactory.input_key(): '3.14'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 1
        assert block.lcp.context['attempt'] == 1
        assert block.score == Score(raw_earned=1, raw_possible=1)
        mock_get_score.assert_called()
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_grading_method_disable_to_enable(
    self, mock_html: Mock, mock_is_correct: Mock
):
    """
    Test when the grading method is disabled and then enabled.

    When the grading method is disabled, the final score is always the last attempt.
    When the grading method is enabled, the final score is calculated according to
    the configured grading method.
    """
    block = CapaFactory.create(attempts=0, max_attempts=4)
    mock_html.return_value = "Test HTML"

    # Disabled grading method: the score always tracks the latest attempt.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=False
    ):
        # First attempt (correct).
        mock_is_correct.return_value = True
        get_request_dict = {CapaFactory.input_key(): '3.14'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 1
        assert block.lcp.context['attempt'] == 1
        assert block.score == Score(raw_earned=1, raw_possible=1)

        # Second attempt (incorrect): last score wins, so the score drops.
        mock_is_correct.return_value = False
        get_request_dict = {CapaFactory.input_key(): '3.50'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 2
        assert block.lcp.context['attempt'] == 2
        assert block.score == Score(raw_earned=0, raw_possible=1)

    # Enabled grading method: the score depends on the configured method.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=True
    ):
        # Third attempt (incorrect): default method is last score, still zero.
        mock_is_correct.return_value = False
        get_request_dict = {CapaFactory.input_key(): '3.96'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 3
        assert block.lcp.context['attempt'] == 3
        assert block.score == Score(raw_earned=0, raw_possible=1)

        # Fourth attempt (incorrect) with highest_score: the earlier correct
        # attempt in the history now determines the final score.
        block.grading_method = 'highest_score'
        mock_is_correct.return_value = False
        get_request_dict = {CapaFactory.input_key(): '3.99'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 4
        assert block.lcp.context['attempt'] == 4
        assert block.score == Score(raw_earned=1, raw_possible=1)
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_grading_method_enable_to_disable(
    self, mock_html: Mock, mock_is_correct: Mock
):
    """
    Test when the grading method is enabled and then disabled.

    When the grading method is enabled, the final score is calculated according to
    the configured grading method (highest score here).
    When the grading method is disabled, the final score is always the last attempt.
    """
    block = CapaFactory.create(attempts=0, max_attempts=4, grading_method='highest_score')
    mock_html.return_value = "Test HTML"

    # Enabled grading method: highest score across attempts wins.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=True
    ):
        # First attempt (correct).
        mock_is_correct.return_value = True
        get_request_dict = {CapaFactory.input_key(): '3.14'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 1
        assert block.lcp.context['attempt'] == 1
        assert block.score == Score(raw_earned=1, raw_possible=1)

        # Second attempt (incorrect): highest score keeps the full credit.
        mock_is_correct.return_value = False
        get_request_dict = {CapaFactory.input_key(): '3.50'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 2
        assert block.lcp.context['attempt'] == 2
        assert block.score == Score(raw_earned=1, raw_possible=1)

    # Disabled grading method: back to last-attempt-wins behavior.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=False
    ):
        # Third attempt (incorrect): last score now determines the result.
        mock_is_correct.return_value = False
        get_request_dict = {CapaFactory.input_key(): '3.96'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 3
        assert block.lcp.context['attempt'] == 3
        assert block.score == Score(raw_earned=0, raw_possible=1)

        # Fourth attempt (correct): last score is full credit again.
        mock_is_correct.return_value = True
        get_request_dict = {CapaFactory.input_key(): '3.14'}
        block.submit_problem(get_request_dict)

        assert block.attempts == 4
        assert block.lcp.context['attempt'] == 4
        assert block.score == Score(raw_earned=1, raw_possible=1)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_correct_last_score(self, mock_html: Mock, mock_is_correct: Mock):
    """
    Test the `last_score` grading method.

    With `last_score` (the default), the published score always mirrors
    the most recent attempt, regardless of earlier results.
    """
    # No grading_method passed: `last_score` is the default.
    block = CapaFactory.create(attempts=0, max_attempts=2)
    mock_html.return_value = "Test HTML"

    def _attempt(answer, is_correct):
        # Submit `answer` with the correctness check forced to `is_correct`.
        mock_is_correct.return_value = is_correct
        block.submit_problem({CapaFactory.input_key(): answer})

    # First attempt: correct -> score follows it to 1/1.
    _attempt('3.14', True)
    assert block.attempts == 1
    assert block.lcp.context['attempt'] == 1
    assert block.score == Score(raw_earned=1, raw_possible=1)
    # Second attempt: incorrect -> the last attempt wins, score drops to 0/1.
    _attempt('3.54', False)
    assert block.attempts == 2
    assert block.lcp.context['attempt'] == 2
    assert block.score == Score(raw_earned=0, raw_possible=1)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_correct_highest_score(self, mock_html: Mock, mock_is_correct: Mock):
    """
    Test the `highest_score` grading method.

    With `highest_score`, the published score is the best result across
    all attempts, so a later incorrect attempt cannot lower it.
    """
    block = CapaFactory.create(attempts=0, max_attempts=2, grading_method='highest_score')
    mock_html.return_value = "Test HTML"

    def _attempt(answer, is_correct):
        # Submit `answer` with the correctness check forced to `is_correct`.
        mock_is_correct.return_value = is_correct
        block.submit_problem({CapaFactory.input_key(): answer})

    # First attempt: correct -> 1/1.
    _attempt('3.14', True)
    assert block.attempts == 1
    assert block.lcp.context['attempt'] == 1
    assert block.score == Score(raw_earned=1, raw_possible=1)
    # Second attempt: incorrect, but the earlier correct attempt still wins.
    _attempt('3.54', False)
    assert block.attempts == 2
    assert block.lcp.context['attempt'] == 2
    assert block.score == Score(raw_earned=1, raw_possible=1)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_correct_first_score(self, mock_html: Mock, mock_is_correct: Mock):
    """
    Test the `first_score` grading method.

    With `first_score`, the published score is fixed by the very first
    attempt; later attempts cannot change it.
    """
    block = CapaFactory.create(attempts=0, max_attempts=2, grading_method='first_score')
    mock_html.return_value = "Test HTML"

    def _attempt(answer, is_correct):
        # Submit `answer` with the correctness check forced to `is_correct`.
        mock_is_correct.return_value = is_correct
        block.submit_problem({CapaFactory.input_key(): answer})

    # First attempt: incorrect -> 0/1, and this result is now locked in.
    _attempt('3.14', False)
    assert block.attempts == 1
    assert block.lcp.context['attempt'] == 1
    assert block.score == Score(raw_earned=0, raw_possible=1)
    # Second attempt: correct, but the first attempt's score still stands.
    _attempt('3.54', True)
    assert block.attempts == 2
    assert block.lcp.context['attempt'] == 2
    assert block.score == Score(raw_earned=0, raw_possible=1)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
@patch('xmodule.capa.correctmap.CorrectMap.is_correct')
@patch('xmodule.capa_block.ProblemBlock.get_problem_html')
def test_submit_problem_correct_average_score(self, mock_html: Mock, mock_is_correct: Mock):
    """
    Test the `average_score` grading method.

    With `average_score`, the published score is the mean of all attempt
    scores so far (rounded to two decimals, per the expected values below).
    """
    block = CapaFactory.create(attempts=0, max_attempts=4, grading_method='average_score')
    mock_html.return_value = "Test HTML"

    def _attempt(answer, is_correct):
        # Submit `answer` with the correctness check forced to `is_correct`.
        mock_is_correct.return_value = is_correct
        block.submit_problem({CapaFactory.input_key(): answer})

    # First attempt: incorrect -> average of [0] is 0.
    _attempt('3.14', False)
    assert block.attempts == 1
    assert block.lcp.context['attempt'] == 1
    assert block.score == Score(raw_earned=0, raw_possible=1)
    # Second attempt: correct -> average of [0, 1] is 0.5.
    _attempt('3.54', True)
    assert block.attempts == 2
    assert block.lcp.context['attempt'] == 2
    assert block.score == Score(raw_earned=0.5, raw_possible=1)
    # Third attempt: incorrect -> average of [0, 1, 0] rounds to 0.33.
    _attempt('3.45', False)
    assert block.attempts == 3
    assert block.lcp.context['attempt'] == 3
    assert block.score == Score(raw_earned=0.33, raw_possible=1)
    # Fourth attempt: incorrect -> average of [0, 1, 0, 0] is 0.25.
    _attempt('41.3', False)
    assert block.attempts == 4
    assert block.lcp.context['attempt'] == 4
    assert block.score == Score(raw_earned=0.25, raw_possible=1)
def test_submit_problem_incorrect(self):
block = CapaFactory.create(attempts=0)
@@ -1218,6 +1582,224 @@ class ProblemBlockTest(unittest.TestCase): # lint-amnesty, pylint: disable=miss
# and that this is treated as the first attempt for grading purposes
assert block.lcp.context['attempt'] == 1
@patch('xmodule.capa_block.ProblemBlock.get_rescore_with_grading_method')
def test_rescore_problem_with_grading_method_disable(self, mock_get_rescore: Mock):
    """
    Rescore with the grading-method feature disabled (default FEATURES).

    The grading-method rescore path (`get_rescore_with_grading_method`)
    must not be invoked; the regular rescore path runs instead.
    """
    block = CapaFactory.create(attempts=0, done=True)

    block.rescore(only_if_higher=False)

    assert block.attempts == 0
    assert block.lcp.context['attempt'] == 1
    mock_get_rescore.assert_not_called()
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_rescore_problem_with_grading_method_enable(self):
    """
    Rescore with the grading-method feature enabled.

    The rescore flow must go through `get_rescore_with_grading_method`;
    a spy wrapping the real method verifies it is actually called.
    """
    block = CapaFactory.create(attempts=0, done=True)
    with patch.object(
        ProblemBlock,
        'get_rescore_with_grading_method',
        wraps=block.get_rescore_with_grading_method,
    ) as rescore_spy:
        block.rescore(only_if_higher=False)
        assert block.attempts == 0
        assert block.lcp.context['attempt'] == 1
        rescore_spy.assert_called()
@patch('xmodule.capa_block.ProblemBlock.publish_grade')
def test_rescore_problem_grading_method_disable_to_enable(self, mock_publish_grade: Mock):
    """
    Test rescoring when the grading-method feature is disabled, then enabled.

    With the feature disabled, rescoring republishes the last score.
    With the feature enabled, rescoring recomputes the score from the
    attempt history according to the currently configured grading method.
    """
    # Three attempts against the default problem: wrong, wrong, correct.
    block = CapaFactory.create(attempts=0, max_attempts=3)
    get_request_dict = {CapaFactory.input_key(): '3.21'}
    block.submit_problem(get_request_dict)
    get_request_dict = {CapaFactory.input_key(): '3.45'}
    block.submit_problem(get_request_dict)
    get_request_dict = {CapaFactory.input_key(): '3.14'}
    block.submit_problem(get_request_dict)
    # Feature disabled: rescore keeps publishing the last score.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=False
    ):
        # Score is the last score (third attempt was correct).
        assert block.score == Score(raw_earned=1, raw_possible=1)
        block.rescore(only_if_higher=False)
        # Still the last score after rescoring.
        mock_publish_grade.assert_called_with(
            score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
        )
    # Feature enabled on both the block and the underlying LoncapaProblem:
    # rescore now honors whatever grading method is configured.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=True
    ):
        with patch(
            'xmodule.capa.capa_problem.LoncapaProblem.is_grading_method_enabled',
            new_callable=mock.PropertyMock,
            return_value=True
        ):
            # 'first_score': first attempt was wrong -> 0/1.
            block.grading_method = 'first_score'
            block.rescore(only_if_higher=False)
            mock_publish_grade.assert_called_with(
                score=Score(raw_earned=0, raw_possible=1), only_if_higher=False
            )
            # 'highest_score': best attempt was correct -> 1/1.
            block.grading_method = 'highest_score'
            block.rescore(only_if_higher=False)
            mock_publish_grade.assert_called_with(
                score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
            )
            # 'average_score': mean of [0, 0, 1] rounds to 0.33.
            block.grading_method = 'average_score'
            block.rescore(only_if_higher=False)
            mock_publish_grade.assert_called_with(
                score=Score(raw_earned=0.33, raw_possible=1), only_if_higher=False
            )
@patch('xmodule.capa_block.ProblemBlock.publish_grade')
def test_rescore_problem_grading_method_enable_to_disable(self, mock_publish_grade: Mock):
    """
    Test rescoring when the grading-method feature is enabled, then disabled.

    With the feature enabled, rescoring recomputes the score from the
    attempt history using the configured grading method. With it disabled,
    the score reverts to the last attempt's result.
    """
    # Three attempts against the default problem: wrong, wrong, correct.
    block = CapaFactory.create(attempts=0, max_attempts=3)
    get_request_dict = {CapaFactory.input_key(): '3.21'}
    block.submit_problem(get_request_dict)
    get_request_dict = {CapaFactory.input_key(): '3.45'}
    block.submit_problem(get_request_dict)
    get_request_dict = {CapaFactory.input_key(): '3.14'}
    block.submit_problem(get_request_dict)
    # Feature enabled on both the block and the underlying LoncapaProblem.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=True
    ):
        with patch(
            'xmodule.capa.capa_problem.LoncapaProblem.is_grading_method_enabled',
            new_callable=mock.PropertyMock,
            return_value=True
        ):
            # Default grading method is 'last_score' -> third attempt wins.
            assert block.grading_method == 'last_score'
            assert block.score == Score(raw_earned=1, raw_possible=1)
            # 'first_score': first attempt was wrong -> 0/1.
            block.grading_method = 'first_score'
            block.rescore(only_if_higher=False)
            mock_publish_grade.assert_called_with(
                score=Score(raw_earned=0, raw_possible=1), only_if_higher=False
            )
            # 'highest_score': best attempt was correct -> 1/1.
            block.grading_method = 'highest_score'
            block.rescore(only_if_higher=False)
            mock_publish_grade.assert_called_with(
                score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
            )
            # 'average_score': mean of [0, 0, 1] rounds to 0.33.
            block.grading_method = 'average_score'
            block.rescore(only_if_higher=False)
            mock_publish_grade.assert_called_with(
                score=Score(raw_earned=0.33, raw_possible=1), only_if_higher=False
            )
    # Feature disabled again: rescoring falls back to the last score.
    with patch(
        'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
        new_callable=mock.PropertyMock,
        return_value=False
    ):
        block.rescore(only_if_higher=False)
        # The score is the last score (third attempt was correct).
        assert block.score == Score(raw_earned=1, raw_possible=1)
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
@patch('xmodule.capa_block.ProblemBlock.publish_grade')
def test_rescore_problem_update_grading_method(self, mock_publish_grade: Mock):
    """
    Test rescoring after the grading method is changed.

    With the feature enabled, each rescore publishes a score computed from
    the attempt history according to the newly configured grading method.
    """
    # Three attempts against the default problem: wrong, wrong, correct.
    block = CapaFactory.create(attempts=0, max_attempts=3)
    get_request_dict = {CapaFactory.input_key(): '3.21'}
    block.submit_problem(get_request_dict)
    get_request_dict = {CapaFactory.input_key(): '3.45'}
    block.submit_problem(get_request_dict)
    get_request_dict = {CapaFactory.input_key(): '3.14'}
    block.submit_problem(get_request_dict)
    # Default grading method is 'last_score' -> third attempt wins.
    assert block.grading_method == 'last_score'
    assert block.score == Score(raw_earned=1, raw_possible=1)
    # 'first_score': first attempt was wrong -> 0/1.
    block.grading_method = 'first_score'
    block.rescore(only_if_higher=False)
    mock_publish_grade.assert_called_with(
        score=Score(raw_earned=0, raw_possible=1), only_if_higher=False
    )
    # 'highest_score': best attempt was correct -> 1/1.
    block.grading_method = 'highest_score'
    block.rescore(only_if_higher=False)
    mock_publish_grade.assert_called_with(
        score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
    )
    # 'average_score': mean of [0, 0, 1] rounds to 0.33.
    block.grading_method = 'average_score'
    block.rescore(only_if_higher=False)
    mock_publish_grade.assert_called_with(
        score=Score(raw_earned=0.33, raw_possible=1), only_if_higher=False
    )
def test_rescore_problem_not_done(self):
# Simulate that the problem is NOT done
block = CapaFactory.create(done=False)
@@ -1235,6 +1817,144 @@ class ProblemBlockTest(unittest.TestCase): # lint-amnesty, pylint: disable=miss
with pytest.raises(NotImplementedError):
block.rescore(only_if_higher=False)
def test_calculate_score_list(self):
    """
    `calculate_score_list` maps every entry in the LCP's
    `correct_map_history` to a `Score` built from `calculate_score`.
    """
    block = CapaFactory.create(correct=True)
    cmap = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=1)
    # Two history entries -> two scores expected.
    block.lcp.correct_map_history = [cmap, cmap]
    with patch.object(block.lcp, 'calculate_score', return_value={'score': 1, 'total': 2}):
        scores = block.calculate_score_list()
        self.assertEqual(scores, [Score(raw_earned=1, raw_possible=2)] * 2)
def test_calculate_score_list_empty(self):
    """
    With an empty `correct_map_history`, `calculate_score_list` returns
    an empty list and never invokes the LCP's `calculate_score`.
    """
    block = CapaFactory.create(correct=True)
    block.lcp.correct_map_history = []
    with patch.object(block.lcp, 'calculate_score', return_value=Mock()):
        self.assertEqual(block.calculate_score_list(), [])
        block.lcp.calculate_score.assert_not_called()
def test_update_correctness_list_updates_attempt(self):
    """
    `update_correctness_list` seeds the LCP context's attempt number,
    treating a block with zero attempts as being on its first attempt.
    """
    block = CapaFactory.create(correct=True, attempts=0)

    block.update_correctness_list()

    self.assertEqual(block.lcp.context['attempt'], 1)
def test_update_correctness_list_with_history(self):
    """
    `update_correctness_list` regrades from the stored histories: it
    forwards the saved answers and correct map to the LCP's
    `get_grade_from_current_answers`, and stores the result as both the
    LCP's correct-map history and its current correct map.
    """
    block = CapaFactory.create(correct=True, attempts=2)
    stored_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=1)
    stored_answers = {'1_2_1': 'abcd'}
    # One saved attempt in each history list.
    block.correct_map_history = [stored_map]
    block.student_answers_history = [stored_answers]
    with patch.object(block.lcp, 'get_grade_from_current_answers', return_value=stored_map):
        block.update_correctness_list()
        self.assertEqual(block.lcp.context['attempt'], 2)
        block.lcp.get_grade_from_current_answers.assert_called_once_with(stored_answers, stored_map)
        self.assertEqual(block.lcp.correct_map_history, [stored_map])
        self.assertEqual(block.lcp.correct_map.get_dict(), stored_map.get_dict())
def test_update_correctness_list_without_history(self):
    """
    With empty answer and correct-map histories, `update_correctness_list`
    only refreshes the attempt counter and never regrades: the LCP's
    `get_grade_from_current_answers` must not be called.
    """
    block = CapaFactory.create(correct=True, attempts=1)
    block.correct_map_history = []
    block.student_answers_history = []
    with patch.object(block.lcp, 'get_grade_from_current_answers', return_value=Mock()):
        block.update_correctness_list()
        self.assertEqual(block.lcp.context['attempt'], 1)
        block.lcp.get_grade_from_current_answers.assert_not_called()
@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_get_rescore_with_grading_method(self):
    """
    `get_rescore_with_grading_method` recomputes the score from the
    attempt history; with the default `last_score` method the final
    (correct) attempt determines the result.
    """
    block = CapaFactory.create(done=True, attempts=0, max_attempts=2)
    # Two attempts: wrong ('3.21'), then correct ('3.14').
    for answer in ('3.21', '3.14'):
        block.submit_problem({CapaFactory.input_key(): answer})

    self.assertEqual(
        block.get_rescore_with_grading_method(),
        Score(raw_earned=1, raw_possible=1),
    )
def test_get_score_with_grading_method(self):
    """
    `get_score_with_grading_method` returns the score determined by the
    grading method and also records it on the block's `score` attribute.
    """
    block = CapaFactory.create(done=True, attempts=0, max_attempts=2)
    # Two attempts: wrong ('3.21'), then correct ('3.14').
    for answer in ('3.21', '3.14'):
        block.submit_problem({CapaFactory.input_key(): answer})

    expected = Score(raw_earned=1, raw_possible=1)
    actual = block.get_score_with_grading_method(block.score_from_lcp(block.lcp))

    self.assertEqual(actual, expected)
    self.assertEqual(block.score, expected)
@patch('xmodule.capa_block.ProblemBlock.score_from_lcp')
def test_get_score_with_grading_method_updates_score(self, mock_score_from_lcp: Mock):
    """
    `get_score_with_grading_method` returns the current score and appends
    it to the block's `score_history`.
    """
    block = CapaFactory.create(attempts=1)
    latest_score = Score(raw_earned=1, raw_possible=1)
    mock_score_from_lcp.return_value = latest_score

    returned = block.get_score_with_grading_method(latest_score)

    self.assertEqual(returned, latest_score)
    self.assertEqual(block.score_history, [latest_score])
def test_get_score_with_grading_method_calls_grading_method_handler(self):
    """
    `get_score_with_grading_method` constructs a `GradingMethodHandler`
    with the current score, the grading method, the score history, and
    the possible points.
    """
    block = CapaFactory.create(attempts=1)
    current_score = Score(raw_earned=0, raw_possible=1)
    with patch('xmodule.capa_block.GradingMethodHandler') as handler_cls:
        handler_cls.return_value.get_score.return_value = current_score
        block.get_score_with_grading_method(current_score)
        handler_cls.assert_called_once_with(
            Score(raw_earned=0, raw_possible=1),
            "last_score",
            block.score_history,
            current_score.raw_possible,
        )
def capa_factory_for_problem_xml(self, xml): # lint-amnesty, pylint: disable=missing-function-docstring
class CustomCapaFactory(CapaFactory):
"""
@@ -1263,7 +1983,12 @@ class ProblemBlockTest(unittest.TestCase): # lint-amnesty, pylint: disable=miss
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
# Create the block
block = CapaFactory.create(attempts=1, done=True)
block = CapaFactory.create(attempts=0)
CapaFactory.answer_key()
# Check the problem
get_request_dict = {CapaFactory.input_key(): '1'}
block.submit_problem(get_request_dict)
# Simulate answering a problem that raises the exception
with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore: