diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py
index bdb66229af..c5993e0a84 100644
--- a/common/lib/capa/capa/capa_problem.py
+++ b/common/lib/capa/capa/capa_problem.py
@@ -798,16 +798,21 @@ class LoncapaProblem(object):
if problemid in self.correct_map:
pid = input_id
- # If the the problem has not been saved since the last submit set the status to the
- # current correctness value and set the message as expected. Otherwise we do not want to
- # display correctness because the answer may have changed since the problem was graded.
- if not self.has_saved_answers:
- status = self.correct_map.get_correctness(pid)
- msg = self.correct_map.get_msg(pid)
+ # If we're withholding correctness, don't show adaptive hints either.
+ # Note that regular, "demand" hints will be shown, if the course author has added them to the problem.
+ if not self.capa_module.correctness_available():
+ status = 'submitted'
+ else:
+ # If the problem has not been saved since the last submit, set the status to the
+ # current correctness value and set the message as expected. Otherwise we do not want to
+ # display correctness because the answer may have changed since the problem was graded.
+ if not self.has_saved_answers:
+ status = self.correct_map.get_correctness(pid)
+ msg = self.correct_map.get_msg(pid)
- hint = self.correct_map.get_hint(pid)
- hintmode = self.correct_map.get_hintmode(pid)
- answervariable = self.correct_map.get_property(pid, 'answervariable')
+ hint = self.correct_map.get_hint(pid)
+ hintmode = self.correct_map.get_hintmode(pid)
+ answervariable = self.correct_map.get_property(pid, 'answervariable')
value = ''
if self.student_answers and problemid in self.student_answers:
diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py
index 467629d7a1..1432762ef2 100644
--- a/common/lib/capa/capa/inputtypes.py
+++ b/common/lib/capa/capa/inputtypes.py
@@ -90,6 +90,7 @@ class Status(object):
'incomplete': _('incomplete'),
'unanswered': _('unanswered'),
'unsubmitted': _('unanswered'),
+ 'submitted': _('submitted'),
'queued': _('processing'),
}
tooltips = {
@@ -197,7 +198,7 @@ class InputTypeBase(object):
(what the student entered last time)
* 'id' -- the id of this input, typically
"{problem-location}_{response-num}_{input-num}"
- * 'status' (answered, unanswered, unsubmitted)
+ * 'status' (submitted, unanswered, unsubmitted)
* 'input_state' -- dictionary containing any inputtype-specific state
that has been preserved
* 'feedback' (dictionary containing keys for hints, errors, or other
diff --git a/common/lib/capa/capa/templates/jsinput.html b/common/lib/capa/capa/templates/jsinput.html
index 61a04a40eb..cf47d1bfba 100644
--- a/common/lib/capa/capa/templates/jsinput.html
+++ b/common/lib/capa/capa/templates/jsinput.html
@@ -22,7 +22,7 @@
- % if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
+ % if status in ['unsubmitted', 'submitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% endif
@@ -49,7 +49,7 @@
- % if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
+ % if status in ['unsubmitted', 'submitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
% endif
diff --git a/common/lib/capa/capa/templates/textline.html b/common/lib/capa/capa/templates/textline.html
index 632fb0f7da..58bba7d093 100644
--- a/common/lib/capa/capa/templates/textline.html
+++ b/common/lib/capa/capa/templates/textline.html
@@ -8,7 +8,7 @@
% endif
-% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
+% if status in ('unsubmitted', 'submitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
% endif
@@ -45,7 +45,7 @@
% endif
-% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
+% if status in ('unsubmitted', 'submitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
% endif
diff --git a/common/lib/capa/capa/tests/test_input_templates.py b/common/lib/capa/capa/tests/test_input_templates.py
index 39730a958a..7f912d166a 100644
--- a/common/lib/capa/capa/tests/test_input_templates.py
+++ b/common/lib/capa/capa/tests/test_input_templates.py
@@ -173,6 +173,7 @@ class TemplateTestCase(unittest.TestCase):
cases = [
('correct', 'correct'),
('unsubmitted', 'unanswered'),
+ ('submitted', 'submitted'),
('incorrect', 'incorrect'),
('incomplete', 'incorrect')
]
diff --git a/common/lib/xmodule/xmodule/capa_base.py b/common/lib/xmodule/xmodule/capa_base.py
index d6e15d6f28..f778316bfb 100644
--- a/common/lib/xmodule/xmodule/capa_base.py
+++ b/common/lib/xmodule/xmodule/capa_base.py
@@ -24,7 +24,7 @@ from capa.inputtypes import Status
from capa.responsetypes import StudentInputError, ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames, get_inner_html_from_xpath
from xblock.fields import Boolean, Dict, Float, Integer, Scope, String, XMLString
-from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER
+from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER, SHOW_CORRECTNESS
from xmodule.exceptions import NotFoundError
from .fields import Date, Timedelta
from .progress import Progress
@@ -114,6 +114,18 @@ class CapaFields(object):
help=_("Amount of time after the due date that submissions will be accepted"),
scope=Scope.settings
)
+ show_correctness = String(
+ display_name=_("Show Results"),
+ help=_("Defines when to show whether a learner's answer to the problem is correct. "
+ "Configured on the subsection."),
+ scope=Scope.settings,
+ default=SHOW_CORRECTNESS.ALWAYS,
+ values=[
+ {"display_name": _("Always"), "value": SHOW_CORRECTNESS.ALWAYS},
+ {"display_name": _("Never"), "value": SHOW_CORRECTNESS.NEVER},
+ {"display_name": _("Past Due"), "value": SHOW_CORRECTNESS.PAST_DUE},
+ ],
+ )
showanswer = String(
display_name=_("Show Answer"),
help=_("Defines when to show the answer to the problem. "
@@ -391,12 +403,25 @@ class CapaMixin(CapaFields):
return None
return None
+ def get_display_progress(self):
+ """
+ Return (score, total) to be displayed to the learner.
+ """
+ progress = self.get_progress()
+ score, total = (progress.frac() if progress else (0, 0))
+
+ # Withhold the score if hiding correctness
+ if not self.correctness_available():
+ score = None
+
+ return score, total
+
def get_html(self):
"""
Return some html with data about the module
"""
- progress = self.get_progress()
- curr_score, total_possible = (progress.frac() if progress else (0, 0))
+ curr_score, total_possible = self.get_display_progress()
+
return self.runtime.render_template('problem_ajax.html', {
'element_id': self.location.html_id(),
'id': self.location.to_deprecated_string(),
@@ -739,7 +764,11 @@ class CapaMixin(CapaFields):
if render_notifications:
progress = self.get_progress()
id_list = self.lcp.correct_map.keys()
- if len(id_list) == 1:
+
+ # Show only a generic message if hiding correctness
+ if not self.correctness_available():
+ answer_notification_type = 'submitted'
+ elif len(id_list) == 1:
# Only one answer available
answer_notification_type = self.lcp.correct_map.get_correctness(id_list[0])
elif len(id_list) > 1:
@@ -782,6 +811,8 @@ class CapaMixin(CapaFields):
).format(progress=str(progress))
else:
answer_notification_message = _('Partially Correct')
+ elif answer_notification_type == 'submitted':
+ answer_notification_message = _("Answer submitted.")
return answer_notification_type, answer_notification_message
@@ -855,7 +886,10 @@ class CapaMixin(CapaFields):
"""
Is the user allowed to see an answer?
"""
- if self.showanswer == '':
+ if not self.correctness_available():
+ # If correctness is being withheld, then don't show answers either.
+ return False
+ elif self.showanswer == '':
return False
elif self.showanswer == SHOWANSWER.NEVER:
return False
@@ -883,6 +917,24 @@ class CapaMixin(CapaFields):
return False
+ def correctness_available(self):
+ """
+ Is the user allowed to see whether she's answered correctly?
+
+ Limits access to the correct/incorrect flags, messages, and problem score.
+ """
+ if self.show_correctness == SHOW_CORRECTNESS.NEVER:
+ return False
+ elif self.runtime.user_is_staff:
+ # This is after the 'never' check because admins can see correctness
+ # unless the problem explicitly prevents it
+ return True
+ elif self.show_correctness == SHOW_CORRECTNESS.PAST_DUE:
+ return self.is_past_due()
+
+ # else: self.show_correctness == SHOW_CORRECTNESS.ALWAYS
+ return True
+
def update_score(self, data):
"""
Delivers grading response (e.g. from asynchronous code checking) to
@@ -1233,6 +1285,10 @@ class CapaMixin(CapaFields):
# render problem into HTML
html = self.get_problem_html(encapsulate=False, submit_notification=True)
+ # Withhold success indicator if hiding correctness
+ if not self.correctness_available():
+ success = 'submitted'
+
return {
'success': success,
'contents': html
diff --git a/common/lib/xmodule/xmodule/capa_base_constants.py b/common/lib/xmodule/xmodule/capa_base_constants.py
index 7739be238e..2864825906 100644
--- a/common/lib/xmodule/xmodule/capa_base_constants.py
+++ b/common/lib/xmodule/xmodule/capa_base_constants.py
@@ -4,6 +4,15 @@ Constants for capa_base problems
"""
+class SHOW_CORRECTNESS(object): # pylint: disable=invalid-name
+ """
+ Constants for when to show correctness
+ """
+ ALWAYS = "always"
+ PAST_DUE = "past_due"
+ NEVER = "never"
+
+
class SHOWANSWER(object):
"""
Constants for when to show answer
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index 4ca258474f..7b43c36a78 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -120,7 +120,8 @@ class CapaModule(CapaMixin, XModule):
after = self.get_progress()
after_attempts = self.attempts
progress_changed = (after != before) or (after_attempts != before_attempts)
- curr_score, total_possible = (after.frac() if after else (0, 0))
+ curr_score, total_possible = self.get_display_progress()
+
result.update({
'progress_changed': progress_changed,
'current_score': curr_score,
@@ -215,6 +216,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
CapaDescriptor.force_save_button,
CapaDescriptor.markdown,
CapaDescriptor.use_latex_compiler,
+ CapaDescriptor.show_correctness,
])
return non_editable_fields
diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss
index 7b8f9db0df..54bd748c38 100644
--- a/common/lib/xmodule/xmodule/css/capa/display.scss
+++ b/common/lib/xmodule/xmodule/css/capa/display.scss
@@ -250,6 +250,15 @@ div.problem {
border-color: $incorrect;
}
}
+
+ &.choicegroup_submitted {
+ border: 2px solid $submitted;
+
+ // keep blue for submitted answers on hover.
+ &:hover {
+ border-color: $submitted;
+ }
+ }
}
.indicator-container {
@@ -325,6 +334,7 @@ div.problem {
@include status-icon($incorrect, $cross-icon);
}
+ &.submitted,
&.unsubmitted,
&.unanswered {
.status-icon {
@@ -419,6 +429,12 @@ div.problem {
}
}
+ &.submitted, &.ui-icon-check {
+ input {
+ border-color: $submitted;
+ }
+ }
+
p.answer {
display: inline-block;
margin-top: ($baseline / 2);
@@ -790,6 +806,18 @@ div.problem {
}
}
+ // CASE: submitted, correctness withheld
+ > .submitted {
+
+ input {
+ border: 2px solid $submitted;
+ }
+
+ .status {
+ content: '';
+ }
+ }
+
// CASE: unanswered and unsubmitted
> .unanswered, > .unsubmitted {
@@ -824,7 +852,11 @@ div.problem {
.indicator-container {
display: inline-block;
- .status.correct:after, .status.partially-correct:after, .status.incorrect:after, .status.unanswered:after {
+ .status.correct:after,
+ .status.partially-correct:after,
+ .status.incorrect:after,
+ .status.submitted:after,
+ .status.unanswered:after {
@include margin-left(0);
}
}
@@ -1531,6 +1563,10 @@ div.problem {
@extend label.choicegroup_incorrect;
}
+ label.choicetextgroup_submitted, section.choicetextgroup_submitted {
+ @extend label.choicegroup_submitted;
+ }
+
label.choicetextgroup_show_correct, section.choicetextgroup_show_correct {
&:after {
@include margin-left($baseline*.75);
@@ -1569,6 +1605,10 @@ div.problem .imageinput.capa_inputtype {
.partially-correct {
@include status-icon($partially-correct, $asterisk-icon);
}
+
+ .submitted {
+ content: '';
+ }
}
// +Problem - Annotation Problem Overrides
@@ -1596,4 +1636,8 @@ div.problem .annotation-input {
.partially-correct {
@include status-icon($partially-correct, $asterisk-icon);
}
+
+ .submitted {
+ content: '';
+ }
}
diff --git a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee
index 964a4cdcba..a187b0da60 100644
--- a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee
+++ b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee
@@ -138,6 +138,28 @@ describe 'Problem', ->
it 'shows 0 points possible for the detail', ->
testProgessData(@problem, 0, 0, 1, "False", "0 points possible (ungraded)")
+ describe 'with a score of null (show_correctness == false)', ->
+ it 'reports the number of points possible and graded, results hidden', ->
+ testProgessData(@problem, null, 1, 0, "True", "1 point possible (graded, results hidden)")
+
+ it 'reports the number of points possible (plural) and graded, results hidden', ->
+ testProgessData(@problem, null, 2, 0, "True", "2 points possible (graded, results hidden)")
+
+ it 'reports the number of points possible and ungraded, results hidden', ->
+ testProgessData(@problem, null, 1, 0, "False", "1 point possible (ungraded, results hidden)")
+
+ it 'displays ungraded if number of points possible is 0, results hidden', ->
+ testProgessData(@problem, null, 0, 0, "False", "0 points possible (ungraded, results hidden)")
+
+ it 'displays ungraded if number of points possible is 0, even if graded value is True, results hidden', ->
+ testProgessData(@problem, null, 0, 0, "True", "0 points possible (ungraded, results hidden)")
+
+ it 'reports the correct score with status none and >0 attempts, results hidden', ->
+ testProgessData(@problem, null, 1, 1, "True", "1 point possible (graded, results hidden)")
+
+ it 'reports the correct score with >1 weight, status none, and >0 attempts, results hidden', ->
+ testProgessData(@problem, null, 2, 2, "True", "2 points possible (graded, results hidden)")
+
describe 'render', ->
beforeEach ->
@problem = new Problem($('.xblock-student_view'))
diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.js b/common/lib/xmodule/xmodule/js/src/capa/display.js
index 8a40513cd3..4a2f721004 100644
--- a/common/lib/xmodule/xmodule/js/src/capa/display.js
+++ b/common/lib/xmodule/xmodule/js/src/capa/display.js
@@ -214,11 +214,36 @@
attemptsUsed = this.el.data('attempts-used');
graded = this.el.data('graded');
+ // The problem is ungraded if it's explicitly marked as such, or if the total possible score is 0
+ if (graded === 'True' && totalScore !== 0) {
+ graded = true;
+ } else {
+ graded = false;
+ }
+
if (curScore === undefined || totalScore === undefined) {
- progress = '';
+ // Render an empty string.
+ progressTemplate = '';
+ } else if (curScore === null || curScore === 'None') {
+ // Render 'x point(s) possible (un/graded, results hidden)' if no current score provided.
+ if (graded) {
+ progressTemplate = ngettext(
+ // Translators: %(num_points)s is the number of points possible (examples: 1, 3, 10).;
+ '%(num_points)s point possible (graded, results hidden)',
+ '%(num_points)s points possible (graded, results hidden)',
+ totalScore
+ );
+ } else {
+ progressTemplate = ngettext(
+ // Translators: %(num_points)s is the number of points possible (examples: 1, 3, 10).;
+ '%(num_points)s point possible (ungraded, results hidden)',
+ '%(num_points)s points possible (ungraded, results hidden)',
+ totalScore
+ );
+ }
} else if (attemptsUsed === 0 || totalScore === 0) {
// Render 'x point(s) possible' if student has not yet attempted question
- if (graded === 'True' && totalScore !== 0) {
+ if (graded) {
progressTemplate = ngettext(
// Translators: %(num_points)s is the number of points possible (examples: 1, 3, 10).;
'%(num_points)s point possible (graded)', '%(num_points)s points possible (graded)',
@@ -231,10 +256,9 @@
totalScore
);
}
- progress = interpolate(progressTemplate, {num_points: totalScore}, true);
} else {
// Render 'x/y point(s)' if student has attempted question
- if (graded === 'True' && totalScore !== 0) {
+ if (graded) {
progressTemplate = ngettext(
// This comment needs to be on one line to be properly scraped for the translators.
// Translators: %(earned)s is the number of points earned. %(possible)s is the total number of points (examples: 0/1, 1/1, 2/3, 5/10). The total number of points will always be at least 1. We pluralize based on the total number of points (example: 0/1 point; 1/2 points);
@@ -249,13 +273,14 @@
totalScore
);
}
- progress = interpolate(
- progressTemplate, {
- earned: curScore,
- possible: totalScore
- }, true
- );
}
+ progress = interpolate(
+ progressTemplate, {
+ earned: curScore,
+ num_points: totalScore,
+ possible: totalScore
+ }, true
+ );
return this.$('.problem-progress').text(progress);
};
@@ -573,6 +598,7 @@
complete: this.enableSubmitButtonAfterResponse,
success: function(response) {
switch (response.success) {
+ case 'submitted':
case 'incorrect':
case 'correct':
that.render(response.contents);
@@ -599,6 +625,7 @@
Logger.log('problem_check', this.answers);
return $.postWithPrefix('' + this.url + '/problem_check', this.answers, function(response) {
switch (response.success) {
+ case 'submitted':
case 'incorrect':
case 'correct':
window.SR.readTexts(that.get_sr_status(response.contents));
diff --git a/common/lib/xmodule/xmodule/modulestore/inheritance.py b/common/lib/xmodule/xmodule/modulestore/inheritance.py
index 2eb0e54f48..91c2811b35 100644
--- a/common/lib/xmodule/xmodule/modulestore/inheritance.py
+++ b/common/lib/xmodule/xmodule/modulestore/inheritance.py
@@ -100,6 +100,19 @@ class InheritanceMixin(XBlockMixin):
scope=Scope.settings,
default="finished",
)
+
+ show_correctness = String(
+ display_name=_("Show Results"),
+ help=_(
+ # Translators: DO NOT translate the words in quotes here, they are
+ # specific words for the acceptable values.
+ 'Specify when to show answer correctness and score to learners. '
+ 'Valid values are "always", "never", and "past_due".'
+ ),
+ scope=Scope.settings,
+ default="always",
+ )
+
rerandomize = String(
display_name=_("Randomization"),
help=_(
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index 3af41a46fc..73b716de68 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -265,6 +265,46 @@ class CapaModuleTest(unittest.TestCase):
problem.attempts = 1
self.assertTrue(problem.answer_available())
+ @ddt.data(
+ # If show_correctness=always, Answer is visible after attempted
+ ({
+ 'showanswer': 'attempted',
+ 'max_attempts': '1',
+ 'show_correctness': 'always',
+ }, True),
+ # If show_correctness=never, Answer is never visible
+ ({
+ 'showanswer': 'attempted',
+ 'max_attempts': '1',
+ 'show_correctness': 'never',
+ }, False),
+ # If show_correctness=past_due, answer is not visible before due date
+ ({
+ 'showanswer': 'attempted',
+ 'show_correctness': 'past_due',
+ 'max_attempts': '1',
+ 'due': 'tomorrow_str',
+ }, False),
+ # If show_correctness=past_due, answer is visible after due date
+ ({
+ 'showanswer': 'attempted',
+ 'show_correctness': 'past_due',
+ 'max_attempts': '1',
+ 'due': 'yesterday_str',
+ }, True),
+ )
+ @ddt.unpack
+ def test_showanswer_hide_correctness(self, problem_data, answer_available):
+ """
+ Ensure that the answer will not be shown when correctness is being hidden.
+ """
+ if 'due' in problem_data:
+ problem_data['due'] = getattr(self, problem_data['due'])
+ problem = CapaFactory.create(**problem_data)
+ self.assertFalse(problem.answer_available())
+ problem.attempts = 1
+ self.assertEqual(problem.answer_available(), answer_available)
+
def test_showanswer_closed(self):
# can see after attempts used up, even with due date in the future
@@ -414,6 +454,73 @@ class CapaModuleTest(unittest.TestCase):
graceperiod=self.two_day_delta_str)
self.assertTrue(still_in_grace.answer_available())
+ @ddt.data('', 'other-value')
+ def test_show_correctness_other(self, show_correctness):
+ """
+ Test that correctness is visible if show_correctness is not set to one of the values
+ from SHOW_CORRECTNESS constant.
+ """
+ problem = CapaFactory.create(show_correctness=show_correctness)
+ self.assertTrue(problem.correctness_available())
+
+ def test_show_correctness_default(self):
+ """
+ Test that correctness is visible by default.
+ """
+ problem = CapaFactory.create()
+ self.assertTrue(problem.correctness_available())
+
+ def test_show_correctness_never(self):
+ """
+ Test that correctness is hidden when show_correctness turned off.
+ """
+ problem = CapaFactory.create(show_correctness='never')
+ self.assertFalse(problem.correctness_available())
+
+ @ddt.data(
+ # Correctness not visible if due date in the future, even after using up all attempts
+ ({
+ 'show_correctness': 'past_due',
+ 'max_attempts': '1',
+ 'attempts': '1',
+ 'due': 'tomorrow_str',
+ }, False),
+ # Correctness visible if due date in the past
+ ({
+ 'show_correctness': 'past_due',
+ 'max_attempts': '1',
+ 'attempts': '0',
+ 'due': 'yesterday_str',
+ }, True),
+ # Correctness not visible if due date in the future
+ ({
+ 'show_correctness': 'past_due',
+ 'max_attempts': '1',
+ 'attempts': '0',
+ 'due': 'tomorrow_str',
+ }, False),
+ # Correctness not visible because grace period hasn't expired,
+ # even after using up all attempts
+ ({
+ 'show_correctness': 'past_due',
+ 'max_attempts': '1',
+ 'attempts': '1',
+ 'due': 'yesterday_str',
+ 'graceperiod': 'two_day_delta_str',
+ }, False),
+ )
+ @ddt.unpack
+ def test_show_correctness_past_due(self, problem_data, expected_result):
+ """
+ Test that with show_correctness="past_due", correctness will only be visible
+ after the problem is closed for everyone--e.g. after due date + grace period.
+ """
+ problem_data['due'] = getattr(self, problem_data['due'])
+ if 'graceperiod' in problem_data:
+ problem_data['graceperiod'] = getattr(self, problem_data['graceperiod'])
+ problem = CapaFactory.create(**problem_data)
+ self.assertEqual(problem.correctness_available(), expected_result)
+
def test_closed(self):
# Attempts < Max attempts --> NOT closed
@@ -814,6 +921,36 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
+ @ddt.data(
+ ("never", True, None, 'submitted'),
+ ("never", False, None, 'submitted'),
+ ("past_due", True, None, 'submitted'),
+ ("past_due", False, None, 'submitted'),
+ ("always", True, 1, 'correct'),
+ ("always", False, 0, 'incorrect'),
+ )
+ @ddt.unpack
+ def test_handle_ajax_show_correctness(self, show_correctness, is_correct, expected_score, expected_success):
+ module = CapaFactory.create(show_correctness=show_correctness,
+ due=self.tomorrow_str,
+ correct=is_correct)
+
+ # Simulate marking the input correct/incorrect
+ with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
+ mock_is_correct.return_value = is_correct
+
+ # Check the problem
+ get_request_dict = {CapaFactory.input_key(): '0'}
+ json_result = module.handle_ajax('problem_check', get_request_dict)
+ result = json.loads(json_result)
+
+ # Expect that the AJAX result withholds correctness and score
+ self.assertEqual(result['current_score'], expected_score)
+ self.assertEqual(result['success'], expected_success)
+
+ # Expect that the number of attempts is incremented by 1
+ self.assertEqual(module.attempts, 1)
+
def test_reset_problem(self):
module = CapaFactory.create(done=True)
module.new_lcp = Mock(wraps=module.new_lcp)
@@ -1584,6 +1721,27 @@ class CapaModuleTest(unittest.TestCase):
other_module.get_progress()
mock_progress.assert_called_with(1, 1)
+ @ddt.data(
+ ("never", True, None),
+ ("never", False, None),
+ ("past_due", True, None),
+ ("past_due", False, None),
+ ("always", True, 1),
+ ("always", False, 0),
+ )
+ @ddt.unpack
+ def test_get_display_progress_show_correctness(self, show_correctness, is_correct, expected_score):
+ """
+ Check that score and total are calculated correctly for the progress fraction.
+ """
+ module = CapaFactory.create(correct=is_correct,
+ show_correctness=show_correctness,
+ due=self.tomorrow_str)
+ module.weight = 1
+ score, total = module.get_display_progress()
+ self.assertEqual(score, expected_score)
+ self.assertEqual(total, 1)
+
def test_get_html(self):
"""
Check that get_html() calls get_progress() with no arguments.
diff --git a/common/static/sass/edx-pattern-library-shims/base/_variables.scss b/common/static/sass/edx-pattern-library-shims/base/_variables.scss
index 9b62622f00..3ae121632d 100644
--- a/common/static/sass/edx-pattern-library-shims/base/_variables.scss
+++ b/common/static/sass/edx-pattern-library-shims/base/_variables.scss
@@ -151,6 +151,7 @@ $general-color-accent: $uxpl-blue-base !default
$correct: $success-color !default;
$partially-correct: $success-color !default;
$incorrect: $error-color !default;
+$submitted: $general-color !default;
// BUTTONS
diff --git a/common/test/acceptance/pages/lms/problem.py b/common/test/acceptance/pages/lms/problem.py
index 2f5a4ed0ff..4abe520abd 100644
--- a/common/test/acceptance/pages/lms/problem.py
+++ b/common/test/acceptance/pages/lms/problem.py
@@ -317,6 +317,14 @@ class ProblemPage(PageObject):
self.wait_for_element_visibility('.fa-asterisk', "Waiting for asterisk notification icon")
self.wait_for_focus_on_submit_notification()
+ def wait_submitted_notification(self):
+ """
+ Check for visibility of the "answer received" general notification and icon.
+ """
+ msg = "Wait for submitted notification to be visible"
+ self.wait_for_element_visibility('.notification.general.notification-submit', msg)
+ self.wait_for_focus_on_submit_notification()
+
def click_hint(self):
"""
Click the Hint button.
@@ -418,14 +426,16 @@ class ProblemPage(PageObject):
solution_selector = '.solution-span div.detailed-solution'
return self.q(css=solution_selector).is_present()
- def is_correct_choice_highlighted(self, correct_choices):
+ def is_choice_highlighted(self, choice, choices_list):
"""
- Check if correct answer/choice highlighted for choice group.
+ Check if the given answer/choice is highlighted for choice group.
"""
- correct_status_xpath = '//fieldset/div[contains(@class, "field")][{0}]/label[contains(@class, "choicegroup_correct")]/span[contains(@class, "status correct")]' # pylint: disable=line-too-long
+ choice_status_xpath = ('//fieldset/div[contains(@class, "field")][{{0}}]'
+ '/label[contains(@class, "choicegroup_{choice}")]'
+ '/span[contains(@class, "status {choice}")]'.format(choice=choice))
any_status_xpath = '//fieldset/div[contains(@class, "field")][{0}]/label/span'
- for choice in correct_choices:
- if not self.q(xpath=correct_status_xpath.format(choice)).is_present():
+ for choice in choices_list:
+ if not self.q(xpath=choice_status_xpath.format(choice)).is_present():
return False
# Check that there is only a single status span, as there were some bugs with multiple
@@ -435,6 +445,18 @@ class ProblemPage(PageObject):
return True
+ def is_correct_choice_highlighted(self, correct_choices):
+ """
+ Check if correct answer/choice highlighted for choice group.
+ """
+ return self.is_choice_highlighted('correct', correct_choices)
+
+ def is_submitted_choice_highlighted(self, correct_choices):
+ """
+ Check if submitted answer/choice highlighted for choice group.
+ """
+ return self.is_choice_highlighted('submitted', correct_choices)
+
@property
def problem_question(self):
"""
diff --git a/common/test/acceptance/pages/studio/overview.py b/common/test/acceptance/pages/studio/overview.py
index a0c0a051a6..b1c6d35abc 100644
--- a/common/test/acceptance/pages/studio/overview.py
+++ b/common/test/acceptance/pages/studio/overview.py
@@ -575,6 +575,13 @@ class CourseOutlinePage(CoursePage, CourseOutlineContainer):
self.q(css=".action-save").first.click()
self.wait_for_ajax()
+ def select_visibility_tab(self):
+ """
+ Select the advanced settings tab
+ """
+ self.q(css=".settings-tab-button[data-tab='visibility']").first.click()
+ self.wait_for_element_presence('input[value=hide_after_due]', 'Visibility fields not present.')
+
def select_advanced_tab(self, desired_item='special_exam'):
"""
Select the advanced settings tab
@@ -584,8 +591,6 @@ class CourseOutlinePage(CoursePage, CourseOutlineContainer):
self.wait_for_element_presence('input.no_special_exam', 'Special exam settings fields not present.')
if desired_item == 'gated_content':
self.wait_for_element_visibility('#is_prereq', 'Gating settings fields are present.')
- if desired_item == 'hide_after_due_date':
- self.wait_for_element_presence('input[value=hide_after_due]', 'Visibility fields not present.')
def make_exam_proctored(self):
"""
@@ -601,6 +606,7 @@ class CourseOutlinePage(CoursePage, CourseOutlineContainer):
"""
self.q(css="input.timed_exam").first.click()
if hide_after_due:
+ self.select_visibility_tab()
self.q(css='input[name=content-visibility][value=hide_after_due]').first.click()
self.q(css=".action-save").first.click()
self.wait_for_ajax()
@@ -1057,7 +1063,7 @@ class CourseOutlineModal(object):
if needed.
"""
if not self.is_staff_lock_visible:
- self.find_css(".settings-tab-button[data-tab=advanced]").click()
+ self.find_css(".settings-tab-button[data-tab=visibility]").click()
EmptyPromise(
lambda: self.is_staff_lock_visible,
"Staff lock option is visible",
diff --git a/common/test/acceptance/tests/lms/test_lms_courseware.py b/common/test/acceptance/tests/lms/test_lms_courseware.py
index 33145ebe80..c43fb4528c 100644
--- a/common/test/acceptance/tests/lms/test_lms_courseware.py
+++ b/common/test/acceptance/tests/lms/test_lms_courseware.py
@@ -900,7 +900,7 @@ class SubsectionHiddenAfterDueDateTest(UniqueCourseTest):
self.studio_course_outline.visit()
self.studio_course_outline.open_subsection_settings_dialog()
- self.studio_course_outline.select_advanced_tab('hide_after_due_date')
+ self.studio_course_outline.select_visibility_tab()
self.studio_course_outline.make_subsection_hidden_after_due_date()
self.logout_page.visit()
diff --git a/common/test/acceptance/tests/lms/test_problem_types.py b/common/test/acceptance/tests/lms/test_problem_types.py
index d96639d197..c14bf0c1ea 100644
--- a/common/test/acceptance/tests/lms/test_problem_types.py
+++ b/common/test/acceptance/tests/lms/test_problem_types.py
@@ -3,6 +3,7 @@ Bok choy acceptance and a11y tests for problem types in the LMS
See also lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
+import ddt
import random
import textwrap
@@ -84,12 +85,14 @@ class ProblemTypeTestBase(ProblemsTest, EventsTestMixin):
problem_name = None
problem_type = None
+ problem_points = 1
factory = None
factory_kwargs = {}
status_indicators = {
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered'],
+ 'submitted': ['span.submitted'],
}
def setUp(self):
@@ -100,6 +103,10 @@ class ProblemTypeTestBase(ProblemsTest, EventsTestMixin):
self.courseware_page.visit()
self.problem_page = ProblemPage(self.browser)
+ def get_sequential(self):
+ """ Allow any class in the inheritance chain to customize subsection metadata."""
+ return XBlockFixtureDesc('sequential', 'Test Subsection', metadata=getattr(self, 'sequential_metadata', {}))
+
def get_problem(self):
"""
Creates a {problem_type} problem
@@ -117,7 +124,7 @@ class ProblemTypeTestBase(ProblemsTest, EventsTestMixin):
Waits for the expected status indicator.
Args:
- status: one of ("correct", "incorrect", "unanswered)
+ status: one of ("correct", "incorrect", "unanswered", "submitted")
"""
msg = "Wait for status to be {}".format(status)
selector = ', '.join(self.status_indicators[status])
@@ -381,12 +388,83 @@ class ProblemTypeTestMixin(ProblemTypeA11yTestMixin):
self.problem_page.wait_partial_notification()
-class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+@ddt.ddt
+class ProblemNeverShowCorrectnessMixin(object):
"""
- TestCase Class for Annotation Problem Type
+ Tests the effect of adding `show_correctness: never` to the sequence metadata
+ for subclasses of ProblemTypeTestMixin.
+ """
+ sequential_metadata = {'show_correctness': 'never'}
+
+ @attr(shard=7)
+ @ddt.data('correct', 'incorrect', 'partially-correct')
+ def test_answer_says_submitted(self, correctness):
+ """
+ Scenario: I can answer a problem <correctness>ly
+ Given External graders respond "<correctness>"
+ And I am viewing a "<problem_type>" problem
+ in a subsection with show_correctness set to "never"
+ Then I should see a score of "N point(s) possible (ungraded, results hidden)"
+ When I answer a "<problem_type>" problem "<correctness>ly"
+ And the "<problem_type>" problem displays only a "submitted" notification.
+ And I should see a score of "N point(s) possible (ungraded, results hidden)"
+ And a "problem_check" server event is emitted
+ And a "problem_check" browser event is emitted
+ """
+
+ # Not all problems have partially correct solutions configured
+ if correctness == 'partially-correct' and not self.partially_correct:
+ raise SkipTest("Test incompatible with the current problem type")
+
+ # Problem progress text depends on points possible
+ possible = 'possible (ungraded, results hidden)'
+ if self.problem_points == 1:
+ problem_progress = '1 point {}'.format(possible)
+ else:
+ problem_progress = '{} points {}'.format(self.problem_points, possible)
+
+ # Make sure we're looking at the right problem
+ self.problem_page.wait_for(
+ lambda: self.problem_page.problem_name == self.problem_name,
+ "Make sure the correct problem is on the page"
+ )
+
+ # Learner can see that score will be hidden prior to submitting answer
+ self.assertEqual(self.problem_page.problem_progress_graded_value, problem_progress)
+
+
+ # Answer the problem with the given correctness
+ self.problem_page.click_submit()
+ self.wait_for_status('submitted')
+ self.problem_page.wait_submitted_notification()
+
+ # Score is still hidden after submitting answer
+ self.assertEqual(self.problem_page.problem_progress_graded_value, problem_progress)
+
+ # Check for corresponding tracking event
+ expected_events = [
+ {
+ 'event_source': 'server',
+ 'event_type': 'problem_check',
+ 'username': self.username,
+ }, {
+ 'event_source': 'browser',
+ 'event_type': 'problem_check',
+ 'username': self.username,
+ },
+ ]
+
+ for event in expected_events:
+ self.wait_for_events(event_filter=event, number_of_matches=1)
+
+
+class AnnotationProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Annotation Problem Type
"""
problem_name = 'ANNOTATION TEST PROBLEM'
problem_type = 'annotationresponse'
+ problem_points = 2
factory = AnnotationResponseXMLFactory()
partially_correct = True
@@ -411,13 +489,14 @@ class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'incorrect': ['span.incorrect'],
'partially-correct': ['span.partially-correct'],
'unanswered': ['span.unanswered'],
+ 'submitted': ['span.submitted'],
}
def setUp(self, *args, **kwargs):
"""
- Additional setup for AnnotationProblemTypeTest
+ Additional setup for AnnotationProblemTypeBase
"""
- super(AnnotationProblemTypeTest, self).setUp(*args, **kwargs)
+ super(AnnotationProblemTypeBase, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
"ignore": [
@@ -443,9 +522,23 @@ class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
).nth(choice).click()
-class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class AnnotationProblemTypeTest(AnnotationProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for Checkbox Problem Type
+ Standard tests for the Annotation Problem Type
+ """
+ pass
+
+
+class AnnotationProblemTypeNeverShowCorrectnessTest(AnnotationProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Annotation Problem Type problems.
+ """
+ pass
+
+
+class CheckboxProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Checkbox Problem Type
"""
problem_name = 'CHECKBOX TEST PROBLEM'
problem_type = 'checkbox'
@@ -462,12 +555,6 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'explanation_text': 'This is explanation text'
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for CheckboxProblemTypeTest
- """
- super(CheckboxProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer checkbox problem.
@@ -481,6 +568,11 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.click_choice("choice_1")
self.problem_page.click_choice("choice_3")
+
+class CheckboxProblemTypeTest(CheckboxProblemTypeBase, ProblemTypeTestMixin):
+ """
+ Standard tests for the Checkbox Problem Type
+ """
@attr(shard=7)
def test_can_show_answer(self):
"""
@@ -498,9 +590,16 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.wait_for_show_answer_notification()
-class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class CheckboxProblemTypeNeverShowCorrectnessTest(CheckboxProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
- TestCase Class for Multiple Choice Problem Type
+ Ensure that correctness can be withheld for Checkbox Problem Type problems.
+ """
+ pass
+
+
+class MultipleChoiceProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Multiple Choice Problem Type
"""
problem_name = 'MULTIPLE CHOICE TEST PROBLEM'
problem_type = 'multiple choice'
@@ -518,14 +617,9 @@ class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['label.choicegroup_correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
+ 'submitted': ['label.choicegroup_submitted', 'span.submitted'],
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for MultipleChoiceProblemTypeTest
- """
- super(MultipleChoiceProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer multiple choice problem.
@@ -535,6 +629,11 @@ class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
else:
self.problem_page.click_choice("choice_choice_2")
+
+class MultipleChoiceProblemTypeTest(MultipleChoiceProblemTypeBase, ProblemTypeTestMixin):
+ """
+ Standard tests for the Multiple Choice Problem Type
+ """
@attr(shard=7)
def test_can_show_answer(self):
"""
@@ -565,9 +664,17 @@ class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.wait_for_show_answer_notification()
-class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class MultipleChoiceProblemTypeNeverShowCorrectnessTest(MultipleChoiceProblemTypeBase,
+ ProblemNeverShowCorrectnessMixin):
"""
- TestCase Class for Radio Problem Type
+ Ensure that correctness can be withheld for Multiple Choice Problem Type problems.
+ """
+ pass
+
+
+class RadioProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Radio Problem Type
"""
problem_name = 'RADIO TEST PROBLEM'
problem_type = 'radio'
@@ -586,14 +693,9 @@ class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['label.choicegroup_correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
+ 'submitted': ['label.choicegroup_submitted', 'span.submitted'],
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for RadioProblemTypeTest
- """
- super(RadioProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer radio problem.
@@ -604,9 +706,23 @@ class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.click_choice("choice_1")
-class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class RadioProblemTypeTest(RadioProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for Drop Down Problem Type
+ Standard tests for the Radio Problem Type
+ """
+ pass
+
+
+class RadioProblemTypeNeverShowCorrectnessTest(RadioProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Radio Problem Type problems.
+ """
+ pass
+
+
+class DropDownProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Drop Down Problem Type
"""
problem_name = 'DROP DOWN TEST PROBLEM'
problem_type = 'drop down'
@@ -621,12 +737,6 @@ class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct_option': 'Option 2'
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for DropDownProblemTypeTest
- """
- super(DropDownProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer drop down problem.
@@ -637,9 +747,23 @@ class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
select_option_by_text(selector_element, answer)
-class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class DropDownProblemTypeTest(DropDownProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for String Problem Type
+ Standard tests for the Drop Down Problem Type
+ """
+ pass
+
+
+class DropDownProblemTypeNeverShowCorrectnessTest(DropDownProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Drop Down Problem Type problems.
+ """
+ pass
+
+
+class StringProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for String Problem Type
"""
problem_name = 'STRING TEST PROBLEM'
problem_type = 'string'
@@ -658,14 +782,9 @@ class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
+ 'submitted': ['span.submitted'],
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for StringProblemTypeTest
- """
- super(StringProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer string problem.
@@ -674,9 +793,23 @@ class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.fill_answer(textvalue)
-class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class StringProblemTypeTest(StringProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for Numerical Problem Type
+ Standard tests for the String Problem Type
+ """
+ pass
+
+
+class StringProblemTypeNeverShowCorrectnessTest(StringProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for String Problem Type problems.
+ """
+ pass
+
+
+class NumericalProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Numerical Problem Type
"""
problem_name = 'NUMERICAL TEST PROBLEM'
problem_type = 'numerical'
@@ -695,14 +828,9 @@ class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
+ 'submitted': ['div.submitted'],
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for NumericalProblemTypeTest
- """
- super(NumericalProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer numerical problem.
@@ -716,6 +844,11 @@ class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
textvalue = str(random.randint(-2, 2))
self.problem_page.fill_answer(textvalue)
+
+class NumericalProblemTypeTest(NumericalProblemTypeBase, ProblemTypeTestMixin):
+ """
+ Standard tests for the Numerical Problem Type
+ """
def test_error_input_gentle_alert(self):
"""
Scenario: I can answer a problem with erroneous input and will see a gentle alert
@@ -741,9 +874,16 @@ class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.wait_for_focus_on_problem_meta()
-class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class NumericalProblemTypeNeverShowCorrectnessTest(NumericalProblemTypeBase, ProblemNeverShowCorrectnessMixin):
"""
- TestCase Class for Formula Problem Type
+ Ensure that correctness can be withheld for Numerical Problem Type problems.
+ """
+ pass
+
+
+class FormulaProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Formula Problem Type
"""
problem_name = 'FORMULA TEST PROBLEM'
problem_type = 'formula'
@@ -764,14 +904,9 @@ class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
+ 'submitted': ['div.submitted'],
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for FormulaProblemTypeTest
- """
- super(FormulaProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer formula problem.
@@ -780,12 +915,27 @@ class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.fill_answer(textvalue)
-class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class FormulaProblemTypeTest(FormulaProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for Script Problem Type
+ Standard tests for the Formula Problem Type
+ """
+ pass
+
+
+class FormulaProblemTypeNeverShowCorrectnessTest(FormulaProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Formula Problem Type problems.
+ """
+ pass
+
+
+class ScriptProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Script Problem Type
"""
problem_name = 'SCRIPT TEST PROBLEM'
problem_type = 'script'
+ problem_points = 2
partially_correct = False
factory = CustomResponseXMLFactory()
@@ -811,14 +961,9 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
+ 'submitted': ['div.submitted'],
}
- def setUp(self, *args, **kwargs):
- """
- Additional setup for ScriptProblemTypeTest
- """
- super(ScriptProblemTypeTest, self).setUp(*args, **kwargs)
-
def answer_problem(self, correctness):
"""
Answer script problem.
@@ -836,6 +981,20 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
self.problem_page.fill_answer(second_addend, input_num=1)
+class ScriptProblemTypeTest(ScriptProblemTypeBase, ProblemTypeTestMixin):
+ """
+ Standard tests for the Script Problem Type
+ """
+ pass
+
+
+class ScriptProblemTypeNeverShowCorrectnessTest(ScriptProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Script Problem Type problems.
+ """
+ pass
+
+
class JSInputTypeTest(ProblemTypeTestBase, ProblemTypeA11yTestMixin):
"""
TestCase Class for jsinput (custom JavaScript) problem type.
@@ -859,9 +1018,9 @@ class JSInputTypeTest(ProblemTypeTestBase, ProblemTypeA11yTestMixin):
raise NotImplementedError()
-class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class CodeProblemTypeBase(ProblemTypeTestBase):
"""
- TestCase Class for Code Problem Type
+ ProblemTypeTestBase specialization for Code Problem Type
"""
problem_name = 'CODE TEST PROBLEM'
problem_type = 'code'
@@ -879,6 +1038,7 @@ class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['.grader-status .correct ~ .debug'],
'incorrect': ['.grader-status .incorrect ~ .debug'],
'unanswered': ['.grader-status .unanswered ~ .debug'],
+ 'submitted': ['.grader-status .submitted ~ .debug'],
}
def answer_problem(self, correctness):
@@ -895,6 +1055,11 @@ class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
# (configured in the problem XML above)
pass
+
+class CodeProblemTypeTest(CodeProblemTypeBase, ProblemTypeTestMixin):
+ """
+ Standard tests for the Code Problem Type
+ """
def test_answer_incorrectly(self):
"""
Overridden for script test because the testing grader always responds
@@ -924,7 +1089,14 @@ class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
pass
-class ChoiceTextProbelmTypeTestBase(ProblemTypeTestBase):
+class CodeProblemTypeNeverShowCorrectnessTest(CodeProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Code Problem Type problems.
+ """
+ pass
+
+
+class ChoiceTextProblemTypeTestBase(ProblemTypeTestBase):
"""
Base class for "Choice + Text" Problem Types.
(e.g. RadioText, CheckboxText)
@@ -961,9 +1133,9 @@ class ChoiceTextProbelmTypeTestBase(ProblemTypeTestBase):
self._fill_input_text(input_value, choice)
-class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin):
+class RadioTextProblemTypeBase(ChoiceTextProblemTypeTestBase):
"""
- TestCase Class for Radio Text Problem Type
+ ProblemTypeTestBase specialization for Radio Text Problem Type
"""
problem_name = 'RADIO TEXT TEST PROBLEM'
problem_type = 'radio_text'
@@ -986,13 +1158,14 @@ class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMix
'correct': ['section.choicetextgroup_correct'],
'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
+ 'submitted': ['section.choicetextgroup_submitted', 'span.submitted'],
}
def setUp(self, *args, **kwargs):
"""
- Additional setup for RadioTextProblemTypeTest
+ Additional setup for RadioTextProblemTypeBase
"""
- super(RadioTextProblemTypeTest, self).setUp(*args, **kwargs)
+ super(RadioTextProblemTypeBase, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
"ignore": [
@@ -1003,9 +1176,23 @@ class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMix
})
-class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin):
+class RadioTextProblemTypeTest(RadioTextProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for Checkbox Text Problem Type
+ Standard tests for the Radio Text Problem Type
+ """
+ pass
+
+
+class RadioTextProblemTypeNeverShowCorrectnessTest(RadioTextProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Radio + Text Problem Type problems.
+ """
+ pass
+
+
+class CheckboxTextProblemTypeBase(ChoiceTextProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Checkbox Text Problem Type
"""
problem_name = 'CHECKBOX TEXT TEST PROBLEM'
problem_type = 'checkbox_text'
@@ -1025,9 +1212,9 @@ class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTest
def setUp(self, *args, **kwargs):
"""
- Additional setup for CheckboxTextProblemTypeTest
+ Additional setup for CheckboxTextProblemTypeBase
"""
- super(CheckboxTextProblemTypeTest, self).setUp(*args, **kwargs)
+ super(CheckboxTextProblemTypeBase, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
"ignore": [
@@ -1038,9 +1225,23 @@ class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTest
})
-class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class CheckboxTextProblemTypeTest(CheckboxTextProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for Image Problem Type
+ Standard tests for the Checkbox Text Problem Type
+ """
+ pass
+
+
+class CheckboxTextProblemTypeNeverShowCorrectnessTest(CheckboxTextProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Checkbox + Text Problem Type problems.
+ """
+ pass
+
+
+class ImageProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Image Problem Type
"""
problem_name = 'IMAGE TEST PROBLEM'
problem_type = 'image'
@@ -1071,9 +1272,23 @@ class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
chain.perform()
-class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
+class ImageProblemTypeTest(ImageProblemTypeBase, ProblemTypeTestMixin):
"""
- TestCase Class for Symbolic Problem Type
+ Standard tests for the Image Problem Type
+ """
+ pass
+
+
+class ImageProblemTypeNeverShowCorrectnessTest(ImageProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Image Problem Type problems.
+ """
+ pass
+
+
+class SymbolicProblemTypeBase(ProblemTypeTestBase):
+ """
+ ProblemTypeTestBase specialization for Symbolic Problem Type
"""
problem_name = 'SYMBOLIC TEST PROBLEM'
problem_type = 'symbolicresponse'
@@ -1090,6 +1305,7 @@ class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'correct': ['div.capa_inputtype div.correct'],
'incorrect': ['div.capa_inputtype div.incorrect'],
'unanswered': ['div.capa_inputtype div.unanswered'],
+ 'submitted': ['div.capa_inputtype div.submitted'],
}
def answer_problem(self, correctness):
@@ -1098,3 +1314,17 @@ class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
choice = "2*x+3*y" if correctness == 'correct' else "3*a+4*b"
self.problem_page.fill_answer(choice)
+
+
+class SymbolicProblemTypeTest(SymbolicProblemTypeBase, ProblemTypeTestMixin):
+ """
+ Standard tests for the Symbolic Problem Type
+ """
+ pass
+
+
+class SymbolicProblemTypeNeverShowCorrectnessTest(SymbolicProblemTypeBase, ProblemNeverShowCorrectnessMixin):
+ """
+ Ensure that correctness can be withheld for Symbolic Problem Type problems.
+ """
+ pass
diff --git a/lms/templates/problem.html b/lms/templates/problem.html
index 646a16650a..f4d196bc5e 100644
--- a/lms/templates/problem.html
+++ b/lms/templates/problem.html
@@ -98,6 +98,15 @@ from openedx.core.djangolib.markup import HTML
notification_message=answer_notification_message"
/>
% endif
+ % if 'submitted' == answer_notification_type:
+ <%include file="problem_notifications.html" args="
+ notification_type='general',
+ notification_icon='fa-info-circle',
+ notification_name='submit',
+ is_hidden=False,
+ notification_message=answer_notification_message"
+ />
+ % endif
% endif
<%include file="problem_notifications.html" args="
notification_type='warning',