diff --git a/cms/static/sass/_build-v1.scss b/cms/static/sass/_build-v1.scss index b2d56c57ec..94cec810c8 100644 --- a/cms/static/sass/_build-v1.scss +++ b/cms/static/sass/_build-v1.scss @@ -87,3 +87,7 @@ // +CodeMirror Overrides // ==================== @import 'elements/codemirror-overrides'; + +// CAPA Problem Feedback +@import 'edx-pattern-library-shims/buttons'; + diff --git a/cms/static/sass/edx-pattern-library-shims b/cms/static/sass/edx-pattern-library-shims new file mode 120000 index 0000000000..eae51650c7 --- /dev/null +++ b/cms/static/sass/edx-pattern-library-shims @@ -0,0 +1 @@ +../../../common/static/sass/edx-pattern-library-shims \ No newline at end of file diff --git a/cms/static/sass/elements/_system-feedback.scss b/cms/static/sass/elements/_system-feedback.scss index 44510875eb..2bdcc879c0 100644 --- a/cms/static/sass/elements/_system-feedback.scss +++ b/cms/static/sass/elements/_system-feedback.scss @@ -399,7 +399,6 @@ margin: 0 auto; width: flex-grid(12); max-width: $fg-max-width; - min-width: $fg-min-width; strong { @extend %t-strong; diff --git a/cms/static/sass/elements/_xblocks.scss b/cms/static/sass/elements/_xblocks.scss index e702861075..ed77384405 100644 --- a/cms/static/sass/elements/_xblocks.scss +++ b/cms/static/sass/elements/_xblocks.scss @@ -248,15 +248,7 @@ color: $color-visibility-set; } } - - .action { - - .save { - // taking styles from LMS for these Save buttons to maintain consistency - // there is no studio-specific style for these LMS-styled buttons - @extend %btn-lms-style; - } - } + } // +Messaging - Xblocks diff --git a/cms/static/sass/xmodule/_headings.scss b/cms/static/sass/xmodule/_headings.scss index e44d31ec2d..c58fba2f3e 100644 --- a/cms/static/sass/xmodule/_headings.scss +++ b/cms/static/sass/xmodule/_headings.scss @@ -32,9 +32,8 @@ $headings-base-color: $gray-d2; %hd-2 { - margin-bottom: 1em; - font-size: 1.5em; - font-weight: $headings-font-weight-normal; + font-size: 1.1125em; + font-weight: 
$headings-font-weight-bold; line-height: 1.4em; } diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py index 548ccd43da..8ee50b1c6a 100644 --- a/common/lib/capa/capa/capa_problem.py +++ b/common/lib/capa/capa/capa_problem.py @@ -46,6 +46,7 @@ ACCESSIBLE_CAPA_INPUT_TYPES = [ 'optioninput', 'textline', 'formulaequationinput', + 'textbox', ] # these get captured as student responses @@ -376,7 +377,7 @@ class LoncapaProblem(object): def grade_answers(self, answers): """ - Grade student responses. Called by capa_module.check_problem. + Grade student responses. Called by capa_module.submit_problem. `answers` is a dict of all the entries from request.POST, but with the first part of each key removed (the string before the first "_"). @@ -496,6 +497,7 @@ class LoncapaProblem(object): choice-level explanations shown to a student after submission. Does nothing if there is no targeted-feedback attribute. """ + _ = self.capa_system.i18n.ugettext # Note that the modifications has been done, avoiding problems if called twice. 
if hasattr(self, 'has_targeted'): return @@ -515,9 +517,12 @@ class LoncapaProblem(object): # Keep track of the explanation-id that corresponds to the student's answer # Also, keep track of the solution-id solution_id = None + choice_correctness_for_student_answer = _('Incorrect') for choice in choices_list: if choice.get('name') == student_answer: expl_id_for_student_answer = choice.get('explanation-id') + if choice.get('correct') == 'true': + choice_correctness_for_student_answer = _('Correct') if choice.get('correct') == 'true': solution_id = choice.get('explanation-id') @@ -527,7 +532,15 @@ class LoncapaProblem(object): if len(targetedfeedbackset) != 0: targetedfeedbackset = targetedfeedbackset[0] targetedfeedbacks = targetedfeedbackset.xpath('./targetedfeedback') + # find the legend by id in choicegroup.html for aria-describedby + problem_legend_id = str(choicegroup.get('id')) + '-legend' for targetedfeedback in targetedfeedbacks: + screenreadertext = etree.Element("span") + targetedfeedback.insert(0, screenreadertext) + screenreadertext.set('class', 'sr') + screenreadertext.text = choice_correctness_for_student_answer + targetedfeedback.set('role', 'group') + targetedfeedback.set('aria-describedby', problem_legend_id) # Don't show targeted feedback if the student hasn't answer the problem # or if the target feedback doesn't match the student's (incorrect) answer if not self.done or targetedfeedback.get('explanation-id') != expl_id_for_student_answer: @@ -561,6 +574,7 @@ class LoncapaProblem(object): # Add our solution instead to the targetedfeedbackset and change its tag name solution_element.tag = 'targetedfeedback' + targetedfeedbackset.append(solution_element) def get_html(self): @@ -923,12 +937,26 @@ class LoncapaProblem(object): if len(inputfields) > 1: response.set('multiple_inputtypes', 'true') group_label_tag = response.find('label') + group_description_tags = response.findall('description') + group_label_tag_id = 
u'multiinput-group-label-{}'.format(responsetype_id) group_label_tag_text = '' if group_label_tag is not None: group_label_tag.tag = 'p' - group_label_tag.set('id', responsetype_id) + group_label_tag.set('id', group_label_tag_id) group_label_tag.set('class', 'multi-inputs-group-label') group_label_tag_text = stringify_children(group_label_tag) + response.set('multiinput-group-label-id', group_label_tag_id) + + group_description_ids = [] + for index, group_description_tag in enumerate(group_description_tags): + group_description_tag_id = u'multiinput-group-description-{}-{}'.format(responsetype_id, index) + group_description_tag.tag = 'p' + group_description_tag.set('id', group_description_tag_id) + group_description_tag.set('class', 'multi-inputs-group-description question-description') + group_description_ids.append(group_description_tag_id) + + if group_description_ids: + response.set('multiinput-group_description_ids', ' '.join(group_description_ids)) for inputfield in inputfields: problem_data[inputfield.get('id')] = { diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 1365fde94a..25b5fe30b2 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -818,8 +818,17 @@ class CodeInput(InputTypeBase): self.setup_code_response_rendering() def _extra_context(self): - """Defined queue_len, add it """ - return {'queue_len': self.queue_len, } + """ + Define queue_len, arial_label and code mirror exit message context variables + """ + _ = self.capa_system.i18n.ugettext + return { + 'queue_len': self.queue_len, + 'aria_label': _('{programming_language} editor').format( + programming_language=self.loaded_attributes.get('mode') + ), + 'code_mirror_exit_message': _('Press ESC then TAB or click outside of the code editor to exit') + } #----------------------------------------------------------------------------- diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py 
index 161f04155e..2587966ac3 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -51,6 +51,7 @@ from lxml.html.soupparser import fromstring as fromstring_bs # uses Beautifu import capa.xqueue_interface as xqueue_interface import capa.safe_exec as safe_exec +from openedx.core.djangolib.markup import HTML, Text log = logging.getLogger(__name__) @@ -252,23 +253,31 @@ class LoncapaResponse(object): """ _ = self.capa_system.i18n.ugettext - # get responsetype index to make responsetype label - response_index = self.xml.attrib['id'].split('_')[-1] + # response_id = problem_id + response index + response_id = self.xml.attrib['id'] + + response_index = response_id.split('_')[-1] # Translators: index here could be 1,2,3 and so on response_label = _(u'Question {index}').format(index=response_index) # wrap the content inside a section - tree = etree.Element('section') + tree = etree.Element('div') tree.set('class', 'wrapper-problem-response') tree.set('tabindex', '-1') tree.set('aria-label', response_label) + tree.set('role', 'group') if self.xml.get('multiple_inputtypes'): # add
to wrap all inputtypes content = etree.SubElement(tree, 'div') content.set('class', 'multi-inputs-group') content.set('role', 'group') - content.set('aria-labelledby', self.xml.get('id')) + + if self.xml.get('multiinput-group-label-id'): + content.set('aria-labelledby', self.xml.get('multiinput-group-label-id')) + + if self.xml.get('multiinput-group_description_ids'): + content.set('aria-describedby', self.xml.get('multiinput-group_description_ids')) else: content = tree @@ -352,9 +361,9 @@ class LoncapaResponse(object): # Tricky: label None means output defaults, while '' means output empty label if label is None: if correct: - label = _(u'Correct') + label = _(u'Correct:') else: - label = _(u'Incorrect') + label = _(u'Incorrect:') # self.runtime.track_function('get_demand_hint', event_info) # This this "feedback hint" event @@ -372,15 +381,23 @@ class LoncapaResponse(object): self.capa_module.runtime.track_function('edx.problem.hint.feedback_displayed', event_info) # Form the div-wrapped hint texts - hints_wrap = u''.join( - [u'
{1}
'.format(QUESTION_HINT_TEXT_STYLE, dct.get('text')) - for dct in hint_log] + hints_wrap = HTML('').join( + [HTML('
{hint_content}
').format( + question_hint_text_style=QUESTION_HINT_TEXT_STYLE, + hint_content=HTML(dct.get('text')) + ) for dct in hint_log] ) if multiline_mode: - hints_wrap = u'
{1}
'.format(QUESTION_HINT_MULTILINE, hints_wrap) + hints_wrap = HTML('
{hints_wrap}
').format( + question_hint_multiline=QUESTION_HINT_MULTILINE, + hints_wrap=hints_wrap + ) label_wrap = '' if label: - label_wrap = u'
{1}:
'.format(QUESTION_HINT_LABEL_STYLE, label) + label_wrap = HTML('{label} ').format( + question_hint_label_style=QUESTION_HINT_LABEL_STYLE, + label=Text(label) + ) # Establish the outer style if correct: @@ -389,7 +406,12 @@ class LoncapaResponse(object): style = QUESTION_HINT_INCORRECT_STYLE # Ready to go - return u'
{1}{2}
'.format(style, label_wrap, hints_wrap) + return HTML('
{text}
{lwrp}{hintswrap}
').format( + st=style, + text=Text(_("Answer")), + lwrp=label_wrap, + hintswrap=hints_wrap + ) def get_extended_hints(self, student_answers, new_cmap): """ diff --git a/common/lib/capa/capa/templates/annotationinput.html b/common/lib/capa/capa/templates/annotationinput.html index 3e20c975a3..02c434bd78 100644 --- a/common/lib/capa/capa/templates/annotationinput.html +++ b/common/lib/capa/capa/templates/annotationinput.html @@ -17,7 +17,7 @@
${comment_prompt}
-
${tag_prompt}
+
${tag_prompt}
% if msg: -${HTML(msg)} +${HTML(msg)} % endif diff --git a/common/lib/capa/capa/templates/chemicalequationinput.html b/common/lib/capa/capa/templates/chemicalequationinput.html index 380e4b6e8d..6d25a31d6d 100644 --- a/common/lib/capa/capa/templates/chemicalequationinput.html +++ b/common/lib/capa/capa/templates/chemicalequationinput.html @@ -11,8 +11,8 @@ />

- ${value|h} - - ${status.display_name} + ${value|h} + ${status.display_name}

diff --git a/common/lib/capa/capa/templates/choicegroup.html b/common/lib/capa/capa/templates/choicegroup.html index bcad73897a..259ea37373 100644 --- a/common/lib/capa/capa/templates/choicegroup.html +++ b/common/lib/capa/capa/templates/choicegroup.html @@ -15,7 +15,7 @@

${description_text}

% endfor % for choice_id, choice_label in choices: -
+
<% label_class = 'response-label field-label label-inline' %> @@ -60,7 +60,7 @@
% if input_type == 'checkbox' or not value: - + ${status.display_tooltip} % endif @@ -69,6 +69,6 @@
${submitted_message}
%endif % if msg: - ${HTML(msg)} + ${HTML(msg)} % endif diff --git a/common/lib/capa/capa/templates/choicetext.html b/common/lib/capa/capa/templates/choicetext.html index 0199a2f8d0..88422e6c1e 100644 --- a/common/lib/capa/capa/templates/choicetext.html +++ b/common/lib/capa/capa/templates/choicetext.html @@ -1,5 +1,8 @@ -<%! from capa.util import remove_markup %> -<%! from django.utils.translation import ugettext as _ %> +<%! from capa.util import remove_markup +from django.utils.translation import ugettext as _ +from openedx.core.djangolib.markup import HTML +%> + <% element_checked = False %> % for choice_id, _ in choices: <% choice_id = choice_id %> @@ -63,7 +66,9 @@
% if input_type == 'checkbox' or not element_checked: - + + ${status.display_name} + % endif
@@ -71,7 +76,7 @@
${_(submitted_message)}
%endif % if msg: - ${msg|n} + ${HTML(msg)} % endif diff --git a/common/lib/capa/capa/templates/codeinput.html b/common/lib/capa/capa/templates/codeinput.html index dda2d6b97b..57d4d82807 100644 --- a/common/lib/capa/capa/templates/codeinput.html +++ b/common/lib/capa/capa/templates/codeinput.html @@ -1,9 +1,16 @@ -<%! from django.utils.translation import ugettext as _ %> -
- + >${value} + + ${code_mirror_exit_message} +
-
- ${msg|n} +
+ ${HTML(msg)}
-
+
diff --git a/common/lib/capa/capa/templates/crystallography.html b/common/lib/capa/capa/templates/crystallography.html index 0442fb0fa6..3229f75c10 100644 --- a/common/lib/capa/capa/templates/crystallography.html +++ b/common/lib/capa/capa/templates/crystallography.html @@ -1,3 +1,4 @@ +<%! from openedx.core.djangolib.markup import HTML %>
@@ -16,13 +17,13 @@

- ${status.display_name} + ${status.display_name}

% if msg: - ${msg|n} + ${HTML(msg)} % endif % if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']: diff --git a/common/lib/capa/capa/templates/designprotein2dinput.html b/common/lib/capa/capa/templates/designprotein2dinput.html index bb3490f5af..3cdce1c97a 100644 --- a/common/lib/capa/capa/templates/designprotein2dinput.html +++ b/common/lib/capa/capa/templates/designprotein2dinput.html @@ -11,7 +11,7 @@

- ${status.display_name} + ${status.display_name}

diff --git a/common/lib/capa/capa/templates/drag_and_drop_input.html b/common/lib/capa/capa/templates/drag_and_drop_input.html index 57803bf909..b18d9ed1dd 100644 --- a/common/lib/capa/capa/templates/drag_and_drop_input.html +++ b/common/lib/capa/capa/templates/drag_and_drop_input.html @@ -17,14 +17,14 @@ -

- ${status.display_name} +

+ ${status.display_name}

% if msg: - ${HTML(msg)} + ${HTML(msg)} % endif % if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']: diff --git a/common/lib/capa/capa/templates/editageneinput.html b/common/lib/capa/capa/templates/editageneinput.html index 11c947a955..4ae36cb964 100644 --- a/common/lib/capa/capa/templates/editageneinput.html +++ b/common/lib/capa/capa/templates/editageneinput.html @@ -12,7 +12,7 @@

- ${status.display_name} + ${status.display_name}

diff --git a/common/lib/capa/capa/templates/editamolecule.html b/common/lib/capa/capa/templates/editamolecule.html index c51101dc9d..633bff4c05 100644 --- a/common/lib/capa/capa/templates/editamolecule.html +++ b/common/lib/capa/capa/templates/editamolecule.html @@ -17,9 +17,8 @@

- ${status.display_name} + ${status.display_name}

-

diff --git a/common/lib/capa/capa/templates/filesubmission.html b/common/lib/capa/capa/templates/filesubmission.html index 7b3c41dcaf..1c1ac2b76a 100644 --- a/common/lib/capa/capa/templates/filesubmission.html +++ b/common/lib/capa/capa/templates/filesubmission.html @@ -10,5 +10,5 @@
-
${HTML(msg)}
+
${HTML(msg)}
diff --git a/common/lib/capa/capa/templates/formulaequationinput.html b/common/lib/capa/capa/templates/formulaequationinput.html index 8a857b58a0..32839565d8 100644 --- a/common/lib/capa/capa/templates/formulaequationinput.html +++ b/common/lib/capa/capa/templates/formulaequationinput.html @@ -4,7 +4,7 @@
% if response_data['label']: - + % endif % for description_id, description_text in response_data['descriptions'].items():

${description_text}

@@ -18,7 +18,7 @@ /> ${trailing_text} - + ${status.display_tooltip} @@ -33,6 +33,6 @@
% if msg: - ${HTML(msg)} + ${HTML(msg)} % endif
diff --git a/common/lib/capa/capa/templates/jsinput.html b/common/lib/capa/capa/templates/jsinput.html index 9a9b03742d..6b42e83ef4 100644 --- a/common/lib/capa/capa/templates/jsinput.html +++ b/common/lib/capa/capa/templates/jsinput.html @@ -1,3 +1,4 @@ +<%! from openedx.core.djangolib.markup import HTML %>

- ${status.display_name} + ${status.display_name}

-

@@ -52,6 +52,6 @@ % endif % if msg: - ${msg|n} + ${HTML(msg)} % endif
diff --git a/common/lib/capa/capa/templates/jstextline.html b/common/lib/capa/capa/templates/jstextline.html deleted file mode 100644 index ae7ed71ca1..0000000000 --- a/common/lib/capa/capa/templates/jstextline.html +++ /dev/null @@ -1,28 +0,0 @@ -
- - - % if dojs == 'math': - `{::}` - % endif - - - - % if dojs == 'math': - - % endif - - - ${status.display_name} - - % if msg: -
- ${msg|n} - % endif -
diff --git a/common/lib/capa/capa/templates/optioninput.html b/common/lib/capa/capa/templates/optioninput.html index f32544145b..dd8c42b130 100644 --- a/common/lib/capa/capa/templates/optioninput.html +++ b/common/lib/capa/capa/templates/optioninput.html @@ -4,7 +4,7 @@
% if response_data['label']: - + % endif % for description_id, description_text in response_data['descriptions'].items(): @@ -23,12 +23,14 @@
- + ${status.display_tooltip}

% if msg: - ${HTML(msg)} + ${HTML(msg)} % endif
diff --git a/common/lib/capa/capa/templates/textline.html b/common/lib/capa/capa/templates/textline.html index de4d2740bc..f7e85a7478 100644 --- a/common/lib/capa/capa/templates/textline.html +++ b/common/lib/capa/capa/templates/textline.html @@ -17,7 +17,7 @@ % endif % if response_data['label']: - + % endif % for description_id, description_text in response_data['descriptions'].items(): @@ -36,7 +36,7 @@ /> ${trailing_text} - + ${status.display_tooltip} @@ -51,8 +51,8 @@
% endif -% if msg: - ${HTML(msg)} -% endif + % if msg: + ${HTML(msg)} + % endif
diff --git a/common/lib/capa/capa/templates/vsepr_input.html b/common/lib/capa/capa/templates/vsepr_input.html index c3af6222ae..5306e3dd88 100644 --- a/common/lib/capa/capa/templates/vsepr_input.html +++ b/common/lib/capa/capa/templates/vsepr_input.html @@ -20,14 +20,14 @@ style="display:none;" /> -

- ${status.display_name} +

+ ${status.display_name}

% if msg: - ${HTML(msg)} + ${HTML(msg)} % endif % if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
diff --git a/common/lib/capa/capa/tests/helpers.py b/common/lib/capa/capa/tests/helpers.py index 7ae7a3020d..8947a93552 100644 --- a/common/lib/capa/capa/tests/helpers.py +++ b/common/lib/capa/capa/tests/helpers.py @@ -90,10 +90,10 @@ def mock_capa_module(): return capa_module -def new_loncapa_problem(xml, capa_system=None, seed=723, use_capa_render_template=False): +def new_loncapa_problem(xml, problem_id='1', capa_system=None, seed=723, use_capa_render_template=False): """Construct a `LoncapaProblem` suitable for unit tests.""" render_template = capa_render_template if use_capa_render_template else None - return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system(render_template), + return LoncapaProblem(xml, id=problem_id, seed=seed, capa_system=capa_system or test_capa_system(render_template), capa_module=mock_capa_module()) diff --git a/common/lib/capa/capa/tests/response_xml_factory.py b/common/lib/capa/capa/tests/response_xml_factory.py index b01dbf4fc8..0d35ea1021 100644 --- a/common/lib/capa/capa/tests/response_xml_factory.py +++ b/common/lib/capa/capa/tests/response_xml_factory.py @@ -37,7 +37,7 @@ class ResponseXMLFactory(object): For all response types, **kwargs can contain: *question_text*: The text of the question to display, - wrapped in

tags. + wrapped in

with question text - question = etree.SubElement(root, "p") - question.text = question_text - # Add the response(s) for __ in range(int(num_responses)): response_element = self.create_response_element(**kwargs) @@ -86,6 +82,10 @@ class ResponseXMLFactory(object): root.append(response_element) + # Add the question label + question = etree.SubElement(response_element, "label") + question.text = question_text + # Add input elements for __ in range(int(num_inputs)): input_element = self.create_input_element(**kwargs) @@ -113,9 +113,13 @@ class ResponseXMLFactory(object): """ math_display = kwargs.get('math_display', False) size = kwargs.get('size', None) + input_element_label = kwargs.get('input_element_label', '') input_element = etree.Element('textline') + if input_element_label: + input_element.set('label', input_element_label) + if math_display: input_element.set('math', '1') @@ -267,9 +271,6 @@ class CustomResponseXMLFactory(ResponseXMLFactory): *answer_attr*: The "answer" attribute on the tag itself (treated as an alias to "expect", though "expect" takes priority if both are given) - - *group_label*: Text to represent group of inputs when there are - multiple inputs. 
""" # Retrieve **kwargs @@ -279,7 +280,6 @@ class CustomResponseXMLFactory(ResponseXMLFactory): answer = kwargs.get('answer', None) options = kwargs.get('options', None) cfn_extra_args = kwargs.get('cfn_extra_args', None) - group_label = kwargs.get('group_label', None) # Create the response element response_element = etree.Element("customresponse") @@ -297,10 +297,6 @@ class CustomResponseXMLFactory(ResponseXMLFactory): answer_element = etree.SubElement(response_element, "answer") answer_element.text = str(answer) - if group_label: - group_label_element = etree.SubElement(response_element, "label") - group_label_element.text = group_label - if options: response_element.set('options', str(options)) diff --git a/common/lib/capa/capa/tests/test_capa_problem.py b/common/lib/capa/capa/tests/test_capa_problem.py index 381f0d0722..c914ce579f 100644 --- a/common/lib/capa/capa/tests/test_capa_problem.py +++ b/common/lib/capa/capa/tests/test_capa_problem.py @@ -468,21 +468,30 @@ class CAPAMultiInputProblemTest(unittest.TestCase): def assert_problem_html(self, problme_html, group_label, *input_labels): """ Verify that correct html is rendered for multiple inputtypes. + + Arguments: + problme_html (str): problem HTML + group_label (str or None): multi input group label or None if label is not present + input_labels (tuple): individual input labels """ html = etree.XML(problme_html) # verify that only one multi input group div is present at correct path multi_inputs_group = html.xpath( - '//section[@class="wrapper-problem-response"]/div[@class="multi-inputs-group"]' + '//div[@class="wrapper-problem-response"]/div[@class="multi-inputs-group"]' ) self.assertEqual(len(multi_inputs_group), 1) - # verify that multi input group label

tag exists and its - # id matches with correct multi input group aria-labelledby - multi_inputs_group_label_id = multi_inputs_group[0].attrib.get('aria-labelledby') - multi_inputs_group_label = html.xpath('//p[@id="{}"]'.format(multi_inputs_group_label_id)) - self.assertEqual(len(multi_inputs_group_label), 1) - self.assertEqual(multi_inputs_group_label[0].text, group_label) + if group_label is None: + # if multi inputs group label is not present then there shouldn't be `aria-labelledby` attribute + self.assertEqual(multi_inputs_group[0].attrib.get('aria-labelledby'), None) + else: + # verify that multi input group label

tag exists and its + # id matches with correct multi input group aria-labelledby + multi_inputs_group_label_id = multi_inputs_group[0].attrib.get('aria-labelledby') + multi_inputs_group_label = html.xpath('//p[@id="{}"]'.format(multi_inputs_group_label_id)) + self.assertEqual(len(multi_inputs_group_label), 1) + self.assertEqual(multi_inputs_group_label[0].text, group_label) # verify that label for each input comes only once for input_label in input_labels: @@ -490,22 +499,26 @@ class CAPAMultiInputProblemTest(unittest.TestCase): input_label_element = multi_inputs_group[0].xpath('//*[normalize-space(text())="{}"]'.format(input_label)) self.assertEqual(len(input_label_element), 1) - def test_optionresponse(self): + @ddt.unpack + @ddt.data( + {'label_html': '', 'group_label': 'Choose the correct color'}, + {'label_html': '', 'group_label': None} + ) + def test_optionresponse(self, label_html, group_label): """ Verify that optionresponse problem with multiple inputtypes is rendered correctly. """ - group_label = 'Choose the correct color' input1_label = 'What color is the sky?' input2_label = 'What color are pine needles?' 
xml = """ - - - + {label_html} + + - """.format(group_label, input1_label, input2_label) + """.format(label_html=label_html, input1_label=input1_label, input2_label=input2_label) problem = self.capa_problem(xml) self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label) @@ -537,3 +550,43 @@ class CAPAMultiInputProblemTest(unittest.TestCase): """.format(group_label, input1_label, input2_label, inputtype=inputtype)) problem = self.capa_problem(xml) self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label) + + @ddt.unpack + @ddt.data( + { + 'descriptions': ('desc1', 'desc2'), + 'descriptions_html': 'desc1desc2' + }, + { + 'descriptions': (), + 'descriptions_html': '' + } + ) + def test_descriptions(self, descriptions, descriptions_html): + """ + Verify that groups descriptions are rendered correctly. + """ + xml = """ + + + + {descriptions_html} + + + + + """.format(descriptions_html=descriptions_html) + problem = self.capa_problem(xml) + problem_html = etree.XML(problem.get_html()) + + multi_inputs_group = problem_html.xpath('//div[@class="multi-inputs-group"]')[0] + description_ids = multi_inputs_group.attrib.get('aria-describedby', '').split() + + # Verify that number of descriptions matches description_ids + self.assertEqual(len(description_ids), len(descriptions)) + + # For each description, check its order and text is correct + for index, description_id in enumerate(description_ids): + description_element = multi_inputs_group.xpath('//p[@id="{}"]'.format(description_id)) + self.assertEqual(len(description_element), 1) + self.assertEqual(description_element[0].text, descriptions[index]) diff --git a/common/lib/capa/capa/tests/test_hint_functionality.py b/common/lib/capa/capa/tests/test_hint_functionality.py index b96f6cadbd..d890193a5a 100644 --- a/common/lib/capa/capa/tests/test_hint_functionality.py +++ b/common/lib/capa/capa/tests/test_hint_functionality.py @@ -55,7 +55,7 @@ class 
TextInputHintsTest(HintTest): {'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single', - 'hint_label': u'Correct', + 'hint_label': u'Correct:', 'correctness': True, 'student_answer': [u'Blue'], 'question_type': 'stringresponse', @@ -64,23 +64,23 @@ class TextInputHintsTest(HintTest): @data( {'problem_id': u'1_2_1', u'choice': u'GermanyΩ', - 'expected_string': u'

Incorrect:
I do not think so.Ω
'}, + 'expected_string': u'
Answer
Incorrect:
I do not think so.Ω
'}, {'problem_id': u'1_2_1', u'choice': u'franceΩ', - 'expected_string': u'
Correct:
Viva la France!Ω
'}, + 'expected_string': u'
Answer
Correct:
Viva la France!Ω
'}, {'problem_id': u'1_2_1', u'choice': u'FranceΩ', - 'expected_string': u'
Correct:
Viva la France!Ω
'}, + 'expected_string': u'
Answer
Correct:
Viva la France!Ω
'}, {'problem_id': u'1_2_1', u'choice': u'Mexico', 'expected_string': ''}, {'problem_id': u'1_2_1', u'choice': u'USAΩ', - 'expected_string': u'
Correct:
Less well known, but yes, there is a Paris, Texas.Ω
'}, + 'expected_string': u'
Answer
Correct:
Less well known, but yes, there is a Paris, Texas.Ω
'}, {'problem_id': u'1_2_1', u'choice': u'usaΩ', - 'expected_string': u'
Correct:
Less well known, but yes, there is a Paris, Texas.Ω
'}, + 'expected_string': u'
Answer
Correct:
Less well known, but yes, there is a Paris, Texas.Ω
'}, {'problem_id': u'1_2_1', u'choice': u'uSAxΩ', 'expected_string': u''}, {'problem_id': u'1_2_1', u'choice': u'NICKLANDΩ', - 'expected_string': u'
Incorrect:
The country name does not end in LANDΩ
'}, + 'expected_string': u'
Answer
Incorrect:
The country name does not end in LANDΩ
'}, {'problem_id': u'1_3_1', u'choice': u'Blue', - 'expected_string': u'
Correct:
The red light is scattered by water molecules leaving only blue light.
'}, + 'expected_string': u'
Answer
Correct:
The red light is scattered by water molecules leaving only blue light.
'}, {'problem_id': u'1_3_1', u'choice': u'blue', 'expected_string': u''}, {'problem_id': u'1_3_1', u'choice': u'b', @@ -101,22 +101,22 @@ class TextInputExtendedHintsCaseInsensitive(HintTest): @data( {'problem_id': u'1_5_1', 'choice': 'abc', 'expected_string': ''}, # wrong answer yielding no hint {'problem_id': u'1_5_1', 'choice': 'A', 'expected_string': - u'
Woo Hoo:
hint1
'}, + u'
Answer
Woo Hoo
hint1
'}, {'problem_id': u'1_5_1', 'choice': 'a', 'expected_string': - u'
Woo Hoo:
hint1
'}, + u'
Answer
Woo Hoo
hint1
'}, {'problem_id': u'1_5_1', 'choice': 'B', 'expected_string': - u'
hint2
'}, + u'
Answer
hint2
'}, {'problem_id': u'1_5_1', 'choice': 'b', 'expected_string': - u'
hint2
'}, + u'
Answer
hint2
'}, {'problem_id': u'1_5_1', 'choice': 'C', 'expected_string': - u'
hint4
'}, + u'
Answer
hint4
'}, {'problem_id': u'1_5_1', 'choice': 'c', 'expected_string': - u'
hint4
'}, + u'
Answer
hint4
'}, # regexp cases {'problem_id': u'1_5_1', 'choice': 'FGGG', 'expected_string': - u'
hint6
'}, + u'
Answer
hint6
'}, {'problem_id': u'1_5_1', 'choice': 'fgG', 'expected_string': - u'
hint6
'}, + u'
Answer
hint6
'}, ) @unpack def test_text_input_hints(self, problem_id, choice, expected_string): @@ -133,17 +133,17 @@ class TextInputExtendedHintsCaseSensitive(HintTest): @data( {'problem_id': u'1_6_1', 'choice': 'abc', 'expected_string': ''}, {'problem_id': u'1_6_1', 'choice': 'A', 'expected_string': - u'
Correct:
hint1
'}, + u'
Answer
Correct:
hint1
'}, {'problem_id': u'1_6_1', 'choice': 'a', 'expected_string': u''}, {'problem_id': u'1_6_1', 'choice': 'B', 'expected_string': - u'
Correct:
hint2
'}, + u'
Answer
Correct:
hint2
'}, {'problem_id': u'1_6_1', 'choice': 'b', 'expected_string': u''}, {'problem_id': u'1_6_1', 'choice': 'C', 'expected_string': - u'
Incorrect:
hint4
'}, + u'
Answer
Incorrect:
hint4
'}, {'problem_id': u'1_6_1', 'choice': 'c', 'expected_string': u''}, # regexp cases {'problem_id': u'1_6_1', 'choice': 'FGG', 'expected_string': - u'
Incorrect:
hint6
'}, + u'
Answer
Incorrect:
hint6
'}, {'problem_id': u'1_6_1', 'choice': 'fgG', 'expected_string': u''}, ) @unpack @@ -162,10 +162,10 @@ class TextInputExtendedHintsCompatible(HintTest): @data( {'problem_id': u'1_7_1', 'choice': 'A', 'correct': 'correct', - 'expected_string': '
Correct:
hint1
'}, + 'expected_string': '
Answer
Correct:
hint1
'}, {'problem_id': u'1_7_1', 'choice': 'B', 'correct': 'correct', 'expected_string': ''}, {'problem_id': u'1_7_1', 'choice': 'C', 'correct': 'correct', - 'expected_string': '
Correct:
hint2
'}, + 'expected_string': '
Answer
Correct:
hint2
'}, {'problem_id': u'1_7_1', 'choice': 'D', 'correct': 'incorrect', 'expected_string': ''}, # check going through conversion with difficult chars {'problem_id': u'1_7_1', 'choice': """<&"'>""", 'correct': 'correct', 'expected_string': ''}, @@ -188,23 +188,23 @@ class TextInputExtendedHintsRegex(HintTest): @data( {'problem_id': u'1_8_1', 'choice': 'ABwrong', 'correct': 'incorrect', 'expected_string': ''}, {'problem_id': u'1_8_1', 'choice': 'ABC', 'correct': 'correct', - 'expected_string': '
Correct:
hint1
'}, + 'expected_string': '
Answer
Correct:
hint1
'}, {'problem_id': u'1_8_1', 'choice': 'ABBBBC', 'correct': 'correct', - 'expected_string': '
Correct:
hint1
'}, + 'expected_string': '
Answer
Correct:
hint1
'}, {'problem_id': u'1_8_1', 'choice': 'aBc', 'correct': 'correct', - 'expected_string': '
Correct:
hint1
'}, + 'expected_string': '
Answer
Correct:
hint1
'}, {'problem_id': u'1_8_1', 'choice': 'BBBB', 'correct': 'correct', - 'expected_string': '
Correct:
hint2
'}, + 'expected_string': '
Answer
Correct:
hint2
'}, {'problem_id': u'1_8_1', 'choice': 'bbb', 'correct': 'correct', - 'expected_string': '
Correct:
hint2
'}, + 'expected_string': '
Answer
Correct:
hint2
'}, {'problem_id': u'1_8_1', 'choice': 'C', 'correct': 'incorrect', - 'expected_string': u'
Incorrect:
hint4
'}, + 'expected_string': u'
Answer
Incorrect:
hint4
'}, {'problem_id': u'1_8_1', 'choice': 'c', 'correct': 'incorrect', - 'expected_string': u'
Incorrect:
hint4
'}, + 'expected_string': u'
Answer
Incorrect:
hint4
'}, {'problem_id': u'1_8_1', 'choice': 'D', 'correct': 'incorrect', - 'expected_string': u'
Incorrect:
hint6
'}, + 'expected_string': u'
Answer
Incorrect:
hint6
'}, {'problem_id': u'1_8_1', 'choice': 'd', 'correct': 'incorrect', - 'expected_string': u'
Incorrect:
hint6
'}, + 'expected_string': u'
Answer
Incorrect:
hint6
'}, ) @unpack def test_text_input_hints(self, problem_id, choice, correct, expected_string): @@ -235,12 +235,12 @@ class NumericInputHintsTest(HintTest): @data( {'problem_id': u'1_2_1', 'choice': '1.141', - 'expected_string': u'
Nice:
The square root of two turns up in the strangest places.
'}, + 'expected_string': u'
Answer
Nice
The square root of two turns up in the strangest places.
'}, {'problem_id': u'1_3_1', 'choice': '4', - 'expected_string': u'
Correct:
Pretty easy, uh?.
'}, + 'expected_string': u'
Answer
Correct:
Pretty easy, uh?.
'}, # should get hint, when correct via numeric-tolerance {'problem_id': u'1_2_1', 'choice': '1.15', - 'expected_string': u'
Nice:
The square root of two turns up in the strangest places.
'}, + 'expected_string': u'
Answer
Nice
The square root of two turns up in the strangest places.
'}, # when they answer wrong, nothing {'problem_id': u'1_2_1', 'choice': '2', 'expected_string': ''}, ) @@ -260,67 +260,67 @@ class CheckboxHintsTest(HintTest): @data( {'problem_id': u'1_2_1', 'choice': [u'choice_0'], - 'expected_string': u'
Incorrect:
You are right that apple is a fruit.
You are right that mushrooms are not fruit
Remember that grape is also a fruit.
What is a camero anyway?
'}, + 'expected_string': u'
Answer
Incorrect:
You are right that apple is a fruit.
You are right that mushrooms are not fruit
Remember that grape is also a fruit.
What is a camero anyway?
'}, {'problem_id': u'1_2_1', 'choice': [u'choice_1'], - 'expected_string': u'
Incorrect:
Remember that apple is also a fruit.
Mushroom is a fungus, not a fruit.
Remember that grape is also a fruit.
What is a camero anyway?
'}, + 'expected_string': u'
Answer
Incorrect:
Remember that apple is also a fruit.
Mushroom is a fungus, not a fruit.
Remember that grape is also a fruit.
What is a camero anyway?
'}, {'problem_id': u'1_2_1', 'choice': [u'choice_2'], - 'expected_string': u'
Incorrect:
Remember that apple is also a fruit.
You are right that mushrooms are not fruit
You are right that grape is a fruit
What is a camero anyway?
'}, + 'expected_string': u'
Answer
Incorrect:
Remember that apple is also a fruit.
You are right that mushrooms are not fruit
You are right that grape is a fruit
What is a camero anyway?
'}, {'problem_id': u'1_2_1', 'choice': [u'choice_3'], - 'expected_string': u'
Incorrect:
Remember that apple is also a fruit.
You are right that mushrooms are not fruit
Remember that grape is also a fruit.
What is a camero anyway?
'}, + 'expected_string': u'
Answer
Incorrect:
Remember that apple is also a fruit.
You are right that mushrooms are not fruit
Remember that grape is also a fruit.
What is a camero anyway?
'}, {'problem_id': u'1_2_1', 'choice': [u'choice_4'], - 'expected_string': u'
Incorrect:
Remember that apple is also a fruit.
You are right that mushrooms are not fruit
Remember that grape is also a fruit.
I do not know what a Camero is but it is not a fruit.
'}, + 'expected_string': u'
Answer
Incorrect:
Remember that apple is also a fruit.
You are right that mushrooms are not fruit
Remember that grape is also a fruit.
I do not know what a Camero is but it is not a fruit.
'}, {'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_1'], # compound - 'expected_string': u'
Almost right:
You are right that apple is a fruit, but there is one you are missing. Also, mushroom is not a fruit.
'}, + 'expected_string': u'
Answer
Almost right
You are right that apple is a fruit, but there is one you are missing. Also, mushroom is not a fruit.
'}, {'problem_id': u'1_2_1', 'choice': [u'choice_1', u'choice_2'], # compound - 'expected_string': u'
Incorrect:
You are right that grape is a fruit, but there is one you are missing. Also, mushroom is not a fruit.
'}, + 'expected_string': u'
Answer
Incorrect:
You are right that grape is a fruit, but there is one you are missing. Also, mushroom is not a fruit.
'}, {'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_2'], - 'expected_string': u'
Correct:
You are right that apple is a fruit.
You are right that mushrooms are not fruit
You are right that grape is a fruit
What is a camero anyway?
'}, + 'expected_string': u'
Answer
Correct:
You are right that apple is a fruit.
You are right that mushrooms are not fruit
You are right that grape is a fruit
What is a camero anyway?
'}, {'problem_id': u'1_3_1', 'choice': [u'choice_0'], - 'expected_string': u'
Incorrect:
No, sorry, a banana is a fruit.
You are right that mushrooms are not vegatbles
Brussel sprout is the only vegetable in this list.
'}, + 'expected_string': u'
Answer
Incorrect:
No, sorry, a banana is a fruit.
You are right that mushrooms are not vegatbles
Brussel sprout is the only vegetable in this list.
'}, {'problem_id': u'1_3_1', 'choice': [u'choice_1'], - 'expected_string': u'
Incorrect:
poor banana.
You are right that mushrooms are not vegatbles
Brussel sprout is the only vegetable in this list.
'}, + 'expected_string': u'
Answer
Incorrect:
poor banana.
You are right that mushrooms are not vegatbles
Brussel sprout is the only vegetable in this list.
'}, {'problem_id': u'1_3_1', 'choice': [u'choice_2'], - 'expected_string': u'
Incorrect:
poor banana.
Mushroom is a fungus, not a vegetable.
Brussel sprout is the only vegetable in this list.
'}, + 'expected_string': u'
Answer
Incorrect:
poor banana.
Mushroom is a fungus, not a vegetable.
Brussel sprout is the only vegetable in this list.
'}, {'problem_id': u'1_3_1', 'choice': [u'choice_3'], - 'expected_string': u'
Correct:
poor banana.
You are right that mushrooms are not vegatbles
Brussel sprouts are vegetables.
'}, + 'expected_string': u'
Answer
Correct:
poor banana.
You are right that mushrooms are not vegatbles
Brussel sprouts are vegetables.
'}, {'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_1'], # compound - 'expected_string': u'
Very funny:
Making a banana split?
'}, + 'expected_string': u'
Answer
Very funny
Making a banana split?
'}, {'problem_id': u'1_3_1', 'choice': [u'choice_1', u'choice_2'], - 'expected_string': u'
Incorrect:
poor banana.
Mushroom is a fungus, not a vegetable.
Brussel sprout is the only vegetable in this list.
'}, + 'expected_string': u'
Answer
Incorrect:
poor banana.
Mushroom is a fungus, not a vegetable.
Brussel sprout is the only vegetable in this list.
'}, {'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_2'], - 'expected_string': u'
Incorrect:
No, sorry, a banana is a fruit.
Mushroom is a fungus, not a vegetable.
Brussel sprout is the only vegetable in this list.
'}, + 'expected_string': u'
Answer
Incorrect:
No, sorry, a banana is a fruit.
Mushroom is a fungus, not a vegetable.
Brussel sprout is the only vegetable in this list.
'}, # check for interaction between compoundhint and correct/incorrect {'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_1'], # compound - 'expected_string': u'
Incorrect:
AB
'}, + 'expected_string': u'
Answer
Incorrect:
AB
'}, {'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_2'], # compound - 'expected_string': u'
Correct:
AC
'}, + 'expected_string': u'
Answer
Correct:
AC
'}, # check for labeling where multiple child hints have labels # These are some tricky cases {'problem_id': '1_5_1', 'choice': ['choice_0', 'choice_1'], - 'expected_string': '
AA:
aa
'}, + 'expected_string': '
Answer
AA
aa
'}, {'problem_id': '1_5_1', 'choice': ['choice_0'], - 'expected_string': '
Incorrect:
aa
bb
'}, + 'expected_string': '
Answer
Incorrect:
aa
bb
'}, {'problem_id': '1_5_1', 'choice': ['choice_1'], 'expected_string': ''}, {'problem_id': '1_5_1', 'choice': [], - 'expected_string': '
BB:
bb
'}, + 'expected_string': '
Answer
BB
bb
'}, {'problem_id': '1_6_1', 'choice': ['choice_0'], - 'expected_string': '
aa
'}, + 'expected_string': '
Answer
aa
'}, {'problem_id': '1_6_1', 'choice': ['choice_0', 'choice_1'], - 'expected_string': '
compoundo
'}, + 'expected_string': '
Answer
compoundo
'}, # The user selects *nothing*, but can still get "unselected" feedback {'problem_id': '1_7_1', 'choice': [], - 'expected_string': '
Incorrect:
bb
'}, + 'expected_string': '
Answer
Incorrect:
bb
'}, # 100% not match of sel/unsel feedback {'problem_id': '1_7_1', 'choice': ['choice_1'], 'expected_string': ''}, # Here we have the correct combination, and that makes feedback too {'problem_id': '1_7_1', 'choice': ['choice_0'], - 'expected_string': '
Correct:
aa
bb
'}, + 'expected_string': '
Answer
Correct:
aa
bb
'}, ) @unpack def test_checkbox_hints(self, problem_id, choice, expected_string): @@ -360,7 +360,7 @@ class CheckboxHintsTestTracking(HintTest): self.get_hint(u'1_2_1', [u'choice_0']) self.problem.capa_module.runtime.track_function.assert_called_with( 'edx.problem.hint.feedback_displayed', - {'hint_label': u'Incorrect', + {'hint_label': u'Incorrect:', 'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'choice_all': ['choice_0', 'choice_1', 'choice_2'], @@ -376,7 +376,7 @@ class CheckboxHintsTestTracking(HintTest): self.get_hint(u'1_2_1', [u'choice_1', u'choice_2']) self.problem.capa_module.runtime.track_function.assert_called_with( 'edx.problem.hint.feedback_displayed', - {'hint_label': u'Incorrect', + {'hint_label': u'Incorrect:', 'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'choice_all': ['choice_0', 'choice_1', 'choice_2'], @@ -395,7 +395,7 @@ class CheckboxHintsTestTracking(HintTest): self.get_hint(u'1_2_1', [u'choice_0', u'choice_2']) self.problem.capa_module.runtime.track_function.assert_called_with( 'edx.problem.hint.feedback_displayed', - {'hint_label': u'Correct', + {'hint_label': u'Correct:', 'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'choice_all': ['choice_0', 'choice_1', 'choice_2'], @@ -431,15 +431,15 @@ class MultpleChoiceHintsTest(HintTest): @data( {'problem_id': u'1_2_1', 'choice': u'choice_0', - 'expected_string': '
Mushroom is a fungus, not a fruit.
'}, + 'expected_string': '
Answer
Mushroom is a fungus, not a fruit.
'}, {'problem_id': u'1_2_1', 'choice': u'choice_1', 'expected_string': ''}, {'problem_id': u'1_3_1', 'choice': u'choice_1', - 'expected_string': '
Correct:
Potato is a root vegetable.
'}, + 'expected_string': '
Answer
Correct:
Potato is a root vegetable.
'}, {'problem_id': u'1_2_1', 'choice': u'choice_2', - 'expected_string': '
OUTSTANDING:
Apple is indeed a fruit.
'}, + 'expected_string': '
Answer
OUTSTANDING
Apple is indeed a fruit.
'}, {'problem_id': u'1_3_1', 'choice': u'choice_2', - 'expected_string': '
OOPS:
Apple is a fruit.
'}, + 'expected_string': '
Answer
OOPS
Apple is a fruit.
'}, {'problem_id': u'1_3_1', 'choice': u'choice_9', 'expected_string': ''}, ) @@ -466,16 +466,16 @@ class MultpleChoiceHintsWithHtmlTest(HintTest): 'edx.problem.hint.feedback_displayed', {'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single', 'student_answer': [u'choice_0'], 'correctness': False, 'question_type': 'multiplechoiceresponse', - 'hint_label': 'Incorrect', 'hints': [{'text': 'Mushroom is a fungus, not a fruit.'}]} + 'hint_label': 'Incorrect:', 'hints': [{'text': 'Mushroom is a fungus, not a fruit.'}]} ) @data( {'problem_id': u'1_2_1', 'choice': u'choice_0', - 'expected_string': '
Incorrect:
Mushroom is a fungus, not a fruit.
'}, + 'expected_string': '
Answer
Incorrect:
Mushroom is a fungus, not a fruit.
'}, {'problem_id': u'1_2_1', 'choice': u'choice_1', - 'expected_string': '
Incorrect:
Potato is not a fruit.
'}, + 'expected_string': '
Answer
Incorrect:
Potato is not a fruit.
'}, {'problem_id': u'1_2_1', 'choice': u'choice_2', - 'expected_string': '
Correct:
Apple is a fruit.
'} + 'expected_string': '
Answer
Correct:
Apple is a fruit.
'} ) @unpack def test_multiplechoice_hints(self, problem_id, choice, expected_string): @@ -499,28 +499,28 @@ class DropdownHintsTest(HintTest): 'edx.problem.hint.feedback_displayed', {'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single', 'student_answer': [u'FACES'], 'correctness': True, 'question_type': 'optionresponse', - 'hint_label': 'Correct', 'hints': [{'text': 'With lots of makeup, doncha know?'}]} + 'hint_label': 'Correct:', 'hints': [{'text': 'With lots of makeup, doncha know?'}]} ) @data( {'problem_id': u'1_2_1', 'choice': 'Multiple Choice', - 'expected_string': '
Good Job:
Yes, multiple choice is the right answer.
'}, + 'expected_string': '
Answer
Good Job
Yes, multiple choice is the right answer.
'}, {'problem_id': u'1_2_1', 'choice': 'Text Input', - 'expected_string': '
Incorrect:
No, text input problems do not present options.
'}, + 'expected_string': '
Answer
Incorrect:
No, text input problems do not present options.
'}, {'problem_id': u'1_2_1', 'choice': 'Numerical Input', - 'expected_string': '
Incorrect:
No, numerical input problems do not present options.
'}, + 'expected_string': '
Answer
Incorrect:
No, numerical input problems do not present options.
'}, {'problem_id': u'1_3_1', 'choice': 'FACES', - 'expected_string': '
Correct:
With lots of makeup, doncha know?
'}, + 'expected_string': '
Answer
Correct:
With lots of makeup, doncha know?
'}, {'problem_id': u'1_3_1', 'choice': 'dogs', - 'expected_string': '
NOPE:
Not dogs, not cats, not toads
'}, + 'expected_string': '
Answer
NOPE
Not dogs, not cats, not toads
'}, {'problem_id': u'1_3_1', 'choice': 'wrongo', 'expected_string': ''}, # Regression case where feedback includes answer substring {'problem_id': u'1_4_1', 'choice': 'AAA', - 'expected_string': '
Incorrect:
AAABBB1
'}, + 'expected_string': '
Answer
Incorrect:
AAABBB1
'}, {'problem_id': u'1_4_1', 'choice': 'BBB', - 'expected_string': '
Correct:
AAABBB2
'}, + 'expected_string': '
Answer
Correct:
AAABBB2
'}, {'problem_id': u'1_4_1', 'choice': 'not going to match', 'expected_string': ''}, ) diff --git a/common/lib/capa/capa/tests/test_html_render.py b/common/lib/capa/capa/tests/test_html_render.py index d446bce228..c2a9491855 100644 --- a/common/lib/capa/capa/tests/test_html_render.py +++ b/common/lib/capa/capa/tests/test_html_render.py @@ -1,3 +1,7 @@ +""" +CAPA HTML rendering tests. +""" +import ddt import unittest from lxml import etree import os @@ -9,7 +13,11 @@ from .response_xml_factory import StringResponseXMLFactory, CustomResponseXMLFac from capa.tests.helpers import test_capa_system, new_loncapa_problem +@ddt.ddt class CapaHtmlRenderTest(unittest.TestCase): + """ + CAPA HTML rendering tests class. + """ def setUp(self): super(CapaHtmlRenderTest, self).setUp() @@ -142,32 +150,28 @@ class CapaHtmlRenderTest(unittest.TestCase): # Mock out the template renderer the_system = test_capa_system() the_system.render_template = mock.Mock() - the_system.render_template.return_value = "
Input Template Render
" + the_system.render_template.return_value = "
Input Template Render
" # Create the problem and render the HTML problem = new_loncapa_problem(xml_str, capa_system=the_system) rendered_html = etree.XML(problem.get_html()) - # Expect problem has been turned into a
self.assertEqual(rendered_html.tag, "div") - # Expect question text is in a

child - question_element = rendered_html.find("p") - self.assertEqual(question_element.text, "Test question") + # Expect that the response has been turned into a

with correct attributes + response_element = rendered_html.find('div') - # Expect that the response has been turned into a
with correct attributes - response_element = rendered_html.find("section") - self.assertEqual(response_element.tag, "section") + self.assertEqual(response_element.tag, "div") self.assertEqual(response_element.attrib["aria-label"], "Question 1") - # Expect that the response
+ # Expect that the response div.wrapper-problem-response # that contains a
for the textline - textline_element = response_element.find("div") + textline_element = response_element.find('div') self.assertEqual(textline_element.text, 'Input Template Render') # Expect a child
for the solution # with the rendered template - solution_element = rendered_html.find("div") + solution_element = rendered_html.xpath('//div[@class="input-template-render"]')[0] self.assertEqual(solution_element.text, 'Input Template Render') # Expect that the template renderer was called with the correct @@ -185,7 +189,7 @@ class CapaHtmlRenderTest(unittest.TestCase): 'id': '1_2_1', 'trailing_text': '', 'size': None, - 'response_data': {'label': '', 'descriptions': {}}, + 'response_data': {'label': 'Test question', 'descriptions': {}}, 'describedby_html': '' } @@ -222,9 +226,9 @@ class CapaHtmlRenderTest(unittest.TestCase): """ problem = new_loncapa_problem(xml) rendered_html = etree.XML(problem.get_html()) - sections = rendered_html.findall('section') - self.assertEqual(sections[0].attrib['aria-label'], 'Question 1') - self.assertEqual(sections[1].attrib['aria-label'], 'Question 2') + response_elements = rendered_html.findall('div') + self.assertEqual(response_elements[0].attrib['aria-label'], 'Question 1') + self.assertEqual(response_elements[1].attrib['aria-label'], 'Question 2') def test_render_response_with_overall_msg(self): # CustomResponse script that sets an overall_message diff --git a/common/lib/capa/capa/tests/test_input_templates.py b/common/lib/capa/capa/tests/test_input_templates.py index 0c70f79f13..a4f8eae3dd 100644 --- a/common/lib/capa/capa/tests/test_input_templates.py +++ b/common/lib/capa/capa/tests/test_input_templates.py @@ -930,7 +930,7 @@ class DragAndDropTemplateTest(TemplateTestCase): self.assert_has_xpath(xml, xpath, self.context) # Expect a

with the status - xpath = "//p[@class='status']" + xpath = "//p[@class='status drag-and-drop--status']/span[@class='sr']" self.assert_has_text(xml, xpath, expected_text, exact=False) def test_drag_and_drop_json_html(self): @@ -1181,3 +1181,43 @@ class SchematicInputTemplateTest(TemplateTestCase): Verify aria-label attribute rendering. """ self.assert_label(aria_label=True) + + +class CodeinputTemplateTest(TemplateTestCase): + """ + Test mako template for `` input + """ + + TEMPLATE_NAME = 'codeinput.html' + + def setUp(self): + super(CodeinputTemplateTest, self).setUp() + self.context = { + 'id': '1', + 'status': Status('correct'), + 'mode': 'parrot', + 'linenumbers': 'false', + 'rows': '37', + 'cols': '11', + 'tabsize': '7', + 'hidden': '', + 'msg': '', + 'value': 'print "good evening"', + 'aria_label': 'python editor', + 'code_mirror_exit_message': 'Press ESC then TAB or click outside of the code editor to exit', + 'response_data': self.RESPONSE_DATA, + 'describedby': self.DESCRIBEDBY, + } + + def test_label(self): + """ + Verify question label is rendered correctly. + """ + self.assert_label(xpath="//label[@class='problem-group-label']") + + def test_editor_exit_message(self): + """ + Verify that editor exit message is rendered. 
+ """ + xml = self.render_to_xml(self.context) + self.assert_has_text(xml, '//span[@id="cm-editor-exit-message-1"]', self.context['code_mirror_exit_message']) diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 01122c1df3..d94ab947ea 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -421,6 +421,8 @@ class CodeInputTest(unittest.TestCase): 'hidden': '', 'tabsize': int(tabsize), 'queue_len': '3', + 'aria_label': '{mode} editor'.format(mode=mode), + 'code_mirror_exit_message': 'Press ESC then TAB or click outside of the code editor to exit', 'response_data': RESPONSE_DATA, 'describedby_html': DESCRIBEDBY } diff --git a/common/lib/capa/capa/tests/test_targeted_feedback.py b/common/lib/capa/capa/tests/test_targeted_feedback.py index 41f70d33b2..91a2cd01d4 100644 --- a/common/lib/capa/capa/tests/test_targeted_feedback.py +++ b/common/lib/capa/capa/tests/test_targeted_feedback.py @@ -96,8 +96,8 @@ class CapaTargetedFeedbackTest(unittest.TestCase): the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") - - self.assertRegexpMatches(without_new_lines, r".*3rd WRONG solution") + # pylint: disable=line-too-long + self.assertRegexpMatches(without_new_lines, r"\s*Incorrect.*3rd WRONG solution") self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedbackC") # Check that calling it multiple times yields the same thing the_html2 = problem.get_html() @@ -110,11 +110,24 @@ class CapaTargetedFeedbackTest(unittest.TestCase): the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") - - self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # pylint: disable=line-too-long + self.assertRegexpMatches(without_new_lines, r"\s*Incorrect.*1st WRONG solution") self.assertRegexpMatches(without_new_lines, r"

\{.*'1_solution_1'.*\}
") self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC") + def test_targeted_feedback_correct_answer(self): + """ Test the case of targeted feedback for a correct answer. """ + problem = new_loncapa_problem(load_fixture('targeted_feedback.xml')) + problem.done = True + problem.student_answers = {'1_2_1': 'choice_2'} + + the_html = problem.get_html() + without_new_lines = the_html.replace("\n", "") + # pylint: disable=line-too-long + self.assertRegexpMatches(without_new_lines, + r"\s*Correct.*Feedback on your correct solution...") + self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3") + def test_targeted_feedback_id_typos(self): """Cases where the explanation-id's don't match anything.""" xml_str = textwrap.dedent(""" @@ -280,8 +293,8 @@ class CapaTargetedFeedbackTest(unittest.TestCase): the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") - - self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # pylint: disable=line-too-long + self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}
") self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3") @@ -350,8 +363,8 @@ class CapaTargetedFeedbackTest(unittest.TestCase): the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") - - self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # pylint: disable=line-too-long + self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") self.assertNotRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}
") self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC") @@ -427,8 +440,8 @@ class CapaTargetedFeedbackTest(unittest.TestCase): the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") - - self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # pylint: disable=line-too-long + self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}
") self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3") diff --git a/common/lib/xmodule/xmodule/capa_base.py b/common/lib/xmodule/xmodule/capa_base.py index e49881b6f9..afbabe2858 100644 --- a/common/lib/xmodule/xmodule/capa_base.py +++ b/common/lib/xmodule/xmodule/capa_base.py @@ -29,6 +29,8 @@ from django.utils.timezone import UTC from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER from django.conf import settings +from openedx.core.djangolib.markup import HTML, Text + log = logging.getLogger("edx.courseware") # Make '_' a no-op so we can scrape strings. Using lambda instead of @@ -180,12 +182,6 @@ class CapaFields(object): help=_("Source code for LaTeX and Word problems. This feature is not well-supported."), scope=Scope.settings ) - text_customization = Dict( - help=_("String customization substitutions for particular locations"), - scope=Scope.settings - # TODO: someday it should be possible to not duplicate this definition here - # and in inheritance.py - ) use_latex_compiler = Boolean( help=_("Enable LaTeX templates?"), default=False, @@ -347,7 +343,7 @@ class CapaMixin(CapaFields): def set_last_submission_time(self): """ - Set the module's last submission time (when the problem was checked) + Set the module's last submission time (when the problem was submitted) """ self.last_submission_time = datetime.datetime.now(UTC()) @@ -400,62 +396,40 @@ class CapaMixin(CapaFields): 'progress_status': Progress.to_js_status_str(progress), 'progress_detail': Progress.to_js_detail_str(progress), 'content': self.get_problem_html(encapsulate=False), + 'graded': self.graded, }) - def check_button_name(self): + def submit_button_name(self): """ - Determine the name for the "check" button. - - Usually it is just "Check", but if this is the student's - final attempt, change the name to "Final Check". - The text can be customized by the text_customization setting. + Determine the name for the "submit" button. 
""" # The logic flow is a little odd so that _('xxx') strings can be found for # translation while also running _() just once for each string. _ = self.runtime.service(self, "i18n").ugettext - check = _('Check') - final_check = _('Final Check') + submit = _('Submit') - # Apply customizations if present - if 'custom_check' in self.text_customization: - check = _(self.text_customization.get('custom_check')) # pylint: disable=translation-of-non-string - if 'custom_final_check' in self.text_customization: - final_check = _(self.text_customization.get('custom_final_check')) # pylint: disable=translation-of-non-string - # TODO: need a way to get the customized words into the list of - # words to be translated + return submit - if self.max_attempts is not None and self.attempts >= self.max_attempts - 1: - return final_check - else: - return check - - def check_button_checking_name(self): + def submit_button_submitting_name(self): """ - Return the "checking..." text for the "check" button. + Return the "Submitting" text for the "submit" button. - After the user presses the "check" button, the button will briefly + After the user presses the "submit" button, the button will briefly display the value returned by this function until a response is received by the server. - - The text can be customized by the text_customization setting. - """ - # Apply customizations if present - if 'custom_checking' in self.text_customization: - return self.text_customization.get('custom_checking') - _ = self.runtime.service(self, "i18n").ugettext - return _('Checking...') + return _('Submitting') - def should_show_check_button(self): + def should_enable_submit_button(self): """ - Return True/False to indicate whether to show the "Check" button. + Return True/False to indicate whether to enable the "Submit" button. 
""" submitted_without_reset = (self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS) # If the problem is closed (past due / too many attempts) - # then we do NOT show the "check" button - # Also, do not show the "check" button if we're waiting + # then we disable the "submit" button + # Also, disable the "submit" button if we're waiting # for the user to reset a randomized problem if self.closed() or submitted_without_reset: return False @@ -591,51 +565,81 @@ class CapaMixin(CapaFields): return html + def _should_enable_demand_hint(self, hint_index, demand_hints): + """ + Should the demand hint option be enabled? + + Arguments: + hint_index (int): The current hint index. + demand_hints (list): List of hints. + Returns: + bool: True is the demand hint is possible. + bool: True is demand hint should be enabled. + """ + # hint_index is the index of the last hint that will be displayed in this rendering, + # so add 1 to check if others exist. + return len(demand_hints) > 0, len(demand_hints) > 0 and hint_index + 1 < len(demand_hints) + def get_demand_hint(self, hint_index): """ - Return html for the problem. + Return html for the problem, including demand hints. - Adds check, reset, save, and hint buttons as necessary based on the problem config - and state. - encapsulate: if True (the default) embed the html in a problem
- hint_index: (None is the default) if not None, this is the index of the next demand - hint to show. + hint_index (int): (None is the default) if not None, this is the index of the next demand + hint to show. """ demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint") hint_index = hint_index % len(demand_hints) _ = self.runtime.service(self, "i18n").ugettext - hint_element = demand_hints[hint_index] - hint_text = get_inner_html_from_xpath(hint_element) - if len(demand_hints) == 1: - prefix = _('Hint: ') - else: - # Translators: e.g. "Hint 1 of 3" meaning we are showing the first of three hints. - prefix = _('Hint ({hint_num} of {hints_count}): ').format(hint_num=hint_index + 1, - hints_count=len(demand_hints)) - # Log this demand-hint request + counter = 0 + total_text = '' + while counter <= hint_index: + # Translators: {previous_hints} is the HTML of hints that have already been generated, {hint_number_prefix} + # is a header for this hint, and {hint_text} is the text of the hint itself. + # This string is being passed to translation only for possible reordering of the placeholders. + total_text = HTML(_('{previous_hints}
  • {hint_number_prefix}{hint_text}
  • ')).format( + previous_hints=HTML(total_text), + # Translators: e.g. "Hint 1 of 3: " meaning we are showing the first of three hints. + # This text is shown in bold before the accompanying hint text. + hint_number_prefix=Text(_("Hint ({hint_num} of {hints_count}): ")).format( + hint_num=counter + 1, hints_count=len(demand_hints) + ), + # Course-authored HTML demand hints are supported. + hint_text=HTML(get_inner_html_from_xpath(demand_hints[counter])) + ) + counter += 1 + + total_text = HTML('
      {hints}
    ').format(hints=total_text) + + # Log this demand-hint request. Note that this only logs the last hint requested (although now + # all previously shown hints are still displayed). event_info = dict() event_info['module_id'] = self.location.to_deprecated_string() event_info['hint_index'] = hint_index event_info['hint_len'] = len(demand_hints) - event_info['hint_text'] = hint_text + event_info['hint_text'] = get_inner_html_from_xpath(demand_hints[hint_index]) self.runtime.publish(self, 'edx.problem.hint.demandhint_displayed', event_info) + _, should_enable_next_hint = self._should_enable_demand_hint(hint_index, demand_hints) + # We report the index of this hint, the client works out what index to use to get the next hint return { 'success': True, - 'contents': prefix + hint_text, - 'hint_index': hint_index + 'hint_index': hint_index, + 'should_enable_next_hint': should_enable_next_hint, + 'msg': total_text, } - def get_problem_html(self, encapsulate=True): + def get_problem_html(self, encapsulate=True, submit_notification=False): """ Return html for the problem. - Adds check, reset, save, and hint buttons as necessary based on the problem config + Adds submit, reset, save, and hint buttons as necessary based on the problem config and state. - encapsulate: if True (the default) embed the html in a problem
    + + encapsulate (bool): if True (the default) embed the html in a problem
    + submit_notification (bool): True if the submit notification should be added """ try: html = self.lcp.get_html() @@ -647,16 +651,10 @@ class CapaMixin(CapaFields): html = self.remove_tags_from_html(html) - # The convention is to pass the name of the check button if we want - # to show a check button, and False otherwise This works because - # non-empty strings evaluate to True. We use the same convention - # for the "checking" state text. - if self.should_show_check_button(): - check_button = self.check_button_name() - check_button_checking = self.check_button_checking_name() - else: - check_button = False - check_button_checking = False + # Enable/Disable Submit button if should_enable_submit_button returns True/False. + submit_button = self.submit_button_name() + submit_button_submitting = self.submit_button_submitting_name() + should_enable_submit_button = self.should_enable_submit_button() content = { 'name': self.display_name_with_default, @@ -665,20 +663,29 @@ class CapaMixin(CapaFields): } # If demand hints are available, emit hint button and div. 
+ hint_index = 0 demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint") - demand_hint_possible = len(demand_hints) > 0 + demand_hint_possible, should_enable_next_hint = self._should_enable_demand_hint(hint_index, demand_hints) + + answer_notification_type, answer_notification_message = self._get_answer_notification( + render_notifications=submit_notification) context = { 'problem': content, 'id': self.location.to_deprecated_string(), - 'check_button': check_button, - 'check_button_checking': check_button_checking, + 'short_id': self.location.html_id(), + 'submit_button': submit_button, + 'submit_button_submitting': submit_button_submitting, + 'should_enable_submit_button': should_enable_submit_button, 'reset_button': self.should_show_reset_button(), 'save_button': self.should_show_save_button(), 'answer_available': self.answer_available(), 'attempts_used': self.attempts, 'attempts_allowed': self.max_attempts, - 'demand_hint_possible': demand_hint_possible + 'demand_hint_possible': demand_hint_possible, + 'should_enable_next_hint': should_enable_next_hint, + 'answer_notification_type': answer_notification_type, + 'answer_notification_message': answer_notification_message, } html = self.runtime.render_template('problem.html', context) @@ -699,6 +706,65 @@ class CapaMixin(CapaFields): return html + def _get_answer_notification(self, render_notifications): + """ + Generate the answer notification type and message from the current problem status. 
+ + Arguments: + render_notifications (bool): If false the method will return an None for type and message + """ + answer_notification_message = None + answer_notification_type = None + + if render_notifications: + progress = self.get_progress() + id_list = self.lcp.correct_map.keys() + if len(id_list) == 1: + # Only one answer available + answer_notification_type = self.lcp.correct_map.get_correctness(id_list[0]) + elif len(id_list) > 1: + # Check the multiple answers that are available + answer_notification_type = self.lcp.correct_map.get_correctness(id_list[0]) + for answer_id in id_list[1:]: + if self.lcp.correct_map.get_correctness(answer_id) != answer_notification_type: + # There is at least 1 of the following combinations of correctness states + # Correct and incorrect, Correct and partially correct, or Incorrect and partially correct + # which all should have a message type of Partially Correct + answer_notification_type = 'partially-correct' + break + + # Build the notification message based on the notification type and translate it. 
+ ungettext = self.runtime.service(self, "i18n").ungettext + if answer_notification_type == 'incorrect': + if progress is not None: + answer_notification_message = ungettext( + "Incorrect ({progress} point)", + "Incorrect ({progress} points)", + progress.frac()[1] + ).format(progress=str(progress)) + else: + answer_notification_message = _('Incorrect') + elif answer_notification_type == 'correct': + if progress is not None: + answer_notification_message = ungettext( + "Correct ({progress} point)", + "Correct ({progress} points)", + progress.frac()[1] + ).format(progress=str(progress)) + else: + answer_notification_message = _('Correct') + elif answer_notification_type == 'partially-correct': + if progress is not None: + answer_notification_message = ungettext( + "Partially correct ({progress} point)", + "Partially correct ({progress} points)", + progress.frac()[1] + ).format(progress=str(progress)) + else: + answer_notification_message = _('Partially Correct') + + return answer_notification_type, answer_notification_message + def remove_tags_from_html(self, html): """ The capa xml includes many tags such as or which are not @@ -894,7 +960,7 @@ class CapaMixin(CapaFields): Used if we want to reconfirm we have the right thing e.g. after several AJAX calls. 
""" - return {'html': self.get_problem_html(encapsulate=False)} + return {'html': self.get_problem_html(encapsulate=False, submit_notification=True)} @staticmethod def make_dict_of_responses(data): @@ -996,7 +1062,7 @@ class CapaMixin(CapaFields): return {'grade': score['score'], 'max_grade': score['total']} # pylint: disable=too-many-statements - def check_problem(self, data, override_time=False): + def submit_problem(self, data, override_time=False): """ Checks whether answers to a problem are correct @@ -1034,7 +1100,7 @@ class CapaMixin(CapaFields): self.track_function_unmask('problem_check_fail', event_info) if dog_stats_api: dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:unreset']) - raise NotFoundError(_("Problem must be reset before it can be checked again.")) + raise NotFoundError(_("Problem must be reset before it can be submitted again.")) # Problem queued. Students must wait a specified waittime before they are allowed to submit # IDEA: consider stealing code from below: pretty-print of seconds, cueing of time remaining @@ -1131,7 +1197,7 @@ class CapaMixin(CapaFields): ) # render problem into HTML - html = self.get_problem_html(encapsulate=False) + html = self.get_problem_html(encapsulate=False, submit_notification=True) return { 'success': success, @@ -1424,11 +1490,11 @@ class CapaMixin(CapaFields): if not self.max_attempts == 0: msg = _( "Your answers have been saved but not graded. Click '{button_name}' to grade them." - ).format(button_name=self.check_button_name()) + ).format(button_name=self.submit_button_name()) return { 'success': True, 'msg': msg, - 'html': self.get_problem_html(encapsulate=False), + 'html': self.get_problem_html(encapsulate=False) } def reset_problem(self, _data): @@ -1454,7 +1520,7 @@ class CapaMixin(CapaFields): return { 'success': False, # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem. 
- 'error': _("Problem is closed."), + 'msg': _("You cannot select Reset for a problem that is closed."), } if not self.is_submitted(): @@ -1462,8 +1528,7 @@ class CapaMixin(CapaFields): self.track_function_unmask('reset_problem_fail', event_info) return { 'success': False, - # Translators: A student must "make an attempt" to solve the problem on the page before they can reset it. - 'error': _("Refresh the page and make an attempt before resetting."), + 'msg': _("You must submit an answer before you can select Reset."), } if self.is_submitted() and self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET]: diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index c91cdb3e6f..42e52e5bc9 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -69,7 +69,7 @@ class CapaModule(CapaMixin, XModule): handlers = { 'hint_button': self.hint_button, 'problem_get': self.get_problem, - 'problem_check': self.check_problem, + 'problem_check': self.submit_problem, 'problem_reset': self.reset_problem, 'problem_save': self.save_problem, 'problem_show': self.get_answer, @@ -212,7 +212,6 @@ class CapaDescriptor(CapaFields, RawDescriptor): CapaDescriptor.graceperiod, CapaDescriptor.force_save_button, CapaDescriptor.markdown, - CapaDescriptor.text_customization, CapaDescriptor.use_latex_compiler, ]) return non_editable_fields @@ -276,9 +275,9 @@ class CapaDescriptor(CapaFields, RawDescriptor): # Proxy to CapaModule for access to any of its attributes answer_available = module_attr('answer_available') - check_button_name = module_attr('check_button_name') - check_button_checking_name = module_attr('check_button_checking_name') - check_problem = module_attr('check_problem') + submit_button_name = module_attr('submit_button_name') + submit_button_submitting_name = module_attr('submit_button_submitting_name') + submit_problem = module_attr('submit_problem') choose_new_seed = 
module_attr('choose_new_seed') closed = module_attr('closed') get_answer = module_attr('get_answer') @@ -301,7 +300,7 @@ class CapaDescriptor(CapaFields, RawDescriptor): reset_problem = module_attr('reset_problem') save_problem = module_attr('save_problem') set_state_from_lcp = module_attr('set_state_from_lcp') - should_show_check_button = module_attr('should_show_check_button') + should_show_submit_button = module_attr('should_show_submit_button') should_show_reset_button = module_attr('should_show_reset_button') should_show_save_button = module_attr('should_show_save_button') update_score = module_attr('update_score') diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index 5fcd28a630..2e45ff8af0 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -24,9 +24,18 @@ $annotation-yellow: rgba(255,255,10,0.3); $color-copy-tip: rgb(100,100,100); $correct: $green-d2; -$partiallycorrect: $green-d2; +$partially-correct: $green-d2; $incorrect: $red; +// FontAwesome Icon code +// ==================== +$checkmark-icon: '\f00c'; // .fa-check +$cross-icon: '\f00d'; // .fa-close +$asterisk-icon: '\f069'; // .fa-asterisk + + +@import '../../../../../static/sass/edx-pattern-library-shims/base/variables'; + // +Extends - Capa // ==================== // Duplicated from _mixins.scss due to xmodule compilation, inheritance issues @@ -70,19 +79,31 @@ h2 { } } -.feedback-hint-correct { - margin-top: ($baseline/2); - color: $correct; +.explanation-title { + font-weight: bold; } -.feedback-hint-partially-correct { - margin-top: ($baseline/2); - color: $partiallycorrect; +%feedback-hint { + margin-top: ($baseline / 4); + + .icon { + @include margin-right($baseline / 4); + } } .feedback-hint-incorrect { - margin-top: ($baseline/2); - color: $incorrect; + @extend %feedback-hint; + .icon { + color: $incorrect; + } +} + +.feedback-hint-partially-correct, 
+.feedback-hint-correct { + @extend %feedback-hint; + .icon { + color: $correct; + } } .feedback-hint-text { @@ -90,12 +111,10 @@ h2 { } .problem-hint { - color: $color-copy-tip; margin-bottom: 20px; } .hint-label { - font-weight: bold; display: inline-block; padding-right: 0.5em; } @@ -120,17 +139,16 @@ iframe[seamless]{ } div.problem-progress { - @include padding-left($baseline/4); - @extend %t-ultralight; display: inline-block; color: $gray-d1; - font-weight: 100; - font-size: em(16); + font-size: em(14); } // +Problem - Base // ==================== div.problem { + padding-top: $baseline; + @media print { display: block; padding: 0; @@ -154,7 +172,8 @@ div.problem { } .question-description { - @include margin(($baseline*0.75), 0); + color: $gray-d1; + font-size: $small-font-size; } form > label, .problem-group-label { @@ -162,11 +181,20 @@ div.problem { margin-bottom: $baseline; font: inherit; color: inherit; + -webkit-font-smoothing: initial; } - .wrapper-problem-response:not(:last-child) { - margin-bottom: $baseline; + .problem-group-label + .question-description { + margin-top: -$baseline; } + +} + +// CAPA gap spacing between problem parts +// can not use the & + & since .problem is nested deeply in .xmodule_display.xmodule_CapaModule +.wrapper-problem-response + .wrapper-problem-response, +.wrapper-problem-response + p { + margin-top: ($baseline * 1.5); } // Choice Group - silent class @@ -195,7 +223,7 @@ div.problem { } &.choicegroup_correct { - @include status-icon($correct, "\f00c"); + @include status-icon($correct, $checkmark-icon); border: 2px solid $correct; // keep green for correct answers on hover. @@ -205,17 +233,17 @@ div.problem { } &.choicegroup_partially-correct { - @include status-icon($partiallycorrect, "\f069"); - border: 2px solid $partiallycorrect; + @include status-icon($partially-correct, $asterisk-icon); + border: 2px solid $partially-correct; // keep green for correct answers on hover. 
&:hover { - border-color: $partiallycorrect; + border-color: $partially-correct; } } &.choicegroup_incorrect { - @include status-icon($incorrect, "\f00d"); + @include status-icon($incorrect, $cross-icon); border: 2px solid $incorrect; // keep red for incorrect answers on hover. @@ -226,7 +254,6 @@ div.problem { } .indicator-container { - display: inline-block; min-height: 1px; width: 25px; } @@ -253,15 +280,18 @@ div.problem { @extend %choicegroup-base; label { @include padding($baseline/2); - @include padding-left($baseline*1.75); + @include padding-left($baseline*1.9); position: relative; + font-size: $base-font-size; + line-height: normal; + cursor: pointer; } input[type="radio"], input[type="checkbox"] { - @include left($baseline/4); + @include left(em(9)); position: absolute; - top: em(11); + top: em(9); } } } @@ -276,31 +306,22 @@ div.problem { .status { width: $baseline; - height: $baseline; // CASE: correct answer &.correct { - @include status-icon($correct, "\f00c"); + @include status-icon($correct, $checkmark-icon); } // CASE: partially correct answer &.partially-correct { - @include status-icon($partiallycorrect, "\f069"); + @include status-icon($partially-correct, $asterisk-icon); } // CASE: incorrect answer &.incorrect { - @include status-icon($incorrect, "\f00d"); + @include status-icon($incorrect, $cross-icon); } - // CASE: unanswered - &.unanswered { - @include status-icon($gray-l4, "\f128"); - } - - // CASE: processing - &.processing { - } } } } @@ -323,12 +344,7 @@ div.problem { > span { margin: $baseline 0; display: block; - border: 1px solid #ddd; - padding: 9px 15px $baseline; - background: $white; position: relative; - box-shadow: inset 0 0 0 1px #eee; - border-radius: 3px; &:empty { display: none; @@ -338,14 +354,8 @@ div.problem { .targeted-feedback-span { > span { - margin: $baseline 0; display: block; - border: 1px solid $black; - padding: 9px 15px $baseline; - background: $white; position: relative; - box-shadow: inset 0 0 0 1px #eee; - 
border-radius: 3px; &:empty { display: none; @@ -362,13 +372,6 @@ div.problem { margin-top: -2px; } - &.status { - @include margin(8px, 0, 0, ($baseline/2)); - text-indent: 100%; - white-space: nowrap; - overflow: hidden; - } - span.clarification i { font-style: normal; &:hover { @@ -377,21 +380,18 @@ div.problem { } } - &.unanswered { - p.status { - display: inline-block; - width: 14px; - height: 14px; - background: url('#{$static-path}/images/unanswered-icon.png') center center no-repeat; + .unanswered { + p.status.drag-and-drop--status { + @include margin(8px, 0, 0, ($baseline/2)); + text-indent: 100%; + white-space: nowrap; + overflow: hidden; } } &.correct, &.ui-icon-check { p.status { - display: inline-block; - width: 25px; - height: 20px; - background: url('#{$static-path}/images/correct-icon.png') center center no-repeat; + @include status-icon($correct, $checkmark-icon); } input { @@ -401,14 +401,11 @@ div.problem { &.partially-correct, &.ui-icon-check { p.status { - display: inline-block; - width: 25px; - height: 20px; - background: url('#{$static-path}/images/partially-correct-icon.png') center center no-repeat; + @include status-icon($partially-correct, $asterisk-icon); } input { - border-color: $partiallycorrect; + border-color: $partially-correct; } } @@ -427,10 +424,7 @@ div.problem { &.ui-icon-close { p.status { - display: inline-block; - width: 20px; - height: 20px; - background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat; + @include status-icon($incorrect, $cross-icon); } input { @@ -441,10 +435,7 @@ div.problem { &.incorrect, &.incomplete { p.status { - display: inline-block; - width: 20px; - height: 20px; - background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat; + @include status-icon($incorrect, $cross-icon); } input { @@ -452,14 +443,9 @@ div.problem { } } - > span { - display: block; - margin-bottom: lh(0.5); - } - p.answer { - @include margin-left($baseline/2); display: 
inline-block; + margin-top: ($baseline / 2); margin-bottom: 0; &:before { @@ -789,7 +775,6 @@ div.problem { .status { display: inline-block; margin-top: ($baseline/2); - @include margin-left($baseline*.75); background: none; } @@ -801,7 +786,7 @@ div.problem { } .status { - @include status-icon($incorrect, "\f00d"); + @include status-icon($incorrect, $cross-icon); } } @@ -809,11 +794,11 @@ div.problem { > .partially-correct { input { - border: 2px solid $partiallycorrect; + border: 2px solid $partially-correct; } .status { - @include status-icon($partiallycorrect, "\f069"); + @include status-icon($partially-correct, $asterisk-icon); } } @@ -825,7 +810,7 @@ div.problem { } .status { - @include status-icon($correct, "\f00c"); + @include status-icon($correct, $checkmark-icon); } } @@ -837,12 +822,16 @@ div.problem { } .status { - @include status-icon($gray-l4, "\f128"); + &:after { + content: ''; // clear out correct or incorrect icon + } } + } } .trailing_text { + @include margin-right($baseline/2); display: inline-block; } } @@ -853,7 +842,6 @@ div.problem { .problem { .inputtype.option-input { margin: (-$baseline/2) 0 $baseline; - padding-bottom: $baseline; .indicator-container { display: inline-block; @@ -920,50 +908,75 @@ div.problem { } } +.capa-message { + display: inline-block; + color: $gray-d1; + -webkit-font-smoothing: antialiased; +} + // +Problem - Actions // ==================== div.problem .action { - margin-top: $baseline; + @include margin($baseline 0); + min-height: $baseline; + + .problem-action-buttons-wrapper { + margin-bottom: $baseline / 2; + + @media (min-width: $bp-screen-lg) { + @include right($baseline * 1.5); + margin-top: -$baseline / 2; + position: absolute; + } + + } + + .problem-action-button-wrapper { + @include border-right(1px solid $light-gray1); + display: inline-block; + + &:last-child { + border: none; + } + } + + .problem-action-btn { + @include margin-right($baseline / 5); + max-width: 110px; + + .icon { + margin-bottom: 
$baseline / 10; + display: block; + } - .save, .check, .show, .reset, .hint-button { - @include margin-right($baseline/2); - margin-bottom: ($baseline/2); - height: ($baseline*2); - vertical-align: middle; - text-transform: uppercase; - font-weight: 600; @media print { display: none; } - } - .save { - @extend .blue-button !optional; - } - - .show { - - .show-label { - font-weight: 600; - font-size: 1.0em; - } } .submission_feedback { - // background: #F3F3F3; - // border: 1px solid #ddd; - // border-radius: 3px; - // padding: 8px 12px; - // margin-top: ($baseline/2); @include margin-left($baseline/2); display: inline-block; - margin-top: 8px; color: $gray-d1; - font-style: italic; + font-size: $medium-font-size; -webkit-font-smoothing: antialiased; + vertical-align: middle; + + @media (min-width: $bp-screen-lg) and (max-width: $bp-screen-xl) { + @include margin-left(0); + margin-top: $baseline / 2; + display: block; + } + + @media (min-width: $bp-screen-xl) { + max-width: flex-grid(3, 10); + } + } } + // +Problem - Misc, Unclassified Mess Part 2 // ==================== div.problem { @@ -996,71 +1009,148 @@ div.problem { border: 1px solid $gray-l3; } - .detailed-solution { - > p:first-child { + .message { + font-size: inherit; + } + + .detailed-solution > p { + margin: 0; + + &:first-child { @extend %t-strong; - color: $gray; - text-transform: uppercase; - font-style: normal; - font-size: 0.9em; - } - - p:last-child { - margin-bottom: 0; - } - } - - .detailed-targeted-feedback { - > p:first-child { - @extend %t-strong; - color: $incorrect; - text-transform: uppercase; - font-style: normal; - font-size: 0.9em; - } - - p:last-child { - margin-bottom: 0; - } - } - - .detailed-targeted-feedback-partially-correct { - > p:first-child { - @extend %t-strong; - color: $partiallycorrect; - text-transform: uppercase; - font-style: normal; - font-size: 0.9em; - } - - p:last-child { margin-bottom: 0; } + } + .detailed-targeted-feedback, + 
.detailed-targeted-feedback-partially-correct, .detailed-targeted-feedback-correct { - > p:first-child { - @extend %t-strong; - color: $correct; - text-transform: uppercase; - font-style: normal; - font-size: 0.9em; - } + > p { + margin: 0; + font-weight: normal; - p:last-child { - margin-bottom: 0; + &:first-child { + @extend %t-strong; + } } } div.capa_alert { margin-top: $baseline; padding: 8px 12px; - border: 1px solid #ebe8bf; + border: 1px solid $warning-color; border-radius: 3px; - background: #fffcdd; + background: $warning-color-accent; font-size: 0.9em; } + .notification { + margin-top: $baseline / 2; + padding: ($baseline / 2.5) ($baseline / 2) ($baseline / 5) ($baseline / 2); + line-height: $base-line-height; + + &.success { + @include notification-by-type($success-color); + } + + &.error { + @include notification-by-type($error-color); + } + + &.warning { + @include notification-by-type($warning-color); + } + + &.problem-hint { + border: 1px solid $uxpl-gray-background; + border-radius: 6px; + + .icon { + @include margin-right(3 * $baseline / 4); + color: $uxpl-gray-dark; + } + + li { + color: $uxpl-gray-base; + + strong { + color: $uxpl-gray-dark; + } + } + } + + .icon { + @include float(left); + position: relative; + top: $baseline / 5; + } + + .notification-message { + display: inline-block; + width: flex-grid(8,10); + // Make notification tall enough that when the "Review" button is displayed, + // the notification does not grow in height. 
+ margin-bottom: 8px; + + ol { + list-style: none outside none; + padding: 0; + margin: 0; + + li:not(:last-child) { + margin-bottom: $baseline / 4; + } + } + } + + .notification-btn-wrapper { + @include float(right); + } + + } + + .notification-btn { + @include float(right); + padding: ($baseline / 10) ($baseline / 4); + min-width: ($baseline * 3); + display: block; + clear: both; + + &:first-child { + margin-bottom: $baseline / 4; + } + } + + // override default button hover + button { + &:hover { + background-image: none; + box-shadow: none; + } + + &:focus { + box-shadow: none; + } + + &.btn-default { + background-color: transparent; + } + + &.btn-brand { + &:hover { + background-color: $btn-brand-focus-background; + } + } + } + + .review-btn { + color: $blue; // notification type has other colors + &.sr { + color: $blue; + } + } + div.capa_reset { padding: 25px; border: 1px solid $error-color; @@ -1449,7 +1539,7 @@ div.problem { @extend label.choicegroup_partially-correct; input[type="text"] { - border-color: $partiallycorrect; + border-color: $partially-correct; } } @@ -1459,7 +1549,7 @@ div.problem { label.choicetextgroup_show_correct, section.choicetextgroup_show_correct { &:after { - margin-left:15px; + @include margin-left($baseline*.75); content: url('#{$static-path}/images/correct-icon.png'); } } @@ -1485,15 +1575,15 @@ div.problem .imageinput.capa_inputtype { } .correct { - background: url('#{$static-path}/images/correct-icon.png') center center no-repeat; + @include status-icon($correct, $checkmark-icon); } .incorrect { - background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat; + @include status-icon($incorrect, $cross-icon); } .partially-correct { - background: url('#{$static-path}/images/partially-correct-icon.png') center center no-repeat; + @include status-icon($partially-correct, $asterisk-icon); } } @@ -1512,14 +1602,14 @@ div.problem .annotation-input { } .correct { - background: 
url('#{$static-path}/images/correct-icon.png') center center no-repeat; + @include status-icon($correct, $checkmark-icon); } .incorrect { - background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat; + @include status-icon($incorrect, $cross-icon); } .partially-correct { - background: url('#{$static-path}/images/partially-correct-icon.png') center center no-repeat; + @include status-icon($partially-correct, $asterisk-icon); } } diff --git a/common/lib/xmodule/xmodule/js/fixtures/codeinput_problem.html b/common/lib/xmodule/xmodule/js/fixtures/codeinput_problem.html new file mode 100644 index 0000000000..2f5992bac3 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/fixtures/codeinput_problem.html @@ -0,0 +1,21 @@ +
    + + + + Press ESC then TAB or click outside of the code editor to exit + + +
    + + correct + +
    +
    diff --git a/common/lib/xmodule/xmodule/js/fixtures/matlabinput_problem.html b/common/lib/xmodule/xmodule/js/fixtures/matlabinput_problem.html index 5dad82c727..79c4a51218 100644 --- a/common/lib/xmodule/xmodule/js/fixtures/matlabinput_problem.html +++ b/common/lib/xmodule/xmodule/js/fixtures/matlabinput_problem.html @@ -1,43 +1,46 @@
    -
    -
    - -

    -

    -
    -
    - +
    + +
    + -
    - - processing - - +
    + + processing + + +

    processing

    +
    + +
    + Submitted. As soon as a response is returned, this message will be replaced by that feedback. +
    +
    +
    -

    processing

    +
    + +
    +
    +
    - - - -
    - Submitted. As soon as a response is returned, this message will be replaced by that feedback. -
    -
    - -
    - -
    - -
    - - -
    -
    -
    + +
    diff --git a/common/lib/xmodule/xmodule/js/fixtures/problem_content.html b/common/lib/xmodule/xmodule/js/fixtures/problem_content.html index 2091a03a00..d05b353c2f 100644 --- a/common/lib/xmodule/xmodule/js/fixtures/problem_content.html +++ b/common/lib/xmodule/xmodule/js/fixtures/problem_content.html @@ -12,11 +12,29 @@ - - - - +
    + + + + + + + + + +
    + + Explanation
    + +
    diff --git a/common/lib/xmodule/xmodule/js/karma_xmodule.conf.js b/common/lib/xmodule/xmodule/js/karma_xmodule.conf.js index 62966778c4..c0535dc111 100644 --- a/common/lib/xmodule/xmodule/js/karma_xmodule.conf.js +++ b/common/lib/xmodule/xmodule/js/karma_xmodule.conf.js @@ -17,9 +17,7 @@ var options = { // Avoid adding files to this list. Use RequireJS. libraryFilesToInclude: [ - {pattern: 'common_static/js/vendor/requirejs/require.js', included: true}, - {pattern: 'RequireJS-namespace-undefine.js', included: true}, - + // Load the core JavaScript dependencies {pattern: 'common_static/coffee/src/ajax_prefix.js', included: true}, {pattern: 'common_static/common/js/vendor/underscore.js', included: true}, {pattern: 'common_static/common/js/vendor/backbone.js', included: true}, @@ -45,11 +43,20 @@ var options = { {pattern: 'public/js/split_test_staff.js', included: true}, {pattern: 'src/word_cloud/d3.min.js', included: true}, + // Load test utilities {pattern: 'common_static/js/vendor/jasmine-imagediff.js', included: true}, {pattern: 'common_static/common/js/spec_helpers/jasmine-waituntil.js', included: true}, {pattern: 'common_static/common/js/spec_helpers/jasmine-extensions.js', included: true}, {pattern: 'common_static/js/vendor/sinon-1.17.0.js', included: true}, + // Load the edX global namespace before RequireJS is installed + {pattern: 'common_static/edx-ui-toolkit/js/utils/global-loader.js', included: true}, + {pattern: 'common_static/edx-ui-toolkit/js/utils/string-utils.js', included: true}, + {pattern: 'common_static/edx-ui-toolkit/js/utils/html-utils.js', included: true}, + + // Load RequireJS and move it into the RequireJS namespace + {pattern: 'common_static/js/vendor/requirejs/require.js', included: true}, + {pattern: 'RequireJS-namespace-undefine.js', included: true}, {pattern: 'spec/main_requirejs.js', included: true} ], diff --git a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee 
b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee index bc2b0df70d..ce50dfd0c1 100644 --- a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee +++ b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee @@ -9,8 +9,8 @@ describe 'Problem', -> @stubbedJax = root: jasmine.createSpyObj('jax.root', ['toMathML']) MathJax.Hub.getAllJax.and.returnValue [@stubbedJax] window.update_schematics = -> - spyOn SR, 'readElts' spyOn SR, 'readText' + spyOn SR, 'readTexts' # Load this function from spec/helper.coffee # Note that if your test fails with a message like: @@ -58,14 +58,14 @@ describe 'Problem', -> it 'bind answer refresh on button click', -> expect($('div.action button')).toHandleWith 'click', @problem.refreshAnswers - it 'bind the check button', -> - expect($('div.action button.check')).toHandleWith 'click', @problem.check_fd + it 'bind the submit button', -> + expect($('.action .submit')).toHandleWith 'click', @problem.submit_fd it 'bind the reset button', -> expect($('div.action button.reset')).toHandleWith 'click', @problem.reset it 'bind the show button', -> - expect($('div.action button.show')).toHandleWith 'click', @problem.show + expect($('.action .show')).toHandleWith 'click', @problem.show it 'bind the save button', -> expect($('div.action button.save')).toHandleWith 'click', @problem.save @@ -80,8 +80,8 @@ describe 'Problem', -> @problem = new Problem($('.xblock-student_view')) $(@).html readFixtures('problem_content_1240.html') - it 'bind the check button', -> - expect($('div.action button.check')).toHandleWith 'click', @problem.check_fd + it 'bind the submit button', -> + expect($('.action .submit')).toHandleWith 'click', @problem.submit_fd it 'bind the show button', -> expect($('div.action button.show')).toHandleWith 'click', @problem.show @@ -90,34 +90,46 @@ describe 'Problem', -> describe 'renderProgressState', -> beforeEach -> @problem = new Problem($('.xblock-student_view')) - #@renderProgressState = 
@problem.renderProgressState - testProgessData = (problem, status, detail, expected_progress_after_render) -> + testProgessData = (problem, status, detail, graded, expected_progress_after_render) -> problem.el.data('progress_status', status) problem.el.data('progress_detail', detail) + problem.el.data('graded', graded) expect(problem.$('.problem-progress').html()).toEqual "" problem.renderProgressState() expect(problem.$('.problem-progress').html()).toEqual expected_progress_after_render describe 'with a status of "none"', -> - it 'reports the number of points possible', -> - testProgessData(@problem, 'none', '0/1', "(1 point possible)") + it 'reports the number of points possible and graded', -> + testProgessData(@problem, 'none', '0/1', "True", "1 point possible (graded)") it 'displays the number of points possible when rendering happens with the content', -> - testProgessData(@problem, 'none', '0/2', "(2 points possible)") + testProgessData(@problem, 'none', '0/2', "True", "2 points possible (graded)") + + it 'reports the number of points possible and ungraded', -> + testProgessData(@problem, 'none', '0/1', "False", "1 point possible (ungraded)") + + it 'displays ungraded if number of points possible is 0', -> + testProgessData(@problem, 'none', '0', "False", "0 points possible (ungraded)") + + it 'displays ungraded if number of points possible is 0, even if graded value is True', -> + testProgessData(@problem, 'none', '0', "True", "0 points possible (ungraded)") describe 'with any other valid status', -> it 'reports the current score', -> - testProgessData(@problem, 'foo', '1/1', "(1/1 point)") + testProgessData(@problem, 'foo', '1/1', "True", "1/1 point (graded)") it 'shows current score when rendering happens with the content', -> - testProgessData(@problem, 'test status', '2/2', "(2/2 points)") + testProgessData(@problem, 'test status', '2/2', "True", "2/2 points (graded)") + + it 'reports the current score even if problem is ungraded', -> + 
testProgessData(@problem, 'test status', '1/1', "False", "1/1 point (ungraded)") describe 'with valid status and string containing an integer like "0" for detail', -> # These tests are to address a failure specific to Chrome 51 and 52 + - it 'shows no score possible for the detail', -> - testProgessData(@problem, 'foo', '0', "") + it 'shows 0 points possible for the detail', -> + testProgessData(@problem, 'foo', '0', "False", "") describe 'render', -> beforeEach -> @@ -147,18 +159,18 @@ describe 'Problem', -> it 're-bind the content', -> expect(@problem.bind).toHaveBeenCalled() - describe 'check_fd', -> + describe 'submit_fd', -> beforeEach -> # Insert an input of type file outside of the problem. $('.xblock-student_view').after('') @problem = new Problem($('.xblock-student_view')) - spyOn(@problem, 'check') + spyOn(@problem, 'submit') - it 'check method is called if input of type file is not in problem', -> - @problem.check_fd() - expect(@problem.check).toHaveBeenCalled() + it 'submit method is called if input of type file is not in problem', -> + @problem.submit_fd() + expect(@problem.submit).toHaveBeenCalled() - describe 'check', -> + describe 'submit', -> beforeEach -> @problem = new Problem($('.xblock-student_view')) @problem.answers = 'foo=1&bar=2' @@ -168,7 +180,7 @@ describe 'Problem', -> promise = always: (callable) -> callable() done: (callable) -> callable() - @problem.check() + @problem.submit() expect(Logger.log).toHaveBeenCalledWith 'problem_check', 'foo=1&bar=2' it 'log the problem_graded event, after the problem is done grading.', -> @@ -180,41 +192,44 @@ describe 'Problem', -> promise = always: (callable) -> callable() done: (callable) -> callable() - @problem.check() + @problem.submit() expect(Logger.log).toHaveBeenCalledWith 'problem_graded', ['foo=1&bar=2', 'mock grader response'], @problem.id - it 'submit the answer for check', -> + it 'submit the answer for submit', -> spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> promise 
= always: (callable) -> callable() done: (callable) -> callable() - @problem.check() + @problem.submit() expect($.postWithPrefix).toHaveBeenCalledWith '/problem/Problem1/problem_check', 'foo=1&bar=2', jasmine.any(Function) describe 'when the response is correct', -> it 'call render with returned content', -> + contents = '

    Correctexcellent

    ' + + '

    Yepcorrect

    ' spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> - callback(success: 'correct', contents: 'Correct') + callback(success: 'correct', contents: contents) promise = always: (callable) -> callable() done: (callable) -> callable() - @problem.check() - expect(@problem.el.html()).toEqual 'Correct' - expect(window.SR.readElts).toHaveBeenCalled() + @problem.submit() + expect(@problem.el).toHaveHtml contents + expect(window.SR.readTexts).toHaveBeenCalledWith ['Question 1: excellent', 'Question 2: correct'] describe 'when the response is incorrect', -> it 'call render with returned content', -> + contents = '

    Incorrectno, try again

    ' spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> - callback(success: 'incorrect', contents: 'Incorrect') + callback(success: 'incorrect', contents: contents) promise = always: (callable) -> callable() done: (callable) -> callable() - @problem.check() - expect(@problem.el.html()).toEqual 'Incorrect' - expect(window.SR.readElts).toHaveBeenCalled() + @problem.submit() + expect(@problem.el).toHaveHtml contents + expect(window.SR.readTexts).toHaveBeenCalledWith ['no, try again'] - it 'tests if all the capa buttons are disabled while checking', (done)-> + it 'tests if all the capa buttons are disabled while submitting', (done)-> deferred = $.Deferred() self = this @@ -230,7 +245,7 @@ describe 'Problem', -> done: (callable) -> callable() spyOn @problem, 'enableAllButtons' - @problem.check() + @problem.submit() expect(@problem.enableAllButtons).toHaveBeenCalledWith false, true if jQuery.active == 0 deferred.resolve() @@ -241,7 +256,7 @@ describe 'Problem', -> return ).always done - it 'tests the expected change in text of check button', (done) -> + it 'tests the expected change in text of submit button', (done) -> deferred = $.Deferred() self = this @@ -253,31 +268,35 @@ describe 'Problem', -> callable() done: (callable) -> callable() - spyOn @problem.checkButtonLabel, 'text' - @problem.check() - expect(@problem.checkButtonLabel.text).toHaveBeenCalledWith 'Checking...' 
+ spyOn @problem.submitButtonLabel, 'text' + @problem.submit() + expect(@problem.submitButtonLabel.text).toHaveBeenCalledWith 'Submitting' if jQuery.active == 0 deferred.resolve() deferred.promise() runs.call(self).then(-> - expect(self.problem.checkButtonLabel.text).toHaveBeenCalledWith 'Check' + expect(self.problem.submitButtonLabel.text).toHaveBeenCalledWith 'Submit' return ).always done - describe 'check button on problems', -> + describe 'submit button on problems', -> beforeEach -> @problem = new Problem($('.xblock-student_view')) - @checkDisabled = (v) -> expect(@problem.checkButton.hasClass('is-disabled')).toBe(v) + @submitDisabled = (disabled) => + if disabled + expect(@problem.submitButton).toHaveAttr('disabled') + else + expect(@problem.submitButton).not.toHaveAttr('disabled') - describe 'some basic tests for check button', -> + describe 'some basic tests for submit button', -> it 'should become enabled after a value is entered into the text box', -> $('#input_example_1').val('test').trigger('input') - @checkDisabled false + @submitDisabled false $('#input_example_1').val('').trigger('input') - @checkDisabled true + @submitDisabled true - describe 'some advanced tests for check button', -> + describe 'some advanced tests for submit button', -> it 'should become enabled after a checkbox is checked', -> html = '''
    @@ -287,12 +306,12 @@ describe 'Problem', ->
    ''' $('#input_example_1').replaceWith(html) - @problem.checkAnswersAndCheckButton true - @checkDisabled true + @problem.submitAnswersAndSubmitButton true + @submitDisabled true $('#input_1_1_1').click() - @checkDisabled false + @submitDisabled false $('#input_1_1_1').click() - @checkDisabled true + @submitDisabled true it 'should become enabled after a radiobutton is checked', -> html = ''' @@ -303,12 +322,12 @@ describe 'Problem', ->
    ''' $('#input_example_1').replaceWith(html) - @problem.checkAnswersAndCheckButton true - @checkDisabled true + @problem.submitAnswersAndSubmitButton true + @submitDisabled true $('#input_1_1_1').attr('checked', true).trigger('click') - @checkDisabled false + @submitDisabled false $('#input_1_1_1').attr('checked', false).trigger('click') - @checkDisabled true + @submitDisabled true it 'should become enabled after a value is selected in a selector', -> html = ''' @@ -321,12 +340,12 @@ describe 'Problem', ->
    ''' $('#input_example_1').replaceWith(html) - @problem.checkAnswersAndCheckButton true - @checkDisabled true + @problem.submitAnswersAndSubmitButton true + @submitDisabled true $("#problem_sel select").val("val2").trigger('change') - @checkDisabled false + @submitDisabled false $("#problem_sel select").val("val0").trigger('change') - @checkDisabled true + @submitDisabled true it 'should become enabled after a radiobutton is checked and a value is entered into the text box', -> html = ''' @@ -337,22 +356,22 @@ describe 'Problem', -> ''' $(html).insertAfter('#input_example_1') - @problem.checkAnswersAndCheckButton true - @checkDisabled true + @problem.submitAnswersAndSubmitButton true + @submitDisabled true $('#input_1_1_1').attr('checked', true).trigger('click') - @checkDisabled true + @submitDisabled true $('#input_example_1').val('111').trigger('input') - @checkDisabled false + @submitDisabled false $('#input_1_1_1').attr('checked', false).trigger('click') - @checkDisabled true + @submitDisabled true it 'should become enabled if there are only hidden input fields', -> html = ''' ''' $('#input_example_1').replaceWith(html) - @problem.checkAnswersAndCheckButton true - @checkDisabled false + @problem.submitAnswersAndSubmitButton true + @submitDisabled false describe 'reset', -> beforeEach -> @@ -376,13 +395,29 @@ describe 'Problem', -> it 'render the returned content', -> spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> - callback html: "Reset" + callback html: "Reset", success: true promise = always: (callable) -> callable() @problem.reset() expect(@problem.el.html()).toEqual 'Reset' - it 'tests if all the buttons are disabled and the text of check button remains same while resetting', (done) -> + it 'sends a message to the window SR element', -> + spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> + callback html: "Reset", success: true + promise = + always: (callable) -> callable() + @problem.reset() + 
expect(window.SR.readText).toHaveBeenCalledWith 'This problem has been reset.' + + it 'shows a notification on error', -> + spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> + callback msg: "Error on reset.", success: false + promise = + always: (callable) -> callable() + @problem.reset() + expect($('.notification-gentle-alert .notification-message').text()).toEqual("Error on reset.") + + it 'tests if all the buttons are disabled and the text of submit button remains same while resetting', (done) -> deferred = $.Deferred() self = this @@ -394,14 +429,14 @@ describe 'Problem', -> spyOn @problem, 'enableAllButtons' @problem.reset() expect(@problem.enableAllButtons).toHaveBeenCalledWith false, false - expect(@problem.checkButtonLabel).toHaveText 'Check' + expect(@problem.submitButtonLabel).toHaveText 'Submit' if jQuery.active == 0 deferred.resolve() deferred.promise() runs.call(self).then(-> expect(self.problem.enableAllButtons).toHaveBeenCalledWith true, false - expect(self.problem.checkButtonLabel).toHaveText 'Check' + expect(self.problem.submitButtonLabel).toHaveText 'Submit' ).always done describe 'show', -> @@ -411,7 +446,7 @@ describe 'Problem', -> describe 'when the answer has not yet shown', -> beforeEach -> - @problem.el.removeClass 'showed' + expect(@problem.el.find('.show').attr('disabled')).not.toEqual('disabled') it 'log the problem_show event', -> @problem.show() @@ -431,32 +466,17 @@ describe 'Problem', -> expect($('#answer_1_1')).toHaveHtml 'One' expect($('#answer_1_2')).toHaveHtml 'Two' - it 'toggle the show answer button', -> + it 'sends a message to the window SR element', -> spyOn($, 'postWithPrefix').and.callFake (url, callback) -> callback(answers: {}) @problem.show() - expect($('.show .show-label')).toHaveText 'Hide Answer' - expect(window.SR.readElts).toHaveBeenCalled() + expect(window.SR.readText).toHaveBeenCalledWith 'Answers to this problem are now shown. Navigate through the problem to review it with answers inline.' 
- it 'toggle the show answer button, answers are strings', -> - spyOn($, 'postWithPrefix').and.callFake (url, callback) -> callback(answers: '1_1': 'One', '1_2': 'Two') - @problem.show() - expect($('.show .show-label')).toHaveText 'Hide Answer' - expect(window.SR.readElts).toHaveBeenCalledWith ['

    Answer: One

    ', '

    Answer: Two

    '] - - it 'toggle the show answer button, answers are elements', -> - answer1 = '
    one
    ' - answer2 = '
    two
    ' - spyOn($, 'postWithPrefix').and.callFake (url, callback) -> callback(answers: '1_1': answer1, '1_2': answer2) - @problem.show() - expect($('.show .show-label')).toHaveText 'Hide Answer' - expect(window.SR.readElts).toHaveBeenCalledWith [jasmine.any(jQuery), jasmine.any(jQuery)] - - it 'add the showed class to element', -> + it 'disables the show answer button', -> spyOn($, 'postWithPrefix').and.callFake (url, callback) -> callback(answers: {}) @problem.show() - expect(@problem.el).toHaveClass 'showed' + expect(@problem.el.find('.show').attr('disabled')).toEqual('disabled') - it 'reads the answers', (done) -> + it 'sends a SR message when answer is present', (done) -> deferred = $.Deferred() runs = -> @@ -469,7 +489,7 @@ describe 'Problem', -> deferred.promise() runs.call(this).then(-> - expect(window.SR.readElts).toHaveBeenCalled() + expect(window.SR.readText).toHaveBeenCalledWith 'Answers to this problem are now shown. Navigate through the problem to review it with answers inline.' 
return ).always done @@ -676,32 +696,6 @@ describe 'Problem', -> expect(el.find('canvas')).not.toExist() expect(console.log).toHaveBeenCalledWith('Answer is absent for image input with id=12345') - describe 'when the answers are already shown', -> - beforeEach -> - @problem.el.addClass 'showed' - @problem.el.prepend ''' - - ''' - $('#answer_1_1').html('One') - $('#answer_1_2').html('Two') - - it 'hide the answers', -> - @problem.show() - expect($('#answer_1_1')).toHaveHtml '' - expect($('#answer_1_2')).toHaveHtml '' - expect($('label[for="input_1_1_1"]')).not.toHaveAttr 'correct_answer' - - it 'toggle the show answer button', -> - @problem.show() - expect($('.show .show-label')).toHaveText 'Show Answer' - - it 'remove the showed class from element', -> - @problem.show() - expect(@problem.el).not.toHaveClass 'showed' - describe 'save', -> beforeEach -> @problem = new Problem($('.xblock-student_view')) @@ -722,46 +716,27 @@ describe 'Problem', -> expect($.postWithPrefix).toHaveBeenCalledWith '/problem/Problem1/problem_save', 'foo=1&bar=2', jasmine.any(Function) - it 'reads the save message', (done) -> - deferred = $.Deferred() - - runs = -> - spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> - promise = undefined - callback success: 'OK' - promise = always: (callable) -> - callable() - @problem.save() - if jQuery.active == 0 - deferred.resolve() - deferred.promise() - - runs.call(this).then(-> - expect(window.SR.readElts).toHaveBeenCalled() - return - ).always done - - it 'tests if all the buttons are disabled and the text of check button does not change while saving.', (done) -> + it 'tests if all the buttons are disabled and the text of submit button does not change while saving.', (done) -> deferred = $.Deferred() self = this - + curr_html = @problem.el.html() runs = -> spyOn($, 'postWithPrefix').and.callFake (url, answers, callback) -> promise = undefined - callback success: 'OK' + callback(success: 'correct', html: curr_html) promise = always: 
(callable) -> callable() spyOn @problem, 'enableAllButtons' @problem.save() expect(@problem.enableAllButtons).toHaveBeenCalledWith false, false - expect(@problem.checkButtonLabel).toHaveText 'Check' + expect(@problem.submitButtonLabel).toHaveText 'Submit' if jQuery.active == 0 deferred.resolve() deferred.promise() runs.call(self).then(-> expect(self.problem.enableAllButtons).toHaveBeenCalledWith true, false - expect(self.problem.checkButtonLabel).toHaveText 'Check' + expect(self.problem.submitButtonLabel).toHaveText 'Submit' ).always done describe 'refreshMath', -> @@ -825,9 +800,9 @@ describe 'Problem', -> @problem = new Problem($('.xblock-student_view')) @problem.render(jsinput_html) - it 'check_save_waitfor should return false', -> + it 'submit_save_waitfor should return false', -> $(@problem.inputs[0]).data('waitfor', ->) - expect(@problem.check_save_waitfor()).toEqual(false) + expect(@problem.submit_save_waitfor()).toEqual(false) describe 'Submitting an xqueue-graded problem', -> matlabinput_html = readFixtures('matlabinput_problem.html') @@ -858,4 +833,26 @@ describe 'Problem', -> jasmine.clock().tick(64000) expect(@problem.poll.calls.count()).toEqual(6) - expect($('.capa_alert').text()).toEqual("The grading process is still running. Refresh the page to see updates.") + expect($('.notification-gentle-alert .notification-message').text()).toEqual("The grading process is still running. 
Refresh the page to see updates.") + + describe 'codeinput problem', -> + codeinputProblemHtml = readFixtures('codeinput_problem.html') + + beforeEach -> + spyOn($, 'postWithPrefix').and.callFake (url, callback) -> + callback html: codeinputProblemHtml + @problem = new Problem($('.xblock-student_view')) + @problem.render(codeinputProblemHtml) + + it 'has rendered with correct a11y info', -> + CodeMirrorTextArea = $('textarea')[1] + CodeMirrorTextAreaId = 'cm-textarea-101' + + # verify that question label has correct `for` attribute value + expect($('.problem-group-label').attr('for')).toEqual(CodeMirrorTextAreaId) + + # verify that codemirror textarea has correct `id` attribute value + expect($(CodeMirrorTextArea).attr('id')).toEqual(CodeMirrorTextAreaId) + + # verify that codemirror textarea has correct `aria-describedby` attribute value + expect($(CodeMirrorTextArea).attr('aria-describedby')).toEqual('cm-editor-exit-message-101 status_101') diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee index 87fe317bae..f0ef87b24c 100644 --- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee @@ -8,7 +8,7 @@ class @Problem @content = @el.data('content') # has_timed_out and has_response are used to ensure that are used to - # ensure that we wait a minimum of ~ 1s before transitioning the check + # ensure that we wait a minimum of ~ 1s before transitioning the submit # button from disabled to enabled @has_timed_out = false @has_response = false @@ -28,19 +28,25 @@ class @Problem problem_prefix = @element_id.replace(/problem_/,'') @inputs = @$("[id^='input_#{problem_prefix}_']") @$('div.action button').click @refreshAnswers - @checkButton = @$('div.action button.check') - @checkButtonLabel = @$('div.action button.check span.check-label') - @checkButtonCheckText = @checkButtonLabel.text() - @checkButtonCheckingText = 
@checkButton.data('checking') - @checkButton.click @check_fd - @hintButton = @$('div.action button.hint-button') + @reviewButton = @$('.notification-btn.review-btn') + @reviewButton.click @scroll_to_problem_meta + @submitButton = @$('.action .submit') + @submitButtonLabel = @$('.action .submit .submit-label') + @submitButtonSubmitText = @submitButtonLabel.text() + @submitButtonSubmittingText = @submitButton.data('submitting') + @submitButton.click @submit_fd + @hintButton = @$('.action .hint-button') @hintButton.click @hint_button - @resetButton = @$('div.action button.reset') + @resetButton = @$('.action .reset') @resetButton.click @reset - @showButton = @$('div.action button.show') + @showButton = @$('.action .show') @showButton.click @show - @saveButton = @$('div.action button.save') + @saveButton = @$('.action .save') + @saveNotification = @$('.notification-save') + @saveButtonLabel = @$('.action .save .save-label') @saveButton.click @save + @gentleAlertNotification = @$('.notification-gentle-alert') + @submitNotification = @$('.notification-submit') # Accessibility helper for sighted keyboard users to show tooltips on focus: @$('.clarification').focus (ev) => @@ -49,9 +55,15 @@ class @Problem @$('.clarification').blur (ev) => window.globalTooltipManager.hide() + @$('.review-btn').focus (ev) => + $(ev.target).removeClass('sr'); + + @$('.review-btn').blur (ev) => + $(ev.target).addClass('sr'); + @bindResetCorrectness() - if @checkButton.length - @checkAnswersAndCheckButton true + if @submitButton.length + @submitAnswersAndSubmitButton true # Collapsibles Collapsible.setCollapsibles(@el) @@ -65,26 +77,42 @@ class @Problem renderProgressState: => detail = @el.data('progress_detail') status = @el.data('progress_status') + graded = @el.data('graded') # Render 'x/y point(s)' if student has attempted question if status != 'none' and detail? 
and (jQuery.type(detail) == "string") and detail.indexOf('/') > 0 a = detail.split('/') earned = parseFloat(a[0]) possible = parseFloat(a[1]) - # This comment needs to be on one line to be properly scraped for the translators. Sry for length. - `// Translators: %(earned)s is the number of points earned. %(total)s is the total number of points (examples: 0/1, 1/1, 2/3, 5/10). The total number of points will always be at least 1. We pluralize based on the total number of points (example: 0/1 point; 1/2 points)` - progress_template = ngettext('(%(earned)s/%(possible)s point)', '(%(earned)s/%(possible)s points)', possible) + + if graded == "True" and possible != 0 + # This comment needs to be on one line to be properly scraped for the translators. Sry for length. + `// Translators: %(earned)s is the number of points earned. %(possible)s is the total number of points (examples: 0/1, 1/1, 2/3, 5/10). The total number of points will always be at least 1. We pluralize based on the total number of points (example: 0/1 point; 1/2 points)` + progress_template = ngettext('%(earned)s/%(possible)s point (graded)', '%(earned)s/%(possible)s points (graded)', possible) + else + # This comment needs to be on one line to be properly scraped for the translators. Sry for length. + `// Translators: %(earned)s is the number of points earned. %(possible)s is the total number of points (examples: 0/1, 1/1, 2/3, 5/10). The total number of points will always be at least 1. We pluralize based on the total number of points (example: 0/1 point; 1/2 points)` + progress_template = ngettext('%(earned)s/%(possible)s point (ungraded)', '%(earned)s/%(possible)s points (ungraded)', possible) progress = interpolate(progress_template, {'earned': earned, 'possible': possible}, true) # Render 'x point(s) possible' if student has not yet attempted question - if status == 'none' and detail? 
and (jQuery.type(detail) == "string") and detail.indexOf('/') > 0 - a = detail.split('/') - possible = parseFloat(a[1]) - `// Translators: %(num_points)s is the number of points possible (examples: 1, 3, 10). There will always be at least 1 point possible.` - progress_template = ngettext("(%(num_points)s point possible)", "(%(num_points)s points possible)", possible) + # Status is set to none when a user has a score of 0, and 0 when the problem has a weight of 0. + if status == 'none' or status == 0 + if detail? and (jQuery.type(detail) == "string") and detail.indexOf('/') > 0 + a = detail.split('/') + possible = parseFloat(a[1]) + else + possible = 0 + + if graded == "True" and possible != 0 + `// Translators: %(num_points)s is the number of points possible (examples: 1, 3, 10).` + progress_template = ngettext("%(num_points)s point possible (graded)", "%(num_points)s points possible (graded)", possible) + else + `// Translators: %(num_points)s is the number of points possible (examples: 1, 3, 10).` + progress_template = ngettext("%(num_points)s point possible (ungraded)", "%(num_points)s points possible (ungraded)", possible) progress = interpolate(progress_template, {'num_points': possible}, true) - @$('.problem-progress').html(progress) + @$('.problem-progress').text(progress) updateProgress: (response) => if response.progress_changed @@ -99,22 +127,23 @@ class @Problem @el.trigger('progressChanged') @renderProgressState() - queueing: => + queueing: (focus_callback) => @queued_items = @$(".xqueue") @num_queued_items = @queued_items.length if @num_queued_items > 0 if window.queuePollerID # Only one poller 'thread' per Problem window.clearTimeout(window.queuePollerID) window.queuePollerID = window.setTimeout( - => @poll(1000), + => @poll(1000, focus_callback), 1000) - poll: (prev_timeout) => + poll: (prev_timeout, focus_callback) => $.postWithPrefix "#{@url}/problem_get", (response) => # If queueing status changed, then render @new_queued_items = 
$(response.html).find(".xqueue") if @new_queued_items.length isnt @num_queued_items - @el.html(response.html) + edx.HtmlUtils.setHtml(@el, edx.HtmlUtils.HTML(response.html)).promise().done => + focus_callback?() JavascriptLoader.executeModuleScripts @el, () => @setupInputTypes() @bind() @@ -131,7 +160,7 @@ class @Problem @gentle_alert gettext("The grading process is still running. Refresh the page to see updates.") else window.queuePollerID = window.setTimeout( - => @poll(new_timeout), + => @poll(new_timeout, focus_callback), new_timeout ) @@ -153,16 +182,15 @@ class @Problem $.postWithPrefix "#{url}/input_ajax", data, callback - render: (content) -> + render: (content, focus_callback) -> if content - @el.attr({'aria-busy': 'true', 'aria-live': 'off', 'aria-atomic': 'false'}) @el.html(content) JavascriptLoader.executeModuleScripts @el, () => @setupInputTypes() @bind() - @queueing() + @queueing(focus_callback) @renderProgressState() - @el.attr('aria-busy', 'false') + focus_callback?() else $.postWithPrefix "#{@url}/problem_get", (response) => @el.html(response.html) @@ -188,15 +216,15 @@ class @Problem # If some function wants to be called before sending the answer to the # server, give it a chance to do so. # - # check_save_waitfor allows the callee to send alerts if the user's input is + # submit_save_waitfor allows the callee to send alerts if the user's input is # invalid. To do so, the callee must throw an exception named "Waitfor # Exception". This and any other errors or exceptions that arise from the # callee are rethrown and abort the submission. 
# # In order to use this feature, add a 'data-waitfor' attribute to the input, - # and specify the function to be called by the check button before sending + # and specify the function to be called by the submit button before sending # off @answers - check_save_waitfor: (callback) => + submit_save_waitfor: (callback) => flag = false for inp in @inputs if ($(inp).is("input[waitfor]")) @@ -216,28 +244,50 @@ class @Problem flag = false return flag + # Scroll to problem metadata and next focus is problem input + scroll_to_problem_meta: => + questionTitle = @$(".problem-header") + if questionTitle.length > 0 + $('html, body').animate({ + scrollTop: questionTitle.offset().top + }, 500); + questionTitle.focus() + + focus_on_notification: (type) => + notification = @$('.notification-'+type) + if notification.length > 0 + notification.focus() + + focus_on_submit_notification: => + @focus_on_notification('submit') + + focus_on_hint_notification: => + @focus_on_notification('hint') + + focus_on_save_notification: => + @focus_on_notification('save') ### - # 'check_fd' uses FormData to allow file submissions in the 'problem_check' dispatch, + # 'submit_fd' uses FormData to allow file submissions in the 'problem_check' dispatch, # in addition to simple querystring-based answers # # NOTE: The dispatch 'problem_check' is being singled out for the use of FormData; # maybe preferable to consolidate all dispatches to use FormData ### - check_fd: => - # If there are no file inputs in the problem, we can fall back on @check + submit_fd: => + # If there are no file inputs in the problem, we can fall back on @submit if @el.find('input:file').length == 0 - @check() + @submit() return - @enableCheckButton false + @enableSubmitButton false if not window.FormData alert "Submission aborted! Sorry, your browser does not support file uploads. If you can, please use Chrome or Safari which have been verified to support file uploads." 
- @enableCheckButton true + @enableSubmitButton true return - timeout_id = @enableCheckButtonAfterTimeout() + timeout_id = @enableSubmitButtonAfterTimeout() fd = new FormData() @@ -287,7 +337,7 @@ class @Problem abort_submission = file_too_large or file_not_selected or unallowed_file_submitted or required_files_not_submitted if abort_submission window.clearTimeout(timeout_id) - @enableCheckButton true + @enableSubmitButton true return settings = @@ -295,7 +345,7 @@ class @Problem data: fd processData: false contentType: false - complete: @enableCheckButtonAfterResponse + complete: @enableSubmitButtonAfterResponse success: (response) => switch response.success when 'incorrect', 'correct' @@ -307,115 +357,121 @@ class @Problem $.ajaxWithPrefix("#{@url}/problem_check", settings) - check: => - if not @check_save_waitfor(@check_internal) - @disableAllButtonsWhileRunning @check_internal, true + submit: => + if not @submit_save_waitfor(@submit_internal) + @disableAllButtonsWhileRunning @submit_internal, true - check_internal: => + submit_internal: => Logger.log 'problem_check', @answers $.postWithPrefix "#{@url}/problem_check", @answers, (response) => switch response.success when 'incorrect', 'correct' - window.SR.readElts($(response.contents).find('.status')) + window.SR.readTexts(@get_sr_status(response.contents)) @el.trigger('contentChanged', [@id, response.contents]) - @render(response.contents) + @render(response.contents, @focus_on_submit_notification) @updateProgress response - if @el.hasClass 'showed' - @el.removeClass 'showed' - @$('div.action button.check').focus() else + @saveNotification.hide() @gentle_alert response.success Logger.log 'problem_graded', [@answers, response.contents], @id + get_sr_status: (contents) => + # This method builds up an array of strings to send to the page screen-reader span. + # It first gets all elements with class "status", and then looks to see if they are contained + # in sections with aria-labels. 
If so, labels are prepended to the status element text. + # If not, just the text of the status elements are returned. + status_elements = $(contents).find('.status') + labeled_status = [] + for element in status_elements + parent_section = $(element).closest('section') + added_status = false + if parent_section + aria_label = parent_section.attr('aria-label') + if aria_label + `// Translators: This is only translated to allow for reording of label and associated status.` + template = gettext("{label}: {status}") + labeled_status.push(edx.StringUtils.interpolate(template, {label: aria_label, status: $(element).text()})) + added_status = true + + if not added_status + labeled_status.push($(element).text()) + + return labeled_status + reset: => @disableAllButtonsWhileRunning @reset_internal, false reset_internal: => Logger.log 'problem_reset', @answers $.postWithPrefix "#{@url}/problem_reset", id: @id, (response) => + if response.success @el.trigger('contentChanged', [@id, response.html]) - @render(response.html) + @render(response.html, @scroll_to_problem_meta) @updateProgress response + window.SR.readText(gettext('This problem has been reset.')) + else + @gentle_alert response.msg # TODO this needs modification to deal with javascript responses; perhaps we # need something where responsetypes can define their own behavior when show # is called. show: => - if !@el.hasClass 'showed' - Logger.log 'problem_show', problem: @id - answer_text = [] - $.postWithPrefix "#{@url}/problem_show", (response) => - answers = response.answers - $.each answers, (key, value) => - if $.isArray(value) - for choice in value - @$("label[for='input_#{key}_#{choice}']").attr correct_answer: 'true' - answer_text.push('

    ' + gettext('Answer:') + ' ' + value + '

    ') - else - answer = @$("#answer_#{key}, #solution_#{key}") - answer.html(value) - Collapsible.setCollapsibles(answer) + Logger.log 'problem_show', problem: @id + $.postWithPrefix "#{@url}/problem_show", (response) => + answers = response.answers + $.each answers, (key, value) => + if $.isArray(value) + for choice in value + @$("label[for='input_#{key}_#{choice}']").attr correct_answer: 'true' + else + answer = @$("#answer_#{key}, #solution_#{key}") + edx.HtmlUtils.setHtml(answer, edx.HtmlUtils.HTML(value)) + Collapsible.setCollapsibles(answer) - # Sometimes, `value` is just a string containing a MathJax formula. - # If this is the case, jQuery will throw an error in some corner cases - # because of an incorrect selector. We setup a try..catch so that - # the script doesn't break in such cases. - # - # We will fallback to the second `if statement` below, if an - # error is thrown by jQuery. - try - solution = $(value).find('.detailed-solution') - catch e - solution = {} - if solution.length - answer_text.push(solution) - else - answer_text.push('

    ' + gettext('Answer:') + ' ' + value + '

    ') + # Sometimes, `value` is just a string containing a MathJax formula. + # If this is the case, jQuery will throw an error in some corner cases + # because of an incorrect selector. We setup a try..catch so that + # the script doesn't break in such cases. + # + # We will fallback to the second `if statement` below, if an + # error is thrown by jQuery. + try + solution = $(value).find('.detailed-solution') + catch e + solution = {} - # TODO remove the above once everything is extracted into its own - # inputtype functions. - - @el.find(".capa_inputtype").each (index, inputtype) => - classes = $(inputtype).attr('class').split(' ') - for cls in classes - display = @inputtypeDisplays[$(inputtype).attr('id')] - showMethod = @inputtypeShowAnswerMethods[cls] - showMethod(inputtype, display, answers) if showMethod? - - if MathJax? - @el.find('.problem > div').each (index, element) => - MathJax.Hub.Queue ["Typeset", MathJax.Hub, element] - - `// Translators: the word Answer here refers to the answer to a problem the student must solve.` - @$('.show-label').text gettext('Hide Answer') - @el.addClass 'showed' - @updateProgress response - window.SR.readElts(answer_text) - else - @$('[id^=answer_], [id^=solution_]').text '' - @$('[correct_answer]').attr correct_answer: null - @el.removeClass 'showed' - `// Translators: the word Answer here refers to the answer to a problem the student must solve.` - @$('.show-label').text gettext('Show Answer') - window.SR.readText(gettext('Answer hidden')) + # TODO remove the above once everything is extracted into its own + # inputtype functions. @el.find(".capa_inputtype").each (index, inputtype) => - display = @inputtypeDisplays[$(inputtype).attr('id')] classes = $(inputtype).attr('class').split(' ') for cls in classes - hideMethod = @inputtypeHideAnswerMethods[cls] - hideMethod(inputtype, display) if hideMethod? 
+ display = @inputtypeDisplays[$(inputtype).attr('id')] + showMethod = @inputtypeShowAnswerMethods[cls] + showMethod(inputtype, display, answers) if showMethod? + + if MathJax? + @el.find('.problem > div').each (index, element) => + MathJax.Hub.Queue ["Typeset", MathJax.Hub, element] + + @el.find('.show').attr('disabled', 'disabled') + @updateProgress response + window.SR.readText(gettext('Answers to this problem are now shown. Navigate through the problem to review it with answers inline.')) + @scroll_to_problem_meta() + + clear_all_notifications: => + @submitNotification.remove() + @gentleAlertNotification.hide() + @saveNotification.hide() gentle_alert: (msg) => - if @el.find('.capa_alert').length - @el.find('.capa_alert').remove() - alert_elem = "
    " + msg + "
    " - @el.find('.action').after(alert_elem) - @el.find('.capa_alert').css(opacity: 0).animate(opacity: 1, 700) - window.SR.readElts @el.find('.capa_alert') + edx.HtmlUtils.setHtml(@el.find('.notification-gentle-alert .notification-message'), edx.HtmlUtils.HTML(msg)) + @clear_all_notifications() + @gentleAlertNotification.show() + @gentleAlertNotification.focus() save: => - if not @check_save_waitfor(@save_internal) + if not @submit_save_waitfor(@save_internal) @disableAllButtonsWhileRunning @save_internal, false save_internal: => @@ -424,8 +480,12 @@ class @Problem saveMessage = response.msg if response.success @el.trigger('contentChanged', [@id, response.html]) - @gentle_alert saveMessage - @updateProgress response + edx.HtmlUtils.setHtml(@el.find('.notification-save .notification-message'), edx.HtmlUtils.HTML(saveMessage)) + @clear_all_notifications() + @saveNotification.show() + @focus_on_save_notification() + else + @gentle_alert saveMessage refreshMath: (event, element) => element = event.target unless element @@ -459,12 +519,15 @@ class @Problem element.CodeMirror.save() if element.CodeMirror.save @answers = @inputs.serialize() - checkAnswersAndCheckButton: (bind=false) => - # Used to check available answers and if something is checked (or the answer is set in some textbox) - # "Check"/"Final check" button becomes enabled. Otherwise it is disabled by default. - # params: - # 'bind' used on the first check to attach event handlers to input fields - # to change "Check"/"Final check" enable status in case of some manipulations with answers + submitAnswersAndSubmitButton: (bind=false) => + """ + Used to check available answers and if something is checked (or the answer is set in some textbox) + "Submit" button becomes enabled. Otherwise it is disabled by default. 
+ + Arguments: + bind (bool): used on the first check to attach event handlers to input fields + to change "Submit" enable status in case of some manipulations with answers + """ answered = true at_least_one_text_input_found = false @@ -476,7 +539,8 @@ class @Problem one_text_input_filled = true if bind $(text_field).on 'input', (e) => - @checkAnswersAndCheckButton() + @saveNotification.hide() + @submitAnswersAndSubmitButton() return return if at_least_one_text_input_found and not one_text_input_filled @@ -489,7 +553,8 @@ class @Problem checked = true if bind $(checkbox_or_radio).on 'click', (e) => - @checkAnswersAndCheckButton() + @saveNotification.hide() + @submitAnswersAndSubmitButton() return return if not checked @@ -502,14 +567,15 @@ class @Problem answered = false if bind $(select_field).on 'change', (e) => - @checkAnswersAndCheckButton() + @saveNotification.hide() + @submitAnswersAndSubmitButton() return return if answered - @enableCheckButton true + @enableSubmitButton true else - @enableCheckButton false, false + @enableSubmitButton false, false bindResetCorrectness: -> # Loop through all input types @@ -605,7 +671,7 @@ class @Problem mode = element.data("mode") linenumbers = element.data("linenums") spaces = Array(parseInt(tabsize) + 1).join(" ") - CodeMirror.fromTextArea element[0], { + CodeMirrorEditor = CodeMirror.fromTextArea element[0], { lineNumbers: linenumbers indentUnit: tabsize tabSize: tabsize @@ -622,7 +688,12 @@ class @Problem cm.replaceSelection(spaces, "end") return false } - } + } + id = element.attr("id").replace(/^input_/, "") + CodeMirrorTextArea = CodeMirrorEditor.getInputField() + CodeMirrorTextArea.setAttribute("id", "cm-textarea-#{id}") + CodeMirrorTextArea.setAttribute("aria-describedby", "cm-editor-exit-message-#{id} status_#{id}") + return CodeMirrorEditor inputtypeShowAnswerMethods: choicegroup: (element, display, answers) => @@ -740,84 +811,95 @@ class @Problem # params: # 'operationCallback' is an operation to be run. 
# 'isFromCheckOperation' is a boolean to keep track if 'operationCallback' was - # @check, if so then text of check button will be changed as well. + # @submit, if so then text of submit button will be changed as well. @enableAllButtons false, isFromCheckOperation operationCallback().always => @enableAllButtons true, isFromCheckOperation + # Called by disableAllButtonsWhileRunning to automatically disable all buttons while check,reset, or + # save internal are running. Then enable all the buttons again after it is done. enableAllButtons: (enable, isFromCheckOperation) => # Used to enable/disable all buttons in problem. # params: # 'enable' is a boolean to determine enabling/disabling of buttons. # 'isFromCheckOperation' is a boolean to keep track if operation was initiated - # from @check so that text of check button will also be changed while disabling/enabling - # the check button. + # from @submit so that text of submit button will also be changed while disabling/enabling + # the submit button. if enable @resetButton .add(@saveButton) .add(@hintButton) .add(@showButton) - .removeClass('is-disabled') - .attr({'aria-disabled': 'false'}) + .removeAttr 'disabled' else @resetButton .add(@saveButton) .add(@hintButton) .add(@showButton) - .addClass('is-disabled') - .attr({'aria-disabled': 'true'}) + .attr({'disabled': 'disabled'}) - @enableCheckButton enable, isFromCheckOperation + @enableSubmitButton enable, isFromCheckOperation - enableCheckButton: (enable, changeText = true) => - # Used to disable check button to reduce chance of accidental double-submissions. + enableSubmitButton: (enable, changeText = true) => + # Used to disable submit button to reduce chance of accidental double-submissions. # params: - # 'enable' is a boolean to determine enabling/disabling of check button. + # 'enable' is a boolean to determine enabling/disabling of submit button. # 'changeText' is a boolean to determine if there is need to change the - # text of check button as well. 
+ # text of submit button as well. if enable - @checkButton.removeClass 'is-disabled' - @checkButton.attr({'aria-disabled': 'false'}) + submitCanBeEnabled = @submitButton.data('should-enable-submit-button') == 'True' + if submitCanBeEnabled + @submitButton.removeAttr 'disabled' if changeText - @checkButtonLabel.text(@checkButtonCheckText) + @submitButtonLabel.text(@submitButtonSubmitText) else - @checkButton.addClass 'is-disabled' - @checkButton.attr({'aria-disabled': 'true'}) + @submitButton.attr({'disabled': 'disabled'}) if changeText - @checkButtonLabel.text(@checkButtonCheckingText) + @submitButtonLabel.text(@submitButtonSubmittingText) - enableCheckButtonAfterResponse: => + enableSubmitButtonAfterResponse: => @has_response = true if not @has_timed_out # Server has returned response before our timeout - @enableCheckButton false + @enableSubmitButton false else - @enableCheckButton true + @enableSubmitButton true - enableCheckButtonAfterTimeout: => + enableSubmitButtonAfterTimeout: => @has_timed_out = false @has_response = false - enableCheckButton = () => + enableSubmitButton = () => @has_timed_out = true if @has_response - @enableCheckButton true - window.setTimeout(enableCheckButton, 750) + @enableSubmitButton true + window.setTimeout(enableSubmitButton, 750) hint_button: => # Store the index of the currently shown hint as an attribute. # Use that to compute the next hint number when the button is clicked. 
- hint_index = @$('.problem-hint').attr('hint_index') + hint_container = @.$('.problem-hint') + hint_index = hint_container.attr('hint_index') if hint_index == undefined next_index = 0 else next_index = parseInt(hint_index) + 1 $.postWithPrefix "#{@url}/hint_button", hint_index: next_index, input_id: @id, (response) => - hint_container = @.$('.problem-hint') - hint_container.html(response.contents) - MathJax.Hub.Queue [ - 'Typeset' - MathJax.Hub - hint_container[0] - ] - hint_container.attr('hint_index', response.hint_index) - @$('.hint-button').focus() # a11y focus on click, like the Check button + if response.success + hint_msg_container = @.$('.problem-hint .notification-message') + hint_container.attr('hint_index', response.hint_index) + edx.HtmlUtils.setHtml(hint_msg_container, edx.HtmlUtils.HTML(response.msg)) + # Update any Mathjax entries + MathJax.Hub.Queue [ + 'Typeset' + MathJax.Hub + hint_container[0] + ] + # Enable/Disable the next hint button + if response.should_enable_next_hint + @hintButton.removeAttr 'disabled' + else + @hintButton.attr({'disabled': 'disabled'}) + @el.find('.notification-hint').show() + @focus_on_hint_notification() + else + @gentle_alert response.msg diff --git a/common/lib/xmodule/xmodule/modulestore/inheritance.py b/common/lib/xmodule/xmodule/modulestore/inheritance.py index 21fda25534..2eb0e54f48 100644 --- a/common/lib/xmodule/xmodule/modulestore/inheritance.py +++ b/common/lib/xmodule/xmodule/modulestore/inheritance.py @@ -125,11 +125,6 @@ class InheritanceMixin(XBlockMixin): scope=Scope.settings, default='', ) - text_customization = Dict( - display_name=_("Text Customization"), - help=_("Enter string customization substitutions for particular locations."), - scope=Scope.settings, - ) use_latex_compiler = Boolean( display_name=_("Enable LaTeX Compiler"), help=_("Enter true or false. 
If true, you can use the LaTeX templates for HTML components and advanced Problem components."), diff --git a/common/lib/xmodule/xmodule/templates/problem/jsinput_response.yaml b/common/lib/xmodule/xmodule/templates/problem/jsinput_response.yaml index d74836d052..7e44026eb2 100644 --- a/common/lib/xmodule/xmodule/templates/problem/jsinput_response.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/jsinput_response.yaml @@ -43,7 +43,7 @@ data: | par is a dictionary that contains two keys, "answer" and "state". The value of "answer" is the JSON string that "getGrade" returns. The value of "state" is the JSON string that "getState" returns. - Clicking either "Check" or "Save" registers the current state. + Clicking either "Submit" or "Save" registers the current state. ''' par = json.loads(ans) diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py index 84c3125e55..2eec8e5760 100644 --- a/common/lib/xmodule/xmodule/tests/test_capa_module.py +++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py @@ -140,6 +140,7 @@ class CapaFactory(object): else: module.get_score = lambda: {'score': 0, 'total': 1} + module.graded = 'False' return module @@ -479,7 +480,7 @@ class CapaModuleTest(unittest.TestCase): with self.assertRaises(ValueError): result = CapaModule.make_dict_of_responses(invalid_get_dict) - def test_check_problem_correct(self): + def test_submit_problem_correct(self): module = CapaFactory.create(attempts=1) @@ -494,7 +495,7 @@ class CapaModuleTest(unittest.TestCase): # Check the problem get_request_dict = {CapaFactory.input_key(): '3.14'} - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) # Expect that the problem is marked correct self.assertEqual(result['success'], 'correct') @@ -505,7 +506,7 @@ class CapaModuleTest(unittest.TestCase): # Expect that the number of attempts is incremented by 1 self.assertEqual(module.attempts, 2) - def 
test_check_problem_incorrect(self): + def test_submit_problem_incorrect(self): module = CapaFactory.create(attempts=0) @@ -515,7 +516,7 @@ class CapaModuleTest(unittest.TestCase): # Check the problem get_request_dict = {CapaFactory.input_key(): '0'} - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) # Expect that the problem is marked correct self.assertEqual(result['success'], 'incorrect') @@ -523,7 +524,7 @@ class CapaModuleTest(unittest.TestCase): # Expect that the number of attempts is incremented by 1 self.assertEqual(module.attempts, 1) - def test_check_problem_closed(self): + def test_submit_problem_closed(self): module = CapaFactory.create(attempts=3) # Problem closed -- cannot submit @@ -532,7 +533,7 @@ class CapaModuleTest(unittest.TestCase): mock_closed.return_value = True with self.assertRaises(xmodule.exceptions.NotFoundError): get_request_dict = {CapaFactory.input_key(): '3.14'} - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) # Expect that number of attempts NOT incremented self.assertEqual(module.attempts, 3) @@ -541,7 +542,7 @@ class CapaModuleTest(unittest.TestCase): RANDOMIZATION.ALWAYS, 'true' ) - def test_check_problem_resubmitted_with_randomize(self, rerandomize): + def test_submit_problem_resubmitted_with_randomize(self, rerandomize): # Randomize turned on module = CapaFactory.create(rerandomize=rerandomize, attempts=0) @@ -551,7 +552,7 @@ class CapaModuleTest(unittest.TestCase): # Expect that we cannot submit with self.assertRaises(xmodule.exceptions.NotFoundError): get_request_dict = {CapaFactory.input_key(): '3.14'} - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) # Expect that number of attempts NOT incremented self.assertEqual(module.attempts, 0) @@ -561,20 +562,20 @@ class CapaModuleTest(unittest.TestCase): 'false', RANDOMIZATION.PER_STUDENT ) - def test_check_problem_resubmitted_no_randomize(self, rerandomize): + def 
test_submit_problem_resubmitted_no_randomize(self, rerandomize): # Randomize turned off module = CapaFactory.create(rerandomize=rerandomize, attempts=0, done=True) # Expect that we can submit successfully get_request_dict = {CapaFactory.input_key(): '3.14'} - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) self.assertEqual(result['success'], 'correct') # Expect that number of attempts IS incremented self.assertEqual(module.attempts, 1) - def test_check_problem_queued(self): + def test_submit_problem_queued(self): module = CapaFactory.create(attempts=1) # Simulate that the problem is queued @@ -588,7 +589,7 @@ class CapaModuleTest(unittest.TestCase): values['get_recentmost_queuetime'].return_value = datetime.datetime.now(UTC) get_request_dict = {CapaFactory.input_key(): '3.14'} - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' self.assertIn('You must wait', result['success']) @@ -596,8 +597,8 @@ class CapaModuleTest(unittest.TestCase): # Expect that the number of attempts is NOT incremented self.assertEqual(module.attempts, 1) - def test_check_problem_with_files(self): - # Check a problem with uploaded files, using the check_problem API. + def test_submit_problem_with_files(self): + # Check a problem with uploaded files, using the submit_problem API. # pylint: disable=protected-access # The files we'll be uploading. @@ -614,13 +615,13 @@ class CapaModuleTest(unittest.TestCase): xqueue_interface._http_post = Mock(return_value=(0, "ok")) module.system.xqueue['interface'] = xqueue_interface - # Create a request dictionary for check_problem. + # Create a request dictionary for submit_problem. 
get_request_dict = { CapaFactoryWithFiles.input_key(response_num=2): fileobjs, CapaFactoryWithFiles.input_key(response_num=3): 'None', } - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) # _http_post is called like this: # _http_post( @@ -645,7 +646,7 @@ class CapaModuleTest(unittest.TestCase): for fpath, fileobj in kwargs['files'].iteritems(): self.assertEqual(fpath, fileobj.name) - def test_check_problem_with_files_as_xblock(self): + def test_submit_problem_with_files_as_xblock(self): # Check a problem with uploaded files, using the XBlock API. # pylint: disable=protected-access @@ -678,7 +679,7 @@ class CapaModuleTest(unittest.TestCase): for fpath, fileobj in kwargs['files'].iteritems(): self.assertEqual(fpath, fileobj.name) - def test_check_problem_error(self): + def test_submit_problem_error(self): # Try each exception that capa_module should handle exception_classes = [StudentInputError, @@ -697,7 +698,7 @@ class CapaModuleTest(unittest.TestCase): mock_grade.side_effect = exception_class('test error') get_request_dict = {CapaFactory.input_key(): '3.14'} - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' expected_msg = 'Error: test error' @@ -706,11 +707,11 @@ class CapaModuleTest(unittest.TestCase): # Expect that the number of attempts is NOT incremented self.assertEqual(module.attempts, 1) - def test_check_problem_other_errors(self): + def test_submit_problem_other_errors(self): """ Test that errors other than the expected kinds give an appropriate message. - See also `test_check_problem_error` for the "expected kinds" or errors. + See also `test_submit_problem_error` for the "expected kinds" or errors. 
""" # Create the module module = CapaFactory.create(attempts=1) @@ -727,12 +728,12 @@ class CapaModuleTest(unittest.TestCase): mock_grade.side_effect = Exception(error_msg) get_request_dict = {CapaFactory.input_key(): '3.14'} - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' self.assertIn(error_msg, result['success']) - def test_check_problem_zero_max_grade(self): + def test_submit_problem_zero_max_grade(self): """ Test that a capa problem with a max grade of zero doesn't generate an error. """ @@ -744,9 +745,9 @@ class CapaModuleTest(unittest.TestCase): # Check the problem get_request_dict = {CapaFactory.input_key(): '3.14'} - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) - def test_check_problem_error_nonascii(self): + def test_submit_problem_error_nonascii(self): # Try each exception that capa_module should handle exception_classes = [StudentInputError, @@ -765,7 +766,7 @@ class CapaModuleTest(unittest.TestCase): mock_grade.side_effect = exception_class(u"ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ") get_request_dict = {CapaFactory.input_key(): '3.14'} - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' expected_msg = u'Error: ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ' @@ -774,7 +775,7 @@ class CapaModuleTest(unittest.TestCase): # Expect that the number of attempts is NOT incremented self.assertEqual(module.attempts, 1) - def test_check_problem_error_with_staff_user(self): + def test_submit_problem_error_with_staff_user(self): # Try each exception that capa module should handle for exception_class in [StudentInputError, @@ -792,7 +793,7 @@ class CapaModuleTest(unittest.TestCase): mock_grade.side_effect = exception_class('test error') get_request_dict = {CapaFactory.input_key(): '3.14'} - result = module.check_problem(get_request_dict) + result = 
module.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' self.assertIn('test error', result['success']) @@ -990,120 +991,55 @@ class CapaModuleTest(unittest.TestCase): # Expect that we succeed self.assertTrue('success' in result and result['success']) - def test_check_button_name(self): - - # If last attempt, button name changes to "Final Check" - # Just in case, we also check what happens if we have - # more attempts than allowed. - attempts = random.randint(1, 10) - module = CapaFactory.create(attempts=attempts - 1, max_attempts=attempts) - self.assertEqual(module.check_button_name(), "Final Check") - - module = CapaFactory.create(attempts=attempts, max_attempts=attempts) - self.assertEqual(module.check_button_name(), "Final Check") - - module = CapaFactory.create(attempts=attempts + 1, max_attempts=attempts) - self.assertEqual(module.check_button_name(), "Final Check") - - # Otherwise, button name is "Check" - module = CapaFactory.create(attempts=attempts - 2, max_attempts=attempts) - self.assertEqual(module.check_button_name(), "Check") - - module = CapaFactory.create(attempts=attempts - 3, max_attempts=attempts) - self.assertEqual(module.check_button_name(), "Check") - - # If no limit on attempts, then always show "Check" - module = CapaFactory.create(attempts=attempts - 3) - self.assertEqual(module.check_button_name(), "Check") - + def test_submit_button_name(self): module = CapaFactory.create(attempts=0) - self.assertEqual(module.check_button_name(), "Check") + self.assertEqual(module.submit_button_name(), "Submit") - def test_check_button_checking_name(self): + def test_submit_button_submitting_name(self): module = CapaFactory.create(attempts=1, max_attempts=10) - self.assertEqual(module.check_button_checking_name(), "Checking...") + self.assertEqual(module.submit_button_submitting_name(), "Submitting") - module = CapaFactory.create(attempts=10, max_attempts=10) - self.assertEqual(module.check_button_checking_name(), 
"Checking...") - - def test_check_button_name_customization(self): - module = CapaFactory.create( - attempts=1, - max_attempts=10, - text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"} - ) - self.assertEqual(module.check_button_name(), "Submit") - - module = CapaFactory.create(attempts=9, - max_attempts=10, - text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"} - ) - self.assertEqual(module.check_button_name(), "Final Submit") - - def test_check_button_checking_name_customization(self): - module = CapaFactory.create( - attempts=1, - max_attempts=10, - text_customization={ - "custom_check": "Submit", - "custom_final_check": "Final Submit", - "custom_checking": "Checking..." - } - ) - self.assertEqual(module.check_button_checking_name(), "Checking...") - - module = CapaFactory.create( - attempts=9, - max_attempts=10, - text_customization={ - "custom_check": "Submit", - "custom_final_check": "Final Submit", - "custom_checking": "Checking..." 
- } - ) - self.assertEqual(module.check_button_checking_name(), "Checking...") - - def test_should_show_check_button(self): + def test_should_enable_submit_button(self): attempts = random.randint(1, 10) - # If we're after the deadline, do NOT show check button + # If we're after the deadline, disable the submit button module = CapaFactory.create(due=self.yesterday_str) - self.assertFalse(module.should_show_check_button()) + self.assertFalse(module.should_enable_submit_button()) - # If user is out of attempts, do NOT show the check button + # If user is out of attempts, disable the submit button module = CapaFactory.create(attempts=attempts, max_attempts=attempts) - self.assertFalse(module.should_show_check_button()) + self.assertFalse(module.should_enable_submit_button()) - # If survey question (max_attempts = 0), do NOT show the check button + # If survey question (max_attempts = 0), disable the submit button module = CapaFactory.create(max_attempts=0) - self.assertFalse(module.should_show_check_button()) + self.assertFalse(module.should_enable_submit_button()) # If user submitted a problem but hasn't reset, - # do NOT show the check button + # disable the submit button # Note: we can only reset when rerandomize="always" or "true" module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=True) - self.assertFalse(module.should_show_check_button()) + self.assertFalse(module.should_enable_submit_button()) module = CapaFactory.create(rerandomize="true", done=True) - self.assertFalse(module.should_show_check_button()) + self.assertFalse(module.should_enable_submit_button()) - # Otherwise, DO show the check button + # Otherwise, enable the submit button module = CapaFactory.create() - self.assertTrue(module.should_show_check_button()) + self.assertTrue(module.should_enable_submit_button()) # If the user has submitted the problem - # and we do NOT have a reset button, then we can show the check button + # and we do NOT have a reset button, then we can enable the 
submit button # Setting rerandomize to "never" or "false" ensures that the reset button # is not shown module = CapaFactory.create(rerandomize=RANDOMIZATION.NEVER, done=True) - self.assertTrue(module.should_show_check_button()) + self.assertTrue(module.should_enable_submit_button()) module = CapaFactory.create(rerandomize="false", done=True) - self.assertTrue(module.should_show_check_button()) + self.assertTrue(module.should_enable_submit_button()) module = CapaFactory.create(rerandomize=RANDOMIZATION.PER_STUDENT, done=True) - self.assertTrue(module.should_show_check_button()) + self.assertTrue(module.should_enable_submit_button()) def test_should_show_reset_button(self): @@ -1239,11 +1175,11 @@ class CapaModuleTest(unittest.TestCase): # We've tested the show/hide button logic in other tests, # so here we hard-wire the values - show_check_button = bool(random.randint(0, 1) % 2) + enable_submit_button = bool(random.randint(0, 1) % 2) show_reset_button = bool(random.randint(0, 1) % 2) show_save_button = bool(random.randint(0, 1) % 2) - module.should_show_check_button = Mock(return_value=show_check_button) + module.should_enable_submit_button = Mock(return_value=enable_submit_button) module.should_show_reset_button = Mock(return_value=show_reset_button) module.should_show_save_button = Mock(return_value=show_save_button) @@ -1272,9 +1208,10 @@ class CapaModuleTest(unittest.TestCase): context = render_args[1] self.assertEqual(context['problem']['html'], "
    Test Problem HTML
    ") - self.assertEqual(bool(context['check_button']), show_check_button) + self.assertEqual(bool(context['should_enable_submit_button']), enable_submit_button) self.assertEqual(bool(context['reset_button']), show_reset_button) self.assertEqual(bool(context['save_button']), show_save_button) + self.assertFalse(context['demand_hint_possible']) # Assert that the encapsulated html contains the original html self.assertIn(html, html_encapsulated) @@ -1305,14 +1242,16 @@ class CapaModuleTest(unittest.TestCase): # Check the AJAX call that gets the hint by index result = module.get_demand_hint(0) - self.assertEqual(result['contents'], u'Hint (1 of 2): Demand 1') self.assertEqual(result['hint_index'], 0) + self.assertTrue(result['should_enable_next_hint']) + result = module.get_demand_hint(1) - self.assertEqual(result['contents'], u'Hint (2 of 2): Demand 2') self.assertEqual(result['hint_index'], 1) + self.assertFalse(result['should_enable_next_hint']) + result = module.get_demand_hint(2) # here the server wraps around to index 0 - self.assertEqual(result['contents'], u'Hint (1 of 2): Demand 1') self.assertEqual(result['hint_index'], 0) + self.assertTrue(result['should_enable_next_hint']) def test_demand_hint_logging(self): module = CapaFactory.create(xml=self.demand_xml) @@ -1430,7 +1369,7 @@ class CapaModuleTest(unittest.TestCase): # Check the problem get_request_dict = {CapaFactory.input_key(): '3.14'} - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) # Expect that the seed is the same self.assertEqual(seed, module.seed) @@ -1612,7 +1551,7 @@ class CapaModuleTest(unittest.TestCase): module = CapaFactory.create() module.get_progress = Mock(wraps=module.get_progress) module.get_html() - module.get_progress.assert_called_once_with() + module.get_progress.assert_called_with() def test_get_problem(self): """ @@ -1637,13 +1576,13 @@ class CapaModuleTest(unittest.TestCase): def test_check_unmask(self): """ - Check that shuffle unmasking is 
plumbed through: when check_problem is called, + Check that shuffle unmasking is plumbed through: when submit_problem is called, unmasked names should appear in the track_function event_info. """ module = CapaFactory.create(xml=self.common_shuffle_xml) with patch.object(module.runtime, 'publish') as mock_track_function: get_request_dict = {CapaFactory.input_key(): 'choice_3'} # the correct choice - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) mock_call = mock_track_function.mock_calls[1] event_info = mock_call[1][2] self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_3') @@ -1669,7 +1608,7 @@ class CapaModuleTest(unittest.TestCase): """On problem reset, unmask names should appear track_function.""" module = CapaFactory.create(xml=self.common_shuffle_xml) get_request_dict = {CapaFactory.input_key(): 'mask_0'} - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) # On reset, 'old_state' should use unmasked names with patch.object(module.runtime, 'track_function') as mock_track_function: module.reset_problem(None) @@ -1684,7 +1623,7 @@ class CapaModuleTest(unittest.TestCase): """On problem rescore, unmasked names should appear on track_function.""" module = CapaFactory.create(xml=self.common_shuffle_xml) get_request_dict = {CapaFactory.input_key(): 'mask_0'} - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) # On rescore, state/student_answers should use unmasked names with patch.object(module.runtime, 'track_function') as mock_track_function: module.rescore_problem() @@ -1711,7 +1650,7 @@ class CapaModuleTest(unittest.TestCase): module = CapaFactory.create(xml=xml) with patch.object(module.runtime, 'publish') as mock_track_function: get_request_dict = {CapaFactory.input_key(): 'choice_2'} # mask_X form when masking enabled - module.check_problem(get_request_dict) + module.submit_problem(get_request_dict) mock_call = mock_track_function.mock_calls[1] 
event_info = mock_call[1][2] self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_2') @@ -2631,7 +2570,7 @@ class TestProblemCheckTracking(unittest.TestCase): def get_event_for_answers(self, module, answer_input_dict): with patch.object(module.runtime, 'publish') as mock_track_function: - module.check_problem(answer_input_dict) + module.submit_problem(answer_input_dict) self.assertGreaterEqual(len(mock_track_function.mock_calls), 2) # There are potentially 2 track logs: answers and hint. [-1]=answers. diff --git a/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py index 1b400269de..d56876e704 100644 --- a/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py +++ b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py @@ -3,7 +3,7 @@ Tests the logic of problems with a delay between attempt submissions. Note that this test file is based off of test_capa_module.py and as such, uses the same CapaFactory problem setup to test the functionality -of the check_problem method of a capa module when the "delay between quiz +of the submit_problem method of a capa module when the "delay between quiz submissions" setting is set to different values """ @@ -128,7 +128,7 @@ class XModuleQuizAttemptsDelayTest(unittest.TestCase): last_submission_time=None, submission_wait_seconds=None, considered_now=None, - skip_check_problem=False): + skip_submit_problem=False): """Unified create and check code for the tests here.""" module = CapaFactoryWithDelay.create( attempts=num_attempts, @@ -138,12 +138,12 @@ class XModuleQuizAttemptsDelayTest(unittest.TestCase): ) module.done = False get_request_dict = {CapaFactoryWithDelay.input_key(): "3.14"} - if skip_check_problem: + if skip_submit_problem: return (module, None) if considered_now is not None: - result = module.check_problem(get_request_dict, considered_now) + result = module.submit_problem(get_request_dict, 
considered_now) else: - result = module.check_problem(get_request_dict) + result = module.submit_problem(get_request_dict) return (module, result) def test_first_submission(self): @@ -251,13 +251,13 @@ class XModuleQuizAttemptsDelayTest(unittest.TestCase): considered_now=datetime.datetime(2013, 12, 6, 0, 24, 0, tzinfo=UTC) ) - # Now try it without the check_problem + # Now try it without the submit_problem (module, unused_result) = self.create_and_check( num_attempts=num_attempts, last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36, tzinfo=UTC), submission_wait_seconds=180, considered_now=datetime.datetime(2013, 12, 6, 0, 24, 0, tzinfo=UTC), - skip_check_problem=True + skip_submit_problem=True ) # Expect that number of attempts NOT incremented self.assertEqual(module.attempts, num_attempts) diff --git a/common/lib/xmodule/xmodule/tests/test_xblock_wrappers.py b/common/lib/xmodule/xmodule/tests/test_xblock_wrappers.py index 6cf57b43b0..cacde168e9 100644 --- a/common/lib/xmodule/xmodule/tests/test_xblock_wrappers.py +++ b/common/lib/xmodule/xmodule/tests/test_xblock_wrappers.py @@ -290,6 +290,7 @@ class XBlockWrapperTestMixin(object): # pylint: disable=no-member descriptor.runtime.id_reader.get_definition_id = Mock(return_value='a') descriptor.runtime.modulestore = modulestore + descriptor._xmodule.graded = 'False' self.check_property(descriptor) # Test that when an xmodule is generated from descriptor_cls diff --git a/common/static/js/fixtures/sr-fixture.html b/common/static/js/fixtures/sr-fixture.html new file mode 100644 index 0000000000..4a98262cd7 --- /dev/null +++ b/common/static/js/fixtures/sr-fixture.html @@ -0,0 +1,4 @@ +
    + Yes!Your answer is correct! + No!Your answer is wrong! +
    diff --git a/common/static/js/spec/accessibility_tools_spec.js b/common/static/js/spec/accessibility_tools_spec.js index 3f3b5172cb..44b587e99d 100644 --- a/common/static/js/spec/accessibility_tools_spec.js +++ b/common/static/js/spec/accessibility_tools_spec.js @@ -91,4 +91,38 @@ describe('Tests for accessibility_tools.js', function() { }); }); }); + + describe('Tests for SR region', function() { + var getSRText = function() { + return $('#reader-feedback').html(); + }; + + beforeEach(function() { + loadFixtures('js/fixtures/sr-fixture.html'); + }); + + it('has the sr class and is aria-live', function() { + var $reader = $('#reader-feedback'); + expect($reader.hasClass('sr')).toBe(true); + expect($reader.attr('aria-live')).toBe('polite'); + }); + + it('supports the setting of simple text', function() { + window.SR.readText('Simple Text'); + expect(getSRText()).toContain('

    Simple Text

    '); + }); + + it('supports the setting of an array of text', function() { + window.SR.readTexts(['One', 'Two']); + expect(getSRText()).toContain('

    One

    \n

    Two

    '); + }); + + it('supports setting an array of elements', function() { + window.SR.readElts($('.status')); + expect(getSRText()).toContain( + '

    Yes!Your answer is correct!

    \n

    No!Your answer is wrong!

    ' + ); + }); + }); + }); diff --git a/common/static/js/src/accessibility_tools.js b/common/static/js/src/accessibility_tools.js index 52a8a06282..a593c65a3e 100644 --- a/common/static/js/src/accessibility_tools.js +++ b/common/static/js/src/accessibility_tools.js @@ -147,28 +147,52 @@ $(function() { SRAlert = (function() { function SRAlert() { - $('body').append(''); - this.el = $('#reader-feedback'); + // This initialization sometimes gets done twice, so take to only create a single reader-feedback div. + var readerFeedbackID = 'reader-feedback', + $readerFeedbackSelector = $('#' + readerFeedbackID); + + if ($readerFeedbackSelector.length === 0) { + edx.HtmlUtils.append( + $('body'), + edx.HtmlUtils.interpolateHtml( + edx.HtmlUtils.HTML('
    '), + {readerFeedbackID: readerFeedbackID} + ) + ); + } + this.el = $('#' + readerFeedbackID); } SRAlert.prototype.clear = function() { - return this.el.html(' '); + edx.HtmlUtils.setHtml(this.el, ''); }; SRAlert.prototype.readElts = function(elts) { - var feedback = ''; + var texts = []; $.each(elts, function(idx, value) { - return feedback += '

    ' + $(value).html() + '

    \n'; + texts.push($(value).html()); }); - return this.el.html(feedback); + return this.readTexts(texts); }; SRAlert.prototype.readText = function(text) { - return this.el.text(text); + return this.readTexts([text]); + }; + + SRAlert.prototype.readTexts = function(texts) { + var htmlFeedback = edx.HtmlUtils.HTML(''); + $.each(texts, function(idx, value) { + htmlFeedback = edx.HtmlUtils.interpolateHtml( + edx.HtmlUtils.HTML('{previous_feedback}

    {value}

    \n'), + // "value" may be HTML, if an element is being passed + {previous_feedback: htmlFeedback, value: edx.HtmlUtils.HTML(value)} + ); + }); + edx.HtmlUtils.setHtml(this.el, htmlFeedback); }; return SRAlert; })(); - window.SR = new SRAlert; + window.SR = new SRAlert(); }); diff --git a/common/static/sass/edx-pattern-library-shims/_buttons.scss b/common/static/sass/edx-pattern-library-shims/_buttons.scss new file mode 100644 index 0000000000..0f290713bb --- /dev/null +++ b/common/static/sass/edx-pattern-library-shims/_buttons.scss @@ -0,0 +1,119 @@ +// ------------------------------ +// LMS Problem Feedback Revamp styling +// Mirror styles from the Pattern Library + +@import 'base/variables'; + + +// ---------------------------- +// #GLOBALS +// ---------------------------- +%btn { + display: inline-block; + border-style: $btn-border-style; + border-radius: $btn-border-radius; + border-width: $btn-border-size; + box-shadow: none; + padding: 0.625rem 1.25rem; + font-size: 16px; + font-weight: normal; + text-shadow: none; + text-transform: capitalize; + + // Display: block, one button per line, full width + &.block { + display: block; + width: 100%; + } + + // STATE: is disabled + &:disabled, + &.is-disabled { + @extend %state-disabled; + } + + .icon { + display: inline-block; + vertical-align: baseline; + + &:only-child, + .sr-only + & { + @include margin-right(0); + } + } + &.btn-small { + @extend %btn-small; + } +} + +// ---------------------------- +// #DEFAULT +// ---------------------------- +.btn-default { + @extend %btn; + border-color: $btn-default-border-color; + background: $btn-default-background; + color: $btn-default-color; + + // STATE: hover and focus + &:hover, + &.is-hovered, + &:focus, + &.is-focused { + border-color: $btn-default-focus-border-color; + background-color: $btn-default-background; + color: $btn-default-focus-color; + } + + // STATE: is pressed or active + &:active, + &.is-pressed, + &.is-active { + border-color: 
$btn-default-active-border-color; + color: $btn-default-active-color; + } + + // STATE: is disabled + &:disabled, + &.is-disabled { + border-color: $btn-disabled-border-color; + color: $btn-disabled-color; + } +} + +// ---------------------------- +// #BRAND +// ---------------------------- +.btn-brand { + @extend %btn; + border-color: $btn-brand-border-color; + background: $btn-brand-background; + color: $btn-brand-color; + + // STATE: hover and focus + &:hover, + &.is-hovered, + &:focus, + &.is-focused { + border-color: $btn-brand-focus-border-color; + background-color: $btn-brand-focus-background; + color: $btn-brand-focus-color; + } + + // STATE: is pressed or active + &:active, + &.is-pressed, + &.is-active { + border-color: $btn-brand-active-border-color; + background: $btn-brand-active-background; + } + + // STATE: is disabled + &:disabled, + &.is-disabled { + border-color: $btn-disabled-border-color; + background: $btn-brand-disabled-background; + color: $btn-brand-disabled-color; + } +} + diff --git a/common/static/sass/edx-pattern-library-shims/base/_variables.scss b/common/static/sass/edx-pattern-library-shims/base/_variables.scss new file mode 100644 index 0000000000..def96c46c2 --- /dev/null +++ b/common/static/sass/edx-pattern-library-shims/base/_variables.scss @@ -0,0 +1,222 @@ +// COLORS +$light-gray1: rgb(221, 221, 221); + + +// Font Sizes in em +$small-font-size: 0.85em !default; +$medium-font-size: 0.9em !default; +$base-font-size: 1em !default; + +// Line height +$base-line-height: 1.5em !default; + + +$component-border-radius: 3px !default; + +// grid - breakpoints +$bp-screen-sm: 480px !default; +$bp-screen-md: 768px !default; +$bp-screen-lg: 1024px !default; +$bp-screen-xl: 1280px !default; + + +// #SPACING +// ---------------------------- +// spacing - baseline +$baseline: 20px !default; + +// vertical spacing +$baseline-vertical: ($baseline*2) !default; + +$spacing-vertical: ( + base: $baseline-vertical, + mid-small: 
($baseline-vertical*0.75), + small: ($baseline-vertical/2), + x-small: ($baseline-vertical/4), + xx-small: ($baseline-vertical/8), + xxx-small: ($baseline-vertical/10), + mid-large: ($baseline-vertical*1.5), + large: ($baseline-vertical*2), + x-large: ($baseline-vertical*4) +); + +// horizontal spacing +$baseline-horizontal: $baseline !default; + +$spacing-horizontal: ( + base: $baseline-horizontal, + mid-small: ($baseline-horizontal*0.75), + small: ($baseline-horizontal/2), + x-small: ($baseline-horizontal/4), + xx-small: ($baseline-horizontal/8), + mid-large: ($baseline-horizontal*1.5), + large: ($baseline-horizontal*2), + x-large: ($baseline-horizontal*4) +); + +// get vertical spacings from defined map values +@function spacing-vertical($key) { + @if map-has-key($spacing-vertical, $key) { + @return rem(map-get($spacing-vertical, $key)); + } + + @warn "Unknown `#{$key}` in $spacing-vertical."; + @return null; +} + +// get horizontal spacings from defined map values +@function spacing-horizontal($key) { + @if map-has-key($spacing-horizontal, $key) { + @return rem(map-get($spacing-horizontal, $key)); + } + + @warn "Unknown `#{$key}` in $spacing-horizontal."; + @return null; +} + +// typography: weights +$font-weights: ( + normal: 400, + light: 300, + x-light: 200, + semi-bold: 600, + bold: 700 +); + +// typography: sizes +$font-sizes: ( + xxxx-large: 38, + xxx-large: 28, + xx-large: 24, + x-large: 21, + large: 18, + base: 16, + small: 14, + x-small: 12, + xx-small: 11, + xxx-small: 10, +); + +// get font sizes from defined map values +@function font-size($key) { + @if map-has-key($font-sizes, $key) { + @return rem(map-get($font-sizes, $key)); + } + + @warn "Unknown `#{$key}` in $font-sizes."; + @return null; +} + +// get font weight from defined map values +@function font-weight($key) { + @if map-has-key($font-weights, $key) { + @return map-get($font-weights, $key); + } + + @warn "Unknown `#{$key}` in $font-weights."; + @return null; +} + + +// visual disabled 
+%state-disabled { + pointer-events: none; + outline: none; + cursor: not-allowed; +} + +// +Colors - UXPL new pattern library colors +// ==================== +$uxpl-blue-base: rgba(0, 116, 180, 1); // wcag2a compliant +$uxpl-blue-hover-active: darken($uxpl-blue-base, 7%); // wcag2a compliant + +$uxpl-green-base: rgba(0, 129, 0, 1); // wcag2a compliant +$uxpl-green-hover-active: lighten($uxpl-green-base, 7%); // wcag2a compliant + +$uxpl-gray-dark: rgb(17, 17, 17); +$uxpl-gray-base: rgb(65, 65, 65); +$uxpl-gray-background: rgb(217, 217, 217); + + +// Alert styles +$error-color: rgb(203, 7, 18) !default; +$success-color: rgb(0, 155, 0) !default; +$warning-color: rgb(255, 192, 31) !default; +$warning-color-accent: rgb(255, 252, 221) !default; + +// BUTTONS + +// disabled button +$btn-disabled-border-color: #d2d0d0 !default; +$btn-disabled-color: #6b6969 !default; + +// base button +$btn-default-border-color: transparent !default; +$btn-default-background: transparent !default; +$btn-default-color: $uxpl-blue-base !default; +$btn-default-focus-border-color: $uxpl-blue-base !default; +$btn-default-focus-color: $uxpl-blue-base !default; +$btn-default-active-border-color: $uxpl-blue-base !default; +$btn-default-active-color: $uxpl-blue-base !default; + +// brand button +$btn-brand-border-color: $uxpl-blue-base !default; +$btn-brand-background: $uxpl-blue-base !default; +$btn-brand-color: #fcfcfc !default; +$btn-brand-focus-color: $btn-brand-color !default; +$btn-brand-focus-border-color: $uxpl-blue-hover-active !default; +$btn-brand-focus-background: $uxpl-blue-hover-active !default; +$btn-brand-active-border-color: $uxpl-blue-base !default; +$btn-brand-active-background: $uxpl-blue-base !default; +$btn-brand-disabled-background: #f2f3f3 !default; +$btn-brand-disabled-color: #676666 !default; + +// ---------------------------- +// #SETTINGS +// ---------------------------- +$btn-border-style: solid !default; +$btn-border-size: 1px !default; +$btn-shadow: 3px !default; 
+$btn-font-weight: font-weight(semi-bold) !default; +$btn-border-radius: $component-border-radius !default; + +// sizes +$btn-large-padding-vertical: spacing-vertical(small); +$btn-large-padding-horizontal: spacing-horizontal(mid-large); + + +$btn-base-padding-vertical: spacing-vertical(x-small); +$btn-base-padding-horizontal: spacing-horizontal(base); +$btn-base-font-size: font-size(base); + +$btn-small-padding-vertical: spacing-vertical(x-small); +$btn-small-padding-horizontal: spacing-horizontal(small); + + +// ---------------------------- +// #SIZES +// ---------------------------- +// large +%btn-large { + padding: 1.25rem 1.875rem; + font-size: font-size(large); +} + +// small +%btn-small { + padding: 0.625rem 0.625rem; + font-size: 14px; +} + +// ---------------------------- +// Problem Notifications +// ---------------------------- + +@mixin notification-by-type($color) { + border-top: 3px solid $color; + .icon { + @include margin-right(3 * $baseline/ 4); + color: $color; + } +} + diff --git a/common/test/acceptance/pages/lms/annotation_component.py b/common/test/acceptance/pages/lms/annotation_component.py index ec8ef3906c..ca6223cd9e 100644 --- a/common/test/acceptance/pages/lms/annotation_component.py +++ b/common/test/acceptance/pages/lms/annotation_component.py @@ -72,7 +72,7 @@ class AnnotationComponentPage(PageObject): # Wait for the click to take effect, which is after the class is applied. self.wait_for(lambda: 'selected' in self.q(css=answer_css).attrs('class')[0], description='answer selected') # Click the "Check" button. - self.q(css=self.active_problem_selector('.check')).click() + self.q(css=self.active_problem_selector('.submit')).click() # This will trigger a POST to problem_check so wait until the response is returned. 
self.wait_for_ajax() diff --git a/common/test/acceptance/pages/lms/problem.py b/common/test/acceptance/pages/lms/problem.py index 02f885db53..7ffbd19005 100644 --- a/common/test/acceptance/pages/lms/problem.py +++ b/common/test/acceptance/pages/lms/problem.py @@ -2,6 +2,8 @@ Problem Page. """ from bok_choy.page_object import PageObject +from common.test.acceptance.pages.common.utils import click_css +from selenium.webdriver.common.keys import Keys class ProblemPage(PageObject): @@ -20,6 +22,7 @@ class ProblemPage(PageObject): """ Return the current problem name. """ + self.wait_for_element_visibility(self.CSS_PROBLEM_HEADER, 'wait for problem header') return self.q(css='.problem-header').text[0] @property @@ -48,14 +51,15 @@ class ProblemPage(PageObject): """ Return the "hint" text of the problem from html """ - return self.q(css="div.problem div.problem-hint").html[0].split(' <', 1)[0] + hints_html = self.q(css="div.problem .notification-hint .notification-message li").html + return [hint_html.split(' -

    Which piece of furniture is built for sitting?

    + a table a desk @@ -15,8 +15,8 @@ a bookshelf -

    Which of the following are musical instruments?

    + a piano a tree diff --git a/common/test/acceptance/tests/lms/test_certificate_web_view.py b/common/test/acceptance/tests/lms/test_certificate_web_view.py index 10cfa2f814..350c027a81 100644 --- a/common/test/acceptance/tests/lms/test_certificate_web_view.py +++ b/common/test/acceptance/tests/lms/test_certificate_web_view.py @@ -218,14 +218,14 @@ class CertificateProgressPageTest(UniqueCourseTest): self.course_nav.q(css='select option[value="{}"]'.format('blue')).first.click() # Select correct radio button for the answer - self.course_nav.q(css='fieldset div.field:nth-child(3) input').nth(0).click() + self.course_nav.q(css='fieldset div.field:nth-child(4) input').nth(0).click() # Select correct radio buttons for the answer - self.course_nav.q(css='fieldset div.field:nth-child(1) input').nth(1).click() - self.course_nav.q(css='fieldset div.field:nth-child(3) input').nth(1).click() + self.course_nav.q(css='fieldset div.field:nth-child(2) input').nth(1).click() + self.course_nav.q(css='fieldset div.field:nth-child(4) input').nth(1).click() # Submit the answer - self.course_nav.q(css='button.check.Check').click() + self.course_nav.q(css='button.submit').click() self.course_nav.wait_for_ajax() # Navigate to the 'Test Subsection 2' of 'Test Section 2' @@ -238,5 +238,5 @@ class CertificateProgressPageTest(UniqueCourseTest): self.course_nav.q(css='input[id^=input_][id$=_2_1]').fill('A*x^2 + sqrt(y)') # Submit the answer - self.course_nav.q(css='button.check.Check').click() + self.course_nav.q(css='button.submit').click() self.course_nav.wait_for_ajax() diff --git a/common/test/acceptance/tests/lms/test_conditional.py b/common/test/acceptance/tests/lms/test_conditional.py index 3b7cf2afe2..945b0669d4 100644 --- a/common/test/acceptance/tests/lms/test_conditional.py +++ b/common/test/acceptance/tests/lms/test_conditional.py @@ -109,7 +109,7 @@ class ConditionalTest(UniqueCourseTest): # Answer the problem problem_page = ProblemPage(self.browser) 
problem_page.fill_answer('correct string') - problem_page.click_check() + problem_page.click_submit() # The conditional does not update on its own, so we need to reload the page. self.courseware_page.visit() # Verify that we can see the content. diff --git a/common/test/acceptance/tests/lms/test_lms.py b/common/test/acceptance/tests/lms/test_lms.py index 9238dd0d25..fe5a189808 100644 --- a/common/test/acceptance/tests/lms/test_lms.py +++ b/common/test/acceptance/tests/lms/test_lms.py @@ -1085,12 +1085,12 @@ class ProblemExecutionTest(UniqueCourseTest): # Fill in the answer correctly. problem_page.fill_answer("20") - problem_page.click_check() + problem_page.click_submit() self.assertTrue(problem_page.is_correct()) # Fill in the answer incorrectly. problem_page.fill_answer("4") - problem_page.click_check() + problem_page.click_submit() self.assertFalse(problem_page.is_correct()) diff --git a/common/test/acceptance/tests/lms/test_lms_courseware.py b/common/test/acceptance/tests/lms/test_lms_courseware.py index 5e5da26f39..72e80f62b6 100644 --- a/common/test/acceptance/tests/lms/test_lms_courseware.py +++ b/common/test/acceptance/tests/lms/test_lms_courseware.py @@ -714,12 +714,12 @@ class ProblemStateOnNavigationTest(UniqueCourseTest): ) self.assertEqual(self.problem_page.problem_name, problem_name) - def test_perform_problem_check_and_navigate(self): + def test_perform_problem_submit_and_navigate(self): """ Scenario: I go to sequential position 1 Facing problem1, I select 'choice_1' - Then I click check button + Then I click submit button Then I go to sequential position 2 Then I came back to sequential position 1 again Facing problem1, I observe the problem1 content is not @@ -730,7 +730,7 @@ class ProblemStateOnNavigationTest(UniqueCourseTest): # Update problem 1's content state by clicking check button. 
self.problem_page.click_choice('choice_choice_1') - self.problem_page.click_check() + self.problem_page.click_submit() self.problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect') # Save problem 1's content state as we're about to switch units in the sequence. @@ -761,7 +761,7 @@ class ProblemStateOnNavigationTest(UniqueCourseTest): # Update problem 1's content state by clicking save button. self.problem_page.click_choice('choice_choice_1') self.problem_page.click_save() - self.problem_page.wait_for_expected_status('div.capa_alert', 'saved') + self.problem_page.wait_for_save_notification() # Save problem 1's content state as we're about to switch units in the sequence. problem1_content_before_switch = self.problem_page.problem_content @@ -790,7 +790,7 @@ class ProblemStateOnNavigationTest(UniqueCourseTest): # Update problem 1's content state – by performing reset operation. self.problem_page.click_choice('choice_choice_1') - self.problem_page.click_check() + self.problem_page.click_submit() self.problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect') self.problem_page.click_reset() self.problem_page.wait_for_expected_status('span.unanswered', 'unanswered') diff --git a/common/test/acceptance/tests/lms/test_lms_entrance_exams.py b/common/test/acceptance/tests/lms/test_lms_entrance_exams.py index a1e8bc1e8e..d3b2a63b8f 100644 --- a/common/test/acceptance/tests/lms/test_lms_entrance_exams.py +++ b/common/test/acceptance/tests/lms/test_lms_entrance_exams.py @@ -99,7 +99,7 @@ class EntranceExamPassTest(EntranceExamTest): self.assertTrue(self.courseware_page.has_entrance_exam_message()) self.assertFalse(self.courseware_page.has_passed_message()) problem_page.click_choice('choice_1') - problem_page.click_check() + problem_page.click_submit() self.courseware_page.wait_for_page() self.assertTrue(self.courseware_page.has_passed_message()) self.assertEqual(self.courseware_page.chapter_count_in_navigation, 2) diff --git 
a/common/test/acceptance/tests/lms/test_lms_gating.py b/common/test/acceptance/tests/lms/test_lms_gating.py index bad8fd4c15..52223859ac 100644 --- a/common/test/acceptance/tests/lms/test_lms_gating.py +++ b/common/test/acceptance/tests/lms/test_lms_gating.py @@ -114,7 +114,7 @@ class GatingTest(UniqueCourseTest): problem_page = ProblemPage(self.browser) self.assertEqual(problem_page.wait_for_page().problem_name, 'HEIGHT OF EIFFEL TOWER') problem_page.click_choice('choice_1') - problem_page.click_check() + problem_page.click_submit() def test_subsection_gating_in_studio(self): """ diff --git a/common/test/acceptance/tests/lms/test_lms_problems.py b/common/test/acceptance/tests/lms/test_lms_problems.py index 9000e2e61f..ef1b4af535 100644 --- a/common/test/acceptance/tests/lms/test_lms_problems.py +++ b/common/test/acceptance/tests/lms/test_lms_problems.py @@ -39,9 +39,10 @@ class ProblemsTest(UniqueCourseTest): ) problem = self.get_problem() + sequential = self.get_sequential() course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( - XBlockFixtureDesc('sequential', 'Test Subsection').add_children(problem) + sequential.add_children(problem) ) ).install() @@ -59,6 +60,10 @@ class ProblemsTest(UniqueCourseTest): """ Subclasses should override this to complete the fixture """ raise NotImplementedError() + def get_sequential(self): + """ Subclasses can override this to add a sequential with metadata """ + return XBlockFixtureDesc('sequential', 'Test Subsection') + class ProblemClarificationTest(ProblemsTest): """ @@ -102,7 +107,211 @@ class ProblemClarificationTest(ProblemsTest): self.assertNotIn('strong', tooltip_text) -class ProblemExtendedHintTest(ProblemsTest, EventsTestMixin): +class ProblemHintTest(ProblemsTest, EventsTestMixin): + """ + Base test class for problem hint tests. + """ + def verify_check_hint(self, answer, answer_text, expected_events): + """ + Verify clicking Check shows the extended hint in the problem message. 
+ """ + self.courseware_page.visit() + problem_page = ProblemPage(self.browser) + self.assertEqual(problem_page.problem_text[0], u'question text') + problem_page.fill_answer(answer) + problem_page.click_submit() + self.assertEqual(problem_page.message_text, answer_text) + # Check for corresponding tracking event + actual_events = self.wait_for_events( + event_filter={'event_type': 'edx.problem.hint.feedback_displayed'}, + number_of_matches=1 + ) + self.assert_events_match(expected_events, actual_events) + + def verify_demand_hints(self, first_hint, second_hint, expected_events): + """ + Test clicking through the demand hints and verify the events sent. + """ + self.courseware_page.visit() + problem_page = ProblemPage(self.browser) + + # The hint notification should not be visible on load + self.assertFalse(problem_page.is_hint_notification_visible()) + + # The two Hint button should be enabled. One visible, one present, but not visible in the DOM + self.assertEqual([None, None], problem_page.get_hint_button_disabled_attr()) + + # The hint button rotates through multiple hints + problem_page.click_hint() + self.assertTrue(problem_page.is_hint_notification_visible()) + self.assertEqual(problem_page.hint_text, first_hint) + # Now there are two "hint" buttons, as there is also one in the hint notification. + self.assertEqual([None, None], problem_page.get_hint_button_disabled_attr()) + + problem_page.click_hint() + self.assertEqual(problem_page.hint_text, second_hint) + # Now both "hint" buttons should be disabled, as there are no more hints. + self.assertEqual(['true', 'true'], problem_page.get_hint_button_disabled_attr()) + + # Now click on "Review" and make sure the focus goes to the correct place. 
+ problem_page.click_review_in_notification() + self.assertTrue(problem_page.is_focus_on_problem_meta()) + + # Check corresponding tracking events + actual_events = self.wait_for_events( + event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'}, + number_of_matches=2 + ) + self.assert_events_match(expected_events, actual_events) + + def get_problem(self): + """ Subclasses should override this to complete the fixture """ + raise NotImplementedError() + + +class ProblemNotificationTests(ProblemsTest): + """ + Tests that the notifications are visible when expected. + """ + + def get_problem(self): + """ + Problem structure. + """ + xml = dedent(""" + + + + + Brazil timely feedback -- explain why an almost correct answer is wrong + Germany + Indonesia + Russia + + + + """) + return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml, + metadata={'max_attempts': 10}, + grader_type='Final Exam') + + def test_notification_updates(self): + """ + Verifies that the notification is removed and not visible when it should be + """ + self.courseware_page.visit() + problem_page = ProblemPage(self.browser) + problem_page.click_choice("choice_2") + self.assertFalse(problem_page.is_success_notification_visible()) + problem_page.click_submit() + problem_page.wait_success_notification() + # Clicking Save should clear the submit notification + problem_page.click_save() + self.assertFalse(problem_page.is_success_notification_visible()) + problem_page.wait_for_save_notification() + # Changing the answer should clear the save notification + problem_page.click_choice("choice_1") + self.assertFalse(problem_page.is_save_notification_visible()) + problem_page.click_save() + # Submitting the problem again should clear the save notification + problem_page.click_submit() + problem_page.wait_incorrect_notification() + self.assertFalse(problem_page.is_save_notification_visible()) + + +class ProblemSubmitButtonMaxAttemptsTest(ProblemsTest): + """ + Tests that the Submit button 
disables after the number of max attempts is reached. + """ + + def get_problem(self): + """ + Problem structure. + """ + xml = dedent(""" + + + + + Brazil timely feedback -- explain why an almost correct answer is wrong + Germany + Indonesia + Russia + + + + """) + return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml, + metadata={'max_attempts': 2}, + grader_type='Final Exam') + + def test_max_attempts(self): + """ + Verifies that the Submit button disables when the max number of attempts is reached. + """ + self.courseware_page.visit() + problem_page = ProblemPage(self.browser) + + # Submit first answer (correct) + problem_page.click_choice("choice_2") + self.assertFalse(problem_page.is_submit_disabled()) + problem_page.click_submit() + problem_page.wait_success_notification() + + # Submit second and final answer (incorrect) + problem_page.click_choice("choice_1") + problem_page.click_submit() + problem_page.wait_incorrect_notification() + + # Make sure that the Submit button disables. + problem_page.wait_for_submit_disabled() + + +class ProblemSubmitButtonPastDueTest(ProblemsTest): + """ + Tests that the Submit button is disabled if it is past the due date. + """ + + def get_problem(self): + """ + Problem structure. + """ + xml = dedent(""" + + + + + Brazil timely feedback -- explain why an almost correct answer is wrong + Germany + Indonesia + Russia + + + + """) + return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml, + metadata={'max_attempts': 2}, + grader_type='Final Exam') + + def get_sequential(self): + """ Subclasses can override this to add a sequential with metadata """ + return XBlockFixtureDesc('sequential', 'Test Subsection', metadata={'due': "2016-10-01T00"}) + + def test_past_due(self): + """ + Verifies that the Submit button is disabled when the problem is past its due date. + """ + self.courseware_page.visit() + problem_page = ProblemPage(self.browser) + # Should have Submit button disabled on original rendering. 
+ problem_page.wait_for_submit_disabled() + + # Select a choice, and make sure that the Submit button remains disabled. + problem_page.click_choice("choice_2") + problem_page.wait_for_submit_disabled() + + +class ProblemExtendedHintTest(ProblemHintTest, EventsTestMixin): """ Test that extended hint features plumb through to the page html and tracking log. """ @@ -130,54 +339,39 @@ class ProblemExtendedHintTest(ProblemsTest, EventsTestMixin): """ Test clicking Check shows the extended hint in the problem message. """ - self.courseware_page.visit() - problem_page = ProblemPage(self.browser) - self.assertEqual(problem_page.problem_text[0], u'question text') - problem_page.fill_answer('B') - problem_page.click_check() - self.assertEqual(problem_page.message_text, u'Incorrect: hint') - # Check for corresponding tracking event - actual_events = self.wait_for_events( - event_filter={'event_type': 'edx.problem.hint.feedback_displayed'}, - number_of_matches=1 + self.verify_check_hint( + 'B', + u'Answer\nIncorrect: hint', + [ + { + 'event': + { + 'hint_label': u'Incorrect:', + 'trigger_type': 'single', + 'student_answer': [u'B'], + 'correctness': False, + 'question_type': 'stringresponse', + 'hints': [{'text': 'hint'}] + } + } + ] ) - self.assert_events_match( - [{'event': {'hint_label': u'Incorrect', - 'trigger_type': 'single', - 'student_answer': [u'B'], - 'correctness': False, - 'question_type': 'stringresponse', - 'hints': [{'text': 'hint'}]}}], - actual_events) def test_demand_hint(self): """ Test clicking hint button shows the demand hint in its div. 
""" - self.courseware_page.visit() - problem_page = ProblemPage(self.browser) - # The hint button rotates through multiple hints - problem_page.click_hint() - self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1') - problem_page.click_hint() - self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): demand-hint2') - problem_page.click_hint() - self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1') - # Check corresponding tracking events - actual_events = self.wait_for_events( - event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'}, - number_of_matches=3 - ) - self.assert_events_match( + self.verify_demand_hints( + u'Hint (1 of 2): demand-hint1', + u'Hint (1 of 2): demand-hint1\nHint (2 of 2): demand-hint2', [ {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}}, - {'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}}, - {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}} - ], - actual_events) + {'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}} + ] + ) -class ProblemHintWithHtmlTest(ProblemsTest, EventsTestMixin): +class ProblemHintWithHtmlTest(ProblemHintTest, EventsTestMixin): """ Tests that hints containing html get rendered properly """ @@ -205,51 +399,36 @@ class ProblemHintWithHtmlTest(ProblemsTest, EventsTestMixin): """ Test clicking Check shows the extended hint in the problem message. 
""" - self.courseware_page.visit() - problem_page = ProblemPage(self.browser) - self.assertEqual(problem_page.problem_text[0], u'question text') - problem_page.fill_answer('C') - problem_page.click_check() - self.assertEqual(problem_page.message_text, u'Incorrect: aa bb cc') - # Check for corresponding tracking event - actual_events = self.wait_for_events( - event_filter={'event_type': 'edx.problem.hint.feedback_displayed'}, - number_of_matches=1 + self.verify_check_hint( + 'C', + u'Answer\nIncorrect: aa bb cc', + [ + { + 'event': + { + 'hint_label': u'Incorrect:', + 'trigger_type': 'single', + 'student_answer': [u'C'], + 'correctness': False, + 'question_type': 'stringresponse', + 'hints': [{'text': 'aa bb cc'}] + } + } + ] ) - self.assert_events_match( - [{'event': {'hint_label': u'Incorrect', - 'trigger_type': 'single', - 'student_answer': [u'C'], - 'correctness': False, - 'question_type': 'stringresponse', - 'hints': [{'text': 'aa bb cc'}]}}], - actual_events) def test_demand_hint(self): """ - Test clicking hint button shows the demand hint in its div. + Test clicking hint button shows the demand hints in a notification area. 
""" - self.courseware_page.visit() - problem_page = ProblemPage(self.browser) - # The hint button rotates through multiple hints - problem_page.click_hint() - self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): aa bb cc') - problem_page.click_hint() - self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): dd ee ff') - problem_page.click_hint() - self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): aa bb cc') - # Check corresponding tracking events - actual_events = self.wait_for_events( - event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'}, - number_of_matches=3 - ) - self.assert_events_match( + self.verify_demand_hints( + u'Hint (1 of 2): aa bb cc', + u'Hint (1 of 2): aa bb cc\nHint (2 of 2): dd ee ff', [ {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa bb cc'}}, - {'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'dd ee ff'}}, - {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa bb cc'}} - ], - actual_events) + {'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'dd ee ff'}} + ] + ) class ProblemWithMathjax(ProblemsTest): @@ -291,13 +470,23 @@ class ProblemWithMathjax(ProblemsTest): # The hint button rotates through multiple hints problem_page.click_hint() - self.assertIn("Hint (1 of 2): mathjax should work1", problem_page.extract_hint_text_from_html) + self.assertEqual( + ["Hint (1 of 2): mathjax should work1"], + problem_page.extract_hint_text_from_html + ) problem_page.verify_mathjax_rendered_in_hint() # Rotate the hint and check the problem hint problem_page.click_hint() - self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.extract_hint_text_from_html) + self.assertEqual( + [ + "Hint (1 of 2): mathjax should work1", + "Hint (2 of 2): mathjax should work2" + ], + problem_page.extract_hint_text_from_html + ) + problem_page.verify_mathjax_rendered_in_hint() @@ -328,10 +517,9 @@ class ProblemPartialCredit(ProblemsTest): """ self.courseware_page.visit() problem_page = 
ProblemPage(self.browser) - problem_page.wait_for_element_visibility(problem_page.CSS_PROBLEM_HEADER, 'wait for problem header') self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM') problem_page.fill_answer_numerical('-1') - problem_page.click_check() + problem_page.click_submit() problem_page.wait_for_status_icon() self.assertTrue(problem_page.simpleprob_is_partially_correct()) @@ -382,7 +570,7 @@ class LogoutDuringAnswering(ProblemsTest): self.log_user_out() with problem_page.handle_alert(confirm=True): - problem_page.click_check() + problem_page.click_submit() login_page = CombinedLoginAndRegisterPage(self.browser) login_page.wait_for_page() @@ -393,7 +581,7 @@ class LogoutDuringAnswering(ProblemsTest): self.assertEqual(problem_page.problem_name, 'TEST PROBLEM') problem_page.fill_answer_numerical('1') - problem_page.click_check() + problem_page.click_submit() self.assertTrue(problem_page.simpleprob_is_correct()) def test_logout_cancel_no_redirect(self): @@ -412,7 +600,7 @@ class LogoutDuringAnswering(ProblemsTest): problem_page.fill_answer_numerical('1') self.log_user_out() with problem_page.handle_alert(confirm=False): - problem_page.click_check() + problem_page.click_submit() self.assertTrue(problem_page.is_browser_on_page()) self.assertEqual(problem_page.problem_name, 'TEST PROBLEM') @@ -453,7 +641,6 @@ class ProblemQuestionDescriptionTest(ProblemsTest): """ self.courseware_page.visit() problem_page = ProblemPage(self.browser) - problem_page.wait_for_element_visibility(problem_page.CSS_PROBLEM_HEADER, 'wait for problem header') self.assertEqual(problem_page.problem_name, 'Label with Description') self.assertEqual(problem_page.problem_question, 'Eggplant is a _____?') self.assertEqual(problem_page.problem_question_descriptions, self.descriptions) @@ -471,7 +658,7 @@ class CAPAProblemA11yBaseTestMixin(object): # Set the scope to the problem question problem_page.a11y_audit.config.set_scope( - include=['section.wrapper-problem-response'] + 
include=['.wrapper-problem-response'] ) # Run the accessibility audit. @@ -600,3 +787,63 @@ class ProblemMathExpressionInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsT """) return XBlockFixtureDesc('problem', 'MATHEXPRESSIONINPUT PROBLEM', data=xml) + + +class ProblemMetaGradedTest(ProblemsTest): + """ + TestCase Class to verify that the graded variable is passed + """ + def get_problem(self): + """ + Problem structure + """ + xml = dedent(""" + + + + + Brazil timely feedback -- explain why an almost correct answer is wrong + Germany + Indonesia + Russia + + + + """) + return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml, grader_type='Final Exam') + + def test_grader_type_displayed(self): + self.courseware_page.visit() + problem_page = ProblemPage(self.browser) + self.assertEqual(problem_page.problem_name, 'TEST PROBLEM') + self.assertEqual(problem_page.problem_progress_graded_value, "1 point possible (graded)") + + +class ProblemMetaUngradedTest(ProblemsTest): + """ + TestCase Class to verify that the ungraded variable is passed + """ + def get_problem(self): + """ + Problem structure + """ + xml = dedent(""" + + + + + Brazil timely feedback -- explain why an almost correct answer is wrong + Germany + Indonesia + Russia + + + + """) + return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml) + + def test_grader_type_displayed(self): + self.courseware_page.visit() + problem_page = ProblemPage(self.browser) + self.assertEqual(problem_page.problem_name, 'TEST PROBLEM') + self.assertEqual(problem_page.problem_progress_graded_value, "1 point possible (ungraded)") diff --git a/common/test/acceptance/tests/lms/test_lms_user_preview.py b/common/test/acceptance/tests/lms/test_lms_user_preview.py index 83bc672ce7..529c123602 100644 --- a/common/test/acceptance/tests/lms/test_lms_user_preview.py +++ b/common/test/acceptance/tests/lms/test_lms_user_preview.py @@ -234,7 +234,6 @@ class StaffDebugTest(CourseWithoutContentGroupsTest): 'for user 
{}'.format(self.USERNAME), msg) -@attr(shard=3) class CourseWithContentGroupsTest(StaffViewTest): """ Verifies that changing the "View this course as" selector works properly for content groups. @@ -265,8 +264,8 @@ class CourseWithContentGroupsTest(StaffViewTest): """ problem_data = dedent(""" -

    Choose Yes.

    + Yes @@ -294,6 +293,7 @@ class CourseWithContentGroupsTest(StaffViewTest): ) ) + @attr(shard=3) def test_staff_sees_all_problems(self): """ Scenario: Staff see all problems @@ -305,6 +305,7 @@ class CourseWithContentGroupsTest(StaffViewTest): course_page = self._goto_staff_page() verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.beta_text, self.everyone_text]) + @attr(shard=3) def test_student_not_in_content_group(self): """ Scenario: When previewing as a student, only content visible to all is shown @@ -318,6 +319,7 @@ class CourseWithContentGroupsTest(StaffViewTest): course_page.set_staff_view_mode('Student') verify_expected_problem_visibility(self, course_page, [self.everyone_text]) + @attr(shard=3) def test_as_student_in_alpha(self): """ Scenario: When previewing as a student in group alpha, only content visible to alpha is shown @@ -331,6 +333,7 @@ class CourseWithContentGroupsTest(StaffViewTest): course_page.set_staff_view_mode('Student in alpha') verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.everyone_text]) + @attr(shard=3) def test_as_student_in_beta(self): """ Scenario: When previewing as a student in group beta, only content visible to beta is shown @@ -366,6 +369,7 @@ class CourseWithContentGroupsTest(StaffViewTest): add_cohort_with_student("Cohort Beta", "beta", student_b_username) cohort_management_page.wait_for_ajax() + @attr(shard=3) def test_as_specific_student(self): student_a_username = 'tass_student_a' student_b_username = 'tass_student_b' diff --git a/common/test/acceptance/tests/lms/test_problem_types.py b/common/test/acceptance/tests/lms/test_problem_types.py index 35c5b915a8..7e337d3ae1 100644 --- a/common/test/acceptance/tests/lms/test_problem_types.py +++ b/common/test/acceptance/tests/lms/test_problem_types.py @@ -108,7 +108,7 @@ class ProblemTypeTestBase(ProblemsTest, EventsTestMixin): 'problem', self.problem_name, data=self.factory.build_xml(**self.factory_kwargs), - 
metadata={'rerandomize': 'always'} + metadata={'rerandomize': 'always', 'show_reset_button': True} ) def wait_for_status(self, status): @@ -123,7 +123,7 @@ class ProblemTypeTestBase(ProblemsTest, EventsTestMixin): self.problem_page.wait_for_element_visibility(selector, msg) @abstractmethod - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Args: `correct` (bool): Inputs correct answer if True, else inputs @@ -137,6 +137,7 @@ class ProblemTypeTestMixin(object): Test cases shared amongst problem types. """ can_submit_blank = False + can_update_save_notification = True @attr(shard=7) def test_answer_correctly(self): @@ -147,16 +148,25 @@ class ProblemTypeTestMixin(object): When I answer a "" problem "correctly" Then my "" answer is marked "correct" And The "" problem displays a "correct" answer + And a success notification is shown + And clicking on "Review" moves focus to the problem meta area And a "problem_check" server event is emitted And a "problem_check" browser event is emitted """ # Make sure we're looking at the right problem - self.assertEqual(self.problem_page.problem_name, self.problem_name) + self.problem_page.wait_for( + lambda: self.problem_page.problem_name == self.problem_name, + "Make sure the correct problem is on the page" + ) # Answer the problem correctly - self.answer_problem(correct=True) - self.problem_page.click_check() + self.answer_problem(correctness='correct') + self.problem_page.click_submit() self.wait_for_status('correct') + self.problem_page.wait_success_notification() + # Check that clicking on "Review" goes to the problem meta location + self.problem_page.click_review_in_notification() + self.assertTrue(self.problem_page.is_focus_on_problem_meta()) # Check for corresponding tracking event expected_events = [ @@ -190,16 +200,17 @@ class ProblemTypeTestMixin(object): ) # Answer the problem incorrectly - self.answer_problem(correct=False) - self.problem_page.click_check() + 
self.answer_problem(correctness='incorrect') + self.problem_page.click_submit() self.wait_for_status('incorrect') + self.problem_page.wait_incorrect_notification() @attr(shard=7) def test_submit_blank_answer(self): """ Scenario: I can submit a blank answer Given I am viewing a "" problem - When I check a problem + When I submit a problem Then my "" answer is marked "incorrect" And The "" problem displays a "blank" answer """ @@ -210,9 +221,10 @@ class ProblemTypeTestMixin(object): lambda: self.problem_page.problem_name == self.problem_name, "Make sure the correct problem is on the page" ) - # Leave the problem unchanged and click check. - self.assertNotIn('is-disabled', self.problem_page.q(css='div.problem button.check').attrs('class')[0]) - self.problem_page.click_check() + # Leave the problem unchanged and assure submit is disabled. + self.wait_for_status('unanswered') + self.assertFalse(self.problem_page.is_submit_disabled()) + self.problem_page.click_submit() self.wait_for_status('incorrect') @attr(shard=7) @@ -220,7 +232,7 @@ class ProblemTypeTestMixin(object): """ Scenario: I can't submit a blank answer When I try to submit blank answer - Then I can't check a problem + Then I can't submit a problem """ if self.can_submit_blank: raise SkipTest("Test incompatible with the current problem type") @@ -229,7 +241,121 @@ class ProblemTypeTestMixin(object): lambda: self.problem_page.problem_name == self.problem_name, "Make sure the correct problem is on the page" ) - self.assertIn('is-disabled', self.problem_page.q(css='div.problem button.check').attrs('class')[0]) + self.assertTrue(self.problem_page.is_submit_disabled()) + + @attr(shard=7) + def test_can_show_answer(self): + """ + Scenario: Verifies that show answer button is working as expected. 
+ + Given that I am on courseware page + And I can see a CAPA problem with show answer button + When I click "Show Answer" button + And I should see question's solution + And I should see the problem title is focused + """ + self.problem_page.click_show() + self.assertTrue(self.problem_page.is_focus_on_problem_meta()) + + @attr(shard=7) + def test_save_reaction(self): + """ + Scenario: Verify that the save button performs as expected with problem types + + Given that I am on a problem page + And I can see a CAPA problem with the Save button present + When I select an answer and click the "Save" button + Then I should see the Save notification + And the Save button should not be disabled + And clicking on "Review" moves focus to the problem meta area + And if I change the answer selected + Then the Save notification should be removed + """ + self.problem_page.wait_for( + lambda: self.problem_page.problem_name == self.problem_name, + "Make sure the correct problem is on the page" + ) + self.problem_page.wait_for_page() + self.answer_problem(correctness='correct') + self.assertTrue(self.problem_page.is_save_button_enabled()) + self.problem_page.click_save() + # Ensure "Save" button is enabled after save is complete. 
+ self.assertTrue(self.problem_page.is_save_button_enabled()) + self.problem_page.wait_for_save_notification() + # Check that clicking on "Review" goes to the problem meta location + self.problem_page.click_review_in_notification() + self.assertTrue(self.problem_page.is_focus_on_problem_meta()) + + # Not all problems will detect the change and remove the save notification + if self.can_update_save_notification: + self.answer_problem(correctness='incorrect') + self.assertFalse(self.problem_page.is_save_notification_visible()) + + @attr(shard=7) + def test_reset_clears_answer_and_focus(self): + """ + Scenario: Reset will clear answers and focus on problem meta + If I select an answer + and then reset the problem + There should be no answer selected + And the focus should shift appropriately + """ + self.problem_page.wait_for( + lambda: self.problem_page.problem_name == self.problem_name, + "Make sure the correct problem is on the page" + ) + self.wait_for_status('unanswered') + # Set an answer + self.answer_problem(correctness='correct') + self.problem_page.click_submit() + self.wait_for_status('correct') + # clear the answers + self.problem_page.click_reset() + # Focus should change to meta + self.assertTrue(self.problem_page.is_focus_on_problem_meta()) + # Answer should be reset + self.wait_for_status('unanswered') + + @attr(shard=7) + def test_reset_shows_errors(self): + """ + Scenario: Reset will show server errors + If I reset a problem without first answering it + Then a "gentle notification" is shown + And the focus moves to the "gentle notification" + """ + self.problem_page.wait_for( + lambda: self.problem_page.problem_name == self.problem_name, + "Make sure the correct problem is on the page" + ) + self.wait_for_status('unanswered') + self.assertFalse(self.problem_page.is_gentle_alert_notification_visible()) + # Click reset without first answering the problem (possible because show_reset_button is set to True) + self.problem_page.click_reset() + 
self.problem_page.wait_for_gentle_alert_notification() + + @attr(shard=7) + def test_partially_complete_notifications(self): + """ + Scenario: If a partially correct problem is submitted the correct notification is shown + If I submit an answer that is partially correct + Then the partially correct notification should be shown + """ + + # Not all problems have partially correct solutions configured + if not self.partially_correct: + raise SkipTest("Test incompatible with the current problem type") + + self.problem_page.wait_for( + lambda: self.problem_page.problem_name == self.problem_name, + "Make sure the correct problem is on the page" + ) + + self.wait_for_status('unanswered') + # Set an answer + self.answer_problem(correctness='partially-correct') + self.problem_page.click_submit() + self.problem_page.wait_partial_notification() @attr('a11y') def test_problem_type_a11y(self): @@ -245,18 +371,6 @@ class ProblemTypeTestMixin(object): self.problem_page.a11y_audit.config.set_scope( include=['div#seq_content']) - self.problem_page.a11y_audit.config.set_rules({ - "ignore": [ - 'aria-allowed-attr', # TODO: AC-491 - 'aria-valid-attr', # TODO: AC-491 - 'aria-roles', # TODO: AC-491 - 'checkboxgroup', # TODO: AC-491 - 'radiogroup', # TODO: AC-491 - 'section', # TODO: AC-491 - 'label', # TODO: AC-491 - ] - }) - # Run the accessibility audit. 
self.problem_page.a11y_audit.check_for_accessibility_errors() @@ -269,9 +383,10 @@ class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): problem_type = 'annotationresponse' factory = AnnotationResponseXMLFactory() + partially_correct = True can_submit_blank = True - + can_update_save_notification = False factory_kwargs = { 'title': 'Annotation Problem', 'text': 'The text being annotated', @@ -298,11 +413,22 @@ class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(AnnotationProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + self.problem_page.a11y_audit.config.set_rules({ + "ignore": [ + 'label', # TODO: AC-491 + ] + }) + + def answer_problem(self, correctness): """ Answer annotation problem. """ - choice = 0 if correct else 1 + if correctness == 'correct': + choice = 0 + elif correctness == 'partially-correct': + choice = 2 + else: + choice = 1 answer = 'Student comment' self.problem_page.q(css='div.problem textarea.comment').fill(answer) @@ -317,12 +443,14 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ problem_name = 'CHECKBOX TEST PROBLEM' problem_type = 'checkbox' + partially_correct = True factory = ChoiceResponseXMLFactory() factory_kwargs = { - 'question_text': 'The correct answer is Choice 0 and Choice 2', + 'question_text': 'The correct answer is Choice 0 and Choice 2, Choice 1 and Choice 3 together are incorrect.', 'choice_type': 'checkbox', + 'credit_type': 'edc', 'choices': [True, False, True, False], 'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'], 'explanation_text': 'This is explanation text' @@ -334,39 +462,34 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(CheckboxProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer checkbox problem. 
""" - if correct: + if correctness == 'correct': self.problem_page.click_choice("choice_0") self.problem_page.click_choice("choice_2") + elif correctness == 'partially-correct': + self.problem_page.click_choice("choice_2") else: self.problem_page.click_choice("choice_1") + self.problem_page.click_choice("choice_3") - @attr('shard_7') - def test_can_show_hide_answer(self): + @attr(shard=7) + def test_can_show_answer(self): """ - Scenario: Verifies that show/hide answer button is working as expected. + Scenario: Verifies that show answer button is working as expected. Given that I am on courseware page And I can see a CAPA problem with show answer button When I click "Show Answer" button - Then I should see "Hide Answer" text on button And I should see question's solution And I should see correct choices highlighted - When I click "Hide Answer" button - Then I should see "Show Answer" text on button - And I should not see question's solution - And I should not see correct choices highlighted """ - self.problem_page.click_show_hide_button() + self.problem_page.click_show() self.assertTrue(self.problem_page.is_solution_tag_present()) self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3])) - - self.problem_page.click_show_hide_button() - self.assertFalse(self.problem_page.is_solution_tag_present()) - self.assertFalse(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3])) + self.assertTrue(self.problem_page.is_focus_on_problem_meta()) class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): @@ -378,6 +501,8 @@ class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): factory = MultipleChoiceResponseXMLFactory() + partially_correct = False + factory_kwargs = { 'question_text': 'The correct answer is Choice 2', 'choices': [False, False, True, False], @@ -395,14 +520,14 @@ class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ 
super(MultipleChoiceProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer multiple choice problem. """ - if correct: - self.problem_page.click_choice("choice_choice_2") - else: + if correctness == 'incorrect': self.problem_page.click_choice("choice_choice_1") + else: + self.problem_page.click_choice("choice_choice_2") class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): @@ -412,6 +537,8 @@ class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): problem_name = 'RADIO TEST PROBLEM' problem_type = 'radio' + partially_correct = False + factory = ChoiceResponseXMLFactory() factory_kwargs = { @@ -432,11 +559,11 @@ class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(RadioProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer radio problem. """ - if correct: + if correctness == 'correct': self.problem_page.click_choice("choice_2") else: self.problem_page.click_choice("choice_1") @@ -449,6 +576,8 @@ class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): problem_name = 'DROP DOWN TEST PROBLEM' problem_type = 'drop down' + partially_correct = False + factory = OptionResponseXMLFactory() factory_kwargs = { @@ -463,11 +592,11 @@ class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(DropDownProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer drop down problem. 
""" - answer = 'Option 2' if correct else 'Option 3' + answer = 'Option 2' if correctness == 'correct' else 'Option 3' selector_element = self.problem_page.q( css='.problem .option-input select') select_option_by_text(selector_element, answer) @@ -480,6 +609,8 @@ class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): problem_name = 'STRING TEST PROBLEM' problem_type = 'string' + partially_correct = False + factory = StringResponseXMLFactory() factory_kwargs = { @@ -500,11 +631,11 @@ class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(StringProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer string problem. """ - textvalue = 'correct string' if correct else 'incorrect string' + textvalue = 'correct string' if correctness == 'correct' else 'incorrect string' self.problem_page.fill_answer(textvalue) @@ -514,6 +645,7 @@ class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ problem_name = 'NUMERICAL TEST PROBLEM' problem_type = 'numerical' + partially_correct = False factory = NumericalResponseXMLFactory() @@ -536,13 +668,43 @@ class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(NumericalProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer numerical problem. 
""" - textvalue = "pi + 1" if correct else str(random.randint(-2, 2)) + textvalue = '' + if correctness == 'correct': + textvalue = "pi + 1" + elif correctness == 'error': + textvalue = 'notNum' + else: + textvalue = str(random.randint(-2, 2)) self.problem_page.fill_answer(textvalue) + def test_error_input_gentle_alert(self): + """ + Scenario: I can answer a problem with erroneous input and will see a gentle alert + Given a Numerical Problem type + I can input a string answer + Then I will see a Gentle alert notification + And focus will shift to that notification + And clicking on "Review" moves focus to the problem meta area + """ + # Make sure we're looking at the right problem + self.problem_page.wait_for( + lambda: self.problem_page.problem_name == self.problem_name, + "Make sure the correct problem is on the page" + ) + + # Answer the problem with an erroneous input to cause a gentle alert + self.assertFalse(self.problem_page.is_gentle_alert_notification_visible()) + self.answer_problem(correctness='error') + self.problem_page.click_submit() + self.problem_page.wait_for_gentle_alert_notification() + # Check that clicking on "Review" goes to the problem meta location + self.problem_page.click_review_in_notification() + self.assertTrue(self.problem_page.is_focus_on_problem_meta()) + class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ @@ -550,6 +712,7 @@ class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ problem_name = 'FORMULA TEST PROBLEM' problem_type = 'formula' + partially_correct = False factory = FormulaResponseXMLFactory() @@ -574,11 +737,11 @@ class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(FormulaProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer formula problem. 
""" - textvalue = "x^2+2*x+y" if correct else 'x^2' + textvalue = "x^2+2*x+y" if correctness == 'correct' else 'x^2' self.problem_page.fill_answer(textvalue) @@ -588,6 +751,7 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ problem_name = 'SCRIPT TEST PROBLEM' problem_type = 'script' + partially_correct = False factory = CustomResponseXMLFactory() @@ -595,7 +759,8 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): 'cfn': 'test_add_to_ten', 'expect': '10', 'num_inputs': 2, - 'group_label': 'Enter two integers that sum to 10.', + 'question_text': 'Enter two integers that sum to 10.', + 'input_element_label': 'Enter an integer', 'script': textwrap.dedent(""" def test_add_to_ten(expect,ans): try: @@ -619,7 +784,7 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ super(ScriptProblemTypeTest, self).setUp(*args, **kwargs) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer script problem. 
""" @@ -629,7 +794,7 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): # If we want an incorrect answer, then change # the second addend so they no longer sum to 10 - if not correct: + if not correctness == 'correct': second_addend += random.randint(1, 10) self.problem_page.fill_answer(first_addend, input_num=0) @@ -642,7 +807,8 @@ class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ problem_name = 'CODE TEST PROBLEM' problem_type = 'code' - + partially_correct = False + can_update_save_notification = False factory = CodeResponseXMLFactory() factory_kwargs = { @@ -657,19 +823,7 @@ class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): 'unanswered': ['.grader-status .unanswered ~ .debug'], } - def setUp(self, *args, **kwargs): - """ - Additional setup for CodeProblemTypeTest - """ - super(CodeProblemTypeTest, self).setUp(*args, **kwargs) - self.problem_page.a11y_audit.config.set_rules({ - 'ignore': [ - 'section', # TODO: AC-491 - 'label', # TODO: AC-286 - ] - }) - - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer code problem. """ @@ -704,6 +858,13 @@ class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ pass + def wait_for_status(self, status): + """ + Overridden for script test because the testing grader always responds + with "correct" + """ + pass + class ChoiceTextProbelmTypeTestBase(ProblemTypeTestBase): """ @@ -711,6 +872,8 @@ class ChoiceTextProbelmTypeTestBase(ProblemTypeTestBase): (e.g. RadioText, CheckboxText) """ choice_type = None + partially_correct = False + can_update_save_notification = False def _select_choice(self, input_num): """ @@ -729,12 +892,12 @@ class ChoiceTextProbelmTypeTestBase(ProblemTypeTestBase): css='div.problem input.ctinput[type="text"]' ).nth(input_num).fill(value) - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer radio text problem. 
""" - choice = 0 if correct else 1 - input_value = "8" if correct else "5" + choice = 0 if correctness == 'correct' else 1 + input_value = "8" if correctness == 'correct' else "5" self._select_choice(choice) self._fill_input_text(input_value, choice) @@ -747,6 +910,8 @@ class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMix problem_name = 'RADIO TEXT TEST PROBLEM' problem_type = 'radio_text' choice_type = 'radio' + partially_correct = False + can_update_save_notification = False factory = ChoiceTextResponseXMLFactory() @@ -771,6 +936,14 @@ class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMix """ super(RadioTextProblemTypeTest, self).setUp(*args, **kwargs) + self.problem_page.a11y_audit.config.set_rules({ + "ignore": [ + 'radiogroup', # TODO: AC-491 + 'label', # TODO: AC-491 + 'section', # TODO: AC-491 + ] + }) + class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin): """ @@ -779,8 +952,9 @@ class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTest problem_name = 'CHECKBOX TEXT TEST PROBLEM' problem_type = 'checkbox_text' choice_type = 'checkbox' - factory = ChoiceTextResponseXMLFactory() + partially_correct = False + can_update_save_notification = False factory_kwargs = { 'question_text': 'The correct answer is Choice 0 and input 8', @@ -797,6 +971,14 @@ class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTest """ super(CheckboxTextProblemTypeTest, self).setUp(*args, **kwargs) + self.problem_page.a11y_audit.config.set_rules({ + "ignore": [ + 'checkboxgroup', # TODO: AC-491 + 'label', # TODO: AC-491 + 'section', # TODO: AC-491 + ] + }) + class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ @@ -804,21 +986,23 @@ class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ problem_name = 'IMAGE TEST PROBLEM' problem_type = 'image' + partially_correct = False factory = ImageResponseXMLFactory() 
can_submit_blank = True + can_update_save_notification = False factory_kwargs = { 'src': '/static/images/placeholder-image.png', 'rectangle': '(0,0)-(50,50)', } - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer image problem. """ - offset = 25 if correct else -25 + offset = 25 if correctness == 'correct' else -25 input_selector = ".imageinput [id^='imageinput_'] img" input_element = self.problem_page.q(css=input_selector)[0] @@ -835,11 +1019,13 @@ class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ problem_name = 'SYMBOLIC TEST PROBLEM' problem_type = 'symbolicresponse' + partially_correct = False factory = SymbolicResponseXMLFactory() factory_kwargs = { 'expect': '2*x+3*y', + 'question_text': 'Enter a value' } status_indicators = { @@ -848,21 +1034,9 @@ class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): 'unanswered': ['div.capa_inputtype div.unanswered'], } - def setUp(self, *args, **kwargs): - """ - Additional setup for SymbolicProblemTypeTest - """ - super(SymbolicProblemTypeTest, self).setUp(*args, **kwargs) - self.problem_page.a11y_audit.config.set_rules({ - 'ignore': [ - 'section', # TODO: AC-491 - 'label', # TODO: AC-294 - ] - }) - - def answer_problem(self, correct): + def answer_problem(self, correctness): """ Answer symbolic problem. 
""" - choice = "2*x+3*y" if correct else "3*a+4*b" + choice = "2*x+3*y" if correctness == 'correct' else "3*a+4*b" self.problem_page.fill_answer(choice) diff --git a/common/test/acceptance/tests/lms/test_progress_page.py b/common/test/acceptance/tests/lms/test_progress_page.py index 3553cc9e44..111181eaea 100644 --- a/common/test/acceptance/tests/lms/test_progress_page.py +++ b/common/test/acceptance/tests/lms/test_progress_page.py @@ -76,7 +76,7 @@ class ProgressPageBaseTest(UniqueCourseTest): """ self.courseware_page.go_to_sequential_position(1) self.problem_page.click_choice('choice_choice_2') - self.problem_page.click_check() + self.problem_page.click_submit() def _get_section_score(self): """ diff --git a/lms/djangoapps/courseware/features/problems.feature b/lms/djangoapps/courseware/features/problems.feature index a209133bbb..b23ce18bbb 100644 --- a/lms/djangoapps/courseware/features/problems.feature +++ b/lms/djangoapps/courseware/features/problems.feature @@ -85,37 +85,33 @@ Feature: LMS.Answer problems Scenario: I can answer a problem with multiple attempts correctly and still reset the problem Given I am viewing a "multiple choice" problem with "3" attempts - Then I should see "You have used 0 of 3 submissions" somewhere in the page + Then I should see "You have used 0 of 3 attempts" somewhere in the page When I answer a "multiple choice" problem "correctly" Then The "Reset" button does appear Scenario: I can answer a problem with multiple attempts correctly but cannot reset because randomization is off Given I am viewing a randomization "never" "multiple choice" problem with "3" attempts with reset - Then I should see "You have used 0 of 3 submissions" somewhere in the page + Then I should see "You have used 0 of 3 attempts" somewhere in the page When I answer a "multiple choice" problem "correctly" Then The "Reset" button does not appear Scenario: I can view how many attempts I have left on a problem Given I am viewing a "multiple choice" problem with 
"3" attempts - Then I should see "You have used 0 of 3 submissions" somewhere in the page + Then I should see "You have used 0 of 3 attempts" somewhere in the page When I answer a "multiple choice" problem "incorrectly" And I reset the problem - Then I should see "You have used 1 of 3 submissions" somewhere in the page + Then I should see "You have used 1 of 3 attempts" somewhere in the page When I answer a "multiple choice" problem "incorrectly" And I reset the problem - Then I should see "You have used 2 of 3 submissions" somewhere in the page - And The "Final Check" button does appear + Then I should see "You have used 2 of 3 attempts" somewhere in the page + And The "Submit" button does appear When I answer a "multiple choice" problem "correctly" Then The "Reset" button does not appear - Scenario: I can view and hide the answer if the problem has it: + Scenario: I can view the answer if the problem has it: Given I am viewing a "numerical" that shows the answer "always" - When I press the button with the label "SHOW ANSWER" - Then the Show/Hide button label is "HIDE ANSWER" + When I press the button with the label "Show Answer" And I should see "4.14159" somewhere in the page - When I press the button with the label "HIDE ANSWER" - Then the Show/Hide button label is "SHOW ANSWER" - And I should not see "4.14159" anywhere on the page Scenario: I can see my score on a problem when I answer it and after I reset it Given I am viewing a "" problem @@ -125,25 +121,23 @@ Feature: LMS.Answer problems Then I should see a score of "" Examples: - | ProblemType | Correctness | Score | Points Possible | - | drop down | correct | 1/1 point | 1 point possible | - | drop down | incorrect | 1 point possible | 1 point possible | - | multiple choice | correct | 1/1 point | 1 point possible | - | multiple choice | incorrect | 1 point possible | 1 point possible | - | checkbox | correct | 1/1 point | 1 point possible | - | checkbox | incorrect | 1 point possible | 1 point possible | 
- | radio | correct | 1/1 point | 1 point possible | - | radio | incorrect | 1 point possible | 1 point possible | - #| string | correct | 1/1 point | 1 point possible | - #| string | incorrect | 1 point possible | 1 point possible | - | numerical | correct | 1/1 point | 1 point possible | - | numerical | incorrect | 1 point possible | 1 point possible | - | formula | correct | 1/1 point | 1 point possible | - | formula | incorrect | 1 point possible | 1 point possible | - | script | correct | 2/2 points | 2 points possible | - | script | incorrect | 2 points possible | 2 points possible | - | image | correct | 1/1 point | 1 point possible | - | image | incorrect | 1 point possible | 1 point possible | + | ProblemType | Correctness | Score | Points Possible | + | drop down | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | + | drop down | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | + | multiple choice | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | + | multiple choice | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | + | checkbox | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | + | checkbox | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | + | radio | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | + | radio | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | + | numerical | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | + | numerical | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | + | formula | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | + | formula | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | + | script | correct | 2/2 points (ungraded) | 2 points possible (ungraded) | + | script | incorrect | 2 points possible (ungraded) | 2 points possible (ungraded) | + | image | correct | 1/1 point (ungraded) | 1 point possible 
(ungraded) | + | image | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | Scenario: I can see my score on a problem when I answer it and after I reset it Given I am viewing a "" problem with randomization "" with reset button on @@ -153,49 +147,32 @@ Feature: LMS.Answer problems Then I should see a score of "" Examples: - | ProblemType | Correctness | Score | Points Possible | Randomization | - | drop down | correct | 1/1 point | 1 point possible | never | - | drop down | incorrect | 1 point possible | 1 point possible | never | - | multiple choice | correct | 1/1 point | 1 point possible | never | - | multiple choice | incorrect | 1 point possible | 1 point possible | never | - | checkbox | correct | 1/1 point | 1 point possible | never | - | checkbox | incorrect | 1 point possible | 1 point possible | never | - | radio | correct | 1/1 point | 1 point possible | never | - | radio | incorrect | 1 point possible | 1 point possible | never | - #| string | correct | 1/1 point | 1 point possible | never | - #| string | incorrect | 1 point possible | 1 point possible | never | - | numerical | correct | 1/1 point | 1 point possible | never | - | numerical | incorrect | 1 point possible | 1 point possible | never | - | formula | correct | 1/1 point | 1 point possible | never | - | formula | incorrect | 1 point possible | 1 point possible | never | - | script | correct | 2/2 points | 2 points possible | never | - | script | incorrect | 2 points possible | 2 points possible | never | - | image | correct | 1/1 point | 1 point possible | never | - | image | incorrect | 1 point possible | 1 point possible | never | + | ProblemType | Correctness | Score | Points Possible | Randomization | + | drop down | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | never | + | drop down | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | never | + | multiple choice | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | 
never | + | multiple choice | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | never | + | checkbox | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | never | + | checkbox | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | never | + | radio | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | never | + | radio | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | never | + | numerical | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | never | + | numerical | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | never | + | formula | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | never | + | formula | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | never | + | script | correct | 2/2 points (ungraded) | 2 points possible (ungraded) | never | + | script | incorrect | 2 points possible (ungraded) | 2 points possible (ungraded) | never | + | image | correct | 1/1 point (ungraded) | 1 point possible (ungraded) | never | + | image | incorrect | 1 point possible (ungraded) | 1 point possible (ungraded) | never | Scenario: I can see my score on a problem to which I submit a blank answer Given I am viewing a "" problem - When I check a problem + When I submit a problem Then I should see a score of "" Examples: - | ProblemType | Points Possible | - | image | 1 point possible | - - Scenario: I can't submit a blank answer - Given I am viewing a "" problem - Then I can't check a problem - - Examples: - | ProblemType | - | drop down | - | multiple choice | - | checkbox | - | radio | - | string | - | numerical | - | formula | - | script | + | ProblemType | Points Possible | + | image | 1 point possible (ungraded) | Scenario: I can reset the correctness of a problem after changing my answer Given I am viewing a "" problem diff --git a/lms/djangoapps/courseware/features/problems.py 
b/lms/djangoapps/courseware/features/problems.py index 559a90e0af..9346084972 100644 --- a/lms/djangoapps/courseware/features/problems.py +++ b/lms/djangoapps/courseware/features/problems.py @@ -74,7 +74,7 @@ def answer_problem_step(step, problem_type, correctness): input_problem_answer(step, problem_type, correctness) # Submit the problem - check_problem(step) + submit_problem(step) @step(u'I input an answer on a "([^"]*)" problem "([^"]*)ly"') @@ -87,26 +87,18 @@ def input_problem_answer(_, problem_type, correctness): answer_problem(world.scenario_dict['COURSE'].number, problem_type, correctness) -@step(u'I check a problem') -def check_problem(step): +@step(u'I submit a problem') +# pylint: disable=unused-argument +def submit_problem(step): # first scroll down so the loading mathjax button does not - # cover up the Check button + # cover up the Submit button world.browser.execute_script("window.scrollTo(0,1024)") - assert world.is_css_not_present("button.check.is-disabled") - world.css_click("button.check") + world.css_click("button.submit") # Wait for the problem to finish re-rendering world.wait_for_ajax_complete() -@step(u"I can't check a problem") -def assert_cant_check_problem(step): # pylint: disable=unused-argument - # first scroll down so the loading mathjax button does not - # cover up the Check button - world.browser.execute_script("window.scrollTo(0,1024)") - assert world.is_css_present("button.check.is-disabled") - - @step(u'The "([^"]*)" problem displays a "([^"]*)" answer') def assert_problem_has_answer(step, problem_type, answer_class): ''' @@ -147,21 +139,13 @@ def action_button_present(_step, buttonname, doesnt_appear): assert world.is_css_present(button_css) -@step(u'the Show/Hide button label is "([^"]*)"$') -def show_hide_label_is(_step, label_name): - # The label text is changed by static/xmodule_js/src/capa/display.js - # so give it some time to change on the page. 
- label_css = 'button.show span.show-label' - world.wait_for(lambda _: world.css_has_text(label_css, label_name)) - - @step(u'I should see a score of "([^"]*)"$') def see_score(_step, score): # The problem progress is changed by # cms/static/xmodule_js/src/capa/display.js # so give it some time to render on the page. score_css = 'div.problem-progress' - expected_text = '({})'.format(score) + expected_text = '{}'.format(score) world.wait_for(lambda _: world.css_has_text(score_css, expected_text)) diff --git a/lms/djangoapps/courseware/features/problems_setup.py b/lms/djangoapps/courseware/features/problems_setup.py index ffb0046d1e..bbfa911dce 100644 --- a/lms/djangoapps/courseware/features/problems_setup.py +++ b/lms/djangoapps/courseware/features/problems_setup.py @@ -295,27 +295,27 @@ def problem_has_answer(course, problem_type, answer_class): elif problem_type == "multiple choice": if answer_class == 'correct': - assert_checked(course, 'multiple choice', ['choice_2']) + assert_submitted(course, 'multiple choice', ['choice_2']) elif answer_class == 'incorrect': - assert_checked(course, 'multiple choice', ['choice_1']) + assert_submitted(course, 'multiple choice', ['choice_1']) else: - assert_checked(course, 'multiple choice', []) + assert_submitted(course, 'multiple choice', []) elif problem_type == "checkbox": if answer_class == 'correct': - assert_checked(course, 'checkbox', ['choice_0', 'choice_2']) + assert_submitted(course, 'checkbox', ['choice_0', 'choice_2']) elif answer_class == 'incorrect': - assert_checked(course, 'checkbox', ['choice_3']) + assert_submitted(course, 'checkbox', ['choice_3']) else: - assert_checked(course, 'checkbox', []) + assert_submitted(course, 'checkbox', []) elif problem_type == "radio": if answer_class == 'correct': - assert_checked(course, 'radio', ['choice_2']) + assert_submitted(course, 'radio', ['choice_2']) elif answer_class == 'incorrect': - assert_checked(course, 'radio', ['choice_1']) + assert_submitted(course, 'radio', 
['choice_1']) else: - assert_checked(course, 'radio', []) + assert_submitted(course, 'radio', []) elif problem_type == 'string': if answer_class == 'blank': @@ -410,23 +410,23 @@ def inputfield(course, problem_type, choice=None, input_num=1): return sel -def assert_checked(course, problem_type, choices): +def assert_submitted(course, problem_type, choices): ''' Assert that choice names given in *choices* are the only - ones checked. + ones submitted. Works for both radio and checkbox problems ''' all_choices = ['choice_0', 'choice_1', 'choice_2', 'choice_3'] for this_choice in all_choices: - def check_problem(): + def submit_problem(): element = world.css_find(inputfield(course, problem_type, choice=this_choice)) if this_choice in choices: assert element.checked else: assert not element.checked - world.retry_on_exception(check_problem) + world.retry_on_exception(submit_problem) def assert_textfield(course, problem_type, expected_text, input_num=1): diff --git a/lms/static/sass/_build-lms-v1.scss b/lms/static/sass/_build-lms-v1.scss index 25dc84bd7e..190ce65c16 100644 --- a/lms/static/sass/_build-lms-v1.scss +++ b/lms/static/sass/_build-lms-v1.scss @@ -90,3 +90,6 @@ // overrides @import 'developer'; // used for any developer-created scss that needs further polish/refactoring @import 'shame'; // used for any bad-form/orphaned scss + +// CAPA Problem Feedback +@import 'edx-pattern-library-shims/buttons'; diff --git a/lms/static/sass/base/_headings.scss b/lms/static/sass/base/_headings.scss index 6dca5054e0..038462cca6 100644 --- a/lms/static/sass/base/_headings.scss +++ b/lms/static/sass/base/_headings.scss @@ -32,9 +32,8 @@ $headings-base-color: $gray-d2; %hd-2 { - margin-bottom: 1em; - font-size: 1.5em; - font-weight: $headings-font-weight-normal; + font-size: em(18); + font-weight: $headings-font-weight-bold; line-height: 1.4em; } @@ -118,7 +117,7 @@ $headings-base-color: $gray-d2; h3 { @extend %hd-2; - font-weight: $headings-font-weight-normal; + font-weight: 
$headings-font-weight-bold; // override external modules and xblocks that use inline CSS text-transform: initial; diff --git a/lms/static/sass/course/courseware/_courseware.scss b/lms/static/sass/course/courseware/_courseware.scss index 71251222b7..b213359a79 100644 --- a/lms/static/sass/course/courseware/_courseware.scss +++ b/lms/static/sass/course/courseware/_courseware.scss @@ -560,7 +560,7 @@ html.video-fullscreen { } } - section.xqa-modal, section.staff-modal, section.history-modal { + .xqa-modal, .staff-modal, .history-modal { width: 80%; height: 80%; left: left(20%); diff --git a/lms/static/sass/edx-pattern-library-shims b/lms/static/sass/edx-pattern-library-shims new file mode 120000 index 0000000000..eae51650c7 --- /dev/null +++ b/lms/static/sass/edx-pattern-library-shims @@ -0,0 +1 @@ +../../../common/static/sass/edx-pattern-library-shims \ No newline at end of file diff --git a/lms/static/sass/elements/_system-feedback.scss b/lms/static/sass/elements/_system-feedback.scss index 9f934f7660..c915be77e9 100644 --- a/lms/static/sass/elements/_system-feedback.scss +++ b/lms/static/sass/elements/_system-feedback.scss @@ -547,7 +547,6 @@ margin: 0 auto; width: flex-grid(12); max-width: $fg-max-width; - min-width: $fg-min-width; strong { @extend %t-strong; diff --git a/lms/static/sass/partials/base/_variables.scss b/lms/static/sass/partials/base/_variables.scss index d8da190786..5b786e0a43 100644 --- a/lms/static/sass/partials/base/_variables.scss +++ b/lms/static/sass/partials/base/_variables.scss @@ -211,14 +211,13 @@ $shadow-d1: rgba(0,0,0,0.4) !default; $shadow-d2: rgba($black, 0.6) !default; // system feedback-based colors -$error-color: rgb(253, 87, 87) !default; -$warning-color: rgb(181,42,103) !default; +$error-color: rgb(203, 7, 18) !default; +$warning-color: rgb(255, 192, 31) !default; $confirm-color: rgb(0, 132, 1) !default; $active-color: $blue !default; $highlight-color: rgb(255,255,0) !default; $alert-color: rgb(212, 64, 64) !default; 
-$warning-color: rgb(237, 189, 60) !default; -$success-color: rgb(37, 184, 90) !default; +$success-color: rgb(0, 155, 0) !default; // ---------------------------- diff --git a/lms/templates/courseware/proctored-exam-status.underscore b/lms/templates/courseware/proctored-exam-status.underscore index e775dfd5f0..68fd212247 100644 --- a/lms/templates/courseware/proctored-exam-status.underscore +++ b/lms/templates/courseware/proctored-exam-status.underscore @@ -8,7 +8,7 @@ %>
    <%= interpolate_text('You are taking "{exam_link}" as a {exam_type} exam. The timer on the right shows the time remaining in the exam.', {exam_link: ""+gtLtEscape(exam_display_name)+"", exam_type: (!_.isUndefined(arguments[0].exam_type)) ? exam_type : gettext('timed')}) %> - <%- gettext('To receive credit on a problem, you must click "Check" or "Final Check" on it before you select "End My Exam".') %> + <%- gettext('To receive credit for problems, you must select "Submit" for each problem before you select "End My Exam".') %>
    diff --git a/lms/templates/homework.html b/lms/templates/homework.html deleted file mode 100644 index 058f513280..0000000000 --- a/lms/templates/homework.html +++ /dev/null @@ -1,17 +0,0 @@ -

    ${ homework['name']} Test

    - -
      -% for problem in homework['problems']: -
    1. -

      ${ problem['name'] }

      - - ${ problem['html'] } - -
      - - -
      -
    2. -% endfor -
    - diff --git a/lms/templates/problem.html b/lms/templates/problem.html index 1b912ad821..67c6f791f6 100644 --- a/lms/templates/problem.html +++ b/lms/templates/problem.html @@ -5,38 +5,97 @@ from openedx.core.djangolib.markup import HTML %> <%namespace name='static' file='static_content.html'/> -

    +

    ${ problem['name'] }

    -
    +
    ${ HTML(problem['html']) }
    % if demand_hint_possible: -
    - % endif - % if check_button: - +
    + <%include file="problem_notifications.html" args=" + notification_name='hint', + notification_type='problem-hint', + notification_icon='fa-question', + notification_message=''" + /> +
    % endif +
    % if demand_hint_possible: - - % endif - % if reset_button: - + + + % endif % if save_button: - + + + + % endif + % if reset_button: + + + % endif % if answer_available: - + + + % endif - % if attempts_allowed : -
    - ${_("You have used {num_used} of {num_total} submissions").format(num_used=attempts_used, num_total=attempts_allowed)}
    + +
    + % if attempts_allowed: + ${_("You have used {num_used} of {num_total} attempts").format(num_used=attempts_used, num_total=attempts_allowed)} % endif +
    + <%include file="problem_notifications.html" args=" + notification_type='warning', + notification_icon='fa-exclamation-circle', + notification_name='gentle-alert', + notification_message=''" + /> + % if answer_notification_type: + % if 'correct' == answer_notification_type: + <%include file="problem_notifications.html" args=" + notification_type='success', + notification_icon='fa-check', + notification_name='submit', + notification_message=answer_notification_message" + /> + % endif + % if 'incorrect' == answer_notification_type: + <%include file="problem_notifications.html" args=" + notification_type='error', + notification_icon='fa-close', + notification_name='submit', + notification_message=answer_notification_message" + /> + % endif + % if 'partially-correct' == answer_notification_type: + <%include file="problem_notifications.html" args=" + notification_type='success', + notification_icon='fa-asterisk', + notification_name='submit', + notification_message=answer_notification_message" + /> + % endif + % endif + <%include file="problem_notifications.html" args=" + notification_type='warning', + notification_icon='fa-save', + notification_name='save', + notification_message=''" + />
    diff --git a/lms/templates/problem_ajax.html b/lms/templates/problem_ajax.html index 664b7567c0..ae55c3e3a6 100644 --- a/lms/templates/problem_ajax.html +++ b/lms/templates/problem_ajax.html @@ -1 +1 @@ -
    +
    diff --git a/lms/templates/problem_notifications.html b/lms/templates/problem_notifications.html new file mode 100644 index 0000000000..a4bbf35fa9 --- /dev/null +++ b/lms/templates/problem_notifications.html @@ -0,0 +1,19 @@ +<%page expression_filter="h" args="notification_name, notification_type, notification_icon, + notification_message, should_enable_next_hint"/> +<%! from django.utils.translation import ugettext as _ %> + +
    + + ${notification_message} + +
    + % if notification_name is 'hint': + + % endif + +
    +
    diff --git a/lms/templates/staff_problem_info.html b/lms/templates/staff_problem_info.html index 82d4cb1b0c..589c39c68b 100644 --- a/lms/templates/staff_problem_info.html +++ b/lms/templates/staff_problem_info.html @@ -31,7 +31,7 @@ ${block_content}
    % endif - +
    - + - +
    diff --git a/requirements/edx/github.txt b/requirements/edx/github.txt index 5156ff9001..0c2394fd31 100644 --- a/requirements/edx/github.txt +++ b/requirements/edx/github.txt @@ -53,7 +53,7 @@ git+https://github.com/edx/MongoDBProxy.git@25b99097615bda06bd7cdfe5669ed80dc2a7 git+https://github.com/edx/nltk.git@2.0.6#egg=nltk==2.0.6 -e git+https://github.com/dementrock/pystache_custom.git@776973740bdaad83a3b029f96e415a7d1e8bec2f#egg=pystache_custom-dev -e git+https://github.com/appliedsec/pygeoip.git@95e69341cebf5a6a9fbf7c4f5439d458898bdc3b#egg=pygeoip --e git+https://github.com/jazkarta/edx-jsme.git@c5bfa5d361d6685d8c643838fc0055c25f8b7999#egg=edx-jsme +-e git+https://github.com/jazkarta/edx-jsme.git@0908b4db16168382be5685e7e9b7b4747ac410e0#egg=edx-jsme git+https://github.com/edx/django-pyfs.git@1.0.3#egg=django-pyfs==1.0.3 git+https://github.com/mitodl/django-cas.git@v2.1.1#egg=django-cas -e git+https://github.com/dgrtwo/ParsePy.git@7949b9f754d1445eff8e8f20d0e967b9a6420639#egg=parse_rest