Merge pull request #13397 from edx/ammar/poc

Making CAPA problems accessible TNL-4971
This commit is contained in:
Muzaffar yousaf
2016-09-03 01:06:48 +05:00
committed by GitHub
69 changed files with 3366 additions and 2137 deletions

View File

@@ -35,8 +35,8 @@ class TemplateTests(ModuleStoreTestCase):
self.assertIsNotNone(dropdown)
self.assertIn('markdown', dropdown['metadata'])
self.assertIn('data', dropdown)
self.assertRegexpMatches(dropdown['metadata']['markdown'], r'^Dropdown.*')
self.assertRegexpMatches(dropdown['data'], r'<problem>\s*<p>Dropdown.*')
self.assertRegexpMatches(dropdown['metadata']['markdown'], r'.*dropdown problems.*')
self.assertRegexpMatches(dropdown['data'], r'<problem>\s*<optionresponse>\s*<p>.*dropdown problems.*')
def test_get_some_templates(self):
self.assertEqual(len(SequenceDescriptor.templates()), 0)

View File

@@ -28,6 +28,7 @@
'mustache': 'js/vendor/mustache',
'codemirror': 'js/vendor/codemirror-compressed',
'codemirror/stex': 'js/vendor/CodeMirror/stex',
'pretty-print': 'js/lib/pretty-print',
'jquery': 'common/js/vendor/jquery',
'jquery-migrate': 'common/js/vendor/jquery-migrate',
'jquery.ui': 'js/vendor/jquery-ui.min',

View File

@@ -6,9 +6,8 @@
## and attach them to the global context manually.
define(["jquery", "underscore", "codemirror", "tinymce",
"jquery.tinymce", "jquery.qtip", "jquery.scrollTo", "jquery.flot",
"jquery.cookie",
"utility"],
function($, _, CodeMirror, tinymce) {
"jquery.cookie", "pretty-print", "utility"],
function($, _, CodeMirror) {
window.$ = $;
window._ = _;
require(['mathjax']);

View File

@@ -13,6 +13,7 @@ Main module which shows problems (of "capa" type).
This is used by capa_module.
"""
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
import logging
@@ -35,6 +36,16 @@ from capa.safe_exec import safe_exec
# extra things displayed after "show answers" is pressed
solution_tags = ['solution']
# fully accessible capa input types
ACCESSIBLE_CAPA_INPUT_TYPES = [
'checkboxgroup',
'radiogroup',
'choicegroup',
'optioninput',
'textline',
'formulaequationinput',
]
# these get captured as student responses
response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
@@ -176,7 +187,7 @@ class LoncapaProblem(object):
# transformations. This also creates the dict (self.responders) of Response
# instances for each question in the problem. The dict has keys = xml subtree of
# Response, values = Response instance
self._preprocess_problem(self.tree)
self.problem_data = self._preprocess_problem(self.tree)
if not self.student_answers: # True when student_answers is an empty dict
self.set_initial_display()
@@ -752,7 +763,9 @@ class LoncapaProblem(object):
if problemtree.tag in inputtypes.registry.registered_tags():
# If this is an inputtype subtree, let it render itself.
status = "unsubmitted"
response_data = self.problem_data[problemid]
status = 'unsubmitted'
msg = ''
hint = ''
hintmode = None
@@ -766,7 +779,7 @@ class LoncapaProblem(object):
hintmode = self.correct_map.get_hintmode(pid)
answervariable = self.correct_map.get_property(pid, 'answervariable')
value = ""
value = ''
if self.student_answers and problemid in self.student_answers:
value = self.student_answers[problemid]
@@ -780,6 +793,7 @@ class LoncapaProblem(object):
'id': input_id,
'input_state': self.input_state[input_id],
'answervariable': answervariable,
'response_data': response_data,
'feedback': {
'message': msg,
'hint': hint,
@@ -836,27 +850,30 @@ class LoncapaProblem(object):
Obtain all responder answers and save as self.responder_answers dict (key = response)
"""
response_id = 1
problem_data = {}
self.responders = {}
for response in tree.xpath('//' + "|//".join(responsetypes.registry.registered_tags())):
response_id_str = self.problem_id + "_" + str(response_id)
responsetype_id = self.problem_id + "_" + str(response_id)
# create and save ID for this response
response.set('id', response_id_str)
response.set('id', responsetype_id)
response_id += 1
answer_id = 1
input_tags = inputtypes.registry.registered_tags()
inputfields = tree.xpath(
"|".join(['//' + response.tag + '[@id=$id]//' + x for x in input_tags + solution_tags]),
id=response_id_str
"|".join(['//' + response.tag + '[@id=$id]//' + x for x in input_tags]),
id=responsetype_id
)
# assign one answer_id for each input type or solution type
# assign one answer_id for each input type
for entry in inputfields:
entry.attrib['response_id'] = str(response_id)
entry.attrib['answer_id'] = str(answer_id)
entry.attrib['id'] = "%s_%i_%i" % (self.problem_id, response_id, answer_id)
answer_id = answer_id + 1
self.response_a11y_data(response, inputfields, responsetype_id, problem_data)
# instantiate capa Response
responsetype_cls = responsetypes.registry.get_class_for_tag(response.tag)
responder = responsetype_cls(response, inputfields, self.context, self.capa_system, self.capa_module)
@@ -881,3 +898,75 @@ class LoncapaProblem(object):
for solution in tree.findall('.//solution'):
solution.attrib['id'] = "%s_solution_%i" % (self.problem_id, solution_id)
solution_id += 1
return problem_data
def response_a11y_data(self, response, inputfields, responsetype_id, problem_data):
"""
Construct data to be used for a11y.
Arguments:
response (object): xml response object
inputfields (list): list of inputfields in a responsetype
responsetype_id (str): responsetype id
problem_data (dict): dict to be filled with response data
"""
# if there are no inputtypes then don't do anything
if not inputfields:
return
element_to_be_deleted = None
label = ''
if len(inputfields) > 1:
response.set('multiple_inputtypes', 'true')
group_label_tag = response.find('label')
group_label_tag_text = ''
if group_label_tag is not None:
group_label_tag.tag = 'p'
group_label_tag.set('id', responsetype_id)
group_label_tag.set('class', 'multi-inputs-group-label')
group_label_tag_text = group_label_tag.text
for inputfield in inputfields:
problem_data[inputfield.get('id')] = {
'group_label': group_label_tag_text,
'label': inputfield.attrib.get('label', ''),
'descriptions': {}
}
else:
# Extract label value from <label> tag or label attribute from inside the responsetype
responsetype_label_tag = response.find('label')
if responsetype_label_tag is not None:
label = responsetype_label_tag.text
# store <label> tag containing question text to delete
# it later otherwise question will be rendered twice
element_to_be_deleted = responsetype_label_tag
elif 'label' not in inputfields[0].attrib:
# In this case the problems don't have tag or label attribute inside the responsetype
# so we will get the first preceding label tag w.r.t to this responsetype.
# This will take care of those multi-question problems that are not using --- in their markdown.
label_tag = response.xpath('preceding-sibling::label[1]')
if label_tag:
label = label_tag[0].text
element_to_be_deleted = label_tag[0]
# delete label or p element only if inputtype is fully accessible
if inputfields[0].tag in ACCESSIBLE_CAPA_INPUT_TYPES and element_to_be_deleted is not None:
element_to_be_deleted.getparent().remove(element_to_be_deleted)
# Extract descriptions and set unique id on each description tag
description_tags = response.findall('description')
description_id = 1
descriptions = OrderedDict()
for description in description_tags:
descriptions[
"description_%s_%i" % (responsetype_id, description_id)
] = description.text
response.remove(description)
description_id += 1
problem_data[inputfields[0].get('id')] = {
'label': label.strip() if label else '',
'descriptions': descriptions
}

View File

@@ -96,10 +96,13 @@ class Status(object):
'correct': _('This answer is correct.'),
'incorrect': _('This answer is incorrect.'),
'partially-correct': _('This answer is partially correct.'),
'unanswered': _('This answer is unanswered.'),
'unsubmitted': _('This answer is unanswered.'),
'queued': _('This answer is being processed.'),
}
tooltips.update(
dict.fromkeys(
['incomplete', 'unanswered', 'unsubmitted'], _('Not yet answered.')
)
)
self.display_name = names.get(status, unicode(status))
self.display_tooltip = tooltips.get(status, u'')
self._status = status or ''
@@ -224,7 +227,8 @@ class InputTypeBase(object):
self.hint = feedback.get('hint', '')
self.hintmode = feedback.get('hintmode', None)
self.input_state = state.get('input_state', {})
self.answervariable = state.get("answervariable", None)
self.answervariable = state.get('answervariable', None)
self.response_data = state.get('response_data')
# put hint above msg if it should be displayed
if self.hintmode == 'always':
@@ -316,8 +320,18 @@ class InputTypeBase(object):
'value': self.value,
'status': Status(self.status, self.capa_system.i18n.ugettext),
'msg': self.msg,
'response_data': self.response_data,
'STATIC_URL': self.capa_system.STATIC_URL,
'describedby': '',
}
# Don't add aria-describedby attribute if there are no descriptions
if self.response_data.get('descriptions'):
description_ids = ' '.join(self.response_data.get('descriptions').keys())
context.update(
{'describedby': 'aria-describedby="{}"'.format(description_ids)}
)
context.update(
(a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render
)
@@ -344,7 +358,7 @@ class InputTypeBase(object):
context = self._get_render_context()
html = self.capa_system.render_template(self.template, context)
html = self.capa_system.render_template(self.template, context).strip()
try:
output = etree.XML(html)
@@ -377,7 +391,7 @@ class OptionInput(InputTypeBase):
Example:
<optioninput options="('Up','Down')" label="Where is the sky?" correct="Up"/><text>The location of the sky</text>
<optioninput options="('Up','Down')" correct="Up"/><text>The location of the sky</text>
# TODO: allow ordering to be randomized
"""
@@ -413,9 +427,15 @@ class OptionInput(InputTypeBase):
Convert options to a convenient format.
"""
return [Attribute('options', transform=cls.parse_options),
Attribute('label', ''),
Attribute('inline', False)]
def _extra_context(self):
"""
Return extra context.
"""
_ = self.capa_system.i18n.ugettext
return {'default_option_text': _('Select an option')}
#-----------------------------------------------------------------------------
@@ -432,7 +452,7 @@ class ChoiceGroup(InputTypeBase):
Example:
<choicegroup label="Which foil?">
<choicegroup>
<choice correct="false" name="foil1">
<text>This is foil One.</text>
</choice>
@@ -475,7 +495,6 @@ class ChoiceGroup(InputTypeBase):
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
return [Attribute("show_correctness", "always"),
Attribute('label', ''),
Attribute("submitted_message", _("Answer received."))]
def _extra_context(self):
@@ -637,7 +656,7 @@ class TextLine(InputTypeBase):
is used e.g. for embedding simulations turned into questions.
Example:
<textline math="1" trailing_text="m/s" label="How fast is a cheetah?" />
<textline math="1" trailing_text="m/s"/>
This example will render out a text line with a math preview and the text 'm/s'
after the end of the text line.
@@ -653,7 +672,6 @@ class TextLine(InputTypeBase):
"""
return [
Attribute('size', None),
Attribute('label', ''),
Attribute('hidden', False),
Attribute('inline', False),
@@ -713,7 +731,6 @@ class FileSubmission(InputTypeBase):
Convert the list of allowed files to a convenient format.
"""
return [Attribute('allowed_files', '[]', transform=cls.parse_files),
Attribute('label', ''),
Attribute('required_files', '[]', transform=cls.parse_files), ]
def setup(self):
@@ -1027,7 +1044,6 @@ class Schematic(InputTypeBase):
Attribute('analyses', None),
Attribute('initial_value', None),
Attribute('submit_analyses', None),
Attribute('label', ''),
]
def _extra_context(self):
@@ -1063,7 +1079,6 @@ class ImageInput(InputTypeBase):
"""
return [Attribute('src'),
Attribute('height'),
Attribute('label', ''),
Attribute('width'), ]
def setup(self):
@@ -1154,8 +1169,7 @@ class ChemicalEquationInput(InputTypeBase):
"""
Can set size of text field.
"""
return [Attribute('size', '20'),
Attribute('label', ''), ]
return [Attribute('size', '20'), ]
def _extra_context(self):
"""
@@ -1218,7 +1232,7 @@ class FormulaEquationInput(InputTypeBase):
Example:
<formulaequationinput size="50" label="Enter the equation for motion" />
<formulaequationinput size="50"/>
options: size -- width of the textbox.
trailing_text -- text to show after the input textbox when
@@ -1236,7 +1250,6 @@ class FormulaEquationInput(InputTypeBase):
return [
Attribute('size', '20'),
Attribute('inline', False),
Attribute('label', ''),
Attribute('trailing_text', ''),
]
@@ -1626,7 +1639,7 @@ class ChoiceTextGroup(InputTypeBase):
select the correct choice and fill in numbers to make it accurate.
<endouttext/>
<choicetextresponse>
<radiotextgroup label="What is the correct choice?">
<radiotextgroup>
<choice correct="false">The lowest number rolled was:
<decoy_input/> and the highest number rolled was:
<decoy_input/> .</choice>
@@ -1649,7 +1662,7 @@ class ChoiceTextGroup(InputTypeBase):
select the correct choices and fill in numbers to make them accurate.
<endouttext/>
<choicetextresponse>
<checkboxtextgroup label="What is the answer?">
<checkboxtextgroup>
<choice correct="true">
The lowest number selected was <numtolerance_input answer="1.4142" tolerance="0.01"/>
</choice>
@@ -1715,7 +1728,6 @@ class ChoiceTextGroup(InputTypeBase):
return [
Attribute("show_correctness", "always"),
Attribute("submitted_message", _("Answer received.")),
Attribute("label", ""),
]
def _extra_context(self):

View File

@@ -250,8 +250,27 @@ class LoncapaResponse(object):
- renderer : procedure which produces HTML given an ElementTree
- response_msg: a message displayed at the end of the Response
"""
# render ourself as a <span> + our content
tree = etree.Element('span')
_ = self.capa_system.i18n.ugettext
# get responsetype index to make responsetype label
response_index = self.xml.attrib['id'].split('_')[-1]
# Translators: index here could be 1,2,3 and so on
response_label = _(u'Question {index}').format(index=response_index)
# wrap the content inside a section
tree = etree.Element('section')
tree.set('class', 'wrapper-problem-response')
tree.set('tabindex', '-1')
tree.set('aria-label', response_label)
if self.xml.get('multiple_inputtypes'):
# add <div> to wrap all inputtypes
content = etree.SubElement(tree, 'div')
content.set('class', 'multi-inputs-group')
content.set('role', 'group')
content.set('aria-labelledby', self.xml.get('id'))
else:
content = tree
# problem author can make this span display:inline
if self.xml.get('inline', ''):
@@ -261,12 +280,12 @@ class LoncapaResponse(object):
# call provided procedure to do the rendering
item_xhtml = renderer(item)
if item_xhtml is not None:
tree.append(item_xhtml)
content.append(item_xhtml)
tree.tail = self.xml.tail
# Add a <div> for the message at the end of the response
if response_msg:
tree.append(self._render_response_msg_html(response_msg))
content.append(self._render_response_msg_html(response_msg))
return tree

View File

@@ -1,3 +1,4 @@
<%! from openedx.core.djangolib.markup import HTML %>
<form class="annotation-input">
<div class="script_placeholder" data-src="${STATIC_URL}js/capa/annotationinput.js"/>
@@ -59,6 +60,5 @@
</form>
% if msg:
<span class="message">${msg|n}</span>
<span class="message">${HTML(msg)}</span>
% endif

View File

@@ -3,7 +3,7 @@
<div class="${status.classname}" id="status_${id}">
<input type="text" name="input_${id}" id="input_${id}" aria-label="${label}" aria-describedby="answer_${id}" data-input-id="${id}" value="${value|h}"
<input type="text" name="input_${id}" id="input_${id}" aria-label="${response_data['label']}" aria-describedby="answer_${id}" data-input-id="${id}" value="${value|h}"
% if size:
size="${size}"
% endif

View File

@@ -1,58 +1,66 @@
<%! from openedx.core.djangolib.markup import HTML %>
<%
def is_radio_input(choice_id):
return input_type == 'radio' and ((isinstance(value, basestring) and (choice_id == value)) or (
not isinstance(value, basestring) and choice_id in value
))
%>
<form class="choicegroup capa_inputtype" id="inputtype_${id}">
<fieldset role="${input_type}group" aria-label="${label}">
% for choice_id, choice_description in choices:
<label for="input_${id}_${choice_id}"
## If the student has selected this choice...
% if input_type == 'radio' and ( (isinstance(value, basestring) and (choice_id == value)) or (not isinstance(value, basestring) and choice_id in value) ):
<fieldset ${describedby}>
% if response_data['label']:
<legend id="${id}-legend" class="response-fieldset-legend field-group-hd">${response_data['label']}</legend>
% endif
% for description_id, description_text in response_data['descriptions'].items():
<p class="question-description" id="${description_id}">${description_text}</p>
% endfor
% for choice_id, choice_label in choices:
<div class="field" aria-live="polite" aria-atomic="true">
<%
if status == 'correct':
correctness = 'correct'
elif status == 'partially-correct':
correctness = 'partially-correct'
elif status == 'incorrect':
correctness = 'incorrect'
else:
correctness = None
label_class = 'response-label field-label label-inline'
%>
% if correctness and not show_correctness=='never':
class="choicegroup_${correctness}"
% endif
% endif
>
<input type="${input_type}" name="input_${id}${name_array_suffix}" id="input_${id}_${choice_id}" aria-role="radio" aria-describedby="answer_${id}" value="${choice_id}"
## If the student selected this choice...
% if input_type == 'radio' and ( (isinstance(value, basestring) and (choice_id == value)) or (not isinstance(value, basestring) and choice_id in value) ):
checked="true"
% elif input_type != 'radio' and choice_id in value:
checked="true"
% endif
% if input_type != 'radio':
aria-multiselectable="true"
% endif
<label id="${id}-${choice_id}-label"
## If the student has selected this choice...
% if is_radio_input(choice_id):
<%
if status == 'correct':
correctness = 'correct'
elif status == 'partially-correct':
correctness = 'partially-correct'
elif status == 'incorrect':
correctness = 'incorrect'
else:
correctness = None
%>
% if correctness and not show_correctness == 'never':
<% label_class += ' choicegroup_' + correctness %>
% endif
% endif
class="${label_class}"
${describedby}
>
<input type="${input_type}" name="input_${id}${name_array_suffix}" id="input_${id}_${choice_id}" class="field-input input-${input_type}" value="${choice_id}"
## If the student selected this choice...
% if is_radio_input(choice_id):
checked="true"
% elif input_type != 'radio' and choice_id in value:
checked="true"
% endif
/> ${choice_label}
/> ${choice_description}
% if input_type == 'radio' and ( (isinstance(value, basestring) and (choice_id == value)) or (not isinstance(value, basestring) and choice_id in value) ):
% if status in ('correct', 'partially-correct', 'incorrect') and not show_correctness=='never':
<span class="sr status">${choice_description|h} - ${status.display_name}</span>
% endif
% endif
</label>
% if is_radio_input(choice_id):
% if status in ('correct', 'partially-correct', 'incorrect') and not show_correctness == 'never':
<span class="sr status" id="${id}-${choice_id}-labeltext">${status.display_name}</span>
% endif
% endif
</label>
</div>
% endfor
<span id="answer_${id}"></span>
</fieldset>
<div class="indicator-container">
% if input_type == 'checkbox' or not value:
<span class="status ${status.classname if show_correctness != 'never' else 'unanswered'}" id="status_${id}" aria-describedby="inputtype_${id}" data-tooltip="${status.display_tooltip}">
<span class="sr">
%for choice_id, choice_description in choices:
% if choice_id in value:
${choice_description},
%endif
%endfor
-
${status.display_tooltip}
</span>
<span class="status ${status.classname if show_correctness != 'never' else 'unanswered'}" id="status_${id}" data-tooltip="${status.display_tooltip}">
<span class="sr">${status.display_tooltip}</span>
</span>
% endif
</div>
@@ -60,6 +68,6 @@
<div class="capa_alert">${submitted_message}</div>
%endif
% if msg:
<span class="message">${msg|n}</span>
<span class="message">${HTML(msg)}</span>
% endif
</form>

View File

@@ -1,18 +1,18 @@
<%! from django.utils.translation import ugettext as _ %>
<% element_checked = False %>
% for choice_id, _ in choices:
<%choice_id = choice_id %>
<% choice_id = choice_id %>
%if choice_id in value:
<% element_checked = True %>
%endif
%endfor
% endfor
<section id="choicetextinput_${id}" class="choicetextinput">
<form class="choicetextgroup capa_inputtype" id="inputtype_${id}">
<div class="script_placeholder" data-src="${STATIC_URL}js/capa/choicetextinput.js"/>
<fieldset aria-label="${label}">
<fieldset aria-label="${response_data['label']}">
% for choice_id, choice_description in choices:
<%choice_id= choice_id %>
<% choice_id = choice_id %>
<section id="forinput${choice_id}"
% if input_type == 'radio' and choice_id in value :
<%
@@ -59,7 +59,7 @@
<span id="answer_${id}"></span>
</fieldset>
<input class= "choicetextvalue" type="hidden" name="input_${id}{}" id="input_${id}" value="${value|h}" />
<div class="indicator-container">
% if input_type == 'checkbox' or not element_checked:
<span class="status ${status.classname}" id="status_${id}"></span>

View File

@@ -1,3 +1,4 @@
<%! from openedx.core.djangolib.markup import HTML %>
<div id="inputtype_${id}" class="capa_inputtype">
<div class="drag_and_drop_problem_div" id="drag_and_drop_div_${id}"
data-plain-id="${id}">
@@ -23,7 +24,7 @@
<p id="answer_${id}" class="answer"></p>
% if msg:
<span class="message">${msg|n}</span>
<span class="message">${HTML(msg)}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:

View File

@@ -1,3 +1,4 @@
<%! from openedx.core.djangolib.markup import HTML %>
<section id="filesubmission_${id}" class="filesubmission">
<div class="grader-status file">
@@ -7,7 +8,7 @@
% endif
<p class="debug">${status}</p>
<input type="file" name="input_${id}" id="input_${id}" value="${value}" multiple="multiple" data-required_files="${required_files|h}" data-allowed_files="${allowed_files|h}" aria-label="${label}" />
<input type="file" name="input_${id}" id="input_${id}" value="${value}" multiple="multiple" data-required_files="${required_files|h}" data-allowed_files="${allowed_files|h}" aria-label="${response_data['label']}"/>
</div>
<div class="message">${msg|n}</div>
<div class="message">${HTML(msg)}</div>
</section>

View File

@@ -1,35 +1,38 @@
<%page expression_filter="h"/>
<%! from openedx.core.djangolib.markup import HTML %>
<% doinline = 'style="display:inline-block;vertical-align:top"' if inline else "" %>
<section id="formulaequationinput_${id}" class="inputtype formulaequationinput" ${doinline | n}>
<div class="${status.classname}" id="status_${id}">
<input type="text" name="input_${id}" id="input_${id}"
data-input-id="${id}" value="${value}"
aria-label="${label}"
aria-describedby="${id}_status"
% if size:
size="${size}"
% endif
/>
<span class="trailing_text">${trailing_text}</span>
<div id="formulaequationinput_${id}" class="inputtype formulaequationinput" ${doinline | n, decode.utf8}>
<div class="${status.classname}" id="status_${id}">
% if response_data['label']:
<label class="problem-group-label" for="input_${id}">${response_data['label']}</label>
% endif
% for description_id, description_text in response_data['descriptions'].items():
<p class="question-description" id="${description_id}">${description_text}</p>
% endfor
<input type="text" name="input_${id}" id="input_${id}"
data-input-id="${id}" value="${value}"
${describedby | n, decode.utf8}
% if size:
size="${size}"
% endif
/>
<span class="trailing_text">${trailing_text}</span>
<span class="status" id="${id}_status" data-tooltip="${status.display_tooltip}">
<span class="sr">
${status.display_name}
<span class="status" id="${id}_status" data-tooltip="${status.display_tooltip}">
<span class="sr">${status.display_tooltip}</span>
</span>
</span>
<p id="answer_${id}" class="answer"></p>
<p id="answer_${id}" class="answer"></p>
<div id="input_${id}_preview" class="equation">
\(\)
<img src="${STATIC_URL}images/spinner.gif" class="loading" alt="Loading"/>
</div>
</div>
<div id="input_${id}_preview" class="equation">
\(\)
<img src="${STATIC_URL}images/spinner.gif" class="loading" alt="Loading"/>
</div>
</div>
<div class="script_placeholder" data-src="${previewer}"/>
<div class="script_placeholder" data-src="${previewer}"/>
% if msg:
<span class="message">${HTML(msg)}</span>
% endif
</section>
</div>

View File

@@ -1,11 +1,20 @@
<%! from openedx.core.djangolib.markup import HTML %>
<% doinline = "inline" if inline else "" %>
<form class="inputtype option-input ${doinline}">
<select name="input_${id}" id="input_${id}" aria-label="${label}" aria-describedby="answer_${id}">
<option value="option_${id}_dummy_default"> </option>
% if response_data['label']:
<label class="problem-group-label" for="input_${id}">${response_data['label']}</label>
% endif
% for description_id, description_text in response_data['descriptions'].items():
<p class="question-description" id="${description_id}">${description_text}</p>
% endfor
<select name="input_${id}" id="input_${id}" ${describedby}>
<option value="option_${id}_dummy_default">${default_option_text}</option>
% for option_id, option_description in options:
<option value="${option_id}"
% if (option_id==value or option_id==answervariable):
% if (option_id == value or option_id == answervariable):
selected="true"
% endif
> ${option_description}</option>
@@ -13,15 +22,12 @@
</select>
<div class="indicator-container">
<span class="status ${status.classname}"
id="status_${id}"
aria-describedby="input_${id}" data-tooltip="${status.display_tooltip}">
<span class="sr">${value|h} - ${status.display_tooltip}</span>
<span class="status ${status.classname}" id="status_${id}" data-tooltip="${status.display_tooltip}">
<span class="sr">${status.display_tooltip}</span>
</span>
</div>
<p class="answer" id="answer_${id}"></p>
% if msg:
<span class="message">${msg|n}</span>
<span class="message">${HTML(msg)}</span>
% endif
</form>

View File

@@ -8,7 +8,7 @@
analyses="${analyses}"
name="input_${id}"
id="input_${id}"
aria-label="${label}"
aria-label="${response_data['label']}"
aria-describedby="answer_${id}"
value="${value|h}"
initial_value="${initial_value|h}"

View File

@@ -1,3 +1,3 @@
<section class="solution-span">
<div class="solution-span">
<span id="solution_${id}"></span>
</section>
</div>

View File

@@ -2,62 +2,57 @@
<%! from openedx.core.djangolib.markup import HTML %>
<% doinline = "inline" if inline else "" %>
<div id="inputtype_${id}" class="${'text-input-dynamath' if do_math else ''} capa_inputtype ${doinline} textline" >
% if preprocessor is not None:
<div id="inputtype_${id}" class="${'text-input-dynamath' if do_math else ''} capa_inputtype ${doinline} textline">
% if preprocessor is not None:
<div class="text-input-dynamath_data ${doinline}" data-preprocessor="${preprocessor['class_name']}"/>
<div class="script_placeholder" data-src="${preprocessor['script_src']}"/>
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
<div class="${status.classname} ${doinline}" id="status_${id}">
% endif
% if hidden:
<div style="display:none;" name="${hidden}" inputid="input_${id}" />
% endif
<input type="text" name="input_${id}" id="input_${id}" aria-label="${label}" aria-describedby="answer_${id}" value="${value}"
% if do_math:
class="math"
% endif
% if size:
size="${size}"
% endif
% if hidden:
style="display:none;"
% endif
/>
<span class="trailing_text">${trailing_text}</span>
<span class="status"
%if status != 'unsubmitted':
%endif
aria-describedby="input_${id}" data-tooltip="${status.display_tooltip}">
<span class="sr">
%if value:
${value}
% else:
${label}
%endif
-
${status.display_name}
</span>
</span>
<p id="answer_${id}" class="answer"></p>
% if do_math:
<div id="display_${id}" class="equation">`{::}`</div>
<textarea style="display:none" id="input_${id}_dynamath" name="input_${id}_dynamath"></textarea>
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
</div>
% endif
% if msg:
<span class="message">${HTML(msg)}</span>
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
<div class="${status.classname} ${doinline}" id="status_${id}">
% endif
% if hidden:
<div style="display:none;" name="${hidden}" inputid="input_${id}" />
% endif
% if response_data['label']:
<label class="problem-group-label" for="input_${id}">${response_data['label']}</label>
% endif
% for description_id, description_text in response_data['descriptions'].items():
<p class="question-description" id="${description_id}">${description_text}</p>
% endfor
<input type="text" name="input_${id}" id="input_${id}" ${describedby | n, decode.utf8} value="${value}"
% if do_math:
class="math"
% endif
% if size:
size="${size}"
% endif
% if hidden:
style="display:none;"
% endif
/>
<span class="trailing_text">${trailing_text}</span>
<span class="status" data-tooltip="${status.display_tooltip}">
<span class="sr">${status.display_tooltip}</span>
</span>
<p id="answer_${id}" class="answer"></p>
% if do_math:
<div id="display_${id}" class="equation">`{::}`</div>
<textarea style="display:none" id="input_${id}_dynamath" name="input_${id}_dynamath"></textarea>
% endif
% if status in ('unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete'):
</div>
% endif
% if msg:
<span class="message">${HTML(msg)}</span>
% endif
</div>

View File

@@ -1,3 +1,4 @@
<%! from openedx.core.djangolib.markup import HTML %>
<section id="inputtype_${id}" class="capa_inputtype" >
<table><tr><td height='600'>
<div id="vsepr_div_${id}" style="position:relative;" data-molecules="${molecules}" data-geometries="${geometries}">
@@ -26,7 +27,7 @@
<p id="answer_${id}" class="answer"></p>
% if msg:
<span class="message">${msg|n}</span>
<span class="message">${HTML(msg)}</span>
% endif
% if status in ['unsubmitted', 'correct', 'incorrect', 'partially-correct', 'incomplete']:
</div>

View File

@@ -1,6 +1,7 @@
"""Tools for helping with testing capa."""
import gettext
from path import path # pylint: disable=no-name-in-module
import os
import os.path
@@ -9,12 +10,29 @@ import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.inputtypes import Status
from mock import Mock, MagicMock
from mako.lookup import TemplateLookup
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def get_template(template_name):
    """
    Return the Mako template used to render a capa inputtype.
    """
    templates_dir = path(__file__).dirname().dirname() / 'templates'
    lookup = TemplateLookup(directories=[templates_dir])
    return lookup.get_template(template_name)
def capa_render_template(template, context):
    """
    Render the named capa inputtype template with the supplied context.
    """
    compiled = get_template(template)
    return compiled.render_unicode(**context)
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring
@@ -30,7 +48,7 @@ xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
def test_capa_system(render_template=None):
"""
Construct a mock LoncapaSystem instance.
@@ -46,7 +64,7 @@ def test_capa_system():
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
render_template=render_template or tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
STATUS_CLASS=Status,
@@ -66,9 +84,10 @@ def mock_capa_module():
return capa_module
def new_loncapa_problem(xml, capa_system=None, seed=723):
def new_loncapa_problem(xml, capa_system=None, seed=723, use_capa_render_template=False):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system(),
render_template = capa_render_template if use_capa_render_template else None
return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system(render_template),
capa_module=mock_capa_module())

View File

@@ -267,6 +267,9 @@ class CustomResponseXMLFactory(ResponseXMLFactory):
*answer_attr*: The "answer" attribute on the tag itself (treated as an
alias to "expect", though "expect" takes priority if both are given)
*group_label*: Text to represent group of inputs when there are
multiple inputs.
"""
# Retrieve **kwargs
@@ -276,6 +279,7 @@ class CustomResponseXMLFactory(ResponseXMLFactory):
answer = kwargs.get('answer', None)
options = kwargs.get('options', None)
cfn_extra_args = kwargs.get('cfn_extra_args', None)
group_label = kwargs.get('group_label', None)
# Create the response element
response_element = etree.Element("customresponse")
@@ -293,6 +297,10 @@ class CustomResponseXMLFactory(ResponseXMLFactory):
answer_element = etree.SubElement(response_element, "answer")
answer_element.text = str(answer)
if group_label:
group_label_element = etree.SubElement(response_element, "label")
group_label_element.text = group_label
if options:
response_element.set('options', str(options))

View File

@@ -0,0 +1,446 @@
"""
Test capa problem.
"""
import ddt
import textwrap
from lxml import etree
import unittest
from . import new_loncapa_problem
class CAPAProblemTest(unittest.TestCase):
    """ CAPA problem related tests"""

    def test_label_and_description_inside_responsetype(self):
        """
        Verify that
        * label is extracted
        * <label> tag is removed to avoid duplication

        This is the case when we have a problem with single question or
        problem with multiple-questions separated as per the new format.
        """
        xml = """
        <problem>
            <choiceresponse>
                <label>Select the correct synonym of paranoid?</label>
                <description>Only the paranoid survive.</description>
                <checkboxgroup>
                    <choice correct="true">over-suspicious</choice>
                    <choice correct="false">funny</choice>
                </checkboxgroup>
            </choiceresponse>
        </problem>
        """
        problem = new_loncapa_problem(xml)
        # problem_data keys are "<problem id>_<response index>_<input index>".
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': 'Select the correct synonym of paranoid?',
                    'descriptions': {'description_1_1_1': 'Only the paranoid survive.'}
                }
            }
        )
        # The <label> tag must be stripped from the tree so the question
        # text is not rendered twice.
        self.assertEqual(len(problem.tree.xpath('//label')), 0)

    def test_legacy_problem(self):
        """
        Verify that legacy problem is handled correctly.

        Legacy problems put the question in a <p> before the responsetype and
        a ``label`` attribute on the inputtype; no <label>/<description> tags.
        """
        question = "Once we become predictable, we become ______?"
        xml = """
        <problem>
            <p>Be sure to check your spelling.</p>
            <p>{}</p>
            <stringresponse answer="vulnerable" type="ci">
                <textline label="{}" size="40"/>
            </stringresponse>
        </problem>
        """.format(question, question)
        problem = new_loncapa_problem(xml)
        # No <label> tag present, so the extracted label is empty.
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': '',
                    'descriptions': {}
                }
            }
        )
        # The <p> holding the question text must remain in the tree.
        self.assertEqual(
            len(problem.tree.xpath("//*[normalize-space(text())='{}']".format(question))),
            1
        )

    def test_neither_label_tag_nor_attribute(self):
        """
        Verify that label is extracted correctly.

        This is the case when we have a markdown problem with multiple-questions.
        In this case when markdown is converted to xml, there will be no label
        tag and label attribute inside responsetype. But we have a label tag
        before the responsetype.
        """
        question1 = 'People who say they have nothing to ____ almost always do?'
        question2 = 'Select the correct synonym of paranoid?'
        xml = """
        <problem>
            <p>Be sure to check your spelling.</p>
            <label>{}</label>
            <stringresponse answer="hide" type="ci">
                <textline size="40"/>
            </stringresponse>
            <choiceresponse>
                <label>{}</label>
                <checkboxgroup>
                    <choice correct="true">over-suspicious</choice>
                    <choice correct="false">funny</choice>
                </checkboxgroup>
            </choiceresponse>
        </problem>
        """.format(question1, question2)
        problem = new_loncapa_problem(xml)
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': question1,
                    'descriptions': {}
                },
                '1_3_1':
                {
                    'label': question2,
                    'descriptions': {}
                }
            }
        )
        # Both <label> tags must be removed after extraction.
        for question in (question1, question2):
            self.assertEqual(
                len(problem.tree.xpath('//label[text()="{}"]'.format(question))),
                0
            )

    def test_multiple_descriptions(self):
        """
        Verify that multiple descriptions are handled correctly.

        Each <description> gets its own key, numbered in document order.
        """
        xml = """
        <problem>
            <p>Be sure to check your spelling.</p>
            <stringresponse answer="War" type="ci">
                <label>___ requires sacrifices.</label>
                <description>The problem with trying to be the bad guy, there's always someone worse.</description>
                <description>Anyone who looks the world as if it was a game of chess deserves to lose.</description>
                <textline size="40"/>
            </stringresponse>
        </problem>
        """
        problem = new_loncapa_problem(xml)
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': '___ requires sacrifices.',
                    'descriptions': {
                        'description_1_1_1': "The problem with trying to be the bad guy, there's always someone worse.",
                        'description_1_1_2': "Anyone who looks the world as if it was a game of chess deserves to lose."
                    }
                }
            }
        )

    def test_non_accessible_inputtype(self):
        """
        Verify that tag with question text is not removed when inputtype is not fully accessible.
        """
        question = "Click the country which is home to the Pyramids."
        xml = """
        <problem>
            <p>{}</p>
            <imageresponse>
                <imageinput label="{}"
                src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/>
            </imageresponse>
        </problem>
        """.format(question, question)
        problem = new_loncapa_problem(xml)
        # imageresponse is not in the fully-accessible list, so no label is
        # extracted into problem_data.
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': '',
                    'descriptions': {}
                }
            }
        )
        # <p> tag with question text should not be deleted
        self.assertEqual(problem.tree.xpath("string(p[text()='{}'])".format(question)), question)

    def test_label_is_empty_if_no_label_attribute(self):
        """
        Verify that label in response_data is empty string when label
        attribute is missing and responsetype is not fully accessible.
        """
        question = "Click the country which is home to the Pyramids."
        xml = """
        <problem>
            <p>{}</p>
            <imageresponse>
                <imageinput
                src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/>
            </imageresponse>
        </problem>
        """.format(question)
        problem = new_loncapa_problem(xml)
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': '',
                    'descriptions': {}
                }
            }
        )

    def test_multiple_questions_problem(self):
        """
        For a problem with multiple questions verify that for each question
        * label is extracted
        * descriptions info is constructed
        * <label> tag is removed to avoid duplication
        """
        xml = """
        <problem>
            <choiceresponse>
                <label>Select the correct synonym of paranoid?</label>
                <description>Only the paranoid survive.</description>
                <checkboxgroup>
                    <choice correct="true">over-suspicious</choice>
                    <choice correct="false">funny</choice>
                </checkboxgroup>
            </choiceresponse>
            <multiplechoiceresponse>
                <p>one more question</p>
                <label>What Apple device competed with the portable CD player?</label>
                <description>Device looks like an egg plant.</description>
                <choicegroup type="MultipleChoice">
                    <choice correct="false">The iPad</choice>
                    <choice correct="false">Napster</choice>
                    <choice correct="true">The iPod</choice>
                    <choice correct="false">The vegetable peeler</choice>
                </choicegroup>
            </multiplechoiceresponse>
        </problem>
        """
        problem = new_loncapa_problem(xml)
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': 'Select the correct synonym of paranoid?',
                    'descriptions': {'description_1_1_1': 'Only the paranoid survive.'}
                },
                '1_3_1':
                {
                    'label': 'What Apple device competed with the portable CD player?',
                    'descriptions': {'description_1_2_1': 'Device looks like an egg plant.'}
                }
            }
        )
        # All <label> tags must be removed from the tree.
        self.assertEqual(len(problem.tree.xpath('//label')), 0)

    def test_question_title_not_removed_got_children(self):
        """
        Verify that <p> question text before responsetype not deleted when
        it contains other children and label is picked from label attribute of inputtype

        This is the case when author updated the <p> immediately before
        responsetype to contain other elements. We do not want to delete information in that case.
        """
        question = 'Is egg plant a fruit?'
        xml = """
        <problem>
            <p>Choose wisely.</p>
            <p>Select the correct synonym of paranoid?</p>
            <p><img src="" /></p>
            <choiceresponse>
                <checkboxgroup label="{}">
                    <choice correct="true">over-suspicious</choice>
                    <choice correct="false">funny</choice>
                </checkboxgroup>
            </choiceresponse>
        </problem>
        """.format(question)
        problem = new_loncapa_problem(xml)
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'label': '',
                    'descriptions': {}
                }
            }
        )
        # The <p><img/></p> immediately before the responsetype must survive.
        self.assertEqual(
            len(problem.tree.xpath('//p/img')),
            1
        )

    def test_multiple_inputtypes(self):
        """
        Verify that group label and labels for individual inputtypes are extracted correctly.
        """
        group_label = 'Choose the correct color'
        input1_label = 'What color is the sky?'
        input2_label = 'What color are pine needles?'
        xml = """
        <problem>
            <optionresponse>
                <label>{}</label>
                <optioninput options="('yellow','blue','green')" correct="blue" label="{}"/>
                <optioninput options="('yellow','blue','green')" correct="green" label="{}"/>
            </optionresponse>
        </problem>
        """.format(group_label, input1_label, input2_label)
        problem = new_loncapa_problem(xml)
        # With multiple inputtypes, the <label> becomes the shared
        # 'group_label' and each inputtype keeps its own 'label'.
        self.assertEqual(
            problem.problem_data,
            {
                '1_2_1':
                {
                    'group_label': group_label,
                    'label': input1_label,
                    'descriptions': {}
                },
                '1_2_2':
                {
                    'group_label': group_label,
                    'label': input2_label,
                    'descriptions': {}
                }
            }
        )

    def test_single_inputtypes(self):
        """
        Verify that HTML is correctly rendered when there is single inputtype.
        """
        question = 'Enter sum of 1+2'
        xml = textwrap.dedent("""
        <problem>
            <customresponse cfn="test_sum" expect="3">
                <script type="loncapa/python">
        def test_sum(expect, ans):
            return int(expect) == int(ans)
                </script>
                <label>{}</label>
                <textline size="20" correct_answer="3" />
            </customresponse>
        </problem>
        """.format(question))
        problem = new_loncapa_problem(xml, use_capa_render_template=True)
        problem_html = etree.XML(problem.get_html())

        # verify that no multi-inputs-group div is present for a single inputtype
        multi_inputs_group = problem_html.xpath('//div[@class="multi-inputs-group"]')
        self.assertEqual(len(multi_inputs_group), 0)

        # verify that question is rendered only once
        question = problem_html.xpath("//*[normalize-space(text())='{}']".format(question))
        self.assertEqual(len(question), 1)
@ddt.ddt
class CAPAMultiInputProblemTest(unittest.TestCase):
    """ TestCase for CAPA problems with multiple inputtypes """

    def capa_problem(self, xml):
        """
        Create and return a capa problem that renders with the real capa
        inputtype templates (rather than the repr-based test renderer).
        """
        return new_loncapa_problem(xml, use_capa_render_template=True)

    # Fixed misspelled parameter name ``problme_html`` -> ``problem_html``.
    def assert_problem_html(self, problem_html, group_label, *input_labels):
        """
        Verify that correct html is rendered for multiple inputtypes.

        Arguments:
            problem_html (str): rendered problem HTML
            group_label (str): expected label text for the multi-inputs group
            input_labels: expected label text for each individual inputtype
        """
        html = etree.XML(problem_html)

        # verify that only one multi input group div is present at correct path
        multi_inputs_group = html.xpath(
            '//section[@class="wrapper-problem-response"]/div[@class="multi-inputs-group"]'
        )
        self.assertEqual(len(multi_inputs_group), 1)

        # verify that multi input group label <p> tag exists and its
        # id matches with correct multi input group aria-labelledby
        multi_inputs_group_label_id = multi_inputs_group[0].attrib.get('aria-labelledby')
        multi_inputs_group_label = html.xpath('//p[@id="{}"]'.format(multi_inputs_group_label_id))
        self.assertEqual(len(multi_inputs_group_label), 1)
        self.assertEqual(multi_inputs_group_label[0].text, group_label)

        # verify that label for each input comes only once
        for input_label in input_labels:
            # normalize-space is used to remove whitespace around the text
            input_label_element = multi_inputs_group[0].xpath(
                '//*[normalize-space(text())="{}"]'.format(input_label)
            )
            self.assertEqual(len(input_label_element), 1)

    def test_optionresponse(self):
        """
        Verify that optionresponse problem with multiple inputtypes is rendered correctly.
        """
        group_label = 'Choose the correct color'
        input1_label = 'What color is the sky?'
        input2_label = 'What color are pine needles?'
        xml = """
        <problem>
            <optionresponse>
                <label>{}</label>
                <optioninput options="('yellow','blue','green')" correct="blue" label="{}"/>
                <optioninput options="('yellow','blue','green')" correct="green" label="{}"/>
            </optionresponse>
        </problem>
        """.format(group_label, input1_label, input2_label)
        problem = self.capa_problem(xml)
        self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label)

    @ddt.unpack
    @ddt.data(
        {'inputtype': 'textline'},
        {'inputtype': 'formulaequationinput'}
    )
    def test_customresponse(self, inputtype):
        """
        Verify that customresponse problem with multiple textline
        and formulaequationinput inputtypes is rendered correctly.
        """
        group_label = 'Enter two integers that sum to 10.'
        input1_label = 'Integer 1'
        input2_label = 'Integer 2'
        xml = textwrap.dedent("""
        <problem>
            <customresponse cfn="test_add_to_ten">
                <script type="loncapa/python">
        def test_add_to_ten(expect, ans):
            return test_add(10, ans)
                </script>
                <label>{}</label>
                <{inputtype} size="40" correct_answer="3" label="{}" /><br/>
                <{inputtype} size="40" correct_answer="7" label="{}" />
            </customresponse>
        </problem>
        """.format(group_label, input1_label, input2_label, inputtype=inputtype))
        problem = self.capa_problem(xml)
        self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label)

View File

@@ -1,7 +1,8 @@
<problem>
<p>Select all the fruits from the list. In retrospect, the wordiness of these tests increases the dizziness!</p>
<p>In retrospect, the wordiness of these tests increases the dizziness!</p>
<choiceresponse>
<checkboxgroup label="Select all the fruits from the list">
<label>Select all the fruits from the list</label>
<checkboxgroup>
<choice correct="true" id="alpha">Apple
<choicehint selected="TrUe">You are right that apple is a fruit.
</choicehint>
@@ -33,9 +34,10 @@
</compoundhint>
</checkboxgroup>
</choiceresponse>
<p>Select all the vegetables from the list</p>
<choiceresponse>
<checkboxgroup label="Select all the vegetables from the list">
<label>Select all the vegetables from the list</label>
<checkboxgroup>
<choice correct="false">Banana
<choicehint selected="true">No, sorry, a banana is a fruit.
</choicehint>
@@ -52,11 +54,11 @@
<choice correct="true">
Brussel Sprout
<choicehint selected="true">
Brussel sprouts are vegetables.
</choicehint>
<choicehint selected="false">
Brussel sprout is the only vegetable in this list.
</choicehint>
</choice>
@@ -66,6 +68,7 @@
</compoundhint>
</checkboxgroup>
</choiceresponse>
<p>Compoundhint vs. correctness</p>
<choiceresponse>
<checkboxgroup>
@@ -80,17 +83,17 @@
<choiceresponse>
<checkboxgroup>
<choice correct="true">
A
A
<choicehint selected="true" label="AA">
aa
</choicehint></choice>
<choice correct="true">
B <choicehint selected="false" label="BB">
bb
</choicehint></choice>
</checkboxgroup>
</choiceresponse>
@@ -114,4 +117,3 @@
</problem>

View File

@@ -1,8 +1,8 @@
<problem>
<p>(note the blank line before mushroom -- be sure to include this test case)</p>
<p>Select the fruit from the list</p>
<multiplechoiceresponse>
<choicegroup label="Select the fruit from the list" type="MultipleChoice">
<label>Select the fruit from the list</label>
<choicegroup type="MultipleChoice">
<choice correct="false">Mushroom
<choicehint label="">Mushroom is a fungus, not a fruit.
</choicehint>
@@ -14,9 +14,10 @@
</choice>
</choicegroup>
</multiplechoiceresponse>
<p>Select the vegetables from the list</p>
<multiplechoiceresponse>
<choicegroup label="Select the vegetables from the list" type="MultipleChoice">
<label>Select the vegetables from the list</label>
<choicegroup type="MultipleChoice">
<choice correct="false">Mushroom
<choicehint>Mushroom is a fungus, not a vegetable.
</choicehint>

View File

@@ -1,7 +1,7 @@
<problem>
<p>Select the fruit from the list</p>
<multiplechoiceresponse>
<choicegroup label="Select the fruit from the list" type="MultipleChoice">
<label>Select the fruit from the list</label>
<choicegroup type="MultipleChoice">
<choice correct="false">Mushroom
<choicehint>Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.</choicehint>
</choice>

View File

@@ -1,7 +1,8 @@
<problem>
<numericalresponse answer="1.141">
<label>What value when squared is approximately equal to 2 (give your answer to 2 decimal places)?</label>
<responseparam default=".01" type="tolerance"/>
<formulaequationinput label="What value when squared is approximately equal to 2 (give your answer to 2 decimal places)?"/>
<formulaequationinput/>
<correcthint label="Nice">
The square root of two turns up in the strangest places.
@@ -11,8 +12,9 @@
</numericalresponse>
<numericalresponse answer="4">
<label>What is 2 + 2?</label>
<responseparam default=".01" type="tolerance"/>
<formulaequationinput label="What is 2 + 2?"/>
<formulaequationinput/>
<correcthint>
Pretty easy, uh?.
</correcthint>
@@ -34,4 +36,3 @@ also not multiple correcthint
</lehint>
-->
</problem>

View File

@@ -2,7 +2,8 @@
<p>In which country would you find the city of Paris?</p>
<stringresponse answer="FranceΩ" type="ci" >
<textline label="In which country would you find the city of Paris?" size="20"/>
<label>In which country would you find the city of Paris?</label>
<textline size="20"/>
<correcthint>
Viva la France!Ω
</correcthint>
@@ -22,16 +23,18 @@
<p>What color is the sky? A minimal example, case sensitive, not regex.</p>
<stringresponse answer="Blue">
<label>What color is the sky?</label>
<correcthint >The red light is scattered by water molecules leaving only blue light.
</correcthint>
<textline label="What color is the sky?" size="20"/>
<textline size="20"/>
</stringresponse>
<p>(This question will cause an illegal regular expression exception)</p>
<stringresponse answer="Bonk">
<label>Why not?</label>
<correcthint >This hint should never appear.
</correcthint>
<textline label="Why not?" size="20"/>
<textline size="20"/>
<regexphint answer="[">
This hint should never appear either because the regex is illegal.
</regexphint>
@@ -56,7 +59,7 @@
<regexphint answer="FG+"> hint6 </regexphint>
<textline size="20"/>
</stringresponse>
<!-- backward compatibility for additional_answer: old and new format together in
a problem, scored correctly and new style has a hint -->
<stringresponse answer="A">

View File

@@ -1,6 +1,7 @@
<problem>
<choiceresponse>
<checkboxgroup label="Select all the vegetables from the list">
<label>Select all the vegetables from the list</label>
<checkboxgroup>
<choice correct="false">Banana
<choicehint selected="true">No, sorry, a banana is a fruit.
</choicehint>

View File

@@ -155,11 +155,12 @@ class CapaHtmlRenderTest(unittest.TestCase):
question_element = rendered_html.find("p")
self.assertEqual(question_element.text, "Test question")
# Expect that the response has been turned into a <span>
response_element = rendered_html.find("span")
self.assertEqual(response_element.tag, "span")
# Expect that the response has been turned into a <section> with correct attributes
response_element = rendered_html.find("section")
self.assertEqual(response_element.tag, "section")
self.assertEqual(response_element.attrib["aria-label"], "Question 1")
# Expect that the response <span>
# Expect that the response <section>
# that contains a <div> for the textline
textline_element = response_element.find("div")
self.assertEqual(textline_element.text, 'Input Template Render')
@@ -175,7 +176,6 @@ class CapaHtmlRenderTest(unittest.TestCase):
expected_textline_context = {
'STATIC_URL': '/dummy-static/',
'status': the_system.STATUS_CLASS('unsubmitted'),
'label': '',
'value': '',
'preprocessor': None,
'msg': '',
@@ -185,6 +185,8 @@ class CapaHtmlRenderTest(unittest.TestCase):
'id': '1_2_1',
'trailing_text': '',
'size': None,
'response_data': {'label': '', 'descriptions': {}},
'describedby': ''
}
expected_solution_context = {'id': '1_solution_1'}
@@ -201,6 +203,29 @@ class CapaHtmlRenderTest(unittest.TestCase):
expected_calls
)
def test_correct_aria_label(self):
xml = """
<problem>
<choiceresponse>
<checkboxgroup>
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
</checkboxgroup>
</choiceresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
rendered_html = etree.XML(problem.get_html())
sections = rendered_html.findall('section')
self.assertEqual(sections[0].attrib['aria-label'], 'Question 1')
self.assertEqual(sections[1].attrib['aria-label'], 'Question 2')
def test_render_response_with_overall_msg(self):
# CustomResponse script that sets an overall_message
script = textwrap.dedent("""

View File

@@ -2,6 +2,7 @@
Tests for the logic in input type mako templates.
"""
from collections import OrderedDict
import unittest
import capa
import os.path
@@ -29,6 +30,13 @@ class TemplateTestCase(unittest.TestCase):
# The template name should include the .html extension:
# for example: choicegroup.html
TEMPLATE_NAME = None
DESCRIBEDBY = 'aria-describedby="desc-1 desc-2"'
DESCRIPTIONS = OrderedDict([('desc-1', 'description text 1'), ('desc-2', 'description text 2')])
DESCRIPTION_IDS = ' '.join(DESCRIPTIONS.keys())
RESPONSE_DATA = {
'label': 'question text 101',
'descriptions': DESCRIPTIONS
}
def setUp(self):
"""
@@ -42,6 +50,8 @@ class TemplateTestCase(unittest.TestCase):
with open(self.template_path) as f:
self.template = MakoTemplate(f.read())
self.context = {}
def render_to_xml(self, context_dict):
"""
Render the template using the `context_dict` dict.
@@ -112,6 +122,83 @@ class TemplateTestCase(unittest.TestCase):
else:
self.assertIn(text, element_list[0].text)
def assert_description(self, describedby_xpaths, descriptions=True):
"""
Verify that descriptions information is correct.
Arguments:
describedby_xpaths (list): list of xpaths to check aria-describedby attribute
descriptions (bool): tells whether we need to check description <p> tags
"""
xml = self.render_to_xml(self.context)
# TODO! This check should be removed once description <p> tags are added into all templates.
if descriptions:
# Verify that each description <p> tag has correct id, text and order
descriptions = OrderedDict(
(tag.get('id'), tag.text) for tag in xml.xpath('//p[@class="question-description"]')
)
self.assertEqual(self.DESCRIPTIONS, descriptions)
# for each xpath verify that description_ids are set correctly
for describedby_xpath in describedby_xpaths:
describedbys = xml.xpath(describedby_xpath)
# aria-describedby attributes must have ids
self.assertTrue(describedbys)
for describedby in describedbys:
self.assertEqual(describedby, self.DESCRIPTION_IDS)
def assert_describedby_attribute(self, describedby_xpaths):
"""
Verify that an element has no aria-describedby attribute if there are no descriptions.
Arguments:
describedby_xpaths (list): list of xpaths to check aria-describedby attribute
"""
self.context['describedby'] = ''
xml = self.render_to_xml(self.context)
# for each xpath verify that description_ids are set correctly
for describedby_xpath in describedby_xpaths:
describedbys = xml.xpath(describedby_xpath)
self.assertFalse(describedbys)
def assert_status(self, status_div=False, status_class=False):
"""
Verify status information.
Arguments:
status_div (bool): check presence of status div
status_class (bool): check presence of status class
"""
cases = [
('correct', 'correct'),
('unsubmitted', 'unanswered'),
('incorrect', 'incorrect'),
('incomplete', 'incorrect')
]
for context_status, div_class in cases:
self.context['status'] = Status(context_status)
xml = self.render_to_xml(self.context)
# Expect that we get a <div> with correct class
if status_div:
xpath = "//div[normalize-space(@class)='%s']" % div_class
self.assert_has_xpath(xml, xpath, self.context)
# Expect that we get a <span> with class="status"
# (used to by CSS to draw the green check / red x)
self.assert_has_text(
xml,
"//span[@class=normalize-space('status {}')]/span[@class='sr']".format(
div_class if status_class else ''
),
self.context['status'].display_tooltip
)
class ChoiceGroupTemplateTest(TemplateTestCase):
"""
@@ -121,15 +208,18 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
TEMPLATE_NAME = 'choicegroup.html'
def setUp(self):
choices = [('1', 'choice 1'), ('2', 'choice 2'), ('3', 'choice 3')]
self.context = {'id': '1',
'choices': choices,
'status': Status('correct'),
'label': 'test',
'input_type': 'checkbox',
'name_array_suffix': '1',
'value': '3'}
super(ChoiceGroupTemplateTest, self).setUp()
choices = [('1', 'choice 1'), ('2', 'choice 2'), ('3', 'choice 3')]
self.context = {
'id': '1',
'choices': choices,
'status': Status('correct'),
'input_type': 'checkbox',
'name_array_suffix': '1',
'value': '3',
'response_data': self.RESPONSE_DATA,
'describedby': self.DESCRIBEDBY,
}
def test_problem_marked_correct(self):
"""
@@ -229,7 +319,7 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//label[@class='choicegroup_correct']"
xpath = "//label[contains(@class, 'choicegroup_correct')]"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
@@ -250,7 +340,7 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//label[@class='choicegroup_incorrect']"
xpath = "//label[contains(@class, 'choicegroup_incorrect')]"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
@@ -340,8 +430,22 @@ class ChoiceGroupTemplateTest(TemplateTestCase):
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//fieldset[@aria-label='%s']" % self.context['label']
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//legend"
self.assert_has_text(xml, xpath, self.context['response_data']['label'])
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//fieldset/@aria-describedby', '//label/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
def test_status(self):
"""
Verify status information.
"""
self.assert_status(status_class=True)
class TextlineTemplateTest(TemplateTestCase):
@@ -352,13 +456,16 @@ class TextlineTemplateTest(TemplateTestCase):
TEMPLATE_NAME = 'textline.html'
def setUp(self):
self.context = {'id': '1',
'status': Status('correct'),
'label': 'test',
'value': '3',
'preprocessor': None,
'trailing_text': None}
super(TextlineTemplateTest, self).setUp()
self.context = {
'id': '1',
'status': Status('correct'),
'value': '3',
'preprocessor': None,
'trailing_text': None,
'response_data': self.RESPONSE_DATA,
'describedby': self.DESCRIBEDBY,
}
def test_section_class(self):
cases = [({}, ' capa_inputtype textline'),
@@ -374,28 +481,14 @@ class TextlineTemplateTest(TemplateTestCase):
self.assert_has_xpath(xml, xpath, self.context)
def test_status(self):
cases = [('correct', 'correct', 'correct'),
('unsubmitted', 'unanswered', 'unanswered'),
('incorrect', 'incorrect', 'incorrect'),
('incomplete', 'incorrect', 'incomplete')]
for (context_status, div_class, status_mark) in cases:
self.context['status'] = Status(context_status)
xml = self.render_to_xml(self.context)
# Expect that we get a <div> with correct class
xpath = "//div[@class='%s ']" % div_class
self.assert_has_xpath(xml, xpath, self.context)
# Expect that we get a <span> with class="status"
# (used to by CSS to draw the green check / red x)
self.assert_has_text(xml, "//span[@class='status']/span[@class='sr']",
status_mark, exact=False)
"""
Verify status information.
"""
self.assert_status(status_div=True)
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//input[@aria-label='%s']" % self.context['label']
self.assert_has_xpath(xml, xpath, self.context)
self.assert_has_xpath(xml, "//label[@class='problem-group-label']", self.RESPONSE_DATA['label'])
def test_hidden(self):
self.context['hidden'] = True
@@ -470,6 +563,14 @@ class TextlineTemplateTest(TemplateTestCase):
xpath = "//span[@class='message']"
self.assert_has_text(xml, xpath, self.context['msg'])
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//input/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
class FormulaEquationInputTemplateTest(TemplateTestCase):
"""
@@ -478,16 +579,17 @@ class FormulaEquationInputTemplateTest(TemplateTestCase):
TEMPLATE_NAME = 'formulaequationinput.html'
def setUp(self):
super(FormulaEquationInputTemplateTest, self).setUp()
self.context = {
'id': 2,
'value': 'PREFILLED_VALUE',
'status': Status('unsubmitted'),
'label': 'test',
'previewer': 'file.js',
'reported_status': 'REPORTED_STATUS',
'trailing_text': None,
'response_data': self.RESPONSE_DATA,
'describedby': self.DESCRIBEDBY,
}
super(FormulaEquationInputTemplateTest, self).setUp()
def test_no_size(self):
xml = self.render_to_xml(self.context)
@@ -499,6 +601,20 @@ class FormulaEquationInputTemplateTest(TemplateTestCase):
self.assert_has_xpath(xml, "//input[@size='40']", self.context)
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//input/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
def test_status(self):
"""
Verify status information.
"""
self.assert_status(status_div=True)
class AnnotationInputTemplateTest(TemplateTestCase):
"""
@@ -508,21 +624,23 @@ class AnnotationInputTemplateTest(TemplateTestCase):
TEMPLATE_NAME = 'annotationinput.html'
def setUp(self):
self.context = {'id': 2,
'value': '<p>Test value</p>',
'title': '<h1>This is a title</h1>',
'text': '<p><b>This</b> is a test.</p>',
'comment': '<p>This is a test comment</p>',
'comment_prompt': '<p>This is a test comment prompt</p>',
'comment_value': '<p>This is the value of a test comment</p>',
'tag_prompt': '<p>This is a tag prompt</p>',
'options': [],
'has_options_value': False,
'debug': False,
'status': Status('unsubmitted'),
'return_to_annotation': False,
'msg': '<p>This is a test message</p>', }
super(AnnotationInputTemplateTest, self).setUp()
self.context = {
'id': 2,
'value': '<p>Test value</p>',
'title': '<h1>This is a title</h1>',
'text': '<p><b>This</b> is a test.</p>',
'comment': '<p>This is a test comment</p>',
'comment_prompt': '<p>This is a test comment prompt</p>',
'comment_value': '<p>This is the value of a test comment</p>',
'tag_prompt': '<p>This is a tag prompt</p>',
'options': [],
'has_options_value': False,
'debug': False,
'status': Status('unsubmitted'),
'return_to_annotation': False,
'msg': '<p>This is a test message</p>',
}
def test_return_to_annotation(self):
"""
@@ -634,8 +752,8 @@ class MathStringTemplateTest(TemplateTestCase):
TEMPLATE_NAME = 'mathstring.html'
def setUp(self):
self.context = {'isinline': False, 'mathstr': '', 'tail': ''}
super(MathStringTemplateTest, self).setUp()
self.context = {'isinline': False, 'mathstr': '', 'tail': ''}
def test_math_string_inline(self):
self.context['isinline'] = True
@@ -676,14 +794,16 @@ class OptionInputTemplateTest(TemplateTestCase):
TEMPLATE_NAME = 'optioninput.html'
def setUp(self):
super(OptionInputTemplateTest, self).setUp()
self.context = {
'id': 2,
'options': [],
'status': Status('unsubmitted'),
'label': 'test',
'value': 0
'value': 0,
'default_option_text': 'Select an option',
'response_data': self.RESPONSE_DATA,
'describedby': self.DESCRIBEDBY,
}
super(OptionInputTemplateTest, self).setUp()
def test_select_options(self):
@@ -710,25 +830,23 @@ class OptionInputTemplateTest(TemplateTestCase):
self.assert_has_text(xml, xpath, 'Option 2')
def test_status(self):
# Test cases, where each tuple represents
# `(input_status, expected_css_class)`
test_cases = [('unsubmitted', 'status unanswered'),
('correct', 'status correct'),
('incorrect', 'status incorrect'),
('incomplete', 'status incorrect')]
for (input_status, expected_css_class) in test_cases:
self.context['status'] = Status(input_status)
xml = self.render_to_xml(self.context)
xpath = "//span[@class='{0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
"""
Verify status information.
"""
self.assert_status(status_class=True)
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//select[@aria-label='%s']" % self.context['label']
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//label[@class='problem-group-label']"
self.assert_has_xpath(xml, xpath, self.RESPONSE_DATA['label'])
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//select/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
class DragAndDropTemplateTest(TemplateTestCase):
@@ -739,12 +857,12 @@ class DragAndDropTemplateTest(TemplateTestCase):
TEMPLATE_NAME = 'drag_and_drop_input.html'
def setUp(self):
super(DragAndDropTemplateTest, self).setUp()
self.context = {'id': 2,
'drag_and_drop_json': '',
'value': 0,
'status': Status('unsubmitted'),
'msg': ''}
super(DragAndDropTemplateTest, self).setUp()
def test_status(self):
@@ -796,6 +914,7 @@ class ChoiceTextGroupTemplateTest(TemplateTestCase):
'1_choiceinput_1_textinput_0': '0'}
def setUp(self):
super(ChoiceTextGroupTemplateTest, self).setUp()
choices = [
(
'1_choiceinput_0bc',
@@ -817,12 +936,10 @@ class ChoiceTextGroupTemplateTest(TemplateTestCase):
'choices': choices,
'status': Status('correct'),
'input_type': 'radio',
'label': 'choicetext label',
'value': self.VALUE_DICT,
'response_data': self.RESPONSE_DATA
}
super(ChoiceTextGroupTemplateTest, self).setUp()
def test_grouping_tag(self):
"""
Tests whether we are using a section or a label to wrap choice elements.
@@ -962,5 +1079,5 @@ class ChoiceTextGroupTemplateTest(TemplateTestCase):
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//fieldset[@aria-label='%s']" % self.context['label']
xpath = "//fieldset[@aria-label='%s']" % self.context['response_data']['label']
self.assert_has_xpath(xml, xpath, self.context)

View File

@@ -16,7 +16,7 @@ TODO:
- test funny xml chars -- should never get xml parse error if things are escaped properly.
"""
from collections import OrderedDict
import json
from lxml import etree
from lxml.html import fromstring
@@ -36,6 +36,14 @@ from capa.xqueue_interface import XQUEUE_TIMEOUT
lookup_tag = inputtypes.registry.get_class_for_tag
DESCRIBEDBY = 'aria-describedby="desc-1 desc-2"'
DESCRIPTIONS = OrderedDict([('desc-1', 'description text 1'), ('desc-2', 'description text 2')])
RESPONSE_DATA = {
'label': 'question text 101',
'descriptions': DESCRIPTIONS
}
def quote_attr(s):
return saxutils.quoteattr(s)[1:-1] # don't want the outer quotes
@@ -49,9 +57,13 @@ class OptionInputTest(unittest.TestCase):
xml_str = """<optioninput options="('Up','Down','Don't know')" id="sky_input" correct="Up"/>"""
element = etree.fromstring(xml_str)
state = {'value': 'Down',
'id': 'sky_input',
'status': 'answered'}
state = {
'value': 'Down',
'id': 'sky_input',
'status': 'answered',
'default_option_text': 'Select an option',
'response_data': RESPONSE_DATA
}
option_input = lookup_tag('optioninput')(test_capa_system(), element, state)
context = option_input._get_render_context() # pylint: disable=protected-access
@@ -61,10 +73,12 @@ class OptionInputTest(unittest.TestCase):
'value': 'Down',
'options': [('Up', 'Up'), ('Down', 'Down'), ('Don\'t know', 'Don\'t know')],
'status': inputtypes.Status('answered'),
'label': '',
'msg': '',
'inline': False,
'id': 'sky_input',
'default_option_text': 'Select an option',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -105,12 +119,14 @@ class ChoiceGroupTest(unittest.TestCase):
<choice correct="false" name="foil4">This is <b>foil</b> Four.</choice>
</{tag}>
""".format(tag=tag)
element = etree.fromstring(xml_str)
state = {'value': 'foil3',
'id': 'sky_input',
'status': 'answered'}
state = {
'value': 'foil3',
'id': 'sky_input',
'status': 'answered',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag(tag)(test_capa_system(), element, state)
@@ -121,7 +137,6 @@ class ChoiceGroupTest(unittest.TestCase):
'id': 'sky_input',
'value': 'foil3',
'status': inputtypes.Status('answered'),
'label': '',
'msg': '',
'input_type': expected_input_type,
'choices': [('foil1', '<text>This is foil One.</text>'),
@@ -131,6 +146,8 @@ class ChoiceGroupTest(unittest.TestCase):
'show_correctness': 'always',
'submitted_message': 'Answer received.',
'name_array_suffix': expected_suffix, # what is this for??
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -165,7 +182,10 @@ class JavascriptInputTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': '3', }
state = {
'value': '3',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('javascriptinput')(test_capa_system(), element, state)
context = the_input._get_render_context() # pylint: disable=protected-access
@@ -174,13 +194,14 @@ class JavascriptInputTest(unittest.TestCase):
'STATIC_URL': '/dummy-static/',
'id': 'prob_1_2',
'status': inputtypes.Status('unanswered'),
# 'label': '',
'msg': '',
'value': '3',
'params': params,
'display_file': display_file,
'display_class': display_class,
'problem_state': problem_state,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -193,11 +214,14 @@ class TextLineTest(unittest.TestCase):
def test_rendering(self):
size = "42"
xml_str = """<textline id="prob_1_2" label="testing 123" size="{size}"/>""".format(size=size)
xml_str = """<textline id="prob_1_2" size="{size}"/>""".format(size=size)
element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', }
state = {
'value': 'BumbleBee',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('textline')(test_capa_system(), element, state)
context = the_input._get_render_context() # pylint: disable=protected-access
@@ -207,7 +231,6 @@ class TextLineTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'BumbleBee',
'status': inputtypes.Status('unanswered'),
'label': 'testing 123',
'size': size,
'msg': '',
'hidden': False,
@@ -215,6 +238,8 @@ class TextLineTest(unittest.TestCase):
'do_math': False,
'trailing_text': '',
'preprocessor': None,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -229,7 +254,10 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', }
state = {
'value': 'BumbleBee',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('textline')(test_capa_system(), element, state)
context = the_input._get_render_context() # pylint: disable=protected-access
@@ -239,7 +267,6 @@ class TextLineTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'BumbleBee',
'status': inputtypes.Status('unanswered'),
'label': '',
'size': size,
'msg': '',
'hidden': False,
@@ -250,6 +277,8 @@ class TextLineTest(unittest.TestCase):
'class_name': preprocessorClass,
'script_src': script,
},
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -273,7 +302,10 @@ class TextLineTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee', }
state = {
'value': 'BumbleBee',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('textline')(test_capa_system(), element, state)
context = the_input._get_render_context() # pylint: disable=protected-access
@@ -283,7 +315,6 @@ class TextLineTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'BumbleBee',
'status': inputtypes.Status('unanswered'),
'label': '',
'size': size,
'msg': '',
'hidden': False,
@@ -291,6 +322,8 @@ class TextLineTest(unittest.TestCase):
'do_math': False,
'trailing_text': expected_text,
'preprocessor': None,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -312,9 +345,12 @@ class FileSubmissionTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'BumbleBee.py',
'status': 'incomplete',
'feedback': {'message': '3'}, }
state = {
'value': 'BumbleBee.py',
'status': 'incomplete',
'feedback': {'message': '3'},
'response_data': RESPONSE_DATA
}
input_class = lookup_tag('filesubmission')
the_input = input_class(test_capa_system(), element, state)
@@ -324,12 +360,13 @@ class FileSubmissionTest(unittest.TestCase):
'STATIC_URL': '/dummy-static/',
'id': 'prob_1_2',
'status': inputtypes.Status('queued'),
'label': '',
'msg': the_input.submitted_msg,
'value': 'BumbleBee.py',
'queue_len': '3',
'allowed_files': '["runme.py", "nooooo.rb", "ohai.java"]',
'required_files': '["cookies.py"]',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -359,9 +396,12 @@ class CodeInputTest(unittest.TestCase):
escapedict = {'"': '&quot;'}
state = {'value': 'print "good evening"',
'status': 'incomplete',
'feedback': {'message': '3'}, }
state = {
'value': 'print "good evening"',
'status': 'incomplete',
'feedback': {'message': '3'},
'response_data': RESPONSE_DATA
}
input_class = lookup_tag('codeinput')
the_input = input_class(test_capa_system(), element, state)
@@ -373,7 +413,6 @@ class CodeInputTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'print "good evening"',
'status': inputtypes.Status('queued'),
# 'label': '',
'msg': the_input.submitted_msg,
'mode': mode,
'linenumbers': linenumbers,
@@ -382,6 +421,8 @@ class CodeInputTest(unittest.TestCase):
'hidden': '',
'tabsize': int(tabsize),
'queue_len': '3',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -413,9 +454,12 @@ class MatlabTest(unittest.TestCase):
payload=self.payload,
ln=self.linenumbers)
elt = etree.fromstring(self.xml)
state = {'value': 'print "good evening"',
'status': 'incomplete',
'feedback': {'message': '3'}, }
state = {
'value': 'print "good evening"',
'status': 'incomplete',
'feedback': {'message': '3'},
'response_data': {}
}
self.input_class = lookup_tag('matlabinput')
self.the_input = self.input_class(test_capa_system(), elt, state)
@@ -428,7 +472,6 @@ class MatlabTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'print "good evening"',
'status': inputtypes.Status('queued'),
# 'label': '',
'msg': self.the_input.submitted_msg,
'mode': self.mode,
'rows': self.rows,
@@ -440,15 +483,20 @@ class MatlabTest(unittest.TestCase):
'button_enabled': True,
'queue_len': '3',
'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js',
'response_data': {},
'describedby': ''
}
self.assertEqual(context, expected)
def test_rendering_with_state(self):
state = {'value': 'print "good evening"',
'status': 'incomplete',
'input_state': {'queue_msg': 'message'},
'feedback': {'message': '3'}, }
state = {
'value': 'print "good evening"',
'status': 'incomplete',
'input_state': {'queue_msg': 'message'},
'feedback': {'message': '3'},
'response_data': RESPONSE_DATA
}
elt = etree.fromstring(self.xml)
the_input = self.input_class(test_capa_system(), elt, state)
@@ -459,7 +507,6 @@ class MatlabTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'print "good evening"',
'status': inputtypes.Status('queued'),
# 'label': '',
'msg': the_input.submitted_msg,
'mode': self.mode,
'rows': self.rows,
@@ -471,16 +518,20 @@ class MatlabTest(unittest.TestCase):
'button_enabled': True,
'queue_len': '3',
'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
def test_rendering_when_completed(self):
for status in ['correct', 'incorrect']:
state = {'value': 'print "good evening"',
'status': status,
'input_state': {},
}
state = {
'value': 'print "good evening"',
'status': status,
'input_state': {},
'response_data': RESPONSE_DATA
}
elt = etree.fromstring(self.xml)
the_input = self.input_class(test_capa_system(), elt, state)
@@ -490,7 +541,6 @@ class MatlabTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'print "good evening"',
'status': inputtypes.Status(status),
# 'label': '',
'msg': '',
'mode': self.mode,
'rows': self.rows,
@@ -502,16 +552,20 @@ class MatlabTest(unittest.TestCase):
'button_enabled': False,
'queue_len': '0',
'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@patch('capa.inputtypes.time.time', return_value=10)
def test_rendering_while_queued(self, time):
state = {'value': 'print "good evening"',
'status': 'incomplete',
'input_state': {'queuestate': 'queued', 'queuetime': 5},
}
state = {
'value': 'print "good evening"',
'status': 'incomplete',
'input_state': {'queuestate': 'queued', 'queuetime': 5},
'response_data': RESPONSE_DATA
}
elt = etree.fromstring(self.xml)
the_input = self.input_class(test_capa_system(), elt, state)
@@ -521,7 +575,6 @@ class MatlabTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'print "good evening"',
'status': inputtypes.Status('queued'),
# 'label': '',
'msg': the_input.submitted_msg,
'mode': self.mode,
'rows': self.rows,
@@ -533,6 +586,8 @@ class MatlabTest(unittest.TestCase):
'button_enabled': True,
'queue_len': '1',
'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -650,13 +705,13 @@ class MatlabTest(unittest.TestCase):
textwrap.dedent("""
<div>{\'status\': Status(\'queued\'), \'button_enabled\': True,
\'rows\': \'10\', \'queue_len\': \'3\', \'mode\': \'\',
\'cols\': \'80\', \'STATIC_URL\': \'/dummy-static/\',
\'linenumbers\': \'true\', \'queue_msg\': \'\',
\'tabsize\': 4, \'cols\': \'80\', \'STATIC_URL\': \'/dummy-static/\',
\'describedby\': \'\', \'queue_msg\': \'\',
\'value\': \'print "good evening"\',
\'msg\': u\'Submitted. As soon as a response is returned,
this message will be replaced by that feedback.\',
\'matlab_editor_js\': \'/dummy-static/js/vendor/CodeMirror/octave.js\',
\'hidden\': \'\', \'id\': \'prob_1_2\', \'tabsize\': 4}</div>
\'hidden\': \'\', \'linenumbers\': \'true\', \'id\': \'prob_1_2\', \'response_data\': {}}</div>
""").replace('\n', ' ').strip()
)
@@ -724,10 +779,13 @@ class MatlabTest(unittest.TestCase):
</div><ul></ul></div>
""")
state = {'value': 'print "good evening"',
'status': 'incomplete',
'input_state': {'queue_msg': queue_msg},
'feedback': {'message': '3'}, }
state = {
'value': 'print "good evening"',
'status': 'incomplete',
'input_state': {'queue_msg': queue_msg},
'feedback': {'message': '3'},
'response_data': RESPONSE_DATA
}
elt = etree.fromstring(self.xml)
the_input = self.input_class(test_capa_system(), elt, state)
@@ -759,6 +817,8 @@ class MatlabTest(unittest.TestCase):
'button_enabled': True,
'queue_len': '3',
'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js',
'response_data': {},
'describedby': ''
}
self.assertEqual(context, expected)
@@ -845,8 +905,11 @@ class SchematicTest(unittest.TestCase):
element = etree.fromstring(xml_str)
value = 'three resistors and an oscilating pendulum'
state = {'value': value,
'status': 'unsubmitted'}
state = {
'value': value,
'status': 'unsubmitted',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('schematic')(test_capa_system(), element, state)
@@ -857,7 +920,6 @@ class SchematicTest(unittest.TestCase):
'id': 'prob_1_2',
'value': value,
'status': inputtypes.Status('unsubmitted'),
'label': '',
'msg': '',
'initial_value': initial_value,
'width': width,
@@ -866,6 +928,8 @@ class SchematicTest(unittest.TestCase):
'setup_script': '/dummy-static/js/capa/schematicinput.js',
'analyses': analyses,
'submit_analyses': submit_analyses,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -889,8 +953,11 @@ class ImageInputTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': value,
'status': 'unsubmitted'}
state = {
'value': value,
'status': 'unsubmitted',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('imageinput')(test_capa_system(), element, state)
@@ -901,13 +968,14 @@ class ImageInputTest(unittest.TestCase):
'id': 'prob_1_2',
'value': value,
'status': inputtypes.Status('unsubmitted'),
'label': '',
'width': width,
'height': height,
'src': src,
'gx': egx,
'gy': egy,
'msg': '',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -944,8 +1012,11 @@ class CrystallographyTest(unittest.TestCase):
element = etree.fromstring(xml_str)
value = 'abc'
state = {'value': value,
'status': 'unsubmitted'}
state = {
'value': value,
'status': 'unsubmitted',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('crystallography')(test_capa_system(), element, state)
@@ -956,10 +1027,11 @@ class CrystallographyTest(unittest.TestCase):
'id': 'prob_1_2',
'value': value,
'status': inputtypes.Status('unsubmitted'),
# 'label': '',
'msg': '',
'width': width,
'height': height,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -986,8 +1058,11 @@ class VseprTest(unittest.TestCase):
element = etree.fromstring(xml_str)
value = 'abc'
state = {'value': value,
'status': 'unsubmitted'}
state = {
'value': value,
'status': 'unsubmitted',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('vsepr_input')(test_capa_system(), element, state)
@@ -1003,6 +1078,8 @@ class VseprTest(unittest.TestCase):
'height': height,
'molecules': molecules,
'geometries': geometries,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -1019,7 +1096,10 @@ class ChemicalEquationTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'H2OYeah', }
state = {
'value': 'H2OYeah',
'response_data': RESPONSE_DATA
}
self.the_input = lookup_tag('chemicalequationinput')(test_capa_system(), element, state)
def test_rendering(self):
@@ -1031,10 +1111,11 @@ class ChemicalEquationTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'H2OYeah',
'status': inputtypes.Status('unanswered'),
'label': '',
'msg': '',
'size': self.size,
'previewer': '/dummy-static/js/capa/chemical_equation_preview.js',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -1106,7 +1187,10 @@ class FormulaEquationTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'x^2+1/2'}
state = {
'value': 'x^2+1/2',
'response_data': RESPONSE_DATA
}
self.the_input = lookup_tag('formulaequationinput')(test_capa_system(), element, state)
def test_rendering(self):
@@ -1120,12 +1204,13 @@ class FormulaEquationTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'x^2+1/2',
'status': inputtypes.Status('unanswered'),
'label': '',
'msg': '',
'size': self.size,
'previewer': '/dummy-static/js/capa/src/formula_equation_preview.js',
'inline': False,
'trailing_text': '',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -1152,7 +1237,10 @@ class FormulaEquationTest(unittest.TestCase):
element = etree.fromstring(xml_str)
state = {'value': 'x^2+1/2', }
state = {
'value': 'x^2+1/2',
'response_data': RESPONSE_DATA
}
the_input = lookup_tag('formulaequationinput')(test_capa_system(), element, state)
context = the_input._get_render_context() # pylint: disable=protected-access
@@ -1162,12 +1250,13 @@ class FormulaEquationTest(unittest.TestCase):
'id': 'prob_1_2',
'value': 'x^2+1/2',
'status': inputtypes.Status('unanswered'),
'label': '',
'msg': '',
'size': size,
'previewer': '/dummy-static/js/capa/src/formula_equation_preview.js',
'inline': False,
'trailing_text': expected_text,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.assertEqual(context, expected)
@@ -1263,8 +1352,11 @@ class DragAndDropTest(unittest.TestCase):
element = etree.fromstring(xml_str)
value = 'abc'
state = {'value': value,
'status': 'unsubmitted'}
state = {
'value': value,
'status': 'unsubmitted',
'response_data': RESPONSE_DATA
}
user_input = { # order matters, for string comparison
"target_outline": "false",
@@ -1293,9 +1385,10 @@ class DragAndDropTest(unittest.TestCase):
'id': 'prob_1_2',
'value': value,
'status': inputtypes.Status('unsubmitted'),
# 'label': '',
'msg': '',
'drag_and_drop_json': json.dumps(user_input)
'drag_and_drop_json': json.dumps(user_input),
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
# as we are dumping 'draggables' dicts while dumping user_input, string
@@ -1332,7 +1425,8 @@ class AnnotationInputTest(unittest.TestCase):
state = {
'value': json_value,
'id': 'annotation_input',
'status': 'answered'
'status': 'answered',
'response_data': RESPONSE_DATA
}
tag = 'annotationinput'
@@ -1345,7 +1439,6 @@ class AnnotationInputTest(unittest.TestCase):
'STATIC_URL': '/dummy-static/',
'id': 'annotation_input',
'status': inputtypes.Status('answered'),
# 'label': '',
'msg': '',
'title': 'foo',
'text': 'bar',
@@ -1362,7 +1455,9 @@ class AnnotationInputTest(unittest.TestCase):
'has_options_value': len(value['options']) > 0,
'comment_value': value['comment'],
'debug': False,
'return_to_annotation': True
'return_to_annotation': True,
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
self.maxDiff = None
@@ -1405,6 +1500,7 @@ class TestChoiceText(unittest.TestCase):
'value': '{}',
'id': 'choicetext_input',
'status': inputtypes.Status('answered'),
'response_data': RESPONSE_DATA
}
first_input = self.build_choice_element('numtolerance_input', 'choiceinput_0_textinput_0', 'false', '')
@@ -1421,11 +1517,12 @@ class TestChoiceText(unittest.TestCase):
expected = {
'STATIC_URL': '/dummy-static/',
'msg': '',
'label': '',
'input_type': expected_input_type,
'choices': choices,
'show_correctness': 'always',
'submitted_message': 'Answer received.'
'submitted_message': 'Answer received.',
'response_data': RESPONSE_DATA,
'describedby': DESCRIBEDBY
}
expected.update(state)
the_input = lookup_tag(tag)(test_capa_system(), element, state)

View File

@@ -1256,7 +1256,6 @@ class CapaMixin(CapaFields):
of the problem. If problem related metadata cannot be located it should be replaced with empty
strings ''.
"""
input_metadata = {}
for input_id, internal_answer in answers.iteritems():
answer_input = self.lcp.inputs.get(input_id)
@@ -1290,13 +1289,16 @@ class CapaMixin(CapaFields):
is_correct = ''
input_metadata[input_id] = {
'question': getattr(answer_input, 'loaded_attributes', {}).get('label', ''),
'question': answer_input.response_data.get('label', ''),
'answer': user_visible_answer,
'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''),
'input_type': getattr(answer_input, 'tag', ''),
'correct': is_correct,
'variant': variant,
}
# Add group_label in event data only if the responsetype contains multiple inputtypes
if answer_input.response_data.get('group_label'):
input_metadata[input_id]['group_label'] = answer_input.response_data.get('group_label')
return input_metadata

View File

@@ -152,6 +152,21 @@ div.problem {
margin-top: $baseline;
}
}
.question-description {
@include margin(($baseline*0.75), 0);
}
form > label, .problem-group-label {
display: block;
margin-bottom: $baseline;
font: inherit;
color: inherit;
}
.wrapper-problem-response:not(:last-child) {
margin-bottom: $baseline;
}
}
// Choice Group - silent class

View File

@@ -23,7 +23,8 @@ var options = {
{pattern: 'common_static/coffee/src/ajax_prefix.js', included: true},
{pattern: 'common_static/common/js/vendor/underscore.js', included: true},
{pattern: 'common_static/common/js/vendor/backbone.js', included: true},
{pattern: 'common_static/js/vendor/CodeMirror/codemirror.js', included: true},
{pattern: 'common_static/js/vendor/codemirror-compressed.js', included: true},
{pattern: 'common_static/js/lib/pretty-print.js', included: true},
{pattern: 'common_static/js/vendor/draggabilly.js'},
{pattern: 'common_static/common/js/vendor/jquery.js', included: true},
{pattern: 'common_static/common/js/vendor/jquery-migrate.js', included: true},

View File

@@ -314,7 +314,7 @@ describe 'Problem', ->
html = '''
<div id="problem_sel">
<select>
<option value="val0"></option>
<option value="val0">Select an option</option>
<option value="val1">1</option>
<option value="val2">2</option>
</select>

View File

@@ -5,7 +5,7 @@ describe 'MarkdownEditingDescriptor', ->
@descriptor = new MarkdownEditingDescriptor($('.problem-editor'))
saveResult = @descriptor.save()
expect(saveResult.metadata.markdown).toEqual('markdown')
expect(saveResult.data).toEqual('<problem>\n<p>markdown</p>\n</problem>')
expect(saveResult.data).toEqual('<problem>\n <p>markdown</p>\n</problem>')
it 'clears markdown when xml editor is selected', ->
loadFixtures 'problem-with-markdown.html'
@descriptor = new MarkdownEditingDescriptor($('.problem-editor'))
@@ -101,7 +101,7 @@ describe 'MarkdownEditingDescriptor', ->
describe 'markdownToXml', ->
it 'converts raw text to paragraph', ->
data = MarkdownEditingDescriptor.markdownToXml('foo')
expect(data).toEqual('<problem>\n<p>foo</p>\n</problem>')
expect(data).toEqual('<problem>\n <p>foo</p>\n</problem>')
# test default templates
it 'converts numerical response to xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""A numerical response problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value.
@@ -134,66 +134,55 @@ describe 'MarkdownEditingDescriptor', ->
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>A numerical response problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value.</p>
<p>The answer is correct if it is within a specified numerical tolerance of the expected answer.</p>
<p>Enter the numerical value of Pi:</p>
<numericalresponse answer="3.14159">
<responseparam type="tolerance" default=".02" />
<formulaequationinput />
</numericalresponse>
<p>Enter the approximate value of 502*9:</p>
<numericalresponse answer="502*9">
<responseparam type="tolerance" default="15%" />
<formulaequationinput />
</numericalresponse>
<p>Enter the number of fingers on a human hand:</p>
<numericalresponse answer="5">
<formulaequationinput />
</numericalresponse>
<p>Range tolerance case</p>
<numericalresponse answer="[6, 7]">
<formulaequationinput />
</numericalresponse>
<numericalresponse answer="(1, 2)">
<formulaequationinput />
</numericalresponse>
<p>If first and last symbols are not brackets, or they are not closed, stringresponse will appear.</p>
<stringresponse answer="(7), 7" type="ci" >
<textline size="20"/>
</stringresponse>
<stringresponse answer="(1+2" type="ci" >
<textline size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Pi, or the the ratio between a circle's circumference to its diameter, is an irrational number known to extreme precision. It is value is approximately equal to 3.14.</p>
<p>Although you can get an exact value by typing 502*9 into a calculator, the result will be close to 500*10, or 5,000. The grader accepts any response within 15% of the true value, 4518, so that you can use any estimation technique that you like.</p>
<p>If you look at your hand, you can count that you have five fingers.</p>
</div>
</solution>
<p>A numerical response problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value.</p>
<p>The answer is correct if it is within a specified numerical tolerance of the expected answer.</p>
<p>Enter the numerical value of Pi:</p>
<numericalresponse answer="3.14159">
<responseparam type="tolerance" default=".02" />
<formulaequationinput />
</numericalresponse>
<p>Enter the approximate value of 502*9:</p>
<numericalresponse answer="502*9">
<responseparam type="tolerance" default="15%" />
<formulaequationinput />
</numericalresponse>
<p>Enter the number of fingers on a human hand:</p>
<numericalresponse answer="5">
<formulaequationinput />
</numericalresponse>
<p>Range tolerance case</p>
<numericalresponse answer="[6, 7]">
<formulaequationinput />
</numericalresponse>
<numericalresponse answer="(1, 2)">
<formulaequationinput />
</numericalresponse>
<p>If first and last symbols are not brackets, or they are not closed, stringresponse will appear.</p>
<stringresponse answer="(7), 7" type="ci" >
<textline size="20"/>
</stringresponse>
<stringresponse answer="(1+2" type="ci" >
<textline size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Pi, or the the ratio between a circle's circumference to its diameter, is an irrational number known to extreme precision. It is value is approximately equal to 3.14.</p>
<p>Although you can get an exact value by typing 502*9 into a calculator, the result will be close to 500*10, or 5,000. The grader accepts any response within 15% of the true value, 4518, so that you can use any estimation technique that you like.</p>
<p>If you look at your hand, you can count that you have five fingers.</p>
</div>
</solution>
</problem>""")
it 'will convert 0 as a numerical response (instead of string response)', ->
data = MarkdownEditingDescriptor.markdownToXml("""
Enter 0 with a tolerance:
= 0 +- .02
""")
expect(data).toEqual("""<problem>
<p>Enter 0 with a tolerance:</p>
expect(data).toXMLEqual("""<problem>
<numericalresponse answer="0">
<responseparam type="tolerance" default=".02" />
<formulaequationinput />
<p>Enter 0 with a tolerance:</p>
<responseparam type="tolerance" default=".02"/>
<formulaequationinput/>
</numericalresponse>
@@ -204,11 +193,11 @@ describe 'MarkdownEditingDescriptor', ->
= 1 +- .02
or= 2 +- 5%
""")
expect(data).toEqual("""<problem>
<p>Enter 1 with a tolerance:</p>
expect(data).toXMLEqual("""<problem>
<numericalresponse answer="1">
<responseparam type="tolerance" default=".02" />
<formulaequationinput />
<p>Enter 1 with a tolerance:</p>
<responseparam type="tolerance" default=".02"/>
<formulaequationinput/>
</numericalresponse>
@@ -218,7 +207,7 @@ describe 'MarkdownEditingDescriptor', ->
One of the main elements that goes into a good multiple choice question is the existence of good distractors. That is, each of the alternate responses presented to the student should be the result of a plausible mistake that a student might make.
What Apple device competed with the portable CD player?
>>What Apple device competed with the portable CD player?<<
( ) The iPad
( ) Napster
(x) The iPod
@@ -230,31 +219,26 @@ describe 'MarkdownEditingDescriptor', ->
The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets.</p>
<p>One of the main elements that goes into a good multiple choice question is the existence of good distractors. That is, each of the alternate responses presented to the student should be the result of a plausible mistake that a student might make.</p>
<p>What Apple device competed with the portable CD player?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">The iPad</choice>
<choice correct="false">Napster</choice>
<choice correct="true">The iPod</choice>
<choice correct="false">The vegetable peeler</choice>
<choice correct="false">Android</choice>
<choice correct="false">The Beatles</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.</p>
</div>
</solution>
expect(data).toXMLEqual("""<problem>
<multiplechoiceresponse>
<p>A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets.</p>
<p>One of the main elements that goes into a good multiple choice question is the existence of good distractors. That is, each of the alternate responses presented to the student should be the result of a plausible mistake that a student might make.</p>
<label>What Apple device competed with the portable CD player?</label>
<choicegroup type="MultipleChoice">
<choice correct="false">The iPad</choice>
<choice correct="false">Napster</choice>
<choice correct="true">The iPod</choice>
<choice correct="false">The vegetable peeler</choice>
<choice correct="false">Android</choice>
<choice correct="false">The Beatles</choice>
</choicegroup>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.</p>
</div>
</solution>
</multiplechoiceresponse>
</problem>""")
it 'converts multiple choice shuffle to xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets.
@@ -273,31 +257,27 @@ describe 'MarkdownEditingDescriptor', ->
The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets.</p>
<p>One of the main elements that goes into a good multiple choice question is the existence of good distractors. That is, each of the alternate responses presented to the student should be the result of a plausible mistake that a student might make.</p>
<p>What Apple device competed with the portable CD player?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="true" fixed="true">The iPad</choice>
<choice correct="false" fixed="true">Napster</choice>
<choice correct="false">The iPod</choice>
<choice correct="false">The vegetable peeler</choice>
<choice correct="false">Android</choice>
<choice correct="false" fixed="true">The Beatles</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.</p>
</div>
</solution>
expect(data).toXMLEqual("""
<problem>
<multiplechoiceresponse>
<p>A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets.</p>
<p>One of the main elements that goes into a good multiple choice question is the existence of good distractors. That is, each of the alternate responses presented to the student should be the result of a plausible mistake that a student might make.</p>
<p>What Apple device competed with the portable CD player?</p>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="true" fixed="true">The iPad</choice>
<choice correct="false" fixed="true">Napster</choice>
<choice correct="false">The iPod</choice>
<choice correct="false">The vegetable peeler</choice>
<choice correct="false">Android</choice>
<choice correct="false" fixed="true">The Beatles</choice>
</choicegroup>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.</p>
</div>
</solution>
</multiplechoiceresponse>
</problem>""")
it 'converts a series of multiplechoice to xml', ->
@@ -317,42 +297,38 @@ describe 'MarkdownEditingDescriptor', ->
When the student is ready, the explanation appears.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>bleh</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="true">a</choice>
<choice correct="false">b</choice>
<choice correct="false">c</choice>
</choicegroup>
</multiplechoiceresponse>
<p>yatta</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">x</choice>
<choice correct="false">y</choice>
<choice correct="true">z</choice>
</choicegroup>
</multiplechoiceresponse>
<p>testa</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">i</choice>
<choice correct="false">ii</choice>
<choice correct="true">iii</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>When the student is ready, the explanation appears.</p>
</div>
</solution>
expect(data).toEqual("""
<problem>
<p>bleh</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="true">a</choice>
<choice correct="false">b</choice>
<choice correct="false">c</choice>
</choicegroup>
</multiplechoiceresponse>
<p>yatta</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">x</choice>
<choice correct="false">y</choice>
<choice correct="true">z</choice>
</choicegroup>
</multiplechoiceresponse>
<p>testa</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">i</choice>
<choice correct="false">ii</choice>
<choice correct="true">iii</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>When the student is ready, the explanation appears.</p>
</div>
</solution>
</problem>""")
it 'converts OptionResponse to xml', ->
@@ -367,25 +343,20 @@ describe 'MarkdownEditingDescriptor', ->
Multiple Choice also allows students to select from a variety of pre-written responses, although the format makes it easier for students to read very long response options. Optionresponse also differs slightly because students are more likely to think of an answer and then search for it rather than relying purely on recognition to answer the question.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>OptionResponse gives a limited set of options for students to respond with, and presents those options in a format that encourages them to search for a specific answer rather than being immediately presented with options from which to recognize the correct answer.</p>
<p>The answer options and the identification of the correct answer is defined in the <b>optioninput</b> tag.</p>
<p>Translation between Option Response and __________ is extremely straightforward:</p>
<optionresponse>
<optioninput options="('Multiple Choice','String Response','Numerical Response','External Response','Image Response')" correct="Multiple Choice"></optioninput>
</optionresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Multiple Choice also allows students to select from a variety of pre-written responses, although the format makes it easier for students to read very long response options. Optionresponse also differs slightly because students are more likely to think of an answer and then search for it rather than relying purely on recognition to answer the question.</p>
</div>
</solution>
expect(data).toXMLEqual("""
<problem>
<optionresponse>
<p>OptionResponse gives a limited set of options for students to respond with, and presents those options in a format that encourages them to search for a specific answer rather than being immediately presented with options from which to recognize the correct answer.</p>
<p>The answer options and the identification of the correct answer is defined in the <b>optioninput</b> tag.</p>
<p>Translation between Option Response and __________ is extremely straightforward:</p>
<optioninput options="('Multiple Choice','String Response','Numerical Response','External Response','Image Response')" correct="Multiple Choice"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Multiple Choice also allows students to select from a variety of pre-written responses, although the format makes it easier for students to read very long response options. Optionresponse also differs slightly because students are more likely to think of an answer and then search for it rather than relying purely on recognition to answer the question.</p>
</div>
</solution>
</optionresponse>
</problem>""")
it 'converts StringResponse to xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""A string response problem accepts a line of text input from the student, and evaluates the input for correctness based on an expected answer within each input box.
@@ -399,24 +370,20 @@ describe 'MarkdownEditingDescriptor', ->
Lansing is the capital of Michigan, although it is not Michgan's largest city, or even the seat of the county in which it resides.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>A string response problem accepts a line of text input from the student, and evaluates the input for correctness based on an expected answer within each input box.</p>
<p>The answer is correct if it matches every character of the expected answer. This can be a problem with international spelling, dates, or anything where the format of the answer is not clear.</p>
<p>Which US state has Lansing as its capital?</p>
<stringresponse answer="Michigan" type="ci" >
<textline size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Lansing is the capital of Michigan, although it is not Michgan's largest city, or even the seat of the county in which it resides.</p>
</div>
</solution>
expect(data).toXMLEqual("""
<problem>
<stringresponse answer="Michigan" type="ci">
<p>A string response problem accepts a line of text input from the student, and evaluates the input for correctness based on an expected answer within each input box.</p>
<p>The answer is correct if it matches every character of the expected answer. This can be a problem with international spelling, dates, or anything where the format of the answer is not clear.</p>
<p>Which US state has Lansing as its capital?</p>
<textline size="20"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Lansing is the capital of Michigan, although it is not Michgan's largest city, or even the seat of the county in which it resides.</p>
</div>
</solution>
</stringresponse>
</problem>""")
it 'converts StringResponse with regular expression to xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""Who lead the civil right movement in the United States of America?
@@ -426,20 +393,18 @@ describe 'MarkdownEditingDescriptor', ->
Test Explanation.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>Who lead the civil right movement in the United States of America?</p>
<stringresponse answer="\w*\.?\s*Luther King\s*.*" type="ci regexp" >
<textline size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
expect(data).toXMLEqual("""
<problem>
<stringresponse answer="w*.?s*Luther Kings*.*" type="ci regexp">
<p>Who lead the civil right movement in the United States of America?</p>
<textline size="20"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
</stringresponse>
</problem>""")
it 'converts StringResponse with multiple answers to xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""Who lead the civil right movement in the United States of America?
@@ -452,23 +417,21 @@ describe 'MarkdownEditingDescriptor', ->
Test Explanation.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>Who lead the civil right movement in the United States of America?</p>
<stringresponse answer="Dr. Martin Luther King Jr." type="ci" >
<additional_answer answer="Doctor Martin Luther King Junior"></additional_answer>
<additional_answer answer="Martin Luther King"></additional_answer>
<additional_answer answer="Martin Luther King Junior"></additional_answer>
<textline size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
expect(data).toXMLEqual("""
<problem>
<stringresponse answer="Dr. Martin Luther King Jr." type="ci">
<p>Who lead the civil right movement in the United States of America?</p>
<additional_answer answer="Doctor Martin Luther King Junior"/>
<additional_answer answer="Martin Luther King"/>
<additional_answer answer="Martin Luther King Junior"/>
<textline size="20"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
</stringresponse>
</problem>""")
it 'converts StringResponse with multiple answers and regular expressions to xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""Write a number from 1 to 4.
@@ -481,23 +444,21 @@ describe 'MarkdownEditingDescriptor', ->
Test Explanation.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>Write a number from 1 to 4.</p>
<stringresponse answer="^One$" type="ci regexp" >
<additional_answer answer="two"></additional_answer>
<additional_answer answer="^thre+"></additional_answer>
<additional_answer answer="^4|Four$"></additional_answer>
<textline size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
expect(data).toXMLEqual("""
<problem>
<stringresponse answer="^One$" type="ci regexp">
<p>Write a number from 1 to 4.</p>
<additional_answer answer="two"/>
<additional_answer answer="^thre+"/>
<additional_answer answer="^4|Four$"/>
<textline size="20"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
</stringresponse>
</problem>""")
# test labels
it 'converts markdown labels to label attributes', ->
@@ -508,21 +469,19 @@ describe 'MarkdownEditingDescriptor', ->
Test Explanation.
[Explanation]
""")
expect(data).toEqual("""<problem>
<p>Who lead the civil right movement in the United States of America?</p>
<stringresponse answer="w*.?s*Luther Kings*.*" type="ci regexp" >
<textline label="Who lead the civil right movement in the United States of America?" size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
</problem>""")
expect(data).toXMLEqual("""
<problem>
<stringresponse answer="w*.?s*Luther Kings*.*" type="ci regexp">
<label>Who lead the civil right movement in the United States of America?</label>
<textline size="20"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Test Explanation.</p>
</div>
</solution>
</stringresponse>
</problem>""")
it 'handles multiple questions with labels', ->
data = MarkdownEditingDescriptor.markdownToXml("""
France is a country in Europe.
@@ -538,28 +497,27 @@ describe 'MarkdownEditingDescriptor', ->
(x) Berlin
( ) Donut
""")
expect(data).toEqual("""<problem>
<p>France is a country in Europe.</p>
expect(data).toXMLEqual("""
<problem>
<p>France is a country in Europe.</p>
<p>What is the capital of France?</p>
<stringresponse answer="Paris" type="ci" >
<textline label="What is the capital of France?" size="20"/>
</stringresponse>
<label>What is the capital of France?</label>
<stringresponse answer="Paris" type="ci" >
<textline size="20"/>
</stringresponse>
<p>Germany is a country in Europe, too.</p>
<p>Germany is a country in Europe, too.</p>
<p>What is the capital of Germany?</p>
<multiplechoiceresponse>
<choicegroup label="What is the capital of Germany?" type="MultipleChoice">
<choice correct="false">Bonn</choice>
<choice correct="false">Hamburg</choice>
<choice correct="true">Berlin</choice>
<choice correct="false">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>""")
<label>What is the capital of Germany?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Bonn</choice>
<choice correct="false">Hamburg</choice>
<choice correct="true">Berlin</choice>
<choice correct="false">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>""")
it 'tests multiple questions with only one label', ->
data = MarkdownEditingDescriptor.markdownToXml("""
France is a country in Europe.
@@ -575,88 +533,43 @@ describe 'MarkdownEditingDescriptor', ->
(x) Berlin
( ) Donut
""")
expect(data).toEqual("""<problem>
<p>France is a country in Europe.</p>
expect(data).toXMLEqual("""
<problem>
<p>France is a country in Europe.</p>
<p>What is the capital of France?</p>
<stringresponse answer="Paris" type="ci" >
<textline label="What is the capital of France?" size="20"/>
</stringresponse>
<label>What is the capital of France?</label>
<stringresponse answer="Paris" type="ci" >
<textline size="20"/>
</stringresponse>
<p>Germany is a country in Europe, too.</p>
<p>Germany is a country in Europe, too.</p>
<p>What is the capital of Germany?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Bonn</choice>
<choice correct="false">Hamburg</choice>
<choice correct="true">Berlin</choice>
<choice correct="false">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
<p>What is the capital of Germany?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Bonn</choice>
<choice correct="false">Hamburg</choice>
<choice correct="true">Berlin</choice>
<choice correct="false">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>""")
</problem>""")
it 'tests malformed labels', ->
data = MarkdownEditingDescriptor.markdownToXml("""
France is a country in Europe.
>>What is the capital of France?<
= Paris
blah>>What is the capital of <<Germany?<<
( ) Bonn
( ) Hamburg
(x) Berlin
( ) Donut
""")
expect(data).toEqual("""<problem>
<p>France is a country in Europe.</p>
<p>>>What is the capital of France?<</p>
<stringresponse answer="Paris" type="ci" >
<textline size="20"/>
</stringresponse>
<p>blahWhat is the capital of Germany?</p>
<multiplechoiceresponse>
<choicegroup label="What is the capital of &lt;&lt;Germany?" type="MultipleChoice">
<choice correct="false">Bonn</choice>
<choice correct="false">Hamburg</choice>
<choice correct="true">Berlin</choice>
<choice correct="false">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>""")
it 'adds labels to formulae', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>Enter the numerical value of Pi:<<
= 3.14159 +- .02
""")
expect(data).toEqual("""<problem>
<p>Enter the numerical value of Pi:</p>
<numericalresponse answer="3.14159">
<responseparam type="tolerance" default=".02" />
<formulaequationinput label="Enter the numerical value of Pi:" />
</numericalresponse>
expect(data).toXMLEqual("""<problem>
<numericalresponse answer="3.14159">
<label>Enter the numerical value of Pi:</label>
<responseparam type="tolerance" default=".02"/>
<formulaequationinput/>
</numericalresponse>
</problem>""")
it 'escapes entities in labels', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>What is the "capital" of France & the 'best' > place < to live"?<<
= Paris
""")
expect(data).toEqual("""<problem>
<p>What is the "capital" of France & the 'best' > place < to live"?</p>
<stringresponse answer="Paris" type="ci" >
<textline label="What is the &quot;capital&quot; of France &amp; the &apos;best&apos; &gt; place &lt; to live&quot;?" size="20"/>
</stringresponse>
</problem>""")
</problem>""")
# test oddities
it 'converts headers and oddities to xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""Not a header
@@ -708,10 +621,10 @@ describe 'MarkdownEditingDescriptor', ->
Code should be nicely monospaced.
[/code]
""")
expect(data).toEqual("""<problem>
expect(data).toEqual("""
<problem>
<p>Not a header</p>
<h3 class="hd hd-2 problem-header">A header</h3>
<p>Multiple choice w/ parentheticals</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
@@ -721,7 +634,6 @@ describe 'MarkdownEditingDescriptor', ->
<choice correct="false">no space b4 close paren</choice>
</choicegroup>
</multiplechoiceresponse>
<p>Choice checks</p>
<choiceresponse>
<checkboxgroup>
@@ -732,52 +644,283 @@ describe 'MarkdownEditingDescriptor', ->
<choice correct="false">no space</choice>
</checkboxgroup>
</choiceresponse>
<p>Option with multiple correct ones</p>
<optionresponse>
<optioninput options="('one option','correct one','should not be correct')" correct="correct one"></optioninput>
</optionresponse>
<p>Option with embedded parens</p>
<optionresponse>
<optioninput options="('My (heart)','another','correct')" correct="correct"></optioninput>
</optionresponse>
<p>What happens w/ empty correct options?</p>
<optionresponse>
<optioninput options="('')" correct=""></optioninput>
</optionresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>see</p>
</div>
<div class="detailed-solution">
<p>Explanation</p>
<p>see</p>
</div>
</solution>
<p>[explanation]</p>
<p>orphaned start</p>
<p>No p tags in the below</p>
<script type='javascript'>
var two = 2;
console.log(two * 2);
</script>
var two = 2;
console.log(two * 2);
</script>
<p>But in this there should be</p>
<div>
<p>Great ideas require offsetting.</p>
<p>bad tests require drivel</p>
<p>Great ideas require offsetting.</p>
<p>bad tests require drivel</p>
</div>
<pre>
<code>Code should be nicely monospaced.
</code>
</pre>
</problem>""")
<pre><code>
Code should be nicely monospaced.
</code></pre>
</problem>""")
# failure tests
it 'can separate responsetypes based on ---', ->
data = MarkdownEditingDescriptor.markdownToXml("""
Multiple choice problems allow learners to select only one option. Learners can see all the options along with the problem text.
>>Which of the following countries has the largest population?<<
( ) Brazil {{ timely feedback -- explain why an almost correct answer is wrong }}
( ) Germany
(x) Indonesia
( ) Russia
[explanation]
According to September 2014 estimates:
The population of Indonesia is approximately 250 million.
The population of Brazil is approximately 200 million.
The population of Russia is approximately 146 million.
The population of Germany is approximately 81 million.
[explanation]
---
Checkbox problems allow learners to select multiple options. Learners can see all the options along with the problem text.
>>The following languages are in the Indo-European family:<<
[x] Urdu
[ ] Finnish
[x] Marathi
[x] French
[ ] Hungarian
Note: Make sure you select all of the correct options—there may be more than one!
[explanation]
Urdu, Marathi, and French are all Indo-European languages, while Finnish and Hungarian are in the Uralic family.
[explanation]
""")
expect(data).toXMLEqual("""
<problem>
<multiplechoiceresponse>
<p>Multiple choice problems allow learners to select only one option. Learners can see all the options along with the problem text.</p>
<label>Which of the following countries has the largest population?</label>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint>
</choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>According to September 2014 estimates:</p>
<p>The population of Indonesia is approximately 250 million.</p>
<p>The population of Brazil is approximately 200 million.</p>
<p>The population of Russia is approximately 146 million.</p>
<p>The population of Germany is approximately 81 million.</p>
</div>
</solution>
</multiplechoiceresponse>
<choiceresponse>
<p>Checkbox problems allow learners to select multiple options. Learners can see all the options along with the problem text.</p>
<label>The following languages are in the Indo-European family:</label>
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
<choice correct="true">Marathi</choice>
<choice correct="true">French</choice>
<choice correct="false">Hungarian</choice>
</checkboxgroup>
<p>Note: Make sure you select all of the correct options—there may be more than one!</p>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Urdu, Marathi, and French are all Indo-European languages, while Finnish and Hungarian are in the Uralic family.</p>
</div>
</solution>
</choiceresponse>
</problem>
""")
it 'can separate other things based on ---', ->
data = MarkdownEditingDescriptor.markdownToXml("""
Multiple choice problems allow learners to select only one option. Learners can see all the options along with the problem text.
---
>>Which of the following countries has the largest population?<<
( ) Brazil {{ timely feedback -- explain why an almost correct answer is wrong }}
( ) Germany
(x) Indonesia
( ) Russia
[explanation]
According to September 2014 estimates:
The population of Indonesia is approximately 250 million.
The population of Brazil is approximately 200 million.
The population of Russia is approximately 146 million.
The population of Germany is approximately 81 million.
[explanation]
""")
expect(data).toXMLEqual("""
<problem>
<p>Multiple choice problems allow learners to select only one option. Learners can see all the options along with the problem text.</p>
<multiplechoiceresponse>
<label>Which of the following countries has the largest population?</label>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint>
</choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>According to September 2014 estimates:</p>
<p>The population of Indonesia is approximately 250 million.</p>
<p>The population of Brazil is approximately 200 million.</p>
<p>The population of Russia is approximately 146 million.</p>
<p>The population of Germany is approximately 81 million.</p>
</div>
</solution>
</multiplechoiceresponse>
</problem>
""")
it 'can do separation if spaces are present around ---', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>The following languages are in the Indo-European family:||There are three correct choices.<<
[x] Urdu
[ ] Finnish
[x] Marathi
[x] French
[ ] Hungarian
---
>>Which of the following countries has the largest population?||You have only choice.<<
( ) Brazil {{ timely feedback -- explain why an almost correct answer is wrong }}
( ) Germany
(x) Indonesia
( ) Russia
""")
expect(data).toXMLEqual("""
<problem>
<choiceresponse>
<label>The following languages are in the Indo-European family:</label>
<description>There are three correct choices.</description>
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
<choice correct="true">Marathi</choice>
<choice correct="true">French</choice>
<choice correct="false">Hungarian</choice>
</checkboxgroup>
</choiceresponse>
<multiplechoiceresponse>
<label>Which of the following countries has the largest population?</label>
<description>You have only choice.</description>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil
<choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint>
</choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
it 'can extract question description', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>The following languages are in the Indo-European family:||Choose wisely.<<
[x] Urdu
[ ] Finnish
[x] Marathi
[x] French
[ ] Hungarian
""")
expect(data).toXMLEqual("""
<problem>
<choiceresponse>
<label>The following languages are in the Indo-European family:</label>
<description>Choose wisely.</description>
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
<choice correct="true">Marathi</choice>
<choice correct="true">French</choice>
<choice correct="false">Hungarian</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
it 'can handle question and description spanned across multiple lines', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>The following languages
are in the
Indo-European family:
||
first second
third
<<
[x] Urdu
[ ] Finnish
[x] Marathi
""")
expect(data).toXMLEqual("""
<problem>
<choiceresponse>
<label>The following languages are in the Indo-European family:</label>
<description>first second third</description>
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
<choice correct="true">Marathi</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
it 'will not add empty description', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>The following languages are in the Indo-European family:||<<
[x] Urdu
[ ] Finnish
""")
expect(data).toXMLEqual("""
<problem>
<choiceresponse>
<label>The following languages are in the Indo-European family:</label>
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")

View File

@@ -14,11 +14,11 @@ describe 'Markdown to xml extended hint dropdown', ->
Clowns have funny _________ to make people laugh.
[[
dogs {{ NOPE::Not dogs, not cats, not toads }}
(FACES) {{ With lots of makeup, doncha know?}}
money {{ Clowns don't have any money, of course }}
donkeys {{don't be an ass.}}
-no hint-
@@ -27,26 +27,38 @@ describe 'Markdown to xml extended hint dropdown', ->
""")
expect(data).toEqual("""
<problem>
<p>Translation between Dropdown and ________ is straightforward.</p>
<optionresponse>
<optioninput>
<option correct="True">Multiple Choice <optionhint label="Good Job">Yes, multiple choice is the right answer.</optionhint></option>
<option correct="False">Text Input <optionhint>No, text input problems don't present options.</optionhint></option>
<option correct="False">Numerical Input <optionhint>No, numerical input problems don't present options.</optionhint></option>
</optioninput>
</optionresponse>
<p>Clowns have funny _________ to make people laugh.</p>
<optionresponse>
<optioninput>
<option correct="False">dogs <optionhint label="NOPE">Not dogs, not cats, not toads</optionhint></option>
<option correct="True">FACES <optionhint>With lots of makeup, doncha know?</optionhint></option>
<option correct="False">money <optionhint>Clowns don't have any money, of course</optionhint></option>
<option correct="False">donkeys <optionhint>don't be an ass.</optionhint></option>
<option correct="False">-no hint-</option>
</optioninput>
</optionresponse>
<p>Translation between Dropdown and ________ is straightforward.</p>
<optionresponse>
<optioninput>
<option correct="True">Multiple Choice
<optionhint label="Good Job">Yes, multiple choice is the right answer.</optionhint>
</option>
<option correct="False">Text Input
<optionhint>No, text input problems don't present options.</optionhint>
</option>
<option correct="False">Numerical Input
<optionhint>No, numerical input problems don't present options.</optionhint>
</option>
</optioninput>
</optionresponse>
<p>Clowns have funny _________ to make people laugh.</p>
<optionresponse>
<optioninput>
<option correct="False">dogs
<optionhint label="NOPE">Not dogs, not cats, not toads</optionhint>
</option>
<option correct="True">FACES
<optionhint>With lots of makeup, doncha know?</optionhint>
</option>
<option correct="False">money
<optionhint>Clowns don't have any money, of course</optionhint>
</option>
<option correct="False">donkeys
<optionhint>don't be an ass.</optionhint>
</option>
<option correct="False">-no hint-</option>
</optioninput>
</optionresponse>
</problem>
""")
@@ -64,14 +76,17 @@ describe 'Markdown to xml extended hint dropdown', ->
|| 1) one ||
|| 2) two ||
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Translation between Dropdown and ________ is straightforward.</p>
<optionresponse>
<optioninput>
<option correct="True">Right <optionhint label="Good Job">yes</optionhint></option>
<option correct="False">Wrong 1 <optionhint>no</optionhint></option>
<option correct="False">Wrong 2 <optionhint label="Label">no</optionhint></option>
<p>Translation between Dropdown and ________ is straightforward.</p>
<optioninput>
<option correct="True">Right <optionhint label="Good Job">yes</optionhint>
</option>
<option correct="False">Wrong 1 <optionhint>no</optionhint>
</option>
<option correct="False">Wrong 2 <optionhint label="Label">no</optionhint>
</option>
</optioninput>
</optionresponse>
@@ -91,11 +106,11 @@ describe 'Markdown to xml extended hint dropdown', ->
|| 0) zero ||
|| 1) one ||
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>A Question ________ is answered.</p>
<optionresponse>
<optioninput options="('Right','Wrong 1','Wrong 2')" correct="Right"></optioninput>
<p>A Question ________ is answered.</p>
<optioninput options="('Right','Wrong 1','Wrong 2')" correct="Right"/>
</optionresponse>
<demandhint>
@@ -112,19 +127,20 @@ describe 'Markdown to xml extended hint dropdown', ->
bb
cc {{ hint2 }} ]]
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q1</p>
<optionresponse>
<optioninput label="q1">
<option correct="True">aa <optionhint>hint1</optionhint></option>
<label>q1</label>
<optioninput>
<option correct="True">aa <optionhint>hint1</optionhint>
</option>
<option correct="False">bb</option>
<option correct="False">cc <optionhint>hint2</optionhint></option>
<option correct="False">cc <optionhint>hint2</optionhint>
</option>
</optioninput>
</optionresponse>
</problem>
""")
@@ -132,28 +148,29 @@ describe 'Markdown to xml extended hint dropdown', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>q1<<
[[
aa {{ hint1 }}
bb {{ hint2 }}
(cc)
]]
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q1</p>
<optionresponse>
<optioninput label="q1">
<option correct="False">aa <optionhint>hint1</optionhint></option>
<option correct="False">bb <optionhint>hint2</optionhint></option>
<label>q1</label>
<optioninput>
<option correct="False">aa <optionhint>hint1</optionhint>
</option>
<option correct="False">bb <optionhint>hint2</optionhint>
</option>
<option correct="True">cc</option>
</optioninput>
</optionresponse>
</problem>
""")
@@ -161,20 +178,20 @@ describe 'Markdown to xml extended hint checkbox', ->
it 'produces xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>Select all the fruits from the list<<
[x] Apple {{ selected: You're right that apple is a fruit. }, {unselected: Remember that apple is also a fruit.}}
[ ] Mushroom {{U: You're right that mushrooms aren't fruit}, { selected: Mushroom is a fungus, not a fruit.}}
[x] Grape {{ selected: You're right that grape is a fruit }, {unselected: Remember that grape is also a fruit.}}
[ ] Mustang
[ ] Camero {{S:I don't know what a Camero is but it isn't a fruit.},{U:What is a camero anyway?}}
{{ ((A*B)) You're right that apple is a fruit, but there's one you're missing. Also, mushroom is not a fruit.}}
{{ ((B*C)) You're right that grape is a fruit, but there's one you're missing. Also, mushroom is not a fruit. }}
>>Select all the vegetables from the list<<
[ ] Banana {{ selected: No, sorry, a banana is a fruit. }, {unselected: poor banana.}}
[ ] Ice Cream
[ ] Mushroom {{U: You're right that mushrooms aren't vegetables.}, { selected: Mushroom is a fungus, not a vegetable.}}
@@ -184,131 +201,136 @@ describe 'Markdown to xml extended hint checkbox', ->
{{ ((A*B)) Making a banana split? }}
{{ ((B*D)) That will make a horrible dessert: a brussel sprout split? }}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Select all the fruits from the list</p>
<choiceresponse>
<checkboxgroup label="Select all the fruits from the list">
<choice correct="true">Apple
<choicehint selected="true">You're right that apple is a fruit.</choicehint>
<choicehint selected="false">Remember that apple is also a fruit.</choicehint></choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a fruit.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't fruit</choicehint></choice>
<choice correct="true">Grape
<choicehint selected="true">You're right that grape is a fruit</choicehint>
<choicehint selected="false">Remember that grape is also a fruit.</choicehint></choice>
<choice correct="false">Mustang</choice>
<choice correct="false">Camero
<choicehint selected="true">I don't know what a Camero is but it isn't a fruit.</choicehint>
<choicehint selected="false">What is a camero anyway?</choicehint></choice>
<compoundhint value="A*B">You're right that apple is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
<compoundhint value="B*C">You're right that grape is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
</checkboxgroup>
</choiceresponse>
<p>Select all the vegetables from the list</p>
<choiceresponse>
<checkboxgroup label="Select all the vegetables from the list">
<choice correct="false">Banana
<choicehint selected="true">No, sorry, a banana is a fruit.</choicehint>
<choicehint selected="false">poor banana.</choicehint></choice>
<choice correct="false">Ice Cream</choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a vegetable.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't vegetables.</choicehint></choice>
<choice correct="true">Brussel Sprout
<choicehint selected="true">Brussel sprouts are vegetables.</choicehint>
<choicehint selected="false">Brussel sprout is the only vegetable in this list.</choicehint></choice>
<compoundhint value="A*B">Making a banana split?</compoundhint>
<compoundhint value="B*D">That will make a horrible dessert: a brussel sprout split?</compoundhint>
</checkboxgroup>
</choiceresponse>
<label>Select all the fruits from the list</label>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Apple
<choicehint selected="true">You're right that apple is a fruit.</choicehint>
<choicehint selected="false">Remember that apple is also a fruit.</choicehint>
</choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a fruit.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't fruit</choicehint>
</choice>
<choice correct="true">Grape
<choicehint selected="true">You're right that grape is a fruit</choicehint>
<choicehint selected="false">Remember that grape is also a fruit.</choicehint>
</choice>
<choice correct="false">Mustang</choice>
<choice correct="false">Camero
<choicehint selected="true">I don't know what a Camero is but it isn't a fruit.</choicehint>
<choicehint selected="false">What is a camero anyway?</choicehint>
</choice>
<compoundhint value="A*B">You're right that apple is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
<compoundhint value="B*C">You're right that grape is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
</checkboxgroup>
</choiceresponse>
<label>Select all the vegetables from the list</label>
<choiceresponse>
<checkboxgroup>
<choice correct="false">Banana
<choicehint selected="true">No, sorry, a banana is a fruit.</choicehint>
<choicehint selected="false">poor banana.</choicehint>
</choice>
<choice correct="false">Ice Cream</choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a vegetable.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't vegetables.</choicehint>
</choice>
<choice correct="true">Brussel Sprout
<choicehint selected="true">Brussel sprouts are vegetables.</choicehint>
<choicehint selected="false">Brussel sprout is the only vegetable in this list.</choicehint>
</choice>
<compoundhint value="A*B">Making a banana split?</compoundhint>
<compoundhint value="B*D">That will make a horrible dessert: a brussel sprout split?</compoundhint>
</checkboxgroup>
</choiceresponse>
</problem>
""")
it 'produces xml also with demand hints', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>Select all the fruits from the list<<
[x] Apple {{ selected: You're right that apple is a fruit. }, {unselected: Remember that apple is also a fruit.}}
[ ] Mushroom {{U: You're right that mushrooms aren't fruit}, { selected: Mushroom is a fungus, not a fruit.}}
[x] Grape {{ selected: You're right that grape is a fruit }, {unselected: Remember that grape is also a fruit.}}
[ ] Mustang
[ ] Camero {{S:I don't know what a Camero is but it isn't a fruit.},{U:What is a camero anyway?}}
{{ ((A*B)) You're right that apple is a fruit, but there's one you're missing. Also, mushroom is not a fruit.}}
{{ ((B*C)) You're right that grape is a fruit, but there's one you're missing. Also, mushroom is not a fruit.}}
[x] Apple {{ selected: You're right that apple is a fruit. }, {unselected: Remember that apple is also a fruit.}}
[ ] Mushroom {{U: You're right that mushrooms aren't fruit}, { selected: Mushroom is a fungus, not a fruit.}}
[x] Grape {{ selected: You're right that grape is a fruit }, {unselected: Remember that grape is also a fruit.}}
[ ] Mustang
[ ] Camero {{S:I don't know what a Camero is but it isn't a fruit.},{U:What is a camero anyway?}}
{{ ((A*B)) You're right that apple is a fruit, but there's one you're missing. Also, mushroom is not a fruit.}}
{{ ((B*C)) You're right that grape is a fruit, but there's one you're missing. Also, mushroom is not a fruit.}}
>>Select all the vegetables from the list<<
[ ] Banana {{ selected: No, sorry, a banana is a fruit. }, {unselected: poor banana.}}
[ ] Ice Cream
[ ] Mushroom {{U: You're right that mushrooms aren't vegatbles}, { selected: Mushroom is a fungus, not a vegetable.}}
[x] Brussel Sprout {{S: Brussel sprouts are vegetables.}, {u: Brussel sprout is the only vegetable in this list.}}
{{ ((A*B)) Making a banana split? }}
{{ ((B*D)) That will make a horrible dessert: a brussel sprout split? }}
[ ] Banana {{ selected: No, sorry, a banana is a fruit. }, {unselected: poor banana.}}
[ ] Ice Cream
[ ] Mushroom {{U: You're right that mushrooms aren't vegatbles}, { selected: Mushroom is a fungus, not a vegetable.}}
[x] Brussel Sprout {{S: Brussel sprouts are vegetables.}, {u: Brussel sprout is the only vegetable in this list.}}
{{ ((A*B)) Making a banana split? }}
{{ ((B*D)) That will make a horrible dessert: a brussel sprout split? }}
|| Hint one.||
|| Hint two. ||
|| Hint three. ||
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Select all the fruits from the list</p>
<choiceresponse>
<checkboxgroup label="Select all the fruits from the list">
<choice correct="true">Apple
<choicehint selected="true">You're right that apple is a fruit.</choicehint>
<choicehint selected="false">Remember that apple is also a fruit.</choicehint></choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a fruit.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't fruit</choicehint></choice>
<choice correct="true">Grape
<choicehint selected="true">You're right that grape is a fruit</choicehint>
<choicehint selected="false">Remember that grape is also a fruit.</choicehint></choice>
<choice correct="false">Mustang</choice>
<choice correct="false">Camero
<choicehint selected="true">I don't know what a Camero is but it isn't a fruit.</choicehint>
<choicehint selected="false">What is a camero anyway?</choicehint></choice>
<compoundhint value="A*B">You're right that apple is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
<compoundhint value="B*C">You're right that grape is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
</checkboxgroup>
</choiceresponse>
<label>Select all the fruits from the list</label>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Apple
<choicehint selected="true">You're right that apple is a fruit.</choicehint>
<choicehint selected="false">Remember that apple is also a fruit.</choicehint>
</choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a fruit.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't fruit</choicehint>
</choice>
<choice correct="true">Grape
<choicehint selected="true">You're right that grape is a fruit</choicehint>
<choicehint selected="false">Remember that grape is also a fruit.</choicehint>
</choice>
<choice correct="false">Mustang</choice>
<choice correct="false">Camero
<choicehint selected="true">I don't know what a Camero is but it isn't a fruit.</choicehint>
<choicehint selected="false">What is a camero anyway?</choicehint>
</choice>
<compoundhint value="A*B">You're right that apple is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
<compoundhint value="B*C">You're right that grape is a fruit, but there's one you're missing. Also, mushroom is not a fruit.</compoundhint>
</checkboxgroup>
</choiceresponse>
<p>Select all the vegetables from the list</p>
<choiceresponse>
<checkboxgroup label="Select all the vegetables from the list">
<choice correct="false">Banana
<choicehint selected="true">No, sorry, a banana is a fruit.</choicehint>
<choicehint selected="false">poor banana.</choicehint></choice>
<choice correct="false">Ice Cream</choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a vegetable.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't vegatbles</choicehint></choice>
<choice correct="true">Brussel Sprout
<choicehint selected="true">Brussel sprouts are vegetables.</choicehint>
<choicehint selected="false">Brussel sprout is the only vegetable in this list.</choicehint></choice>
<compoundhint value="A*B">Making a banana split?</compoundhint>
<compoundhint value="B*D">That will make a horrible dessert: a brussel sprout split?</compoundhint>
</checkboxgroup>
</choiceresponse>
<label>Select all the vegetables from the list</label>
<choiceresponse>
<checkboxgroup>
<choice correct="false">Banana
<choicehint selected="true">No, sorry, a banana is a fruit.</choicehint>
<choicehint selected="false">poor banana.</choicehint>
</choice>
<choice correct="false">Ice Cream</choice>
<choice correct="false">Mushroom
<choicehint selected="true">Mushroom is a fungus, not a vegetable.</choicehint>
<choicehint selected="false">You're right that mushrooms aren't vegatbles</choicehint>
</choice>
<choice correct="true">Brussel Sprout
<choicehint selected="true">Brussel sprouts are vegetables.</choicehint>
<choicehint selected="false">Brussel sprout is the only vegetable in this list.</choicehint>
</choice>
<compoundhint value="A*B">Making a banana split?</compoundhint>
<compoundhint value="B*D">That will make a horrible dessert: a brussel sprout split?</compoundhint>
</checkboxgroup>
</choiceresponse>
<demandhint>
<hint>Hint one.</hint>
<hint>Hint two.</hint>
<hint>Hint three.</hint>
</demandhint>
<demandhint>
<hint>Hint one.</hint>
<hint>Hint two.</hint>
<hint>Hint three.</hint>
</demandhint>
</problem>
""")
@@ -317,93 +339,106 @@ describe 'Markdown to xml extended hint multiple choice', ->
it 'produces xml', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>Select the fruit from the list<<
() Mushroom {{ Mushroom is a fungus, not a fruit.}}
() Potato
(x) Apple {{ OUTSTANDING::Apple is indeed a fruit.}}
() Mushroom {{ Mushroom is a fungus, not a fruit.}}
() Potato
(x) Apple {{ OUTSTANDING::Apple is indeed a fruit.}}
>>Select the vegetables from the list<<
() Mushroom {{ Mushroom is a fungus, not a vegetable.}}
(x) Potato {{ Potato is a root vegetable. }}
() Apple {{ OOPS::Apple is a fruit.}}
() Mushroom {{ Mushroom is a fungus, not a vegetable.}}
(x) Potato {{ Potato is a root vegetable. }}
() Apple {{ OOPS::Apple is a fruit.}}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Select the fruit from the list</p>
<multiplechoiceresponse>
<choicegroup label="Select the fruit from the list" type="MultipleChoice">
<choice correct="false">Mushroom <choicehint>Mushroom is a fungus, not a fruit.</choicehint></choice>
<choice correct="false">Potato</choice>
<choice correct="true">Apple <choicehint label="OUTSTANDING">Apple is indeed a fruit.</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<p>Select the vegetables from the list</p>
<multiplechoiceresponse>
<choicegroup label="Select the vegetables from the list" type="MultipleChoice">
<choice correct="false">Mushroom <choicehint>Mushroom is a fungus, not a vegetable.</choicehint></choice>
<choice correct="true">Potato <choicehint>Potato is a root vegetable.</choicehint></choice>
<choice correct="false">Apple <choicehint label="OOPS">Apple is a fruit.</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<label>Select the fruit from the list</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Mushroom
<choicehint>Mushroom is a fungus, not a fruit.</choicehint>
</choice>
<choice correct="false">Potato</choice>
<choice correct="true">Apple
<choicehint label="OUTSTANDING">Apple is indeed a fruit.</choicehint>
</choice>
</choicegroup>
</multiplechoiceresponse>
<label>Select the vegetables from the list</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Mushroom
<choicehint>Mushroom is a fungus, not a vegetable.</choicehint>
</choice>
<choice correct="true">Potato
<choicehint>Potato is a root vegetable.</choicehint>
</choice>
<choice correct="false">Apple
<choicehint label="OOPS">Apple is a fruit.</choicehint>
</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
it 'produces xml with demand hints', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>Select the fruit from the list<<
>>Select the fruit from the list<<
() Mushroom {{ Mushroom is a fungus, not a fruit.}}
() Potato
(x) Apple {{ OUTSTANDING::Apple is indeed a fruit.}}
|| 0) spaces on previous line. ||
|| 1) roses are red. ||
>>Select the vegetables from the list<<
() Mushroom {{ Mushroom is a fungus, not a fruit.}}
() Potato
(x) Apple {{ OUTSTANDING::Apple is indeed a fruit.}}
() Mushroom {{ Mushroom is a fungus, not a vegetable.}}
(x) Potato {{ Potato is a root vegetable. }}
() Apple {{ OOPS::Apple is a fruit.}}
|| 2) where are the lions? ||
|| 0) spaces on previous line. ||
|| 1) roses are red. ||
>>Select the vegetables from the list<<
() Mushroom {{ Mushroom is a fungus, not a vegetable.}}
(x) Potato {{ Potato is a root vegetable. }}
() Apple {{ OOPS::Apple is a fruit.}}
|| 2) where are the lions? ||
""")
expect(data).toEqual("""
<problem>
<p>Select the fruit from the list</p>
<multiplechoiceresponse>
<choicegroup label="Select the fruit from the list" type="MultipleChoice">
<choice correct="false">Mushroom <choicehint>Mushroom is a fungus, not a fruit.</choicehint></choice>
<choice correct="false">Potato</choice>
<choice correct="true">Apple <choicehint label="OUTSTANDING">Apple is indeed a fruit.</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
expect(data).toXMLEqual("""
<problem>
<label>Select the fruit from the list</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Mushroom
<choicehint>Mushroom is a fungus, not a fruit.</choicehint>
</choice>
<choice correct="false">Potato</choice>
<choice correct="true">Apple
<choicehint label="OUTSTANDING">Apple is indeed a fruit.</choicehint>
</choice>
</choicegroup>
</multiplechoiceresponse>
<p>Select the vegetables from the list</p>
<multiplechoiceresponse>
<choicegroup label="Select the vegetables from the list" type="MultipleChoice">
<choice correct="false">Mushroom <choicehint>Mushroom is a fungus, not a vegetable.</choicehint></choice>
<choice correct="true">Potato <choicehint>Potato is a root vegetable.</choicehint></choice>
<choice correct="false">Apple <choicehint label="OOPS">Apple is a fruit.</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<label>Select the vegetables from the list</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Mushroom
<choicehint>Mushroom is a fungus, not a vegetable.</choicehint>
</choice>
<choice correct="true">Potato
<choicehint>Potato is a root vegetable.</choicehint>
</choice>
<choice correct="false">Apple
<choicehint label="OOPS">Apple is a fruit.</choicehint>
</choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>0) spaces on previous line.</hint>
<hint>1) roses are red.</hint>
<hint>2) where are the lions?</hint>
</demandhint>
</problem>
""")
<demandhint>
<hint>0) spaces on previous line.</hint>
<hint>1) roses are red.</hint>
<hint>2) where are the lions?</hint>
</demandhint>
</problem>
""")
describe 'Markdown to xml extended hint text input', ->
@@ -412,14 +447,15 @@ describe 'Markdown to xml extended hint text input', ->
= France {{ BRAVO::Viva la France! }}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>In which country would you find the city of Paris?</p>
<stringresponse answer="France" type="ci" >
<correcthint label="BRAVO">Viva la France!</correcthint>
<textline label="In which country would you find the city of Paris?" size="20"/>
<stringresponse answer="France" type="ci">
<label>In which country would you find the city of Paris?</label>
<correcthint label="BRAVO">Viva la France!</correcthint>
<textline size="20"/>
</stringresponse>
</problem>
""")
@@ -429,15 +465,17 @@ describe 'Markdown to xml extended hint text input', ->
or= USA {{ meh::hint2 }}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Where Paris?</p>
<stringresponse answer="France" type="ci" >
<correcthint label="BRAVO">hint1</correcthint>
<additional_answer answer="USA"><correcthint label="meh">hint2</correcthint></additional_answer>
<textline label="Where Paris?" size="20"/>
<stringresponse answer="France" type="ci">
<label>Where Paris?</label>
<correcthint label="BRAVO">hint1</correcthint>
<additional_answer answer="USA"><correcthint label="meh">hint2</correcthint>
</additional_answer>
<textline size="20"/>
</stringresponse>
</problem>
""")
@@ -447,15 +485,16 @@ describe 'Markdown to xml extended hint text input', ->
not= warm {{feedback2}}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Revenge is a dish best served</p>
<stringresponse answer="cold" type="ci" >
<correcthint>khaaaaaan!</correcthint>
<stringresponse answer="cold" type="ci">
<label>Revenge is a dish best served</label>
<correcthint>khaaaaaan!</correcthint>
<stringequalhint answer="warm">feedback2</stringequalhint>
<textline label="Revenge is a dish best served" size="20"/>
<textline size="20"/>
</stringresponse>
</problem>
""")
@@ -464,14 +503,15 @@ describe 'Markdown to xml extended hint text input', ->
s= 2 {{feedback1}}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q</p>
<stringresponse answer="2" type="ci" >
<correcthint>feedback1</correcthint>
<textline label="q" size="20"/>
<stringresponse answer="2" type="ci">
<label>q</label>
<correcthint>feedback1</correcthint>
<textline size="20"/>
</stringresponse>
</problem>
""")
@@ -483,16 +523,18 @@ describe 'Markdown to xml extended hint text input', ->
or= ccc
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q</p>
<stringresponse answer="aaa" type="ci" >
<additional_answer answer="bbb"><correcthint>feedback1</correcthint></additional_answer>
<stringresponse answer="aaa" type="ci">
<label>q</label>
<additional_answer answer="bbb"><correcthint>feedback1</correcthint>
</additional_answer>
<stringequalhint answer="no">feedback2</stringequalhint>
<additional_answer answer="ccc"></additional_answer>
<textline label="q" size="20"/>
<additional_answer answer="ccc"/>
<textline size="20"/>
</stringresponse>
</problem>
""")
@@ -503,37 +545,38 @@ describe 'Markdown to xml extended hint text input', ->
or= ccc
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q</p>
<stringresponse answer="2" type="ci" >
<correcthint>feedback1</correcthint>
<additional_answer answer="bbb"><correcthint>feedback2</correcthint></additional_answer>
<additional_answer answer="ccc"></additional_answer>
<textline label="q" size="20"/>
<stringresponse answer="2" type="ci">
<label>q</label>
<correcthint>feedback1</correcthint>
<additional_answer answer="bbb"><correcthint>feedback2</correcthint>
</additional_answer>
<additional_answer answer="ccc"/>
<textline size="20"/>
</stringresponse>
</problem>
""")
it 'produces xml with each = making a new question', ->
data = MarkdownEditingDescriptor.markdownToXml(""">>q<<
= aaa
or= bbb
s= ccc
data = MarkdownEditingDescriptor.markdownToXml("""
>>q<<
= aaa
or= bbb
s= ccc
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q</p>
<stringresponse answer="aaa" type="ci" >
<additional_answer answer="bbb"></additional_answer>
<textline label="q" size="20"/>
</stringresponse>
<stringresponse answer="ccc" type="ci" >
<textline size="20"/>
</stringresponse>
<label>q</label>
<stringresponse answer="aaa" type="ci">
<additional_answer answer="bbb"></additional_answer>
<textline size="20"/>
</stringresponse>
<stringresponse answer="ccc" type="ci">
<textline size="20"/>
</stringresponse>
</problem>
""")
@@ -549,20 +592,18 @@ describe 'Markdown to xml extended hint text input', ->
paragraph 2
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>paragraph</p>
<p>q</p>
<stringresponse answer="aaa" type="ci" >
<additional_answer answer="bbb"></additional_answer>
<textline label="q" size="20"/>
</stringresponse>
<stringresponse answer="ccc" type="ci" >
<textline size="20"/>
</stringresponse>
<p>paragraph 2</p>
<p>paragraph</p>
<label>q</label>
<stringresponse answer="aaa" type="ci">
<additional_answer answer="bbb"></additional_answer>
<textline size="20"/>
</stringresponse>
<stringresponse answer="ccc" type="ci">
<textline size="20"/>
</stringresponse>
<p>paragraph 2</p>
</problem>
""")
@@ -574,35 +615,36 @@ describe 'Markdown to xml extended hint text input', ->
paragraph 2
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>paragraph</p>
<p>q</p>
<p>or= aaa</p>
<p>paragraph 2</p>
<p>paragraph</p>
<label>q</label>
<p>or= aaa</p>
<p>paragraph 2</p>
</problem>
""")
it 'produces xml with each = with feedback making a new question', ->
data = MarkdownEditingDescriptor.markdownToXml(""">>q<<
data = MarkdownEditingDescriptor.markdownToXml("""
>>q<<
s= aaa
or= bbb {{feedback1}}
= ccc {{feedback2}}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q</p>
<stringresponse answer="aaa" type="ci" >
<additional_answer answer="bbb"><correcthint>feedback1</correcthint></additional_answer>
<textline label="q" size="20"/>
</stringresponse>
<stringresponse answer="ccc" type="ci" >
<correcthint>feedback2</correcthint>
<textline size="20"/>
</stringresponse>
<label>q</label>
<stringresponse answer="aaa" type="ci">
<additional_answer answer="bbb">
<correcthint>feedback1</correcthint>
</additional_answer>
<textline size="20"/>
</stringresponse>
<stringresponse answer="ccc" type="ci">
<correcthint>feedback2</correcthint>
<textline size="20"/>
</stringresponse>
</problem>
""")
@@ -614,12 +656,12 @@ describe 'Markdown to xml extended hint text input', ->
|| Paris is the capital of one of those countries. ||
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Where Paris?</p>
<stringresponse answer="France" type="ci" >
<correcthint label="BRAVO">hint1</correcthint>
<textline label="Where Paris?" size="20"/>
<stringresponse answer="France" type="ci">
<label>Where Paris?</label>
<correcthint label="BRAVO">hint1</correcthint>
<textline size="20"/>
</stringresponse>
<demandhint>
@@ -640,31 +682,28 @@ describe 'Markdown to xml extended hint numeric input', ->
>>Enter the number of fingers on a human hand<<
= 5
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Enter the numerical value of Pi:</p>
<numericalresponse answer="3.14159">
<responseparam type="tolerance" default=".02" />
<formulaequationinput label="Enter the numerical value of Pi:" />
<correcthint>Pie for everyone!</correcthint>
</numericalresponse>
<p>Enter the approximate value of 502*9:</p>
<numericalresponse answer="4518">
<responseparam type="tolerance" default="15%" />
<formulaequationinput label="Enter the approximate value of 502*9:" />
<correcthint label="PIE">No pie for you!</correcthint>
</numericalresponse>
<p>Enter the number of fingers on a human hand</p>
<numericalresponse answer="5">
<formulaequationinput label="Enter the number of fingers on a human hand" />
</numericalresponse>
<label>Enter the numerical value of Pi:</label>
<numericalresponse answer="3.14159">
<responseparam type="tolerance" default=".02"/>
<formulaequationinput/>
<correcthint>Pie for everyone!</correcthint>
</numericalresponse>
<label>Enter the approximate value of 502*9:</label>
<numericalresponse answer="4518">
<responseparam type="tolerance" default="15%"/>
<formulaequationinput/>
<correcthint label="PIE">No pie for you!</correcthint>
</numericalresponse>
<label>Enter the number of fingers on a human hand</label>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
</problem>
""")
@@ -681,23 +720,23 @@ describe 'Markdown to xml extended hint numeric input', ->
|| hintB ||
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>text1</p>
<numericalresponse answer="1">
<formulaequationinput label="text1" />
<correcthint>hint1</correcthint>
</numericalresponse>
<p>text2</p>
<numericalresponse answer="2">
<formulaequationinput label="text2" />
<correcthint>hint2</correcthint>
</numericalresponse>
<label>text1</label>
<numericalresponse answer="1">
<formulaequationinput/>
<correcthint>hint1</correcthint>
</numericalresponse>
<label>text2</label>
<numericalresponse answer="2">
<formulaequationinput/>
<correcthint>hint2</correcthint>
</numericalresponse>
<demandhint>
<hint>hintA</hint>
<hint>hintB</hint>
</demandhint>
<demandhint>
<hint>hintA</hint>
<hint>hintB</hint>
</demandhint>
</problem>
""")
@@ -707,10 +746,10 @@ describe 'Markdown to xml extended hint with multiline hints', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>Checkboxes<<
[x] A {{
[x] A {{
selected: aaa },
{unselected:bbb}}
[ ] B {{U: c}, {
[ ] B {{U: c}, {
selected: d.}}
{{ ((A*B)) A*B hint}}
@@ -725,7 +764,7 @@ describe 'Markdown to xml extended hint with multiline hints', ->
hello
hint
}}
>>multiple choice<<
(x) AA{{hint1}}
() BB {{
@@ -733,11 +772,11 @@ describe 'Markdown to xml extended hint with multiline hints', ->
}}
( ) CC {{ hint3
}}
>>dropdown<<
[[
W1 {{
no }}
W1 {{
no }}
W2 {{
nope}}
(C1) {{ yes
@@ -749,57 +788,70 @@ describe 'Markdown to xml extended hint with multiline hints', ->
|| ccc ||
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>Checkboxes</p>
<choiceresponse>
<checkboxgroup label="Checkboxes">
<choice correct="true">A
<choicehint selected="true">aaa</choicehint>
<choicehint selected="false">bbb</choicehint></choice>
<choice correct="false">B
<choicehint selected="true">d.</choicehint>
<choicehint selected="false">c</choicehint></choice>
<compoundhint value="A*B">A*B hint</compoundhint>
</checkboxgroup>
</choiceresponse>
<label>Checkboxes</label>
<choiceresponse>
<checkboxgroup>
<choice correct="true">A
<choicehint selected="true">aaa</choicehint>
<choicehint selected="false">bbb</choicehint>
</choice>
<choice correct="false">B
<choicehint selected="true">d.</choicehint>
<choicehint selected="false">c</choicehint>
</choice>
<compoundhint value="A*B">A*B hint</compoundhint>
</checkboxgroup>
</choiceresponse>
<p>What is 1 + 1?</p>
<numericalresponse answer="2">
<formulaequationinput label="What is 1 + 1?" />
<correcthint>part one, and part two</correcthint>
</numericalresponse>
<label>What is 1 + 1?</label>
<numericalresponse answer="2">
<formulaequationinput/>
<correcthint>part one, and part two</correcthint>
</numericalresponse>
<p>hello?</p>
<stringresponse answer="hello" type="ci" >
<correcthint>hello hint</correcthint>
<textline label="hello?" size="20"/>
</stringresponse>
<label>hello?</label>
<stringresponse answer="hello" type="ci">
<correcthint>hello hint</correcthint>
<textline size="20"/>
</stringresponse>
<p>multiple choice</p>
<multiplechoiceresponse>
<choicegroup label="multiple choice" type="MultipleChoice">
<choice correct="true">AA <choicehint>hint1</choicehint></choice>
<choice correct="false">BB <choicehint>hint2</choicehint></choice>
<choice correct="false">CC <choicehint>hint3</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<label>multiple choice</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="true">AA
<choicehint>hint1</choicehint>
</choice>
<choice correct="false">BB
<choicehint>hint2</choicehint>
</choice>
<choice correct="false">CC
<choicehint>hint3</choicehint>
</choice>
</choicegroup>
</multiplechoiceresponse>
<p>dropdown</p>
<label>dropdown</label>
<optionresponse>
<optioninput>
<option correct="False">W1
<optionhint>no</optionhint>
</option>
<option correct="False">W2
<optionhint>nope</optionhint>
</option>
<option correct="True">C1
<optionhint>yes</optionhint>
</option>
</optioninput>
</optionresponse>
<optionresponse>
<optioninput label="dropdown">
<option correct="False">W1 <optionhint>no</optionhint></option>
<option correct="False">W2 <optionhint>nope</optionhint></option>
<option correct="True">C1 <optionhint>yes</optionhint></option>
</optioninput>
</optionresponse>
<demandhint>
<hint>aaa</hint>
<hint>bbb</hint>
<hint>ccc</hint>
</demandhint>
<demandhint>
<hint>aaa</hint>
<hint>bbb</hint>
<hint>ccc</hint>
</demandhint>
</problem>
""")
@@ -816,23 +868,24 @@ describe 'Markdown to xml extended hint with tricky syntax cases', ->
|| Ø ||
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>á and Ø</p>
<multiplechoiceresponse>
<choicegroup label="á and Ø" type="MultipleChoice">
<choice correct="true" <choicehint>Ø</choicehint></choice>
<choice correct="false">BB</choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>Ø</hint>
</demandhint>
<multiplechoiceresponse>
<label>á and Ø</label>
<choicegroup type="MultipleChoice">
<choice correct="true"
<choicehint>Ø</choicehint>
</choice>
<choice correct="false">BB</choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>Ø</hint>
</demandhint>
</problem>
""")
it 'produces xml with quote-type characters', ->
data = MarkdownEditingDescriptor.markdownToXml("""
>>"quotes" aren't `fun`<<
@@ -840,17 +893,19 @@ describe 'Markdown to xml extended hint with tricky syntax cases', ->
(x) "isn't" {{ "hello" }}
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>"quotes" aren't `fun`</p>
<multiplechoiceresponse>
<choicegroup label="&quot;quotes&quot; aren&apos;t `fun`" type="MultipleChoice">
<choice correct="false">"hello" <choicehint>isn't</choicehint></choice>
<choice correct="true">"isn't" <choicehint>"hello"</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<label>"quotes" aren't `fun`</label>
<choicegroup type="MultipleChoice">
<choice correct="false">"hello"
<choicehint>isn't</choicehint>
</choice>
<choice correct="true">"isn't"
<choicehint>"hello"</choicehint>
</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
@@ -862,18 +917,20 @@ describe 'Markdown to xml extended hint with tricky syntax cases', ->
(x) b
that (y)
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q1</p>
<p>this (x)</p>
<multiplechoiceresponse>
<choicegroup label="q1" type="MultipleChoice">
<choice correct="false">a <choicehint>(hint)</choicehint></choice>
<label>q1</label>
<p>this (x)</p>
<choicegroup type="MultipleChoice">
<choice correct="false">a <choicehint>(hint)</choicehint>
</choice>
<choice correct="true">b</choice>
</choicegroup>
<p>that (y)</p>
</multiplechoiceresponse>
<p>that (y)</p>
</problem>
""")
@@ -886,18 +943,19 @@ describe 'Markdown to xml extended hint with tricky syntax cases', ->
[x] b {{ this hint passes through }}
that []
""")
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q1</p>
<p>this [x]</p>
<choiceresponse>
<checkboxgroup label="q1">
<label>q1</label>
<p>this [x]</p>
<checkboxgroup>
<choice correct="false">a [square]</choice>
<choice correct="true">b {{ this hint passes through }}</choice>
</checkboxgroup>
<p>that []</p>
</choiceresponse>
<p>that []</p>
</problem>
""")
@@ -907,7 +965,7 @@ describe 'Markdown to xml extended hint with tricky syntax cases', ->
markdown = """
>>q22<<
[[
[[
(x) {{ hintx
these
span
@@ -919,18 +977,20 @@ describe 'Markdown to xml extended hint with tricky syntax cases', ->
"""
markdown = markdown.replace(/\n/g, '\r\n') # make DOS line endings
data = MarkdownEditingDescriptor.markdownToXml(markdown)
expect(data).toEqual("""
expect(data).toXMLEqual("""
<problem>
<p>q22</p>
<optionresponse>
<optioninput label="q22">
<option correct="True">x <optionhint>hintx these span</optionhint></option>
<option correct="False">yy <optionhint label="meh">hinty</optionhint></option>
<option correct="False">zzz <optionhint>hintz</optionhint></option>
<label>q22</label>
<optioninput>
<option correct="True">x <optionhint>hintx these span</optionhint>
</option>
<option correct="False">yy <optionhint label="meh">hinty</optionhint>
</option>
<option correct="False">zzz <optionhint>hintz</optionhint>
</option>
</optioninput>
</optionresponse>
</problem>
""")

View File

@@ -66,7 +66,7 @@ class @Problem
detail = @el.data('progress_detail')
status = @el.data('progress_status')
# Render 'x/y point(s)' if student has attempted question
# Render 'x/y point(s)' if student has attempted question
if status != 'none' and detail? and (jQuery.type(detail) == "string") and detail.indexOf('/') > 0
a = detail.split('/')
earned = parseFloat(a[0])
@@ -498,7 +498,7 @@ class @Problem
@el.find("select").each (i, select_field) =>
selected_option = $(select_field).find("option:selected").text().trim()
if selected_option is ''
if selected_option is 'Select an option'
answered = false
if bind
$(select_field).on 'change', (e) =>
@@ -628,10 +628,10 @@ class @Problem
choicegroup: (element, display, answers) =>
element = $(element)
input_id = element.attr('id').replace(/inputtype_/,'')
input_id = element.attr('id').replace(/inputtype_/, '')
answer = answers[input_id]
for choice in answer
element.find("label[for='input_#{input_id}_#{choice}']").addClass 'choicegroup_correct'
element.find("#input_#{input_id}_#{choice}").parent("label").addClass 'choicegroup_correct'
javascriptinput: (element, display, answers) =>
answer_id = $(element).attr('id').split("_")[1...].join("_")
@@ -641,7 +641,7 @@ class @Problem
choicetextgroup: (element, display, answers) =>
element = $(element)
input_id = element.attr('id').replace(/inputtype_/,'')
input_id = element.attr('id').replace(/inputtype_/, '')
answer = answers[input_id]
for choice in answer
element.find("section#forinput#{choice}").addClass 'choicetextgroup_show_correct'
@@ -821,4 +821,3 @@ class @Problem
]
hint_container.attr('hint_index', response.hint_index)
@$('.hint-button').focus() # a11y focus on click, like the Check button

View File

@@ -192,11 +192,15 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor
else
return template
@markdownToXml: (markdown)->
# it will contain <hint>...</hint> tags
demandHintTags = [];
toXml = `function (markdown) {
var xml = markdown,
i, splits, scriptFlag;
i, splits, makeParagraph;
var responseTypes = [
'optionresponse', 'multiplechoiceresponse', 'stringresponse', 'numericalresponse', 'choiceresponse'
];
// fix DOS \r\n line endings to look like \n
xml = xml.replace(/\r\n/g, '\n');
@@ -205,6 +209,20 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor
xml = xml.replace(/(^.*?$)(?=\n\=\=+$)/gm, '<h3 class="hd hd-2 problem-header">$1</h3>');
xml = xml.replace(/\n^\=\=+$/gm, '');
// extract question and description(optional)
// >>question||description<< converts to
// <label>question</label> <description>description</description>
xml = xml.replace(/>>([^]+?)<</gm, function(match, questionText) {
var result = questionText.split('||'),
label = '<label>' + result[0] + '</label>' + '\n';
// don't add empty <description> tag
if (result.length === 1 || !result[1]) {
return label;
}
return label + '<description>' + result[1] + '</description>\n'
})
// Pull out demand hints, || a hint ||
var demandhints = '';
xml = xml.replace(/(^\s*\|\|.*?\|\|\s*$\n?)+/gm, function(match) { // $\n
@@ -212,6 +230,7 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor
for (i = 0; i < options.length; i += 1) {
var inner = /\s*\|\|(.*?)\|\|/.exec(options[i]);
if (inner) {
//safe-lint: disable=javascript-concat-html
demandhints += ' <hint>' + inner[1].trim() + '</hint>\n';
}
}
@@ -510,56 +529,31 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor
return selectString;
});
// replace labels
// looks for >>arbitrary text<< and inserts it into the label attribute of the input type directly below the text.
var split = xml.split('\n');
var new_xml = [];
var line, i, curlabel, prevlabel = '';
var didinput = false;
for (i = 0; i < split.length; i++) {
line = split[i];
if (match = line.match(/>>(.*)<</)) {
curlabel = match[1].replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&apos;');
line = line.replace(/>>|<</g, '');
} else if (line.match(/<\w+response/) && didinput && curlabel == prevlabel) {
// reset label to prevent gobbling up previous one (if multiple questions)
curlabel = '';
didinput = false;
} else if (line.match(/<(textline|optioninput|formulaequationinput|choicegroup|checkboxgroup)/) && curlabel != '' && curlabel != undefined) {
line = line.replace(/<(textline|optioninput|formulaequationinput|choicegroup|checkboxgroup)/, '<$1 label="' + curlabel + '"');
didinput = true;
prevlabel = curlabel;
}
new_xml.push(line);
}
xml = new_xml.join('\n');
// replace code blocks
xml = xml.replace(/\[code\]\n?([^\]]*)\[\/?code\]/gmi, function(match, p1) {
var selectString = '<pre><code>\n' + p1 + '</code></pre>';
var selectString = '<pre><code>' + p1 + '</code></pre>';
return selectString;
});
// split scripts and preformatted sections, and wrap paragraphs
splits = xml.split(/(\<\/?(?:script|pre).*?\>)/g);
scriptFlag = false;
splits = xml.split(/(\<\/?(?:script|pre|label|description).*?\>)/g);
// Wrap a string by <p> tag when line is not already wrapped by another tag
// true when line is not already wrapped by another tag false otherwise
makeParagraph = true;
for (i = 0; i < splits.length; i += 1) {
if(/\<(script|pre)/.test(splits[i])) {
scriptFlag = true;
if (/\<(script|pre|label|description)/.test(splits[i])) {
makeParagraph = false;
}
if(!scriptFlag) {
if (makeParagraph) {
splits[i] = splits[i].replace(/(^(?!\s*\<|$).*$)/gm, '<p>$1</p>');
}
if(/\<\/(script|pre)/.test(splits[i])) {
scriptFlag = false;
if (/\<\/(script|pre|label|description)/.test(splits[i])) {
makeParagraph = true;
}
}
@@ -570,12 +564,68 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor
// if we've come across demand hints, wrap in <demandhint> at the end
if (demandhints) {
demandhints = '\n<demandhint>\n' + demandhints + '</demandhint>';
demandHintTags.push(demandhints);
}
// make all elements descendants of a single problem element
xml = '<problem>\n' + xml + demandhints + '\n</problem>';
// make selector to search responsetypes in xml
var responseTypesSelector = responseTypes.join(', ');
// make temporary xml
// safe-lint: disable=javascript-concat-html
var $xml = $($.parseXML('<prob>' + xml + '</prob>'));
responseType = $xml.find(responseTypesSelector);
// convert if there is only one responsetype
if (responseType.length === 1) {
var inputtype = responseType[0].firstElementChild
// used to decide whether an element should be placed before or after an inputtype
var beforeInputtype = true;
_.each($xml.find('prob').children(), function(child, index){
// we don't want to add the responsetype again into new xml
if (responseType[0].nodeName === child.nodeName) {
beforeInputtype = false;
return;
}
if (beforeInputtype) {
// safe-lint: disable=javascript-jquery-insert-into-target
responseType[0].insertBefore(child, inputtype);
} else {
responseType[0].appendChild(child);
}
})
var serializer = new XMLSerializer();
xml = serializer.serializeToString(responseType[0]);
// remove xmlns attribute added by the serializer
xml = xml.replace(/\sxmlns=['"].*?['"]/gi, '');
// XMLSerializer messes the indentation of XML so add newline
// at the end of each ending tag to make the xml looks better
xml = xml.replace(/(\<\/.*?\>)(\<.*?\>)/gi, '$1\n$2');
}
// remove class attribute added on <p> tag for question title
xml = xml.replace(/\sclass=\'qtitle\'/gi, '');
return xml;
}`
return toXml markdown
responseTypesXML = []
responseTypesMarkdown = markdown.split(/\n\s*---\s*\n/g)
_.each responseTypesMarkdown, (responseTypeMarkdown, index) ->
if responseTypeMarkdown.trim().length > 0
responseTypesXML.push toXml(responseTypeMarkdown)
# combine demandhints
demandHints = ''
if demandHintTags.length
## safe-lint: disable=javascript-concat-html
demandHints = '\n<demandhint>\n' + demandHintTags.join('') + '</demandhint>'
# make all responsetypes descendants of a single problem element
## safe-lint: disable=javascript-concat-html
# format and return xml
finalXml = '<problem>' + responseTypesXML.join('\n\n') + demandHints + '</problem>'
return PrettyPrint.xml(finalXml);

View File

@@ -2,46 +2,27 @@
metadata:
display_name: Checkboxes
markdown: |
Checkbox problems allow learners to select multiple options. Learners can see all the options along with the problem text.
When you add the problem, be sure to select Settings to specify a Display Name and other values that apply.
You can use this template as a guide to the simple editor markdown and OLX markup to use for checkboxes problems. Edit this component to replace this template with your own assessment.
You can use the following example problem as a model.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
>>The following languages are in the Indo-European family:<<
[x] Urdu
[ ] Finnish
[x] Marathi
[x] French
[ ] Hungarian
[x] a correct answer
[ ] an incorrect answer
[ ] an incorrect answer
[x] a correct answer
Note: Make sure you select all of the correct options—there may be more than one!
data: |
<problem>
<choiceresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for checkboxes problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<checkboxgroup>
<choice correct="true">a correct answer</choice>
<choice correct="false">an incorrect answer</choice>
<choice correct="false">an incorrect answer</choice>
<choice correct="true">a correct answer</choice>
</checkboxgroup>
</choiceresponse>
</problem>
[explanation]
Urdu, Marathi, and French are all Indo-European languages, while Finnish and Hungarian are in the Uralic family.
[explanation]
data: |
<problem>
<p>Checkbox problems allow learners to select multiple options. Learners can see all the options along with the problem text.</p>
<p>When you add the component, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problem as a model.</p>
<p>The following languages are in the Indo-European family:</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true" name="urdu">Urdu</choice>
<choice correct="false" name="finnish">Finnish</choice>
<choice correct="true" name="marathi">Marathi</choice>
<choice correct="true" name="french">French</choice>
<choice correct="false" name="hungarian">Hungarian</choice>
</checkboxgroup>
</choiceresponse>
<p><strong>Note</strong>: Make sure you select all of the correct options—there may be more than one!</p>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Urdu, Marathi, and French are all Indo-European languages, while Finnish and Hungarian are in the Uralic family.</p>
</div>
</solution>
</problem>

View File

@@ -2,68 +2,51 @@
metadata:
display_name: Checkboxes with Hints and Feedback
markdown: |
You can provide feedback for each option in a checkbox problem, with distinct feedback depending on whether or not the learner selects that option.
You can also provide compound feedback for a specific combination of answers. For example, if you have three possible answers in the problem, you can configure specific feedback for when a learner selects each combination of possible answers.
You can use this template as a guide to the simple editor markdown and OLX markup to use for checkboxes with hints and feedback problems. Edit this component to replace this template with your own assessment.
You can also add hints for learners.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this.<<
Be sure to select Settings to specify a Display Name and other values that apply.
Use the following example problem as a model.
>>Which of the following is a fruit? Check all that apply.<<
[x] apple {{ selected: You are correct that an apple is a fruit because it is the fertilized ovary that comes from an apple tree and contains seeds. }, { unselected: Remember that an apple is also a fruit.}}
[x] pumpkin {{ selected: You are correct that a pumpkin is a fruit because it is the fertilized ovary of a squash plant and contains seeds. }, { unselected: Remember that a pumpkin is also a fruit.}}
[ ] potato {{ U: You are correct that a potato is a vegetable because it is an edible part of a plant in tuber form.}, { S: A potato is a vegetable, not a fruit, because it does not come from a flower and does not contain seeds.}}
[x] tomato {{ S: You are correct that a tomato is a fruit because it is the fertilized ovary of a tomato plant and contains seeds. }, { U: Many people mistakenly think a tomato is a vegetable. However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.}}
[x] a correct answer {{ selected: You can specify optional feedback that appears after the learner selects and submits this answer. }, { unselected: You can specify optional feedback that appears after the learner clears and submits this answer.}}
[ ] an incorrect answer
[ ] an incorrect answer {{ selected: You can specify optional feedback for none, all, or a subset of the answers. }, { unselected: You can specify optional feedback for selected answers, cleared answers, or both.}}
[x] a correct answer
{{ ((A B D)) An apple, pumpkin, and tomato are all fruits as they all are fertilized ovaries of a plant and contain seeds. }}
{{ ((A B C D)) You are correct that an apple, pumpkin, and tomato are all fruits as they all are fertilized ovaries of a plant and contain seeds. However, a potato is not a fruit as it is an edible part of a plant in tuber form and is a vegetable. }}
{{ ((A B D)) You can specify optional feedback for a combination of answers which appears after the specified set of answers is submitted. }}
{{ ((A B C D)) You can specify optional feedback for one, several, or all answer combinations. }}
||You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.||
||If you add more than one hint, a different hint appears each time learners select the hint button.||
||A fruit is the fertilized ovary from a flower.||
||A fruit contains seeds of the plant.||
hinted: true
data: |
<problem>
<p>You can provide feedback for each option in a checkbox problem, with distinct feedback depending on whether or not the learner selects that option.</p>
data: |
<problem>
<choiceresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for checkboxes with hints and feedback problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<checkboxgroup>
<choice correct="true">a correct answer
<choicehint selected="true">You can specify optional feedback that appears after the learner selects and submits this answer.</choicehint>
<choicehint selected="false">You can specify optional feedback that appears after the learner clears and submits this answer.</choicehint>
</choice>
<choice correct="false">an incorrect answer
</choice>
<choice correct="false">an incorrect answer
<choicehint selected="true">You can specify optional feedback for none, all, or a subset of the answers.</choicehint>
<choicehint selected="false">You can specify optional feedback for selected answers, cleared answers, or both.</choicehint>
</choice>
<choice correct="true">a correct answer
</choice>
<compoundhint value="A B D">You can specify optional feedback for a combination of answers which appears after the specified set of answers is submitted.</compoundhint>
<compoundhint value="A B C D">You can specify optional feedback for one, several, or all answer combinations.</compoundhint>
</checkboxgroup>
</choiceresponse>
<p>You can also provide compound feedback for a specific combination of answers. For example, if you have three possible answers in the problem, you can configure specific feedback for when a learner selects each combination of possible answers.</p>
<p>You can also add hints for learners.</p>
<p>Use the following example problem as a model.</p>
<p>Which of the following is a fruit? Check all that apply.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">apple
<choicehint selected="true">You are correct that an apple is a fruit because it is the fertilized ovary that comes from an apple tree and contains seeds.</choicehint>
<choicehint selected="false">Remember that an apple is also a fruit.</choicehint>
</choice>
<choice correct="true">pumpkin
<choicehint selected="true">You are correct that a pumpkin is a fruit because it is the fertilized ovary of a squash plant and contains seeds.</choicehint>
<choicehint selected="false">Remember that a pumpkin is also a fruit.</choicehint>
</choice>
<choice correct="false">potato
<choicehint selected="true">A potato is a vegetable, not a fruit, because it does not come from a flower and does not contain seeds.</choicehint>
<choicehint selected="false">You are correct that a potato is a vegetable because it is an edible part of a plant in tuber form.</choicehint>
</choice>
<choice correct="true">tomato
<choicehint selected="true">You are correct that a tomato is a fruit because it is the fertilized ovary of a tomato plant and contains seeds.</choicehint>
<choicehint selected="false">Many people mistakenly think a tomato is a vegetable. However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it a fruit.</choicehint>
</choice>
<compoundhint value="A B D">An apple, pumpkin, and tomato are all fruits as they all are fertilized ovaries of a plant and contain seeds.</compoundhint>
<compoundhint value="A B C D">You are correct that an apple, pumpkin, and tomato are all fruits as they all are fertilized ovaries of a plant and contain seeds. However, a potato is not a fruit as it is an edible part of a plant in tuber form and is classified as a vegetable.</compoundhint>
</checkboxgroup>
</choiceresponse>
<demandhint>
<hint>A fruit is the fertilized ovary from a flower.</hint>
<hint>A fruit contains seeds of the plant.</hint>
</demandhint>
</problem>
<demandhint>
<hint>You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.</hint>
<hint>If you add more than one hint, a different hint appears each time learners select the hint button.</hint>
</demandhint>
</problem>

View File

@@ -5,27 +5,28 @@ metadata:
data: |
<problem>
<p>
Circuit schematic problems allow students to create virtual circuits by
arranging elements such as voltage sources, capacitors, resistors, and
MOSFETs on an interactive grid. The system evaluates a DC, AC, or
transient analysis of the circuit.
Circuit schematic problems allow students to create virtual circuits by
arranging elements such as voltage sources, capacitors, resistors, and
MOSFETs on an interactive grid. The system evaluates a DC, AC, or
transient analysis of the circuit.
</p>
<p>
For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/circuit_schematic_builder.html" target="_blank">
Circuit Schematic Builder Problem</a> in <i>Building and Running an edX Course</i>.
For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/circuit_schematic_builder.html" target="_blank">
Circuit Schematic Builder Problem</a> in <i>Building and Running an edX Course</i>.
</p>
<p>
When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.
When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.
</p>
<p>You can use the following example problems as models.</p>
<p>Make a voltage divider that splits the provided voltage evenly.</p>
<schematicresponse>
<center>
<schematic height="500" width="600" parts="g,r" analyses="dc"
initial_value="[[&quot;v&quot;,[168,144,0],{&quot;value&quot;:&quot;dc(1)&quot;,&quot;_json_&quot;:0},[&quot;1&quot;,&quot;0&quot;]],[&quot;r&quot;,[296,120,0],{&quot;r&quot;:&quot;1&quot;,&quot;_json_&quot;:1},[&quot;1&quot;,&quot;output&quot;]],[&quot;L&quot;,[296,168,3],{&quot;label&quot;:&quot;output&quot;,&quot;_json_&quot;:2},[&quot;output&quot;]],[&quot;w&quot;,[296,216,168,216]],[&quot;w&quot;,[168,216,168,192]],[&quot;w&quot;,[168,144,168,120]],[&quot;w&quot;,[168,120,296,120]],[&quot;g&quot;,[168,216,0],{&quot;_json_&quot;:7},[&quot;0&quot;]],[&quot;view&quot;,-67.49999999999994,-78.49999999999994,1.6000000000000003,&quot;50&quot;,&quot;10&quot;,&quot;1G&quot;,null,&quot;100&quot;,&quot;1&quot;,&quot;1000&quot;]]"/>
</center>
<p>Make a voltage divider that splits the provided voltage evenly.</p>
<center>
<schematic height="500" width="600" parts="g,r" analyses="dc"
initial_value="[[&quot;v&quot;,[168,144,0],{&quot;value&quot;:&quot;dc(1)&quot;,&quot;_json_&quot;:0},[&quot;1&quot;,&quot;0&quot;]],[&quot;r&quot;,[296,120,0],{&quot;r&quot;:&quot;1&quot;,&quot;_json_&quot;:1},[&quot;1&quot;,&quot;output&quot;]],[&quot;L&quot;,[296,168,3],{&quot;label&quot;:&quot;output&quot;,&quot;_json_&quot;:2},[&quot;output&quot;]],[&quot;w&quot;,[296,216,168,216]],[&quot;w&quot;,[168,216,168,192]],[&quot;w&quot;,[168,144,168,120]],[&quot;w&quot;,[168,120,296,120]],[&quot;g&quot;,[168,216,0],{&quot;_json_&quot;:7},[&quot;0&quot;]],[&quot;view&quot;,-67.49999999999994,-78.49999999999994,1.6000000000000003,&quot;50&quot;,&quot;10&quot;,&quot;1G&quot;,null,&quot;100&quot;,&quot;1&quot;,&quot;1000&quot;]]"/>
</center>
<answer type="loncapa/python">
dc_value = "dc analysis not found"
for response in submission[0]:
@@ -38,14 +39,26 @@ data: |
else:
correct = ['incorrect']
</answer>
</schematicresponse>
<p>Make a high-pass filter.</p>
<schematicresponse>
<center>
<schematic height="500" width="600" parts="g,r,s,c" analyses="ac"
submit_analyses="{&quot;ac&quot;:[[&quot;NodeA&quot;,1,9]]}"
initial_value="[[&quot;v&quot;,[160,152,0],{&quot;name&quot;:&quot;v1&quot;,&quot;value&quot;:&quot;sin(0,1,1,0,0)&quot;,&quot;_json_&quot;:0},[&quot;1&quot;,&quot;0&quot;]],[&quot;w&quot;,[160,200,240,200]],[&quot;g&quot;,[160,200,0],{&quot;_json_&quot;:2},[&quot;0&quot;]],[&quot;L&quot;,[240,152,3],{&quot;label&quot;:&quot;NodeA&quot;,&quot;_json_&quot;:3},[&quot;NodeA&quot;]],[&quot;s&quot;,[240,152,0],{&quot;color&quot;:&quot;cyan&quot;,&quot;offset&quot;:&quot;0&quot;,&quot;_json_&quot;:4},[&quot;NodeA&quot;]],[&quot;view&quot;,64.55878906250004,54.114697265625054,2.5000000000000004,&quot;50&quot;,&quot;10&quot;,&quot;1G&quot;,null,&quot;100&quot;,&quot;1&quot;,&quot;1000&quot;]]"/>
</center>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>
You can form a voltage divider that evenly divides the input
voltage with two identically valued resistors, with the sampled
voltage taken in between the two.
</p>
<p><img src="/static/images/voltage_divider.png" alt=""/></p>
</div>
</solution>
</schematicresponse>
<schematicresponse>
<p>Make a high-pass filter.</p>
<center>
<schematic height="500" width="600" parts="g,r,s,c" analyses="ac"
submit_analyses="{&quot;ac&quot;:[[&quot;NodeA&quot;,1,9]]}"
initial_value="[[&quot;v&quot;,[160,152,0],{&quot;name&quot;:&quot;v1&quot;,&quot;value&quot;:&quot;sin(0,1,1,0,0)&quot;,&quot;_json_&quot;:0},[&quot;1&quot;,&quot;0&quot;]],[&quot;w&quot;,[160,200,240,200]],[&quot;g&quot;,[160,200,0],{&quot;_json_&quot;:2},[&quot;0&quot;]],[&quot;L&quot;,[240,152,3],{&quot;label&quot;:&quot;NodeA&quot;,&quot;_json_&quot;:3},[&quot;NodeA&quot;]],[&quot;s&quot;,[240,152,0],{&quot;color&quot;:&quot;cyan&quot;,&quot;offset&quot;:&quot;0&quot;,&quot;_json_&quot;:4},[&quot;NodeA&quot;]],[&quot;view&quot;,64.55878906250004,54.114697265625054,2.5000000000000004,&quot;50&quot;,&quot;10&quot;,&quot;1G&quot;,null,&quot;100&quot;,&quot;1&quot;,&quot;1000&quot;]]"/>
</center>
<answer type="loncapa/python">
ac_values = None
for response in submission[0]:
@@ -60,24 +73,17 @@ data: |
else:
correct = ['incorrect']
</answer>
</schematicresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>
You can form a voltage divider that evenly divides the input
voltage with two identically valued resistors, with the sampled
voltage taken in between the two.
</p>
<p><img src="/static/images/voltage_divider.png" alt=""/></p>
<p>
You can form a simple high-pass filter without any further
constraints by simply putting a resistor in series with a
capacitor. The actual values of the components do not really
matter in this problem.
</p>
<p><img src="/static/images/high_pass_filter.png" alt=""/></p>
</div>
</solution>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>
You can form a simple high-pass filter without any further
constraints by simply putting a resistor in series with a
capacitor. The actual values of the components do not really
matter in this problem.
</p>
<p><img src="/static/images/high_pass_filter.png" alt=""/></p>
</div>
</solution>
</schematicresponse>
</problem>

View File

@@ -4,33 +4,54 @@ metadata:
markdown: !!null
data: |
<problem>
<p>
In custom Python-evaluated input (also called "write-your-own-grader"
problems), the grader uses a Python script that you create and embed in
the problem to evaluate a learner's response or provide hints. These
problems can be any type. Numerical input and text input problems are
the most common write-your-own-grader problems.
</p>
<p>
You can use script tag format or answer tag format to create these problems.
</p>
<p>
You can create custom Python-evaluated input problems that provide
partial credit or that randomize variables in the Python code. You can
also add images to the solution by using an HTML "img" tag. Note that
the "img" tag must be between the "div" tags that are inside the
"solution" tags, and that learners do not see these images until they
click the "Show Answer" button.
</p>
<p> For more information, see <a
href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/custom_python.html" target="_blank">
Write-Your-Own-Grader Problem</a> in <i>Building and Running an edX Course</i>.
</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problem as a model.</p>
<hr />
<p>
In custom Python-evaluated input (also called "write-your-own-grader"
problems), the grader uses a Python script that you create and embed in
the problem to evaluate a learner's response or provide hints. These
problems can be any type. Numerical input and text input problems are
the most common write-your-own-grader problems.
</p>
<p>
You can use script tag format or answer tag format to create these problems.
</p>
<p>
You can create custom Python-evaluated input problems that provide
partial credit or that randomize variables in the Python code. You can
also add images to the solution by using an HTML "img" tag. Note that
the "img" tag must be between the "div" tags that are inside the
"solution" tags, and that learners do not see these images until they
click the "Show Answer" button.
</p>
<p>
For more information, see <a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/custom_python.html" target="_blank">
Write-Your-Own-Grader Problem</a> in <i>Building and Running an edX Course</i>.
</p>
<p>
When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.
</p>
<p>You can use the following example problem as a model.</p>
<hr/>
<customresponse cfn="test_add_to_ten">
<script type="loncapa/python">
def test_add_to_ten(expect, ans):
return test_add(10, ans)
</script>
<label>Enter two integers that sum to 10.</label>
<textline size="40" correct_answer="3" label="Enter first number" /><br/>
<textline size="40" correct_answer="7" label="Enter second number" />
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Any set of integers on the line \(y = 10 - x\) satisfy these constraints.</p>
</div>
</solution>
</customresponse>
<customresponse cfn="test_add" expect="20">
<script type="loncapa/python">
def test_add(expect, ans):
@@ -41,29 +62,20 @@ data: |
except ValueError:
return False
def test_add_to_ten(expect, ans):
return test_add(10, ans)
</script>
<p>Enter two integers that sum to 10.</p>
<customresponse cfn="test_add_to_ten">
<textline size="40" correct_answer="3" label="Integer #1"/><br/>
<textline size="40" correct_answer="7" label="Integer #2"/>
<label>Enter two integers that sum to 20.</label>
<textline size="40" correct_answer="11" label="Enter first number" /><br/>
<textline size="40" correct_answer="9" label="Enter second number" />
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Any set of integers on the line \(y = 20 - x\) satisfy these constraints.</p>
<p>To add an image to the solution, use an HTML "img" tag. Make sure to include alt text.</p>
<img src="/static/images/placeholder-image.png" width="400"
alt="Description of image, with a primary goal of explaining its
relevance to the problem or concept being illustrated for someone
who is unable to see the image."/>
</div>
</solution>
</customresponse>
<p>Enter two integers that sum to 20.</p>
<customresponse cfn="test_add" expect="20">
<textline size="40" correct_answer="11" label="Integer #1"/><br/>
<textline size="40" correct_answer="9" label="Integer #2"/>
</customresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Any set of integers on the line \(y = 10 - x\) and \(y = 20 - x\) satisfy these constraints.</p>
<p>To add an image to the solution, use an HTML "img" tag. Make sure to include alt text.</p>
<img src="/static/images/placeholder-image.png" width="400"
alt="Description of image, with a primary goal of explaining its
relevance to the problem or concept being illustrated for someone
who is unable to see the image."/>
</div>
</solution>
</problem>

View File

@@ -5,21 +5,22 @@ metadata:
showanswer: never
data: |
<problem>
<p>In drag and drop problems, students respond to a question by dragging text or objects to a specific location on an image.</p>
<p>
In drag and drop problems, students respond to a question by dragging text or objects to a specific location on an image.
For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/drag_and_drop_deprecated.html" target="_blank">
Drag and Drop Problem (Deprecated)</a> in <i>Building and Running an edX Course</i>.
</p>
<p>
For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/drag_and_drop_deprecated.html" target="_blank">
Drag and Drop Problem (Deprecated)</a> in <i>Building and Running an edX Course</i>.</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.
</p>
<p>You can use the following example problems as models.</p>
<hr />
<customresponse>
<h3>Simple Drag and Drop</h3>
<p>Drag each word in the scrollbar to the bucket that matches the number of letters in the word.</p>
<drag_and_drop_input img="https://studio.edx.org/c4x/edX/DemoX/asset/L9_buckets.png">
<hr/>
<customresponse>
<h3>Simple Drag and Drop</h3>
<p>Drag each word in the scrollbar to the bucket that matches the number of letters in the word.</p>
<drag_and_drop_input img="https://studio.edx.org/c4x/edX/DemoX/asset/L9_buckets.png">
<draggable id="1" label="a"/>
<draggable id="2" label="bog"/>
<draggable id="3" label="droll"/>
@@ -31,7 +32,7 @@ data: |
<draggable id="9" label="tap"/>
<draggable id="10" label="strop"/>
<draggable id="11" label="few"/>
</drag_and_drop_input>
</drag_and_drop_input>
<answer type="loncapa/python">
correct_answer = {
'1': [[70, 150], 121],
@@ -50,14 +51,14 @@ data: |
else:
correct = ['incorrect']
</answer>
</customresponse>
</customresponse>
<customresponse>
<h3>Drag and Drop with Outline</h3>
<p>Label the hydrogen atoms connected with the left carbon atom.</p>
<h3>Drag and Drop with Outline</h3>
<p>Label the hydrogen atoms connected with the left carbon atom.</p>
<drag_and_drop_input img="https://studio.edx.org/c4x/edX/DemoX/asset/ethglycol.jpg" target_outline="true" one_per_target="true" no_labels="true" label_bg_color="rgb(222, 139, 238)">
<draggable id="1" label="Hydrogen" />
<draggable id="2" label="Hydrogen" />
<target id="t1_o" x="10" y="67" w="100" h="100"/>
<target id="t2" x="133" y="3" w="70" h="70"/>
<target id="t3" x="2" y="384" w="70" h="70"/>

View File

@@ -3,54 +3,12 @@ metadata:
display_name: Math Expression Input
markdown: !!null
data: |
<problem>
<p>
In math expression input problems, learners enter text that represents a
mathematical expression into a field, and text is converted to a symbolic
expression that appears below that field. You can refer learners to
<a href="http://edx.readthedocs.io/projects/edx-guide-for-students/en/latest/completing_assignments/SFD_mathformatting.html" target="_blank">
Entering Mathematical and Scientific Expressions</a> in the <i>EdX Learner's
Guide</i> for information about how to enter text into the field.
</p>
<p>
Math expression problems can include unknown variables and relatively
complicated symbolic expressions. The grader uses a numerical sampling to
determine whether the student's response matches your math expression, to a
specified numerical tolerance. You must specify the allowed variables in the
expression as well as the range of values for each variable.
</p>
<p>
To create these problems, you use MathJax to change your plain text into
"beautiful math." For more information about how to use MathJax in Studio,
see <a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/mathjax.html" target="_blank">
A Brief Introduction to MathJax in Studio</a> in <i>Building and Running an edx
Course</i>.
</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problems as models.</p>
<p>Write an expression for the product of \( R_1\), \( R_2\), and
the inverse of \( R_3\) .</p>
<formularesponse type="ci" samples="R_1,R_2,R_3@1,2,3:3,4,5#10" answer="$VoVi">
<responseparam type="tolerance" default="0.00001"/>
<formulaequationinput size="40" label="Enter the equation"/>
</formularesponse>
<script type="loncapa/python">
VoVi = "(R_1*R_2)/R_3"
</script>
<p>Let \( x\) be a variable, and let \( n\) be an arbitrary constant.
What is the derivative of \( x^n\)?</p>
<script type="loncapa/python">
derivative = "n*x^(n-1)"
</script>
<formularesponse type="ci" samples="x,n@1,2:3,4#10" answer="$derivative">
<responseparam type="tolerance" default="0.00001"/>
<formulaequationinput size="40" label="Enter the equation"/>
</formularesponse>
</problem>
<problem>
<formularesponse type="ci" samples="R_1,R_2,R_3@1,2,3:3,4,5#10" answer="R_1*R_2/R_3">
<p>You can use this template as a guide to the OLX markup to use for math expression problems. Edit this component to replace the example with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required. Example: Write an expression for the product of R_1, R_2, and the inverse of R_3.</label>
<description>You can add an optional tip or note related to the prompt like this. Example: To test this example, the correct answer is R_1*R_2/R_3</description>
<responseparam type="tolerance" default="0.00001"/>
<formulaequationinput size="40"/>
</formularesponse>
</problem>

View File

@@ -2,34 +2,29 @@
metadata:
display_name: Image Mapped Input
markdown: !!null
data: |
<problem>
<p>
In an image mapped input problem, also known as a "pointing on a picture"
problem, students click inside a defined region in an image. You define this
region by including coordinates in the body of the problem. You can define
one rectangular region, multiple rectangular regions, or one non-rectangular
region. For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/image_mapped_input.html" target="_blank">Image Mapped Input
Problem</a> in <i>Building and Running an edX Course</i>.
</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problem as a model.</p>
<p>What country is home to the Great Pyramid of Giza as well as the cities
of Cairo and Memphis? Click the country on the map below.</p>
<imageresponse>
<imageinput src="https://studio.edx.org/c4x/edX/DemoX/asset/Africa.png"
width="600" height="638" rectangle="(338,98)-(412,168)" alt="Map of
Africa"/>
</imageresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Egypt is home to not only the Pyramids, Cairo, and Memphis, but also
the Sphinx and the ancient Royal Library of Alexandria.</p>
</div>
</solution>
</problem>
data: |
<problem>
<p>
In an image mapped input problem, also known as a "pointing on a picture" problem, students click inside a defined region in an image. You define this region by including coordinates in the body of the problem. You can define one rectangular region,
multiple rectangular regions, or one non-rectangular region. For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/image_mapped_input.html" target="_blank">Image Mapped Input Problem</a>
in
<i>Building and Running an edX Course</i>.
</p>
<p>When you add the problem, be sure to select
<strong>Settings</strong>
to specify a
<strong>Display Name</strong>
and other values that apply.</p>
<p>You can use the following example problem as a model.</p>
<imageresponse>
<p>What country is home to the Great Pyramid of Giza as well as the cities of Cairo and Memphis? Click the country on the map below.</p>
<imageinput src="https://studio.edx.org/c4x/edX/DemoX/asset/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)" alt="Map of Africa"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Egypt is home to not only the Pyramids, Cairo, and Memphis, but also the Sphinx and the ancient Royal Library of Alexandria.</p>
</div>
</solution>
</imageresponse>
</problem>

View File

@@ -5,31 +5,36 @@ metadata:
showanswer: never
data: |
<problem>
<p>
In these problems (also called custom JavaScript problems or JS Input
problems), you add a problem or tool that uses JavaScript in Studio.
Studio embeds the problem in an IFrame so that your students can
interact with it in the LMS. You can grade your students' work using
JavaScript and some basic Python, and the grading is integrated into the
edX grading system.
</p>
<p>
The JS Input problem that you create must use HTML, JavaScript, and
cascading style sheets (CSS). You can use any application creation tool,
such as the Google Web Toolkit (GWT), to create your JS Input problem.
</p>
<p>
For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/custom_javascript.html" target="_blank">
Custom JavaScript Problem</a> in <i>Building and Running an edX Course</i>.
</p>
<p>JavaScript developers can also see
<a href="http://edx.readthedocs.io/projects/edx-developer-guide/en/latest/extending_platform/javascript.html" target="_blank">
Custom JavaScript Applications</a> in the <i>EdX Developer's Guide</i>.</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problem as a model.</p>
<p>
In these problems (also called custom JavaScript problems or JS Input
problems), you add a problem or tool that uses JavaScript in Studio.
Studio embeds the problem in an IFrame so that your students can
interact with it in the LMS. You can grade your students' work using
JavaScript and some basic Python, and the grading is integrated into the
edX grading system.
</p>
<p>
The JS Input problem that you create must use HTML, JavaScript, and
cascading style sheets (CSS). You can use any application creation tool,
such as the Google Web Toolkit (GWT), to create your JS Input problem.
</p>
<p>
For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/custom_javascript.html" target="_blank">
Custom JavaScript Problem</a> in <i>Building and Running an edX Course</i>.
</p>
<p>
JavaScript developers can also see
<a href="http://edx.readthedocs.io/projects/edx-developer-guide/en/latest/extending_platform/javascript.html" target="_blank">
Custom JavaScript Applications</a> in the <i>EdX Developer's Guide</i>.
</p>
<p>
When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.
</p>
<p>You can use the following example problem as a model.</p>
<customresponse cfn="vglcfn">
<script type="loncapa/python">
<![CDATA[
import json
@@ -53,16 +58,14 @@ data: |
'''
]]>
</script>
<p>In the following image, click the objects until the cone is yellow
and the cube is blue.</p>
<customresponse cfn="vglcfn">
<jsinput gradefn="WebGLDemo.getGrade"
get_statefn="WebGLDemo.getState"
set_statefn="WebGLDemo.setState"
initial_state='{"selectedObjects":{"cube":true,"cylinder":false}}'
width="400"
height="400"
html_file="https://studio.edx.org/c4x/edX/DemoX/asset/webGLDemo.html"
sop="false"/>
</customresponse>
<p>In the following image, click the objects until the cone is yellow and the cube is blue.</p>
<jsinput gradefn="WebGLDemo.getGrade"
get_statefn="WebGLDemo.getState"
set_statefn="WebGLDemo.setState"
initial_state='{"selectedObjects":{"cube":true,"cylinder":false}}'
width="400"
height="400"
html_file="https://studio.edx.org/c4x/edX/DemoX/asset/webGLDemo.html"
sop="false"/>
</customresponse>
</problem>

View File

@@ -88,37 +88,41 @@ metadata:
data: |
<?xml version="1.0"?>
<problem showanswer="closed" rerandomize="never" weight="10" display_name="lec1_Q2">
<p>If you have a problem that is already written in LaTeX, you can use this problem type to
easily convert your code into XML. After you paste your code into the LaTeX editor,
you only need to make a few minor adjustments.</p>
<p>For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/problem_in_latex.html" target="_blank">
Problem Written in LaTeX</a> in <i>Building and Running an edX Course</i>.</p>
<problem>
<p>
If you have a problem that is already written in LaTeX, you can use this problem type to
easily convert your code into XML. After you paste your code into the LaTeX editor,
you only need to make a few minor adjustments.
</p>
<p>
For more information, see
<a href="http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/exercises_tools/problem_in_latex.html" target="_blank">
Problem Written in LaTeX</a> in <i>Building and Running an edX Course</i>.
</p>
<p>You can use the following example problems as models.</p>
<p><strong>Example Option Problem</strong></p>
<p>Which of the following countries celebrates its independence on August 15?</p>
<br/>
<optionresponse>
<optioninput options="('India','Spain','China','Bermuda')" correct="India"></optioninput>
</optionresponse>
<p><strong>Example Option Problem</strong></p>
<br/>
<optionresponse>
<label>Which of the following countries celebrates its independence on August 15?</label>
<optioninput options="('India','Spain','China','Bermuda')" correct="India"></optioninput>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>India became an independent nation on August 15, 1947.</p>
</div>
</solution>
<br/>
<p><strong>Example Multiple Choice Problem</strong></p>
<p>Which of the following countries has the largest population?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false" name="brazil">Brazil</choice>
<choice correct="false" name="germany">Germany</choice>
<choice correct="true" name="indonesia">Indonesia</choice>
<choice correct="false" name="russia">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</optionresponse>
<br/>
<p><strong>Example Multiple Choice Problem</strong></p>
<multiplechoiceresponse>
<label>Which of the following countries has the largest population?</label>
<choicegroup type="MultipleChoice">
<choice correct="false" name="brazil">Brazil</choice>
<choice correct="false" name="germany">Germany</choice>
<choice correct="true" name="indonesia">Indonesia</choice>
<choice correct="false" name="russia">Russia</choice>
</choicegroup>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
@@ -129,38 +133,58 @@ data: |
<p>The population of Germany is approximately 81 million.</p>
</div>
</solution>
<br/>
<p><strong>Example Math Expression Problem</strong></p>
<p>What is Einstein's equation for the energy equivalent of a mass [mathjaxinline]m[/mathjaxinline]?</p>
<symbolicresponse expect="m*c^2">
<textline size="90" correct_answer="m*c^2" math="1"/>
</symbolicresponse>
<br/>
<p><strong>Example Numerical Problem</strong></p>
<p>Estimate the energy savings (in J/y) if all the people ([mathjaxinline]3\times 10^8[/mathjaxinline]) in the U.&#xA0;S. switched from U.&#xA0;S. code to low-flow shower heads.</p>
<p style="display:inline">Energy saved = </p>
<numericalresponse inline="1" answer="0.52">
<textline inline="1">
<responseparam type="tolerance" default="0.02"/>
</textline>
<p style="display:inline">&#xA0;EJ/year</p>
</numericalresponse>
<br/>
<p><strong>Example Fill-in-the-Blank Problem</strong></p>
<p>What was the first post-secondary school in China to allow both male and female students?</p>
<stringresponse answer="Nanjing Higher Normal Institute" type="ci" >
<additional_answer>National Central University</additional_answer>
<additional_answer>Nanjing University</additional_answer>
<textline label="What was the first post-secondary school in China to allow both male and female students?" size="40"/>
</stringresponse>
</multiplechoiceresponse>
<br/>
<p><strong>Example Math Expression Problem</strong></p>
<symbolicresponse expect="m*c^2">
<p>What is Einstein's equation for the energy equivalent of a mass [mathjaxinline]m[/mathjaxinline]?</p>
<textline size="90" correct_answer="m*c^2" math="1"/>
</symbolicresponse>
<br/>
<p><strong>Example Numerical Problem</strong></p>
<numericalresponse inline="1" answer="0.52">
<label>Estimate the energy savings (in J/y) if all the people ([mathjaxinline]3\times 10^8[/mathjaxinline]) in the U.&#xA0;S. switched from U.&#xA0;S. code to low-flow shower heads.</label>
<formulaequationinput trailing_text="EJ/year" />
<responseparam type="tolerance" default="0.02"/>
</numericalresponse>
<br/>
<p><strong>Example Fill-in-the-Blank Problem</strong></p>
<stringresponse answer="Nanjing Higher Normal Institute" type="ci" >
<label>What was the first post-secondary school in China to allow both male and female students?</label>
<additional_answer>National Central University</additional_answer>
<additional_answer>Nanjing University</additional_answer>
<textline size="40"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Nanjing Higher Normal Institute first admitted female students in 1920.</p>
</div>
</solution>
<br/>
<p><strong>Example Custom Python-Evaluated Input Problem</strong></p>
</stringresponse>
<br/>
<p><strong>Example Custom Python-Evaluated Input Problem</strong></p>
<customresponse cfn="test_add_to_ten">
<script type="loncapa/python">
def test_add_to_ten(expect, ans):
return test_add(10, ans)
</script>
<p>Enter two integers that sum to 10.</p>
<textline size="40" correct_answer="3"/><br/>
<textline size="40" correct_answer="7"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Any set of integers on the line \(y = 10 - x\) satisfy these constraints.</p>
</div>
</solution>
</customresponse>
<customresponse cfn="test_add" expect="20">
<script type="loncapa/python">
def test_add(expect, ans):
try:
@@ -169,37 +193,30 @@ data: |
return (a1+a2) == int(expect)
except ValueError:
return False
def test_add_to_ten(expect, ans):
return test_add(10, ans)
</script>
<p>Enter two integers that sum to 10.</p>
<customresponse cfn="test_add_to_ten">
<textline size="40" correct_answer="3" label="Integer #1"/><br/>
<textline size="40" correct_answer="7" label="Integer #2"/>
</customresponse>
<p>Enter two integers that sum to 20.</p>
<customresponse cfn="test_add" expect="20">
<textline size="40" correct_answer="11" label="Integer #1"/><br/>
<textline size="40" correct_answer="9" label="Integer #2"/>
</customresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Any set of integers on the line \(y = 10 - x\) and \(y = 20 - x\) satisfy these constraints.</p>
<p>To add an image to the solution, use an HTML "img" tag. Make sure to include alt text.</p>
<img src="/static/images/placeholder-image.png" width="400" alt="Description of image"/>
</div>
</solution>
<textline size="40" correct_answer="11"/><br/>
<textline size="40" correct_answer="9"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Any set of integers on the line \(y = 20 - x\) satisfy these constraints.</p>
<p>To add an image to the solution, use an HTML "img" tag. Make sure to include alt text.</p>
<img src="/static/images/placeholder-image.png" width="400" alt="Description of image"/>
</div>
</solution>
</customresponse>
<br/>
<p><strong>Example Image Mapped Input Problem</strong></p>
<p>What country is home to the Great Pyramid of Giza as well as the cities
of Cairo and Memphis? Click the country on the map below.</p>
<imageresponse>
<imageinput src="https://studio.edx.org/c4x/edX/DemoX/asset/Africa.png"
width="600" height="638" rectangle="(338,98)-(412,168)" alt="Map of
Africa"/>
</imageresponse>
<imageresponse>
<p>
What country is home to the Great Pyramid of Giza as well as the cities
of Cairo and Memphis? Click the country on the map below.
</p>
<imageinput src="https://studio.edx.org/c4x/edX/DemoX/asset/Africa.png"
width="600" height="638" rectangle="(338,98)-(412,168)" alt="Map of
Africa"/>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
@@ -207,22 +224,24 @@ data: |
the Sphinx and the ancient Royal Library of Alexandria.</p>
</div>
</solution>
<br/>
<p><strong>Example Hidden Explanation</strong></p>
<p>You can provide additional information that only appears at certain times by including a "showhide" flag. </p>
<p>
<table class="wikitable collapsible collapsed">
<tbody>
<tr>
<th> More Information [<a href="javascript:$('#sh1').toggle()" id="sh1l">show</a>]</th>
</tr>
<tr id="sh1" style="display:none">
<td>
<p>This is a hidden explanation. It can contain equations, such as [mathjaxinline]\alpha = \frac{2}{\sqrt {1+\gamma }}[/mathjaxinline]. </p>
<p>This is additional text after the hidden explanation. </p>
</td>
</tr>
</tbody>
</table>
</p>
</imageresponse>
<br/>
<p><strong>Example Hidden Explanation</strong></p>
<p>You can provide additional information that only appears at certain times by including a "showhide" flag. </p>
<p>
<table class="wikitable collapsible collapsed">
<tbody>
<tr>
<th> More Information [<a href="javascript:$('#sh1').toggle()" id="sh1l">show</a>]</th>
</tr>
<tr id="sh1" style="display:none">
<td>
<p>This is a hidden explanation. It can contain equations, such as [mathjaxinline]\alpha = \frac{2}{\sqrt {1+\gamma }}[/mathjaxinline]. </p>
<p>This is additional text after the hidden explanation. </p>
</td>
</tr>
</tbody>
</table>
</p>
</problem>

View File

@@ -2,52 +2,26 @@
metadata:
display_name: Multiple Choice
markdown: |
Multiple choice problems allow learners to select only one option. Learners can see all the options along with the problem text.
When you add the problem, be sure to select Settings to specify a Display Name and other values that apply.
You can use this template as a guide to the simple editor markdown and OLX markup to use for multiple choice problems. Edit this component to replace this template with your own assessment.
You can use the following example problem as a model.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
>>Which of the following countries has the largest population?<<
( ) Brazil {{ timely feedback -- explain why an almost correct answer is wrong }}
( ) Germany
(x) Indonesia
( ) Russia
( ) an incorrect answer
(x) the correct answer
( ) an incorrect answer
[explanation]
According to September 2014 estimates:
The population of Indonesia is approximately 250 million.
The population of Brazil is approximately 200 million.
The population of Russia is approximately 146 million.
The population of Germany is approximately 81 million.
[explanation]
data: |
<problem>
<p>Multiple choice problems allow learners to select only one option.
Learners can see all the options along with the problem text.</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problem as a model.</p>
<p>Which of the following countries has the largest population?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false" name="brazil">Brazil
<choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint>
</choice>
<choice correct="false" name="germany">Germany</choice>
<choice correct="true" name="indonesia">Indonesia</choice>
<choice correct="false" name="russia">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>According to September 2014 estimates:</p>
<p>The population of Indonesia is approximately 250 million.</p>
<p>The population of Brazil is approximately 200 million.</p>
<p>The population of Russia is approximately 146 million.</p>
<p>The population of Germany is approximately 81 million.</p>
</div>
</solution>
</problem>
data: |
<problem>
<multiplechoiceresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for multiple choice problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<choicegroup>
<choice correct="false">an incorrect answer</choice>
<choice correct="true">the correct answer</choice>
<choice correct="false">an incorrect answer</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>

View File

@@ -3,44 +3,38 @@ metadata:
display_name: Multiple Choice with Hints and Feedback
markdown: |
You can provide feedback for each option in a multiple choice problem.
You can use this template as a guide to the simple editor markdown and OLX markup to use for multiple choice with hints and feedback problems. Edit this component to replace this template with your own assessment.
You can also add hints for learners.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
Be sure to select Settings to specify a Display Name and other values that apply.
( ) an incorrect answer {{You can specify optional feedback like this, which appears after this answer is submitted.}}
(x) the correct answer
( ) an incorrect answer {{You can specify optional feedback for none, a subset, or all of the answers.}}
Use the following example problem as a model.
>>Which of the following is a vegetable?<<
( ) apple {{An apple is the fertilized ovary that comes from an apple tree and contains seeds, meaning it is a fruit.}}
( ) pumpkin {{A pumpkin is the fertilized ovary of a squash plant and contains seeds, meaning it is a fruit.}}
(x) potato {{A potato is an edible part of a plant in tuber form and is a vegetable.}}
( ) tomato {{Many people mistakenly think a tomato is a vegetable. However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.}}
||A fruit is the fertilized ovary from a flower.||
||A fruit contains seeds of the plant.||
||You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.||
||If you add more than one hint, a different hint appears each time learners select the hint button.||
hinted: true
data: |
<problem>
<p>You can provide feedback for each option in a multiple choice problem.</p>
data: |
<problem>
<multiplechoiceresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for multiple choice with hints and feedback problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<choicegroup>
<choice correct="false">an incorrect answer
<choicehint>You can specify optional feedback like this, which appears after this answer is submitted.</choicehint>
</choice>
<choice correct="true">the correct answer
</choice>
<choice correct="false">an incorrect answer
<choicehint>You can specify optional feedback for none, a subset, or all of the answers.</choicehint>
</choice>
</choicegroup>
</multiplechoiceresponse>
<p>You can also add hints for learners.</p>
<p>Use the following example problem as a model.</p>
<p>Which of the following is a vegetable?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">apple <choicehint>An apple is the fertilized ovary that comes from an apple tree and contains seeds, meaning it is a fruit.</choicehint></choice>
<choice correct="false">pumpkin <choicehint>A pumpkin is the fertilized ovary of a squash plant and contains seeds, meaning it is a fruit.</choicehint></choice>
<choice correct="true">potato <choicehint>A potato is an edible part of a plant in tuber form and is a vegetable.</choicehint></choice>
<choice correct="false">tomato <choicehint>Many people mistakenly think a tomato is a vegetable. However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>A fruit is the fertilized ovary from a flower.</hint>
<hint>A fruit contains seeds of the plant.</hint>
</demandhint>
</problem>
<demandhint>
<hint>You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.</hint>
<hint>If you add more than one hint, a different hint appears each time learners select the hint button.</hint>
</demandhint>
</problem>

View File

@@ -2,57 +2,19 @@
metadata:
display_name: Numerical Input
markdown: |
In a numerical input problem, learners enter numbers or a specific and relatively simple mathematical expression. Learners enter the response in plain text, and the system then converts the text to a symbolic expression that learners can see below the response field.
You can use this template as a guide to the simple editor markdown and OLX markup to use for numerical input problems. Edit this component to replace this template with your own assessment.
The system can handle several types of characters, including basic operators, fractions, exponents, and common constants such as "i". You can refer learners to "Entering Mathematical and Scientific Expressions" in the edX Guide for Students for more information.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
When you add the problem, be sure to select Settings to specify a Display Name and other values that apply.
= 100 +-5
You can use the following example problems as models.
>>How many miles away from Earth is the sun? Use scientific notation to answer.<<
= 9.3*10^7
or= 9.296*10^7
>>The square of what number is -100?<<
= 10*i
[explanation]
The sun is 93,000,000, or 9.3*10^7, miles away from Earth.
-100 is the square of 10 times the imaginary number, i.
[explanation]
data: |
<problem>
<p>In a numerical input problem, learners enter numbers or a specific and
relatively simple mathematical expression. Learners enter the response in
plain text, and the system then converts the text to a symbolic expression
that learners can see below the response field.</p>
<p>The system can handle several types of characters, including basic
operators, fractions, exponents, and common constants such as i. You can
refer learners to
<a href="http://edx.readthedocs.io/projects/edx-guide-for-students/en/latest/completing_assignments/SFD_mathformatting.html#math-formatting" target="_blank">Entering Mathematical and Scientific Expressions</a> in the <i>EdX Learner's Guide</i> for information about how to enter text into the field.</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problems as models.</p>
<p>How many miles away from Earth is the sun? Use scientific notation to answer.</p>
<numericalresponse answer="9.3*10^7">
<formulaequationinput label="How many million miles are between Earth and the sun? Use scientific notation to answer." />
</numericalresponse>
<p>The square of what number is -100?</p>
<numericalresponse answer="10*i">
<formulaequationinput label="The square of what number is -100?" />
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The sun is 93,000,000, or 9.3*10^7, miles away from Earth.</p>
<p>-100 is the square of 10 times the imaginary number, i.</p>
</div>
</solution>
</problem>
data: |
<problem>
<numericalresponse answer="100">
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for numerical input problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<responseparam type="tolerance" default="5"/>
<formulaequationinput/>
</numericalresponse>
</problem>

View File

@@ -2,53 +2,28 @@
metadata:
display_name: Numerical Input with Hints and Feedback
markdown: |
You can provide feedback for correct answers in numerical input problems. You cannot provide feedback for incorrect answers.
You can use this template as a guide to the simple editor markdown and OLX markup to use for numerical input with hints and feedback problems. Edit this component to replace this template with your own assessment.
Use feedback for the correct answer to reinforce the process for arriving at the numerical value.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
You can also add hints for learners.
= 100 +-5 {{You can specify optional feedback like this, which appears after this answer is submitted.}}
Be sure to select Settings to specify a Display Name and other values that apply.
Use the following example problem as a model.
>>What is the arithmetic mean for the following set of numbers? (1, 5, 6, 3, 5)<<
= 4 {{The mean for this set of numbers is 20 / 5, which equals 4.}}
||The mean is calculated by summing the set of numbers and dividing by n.||
||n is the count of items in the set.||
[explanation]
The mean is calculated by summing the set of numbers and dividing by n. In this case: (1 + 5 + 6 + 3 + 5) / 5 = 20 / 5 = 4.
[explanation]
||You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.||
||If you add more than one hint, a different hint appears each time learners select the hint button.||
hinted: true
data: |
<problem>
<p>You can provide feedback for correct answers in numerical input problems. You cannot provide feedback for incorrect answers.</p>
<p>Use feedback for the correct answer to reinforce the process for arriving at the numerical value.</p>
<p>Use the following example problem as a model.</p>
<p>What is the arithmetic mean for the following set of numbers? (1, 5, 6, 3, 5)</p>
<numericalresponse answer="4">
<formulaequationinput label="What is the arithmetic mean for the following set of numbers? (1, 5, 6, 3, 5)" />
<correcthint>The mean for this set of numbers is 20 / 5, which equals 4.</correcthint>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The mean is calculated by summing the set of numbers and dividing by n. In this case: (1 + 5 + 6 + 3 + 5) / 5 = 20 / 5 = 4.</p>
</div>
</solution>
<demandhint>
<hint>The mean is calculated by summing the set of numbers and dividing by n.</hint>
<hint>n is the count of items in the set.</hint>
</demandhint>
</problem>
data: |
<problem>
<numericalresponse answer="100">
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for numerical input with hints and feedback problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<responseparam type="tolerance" default="5"/>
<formulaequationinput/>
<correcthint>You can specify optional feedback like this, which appears after this answer is submitted.</correcthint>
</numericalresponse>
<demandhint>
<hint>You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.</hint>
<hint>If you add more than one hint, a different hint appears each time learners select the hint button.</hint>
</demandhint>
</problem>

View File

@@ -2,35 +2,23 @@
metadata:
display_name: Dropdown
markdown: |
Dropdown problems allow learners to select only one option from a list of options.
When you add the problem, be sure to select Settings to specify a Display Name and other values that apply.
You can use the following example problem as a model.
>>Which of the following countries celebrates its independence on August 15?<<
[[(India), Spain, China, Bermuda]]
[explanation]
India became an independent nation on August 15, 1947.
[explanation]
data: |
<problem>
<p>Dropdown problems allow learners to select only one option from a list of options.</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p>You can use the following example problem as a model.</p>
<p>Which of the following countries celebrates its independence on August 15?</p>
<br/>
<optionresponse>
<optioninput options="('India','Spain','China','Bermuda')" correct="India"></optioninput>
</optionresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>India became an independent nation on August 15, 1947.</p>
</div>
</solution>
</problem>
You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown problems. Edit this component to replace this template with your own assessment.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
[[
an incorrect answer
(the correct answer)
an incorrect answer
]]
data: |
<problem>
<optionresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this. </description>
<optioninput>
<option correct="False">an incorrect answer</option>
<option correct="True">the correct answer</option>
<option correct="False">an incorrect answer</option>
</optioninput>
</optionresponse>
</problem>

View File

@@ -2,50 +2,32 @@
metadata:
display_name: Dropdown with Hints and Feedback
markdown: |
You can provide feedback for each available option in a dropdown problem.
You can also add hints for learners.
Be sure to select Settings to specify a Display Name and other values that apply.
Use the following example problem as a model.
>> A/an ________ is a vegetable.<<
You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown with hints and feedback problems. Edit this component to replace this template with your own assessment.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
[[
apple {{An apple is the fertilized ovary that comes from an apple tree and contains seeds, meaning it is a fruit.}}
pumpkin {{A pumpkin is the fertilized ovary of a squash plant and contains seeds, meaning it is a fruit.}}
(potato) {{A potato is an edible part of a plant in tuber form and is a vegetable.}}
tomato {{Many people mistakenly think a tomato is a vegetable. However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.}}
an incorrect answer {{You can specify optional feedback like this, which appears after this answer is submitted.}}
(the correct answer)
an incorrect answer {{You can specify optional feedback for none, a subset, or all of the answers.}}
]]
||A fruit is the fertilized ovary from a flower.||
||A fruit contains seeds of the plant.||
||You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.||
||If you add more than one hint, a different hint appears each time learners select the hint button.||
hinted: true
data: |
<problem>
<p>You can provide feedback for each available option in a dropdown problem.</p>
<p>You can also add hints for learners.</p>
<p>Use the following example problem as a model.</p>
<p> A/an ________ is a vegetable.</p>
<br/>
<optionresponse>
<optioninput>
<option correct="False">apple <optionhint>An apple is the fertilized ovary that comes from an apple tree and contains seeds, meaning it is a fruit.</optionhint></option>
<option correct="False">pumpkin <optionhint>A pumpkin is the fertilized ovary of a squash plant and contains seeds, meaning it is a fruit.</optionhint></option>
<option correct="True">potato <optionhint>A potato is an edible part of a plant in tuber form and is a vegetable.</optionhint></option>
<option correct="False">tomato <optionhint>Many people mistakenly think a tomato is a vegetable. However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.</optionhint></option>
</optioninput>
</optionresponse>
<demandhint>
<hint>A fruit is the fertilized ovary from a flower.</hint>
<hint>A fruit contains seeds of the plant.</hint>
</demandhint>
</problem>
data: |
<problem>
<optionresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown with hints and feedback problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this. </description>
<optioninput>
<option correct="False">an incorrect answer <optionhint>You can specify optional feedback like this, which appears after this answer is submitted.</optionhint></option>
<option correct="True">the correct answer</option>
<option correct="False">an incorrect answer <optionhint>You can specify optional feedback for none, a subset, or all of the answers.</optionhint></option>
</optioninput>
</optionresponse>
<demandhint>
<hint>You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.</hint>
<hint>If you add more than one hint, a different hint appears each time learners select the hint button.</hint>
</demandhint>
</problem>

View File

@@ -4,13 +4,12 @@ metadata:
markdown: !!null
data: |
<problem>
<text>
<p>
<h4>Problem With Adaptive Hint</h4>
</p>
<p>
This problem demonstrates a question with hints, based on using the <tt class="tt">hintfn</tt> method. </p>
<script type="text/python" system_path="python_lib">
<text>
<p><h4>Problem With Adaptive Hint</h4></p>
<p>This problem demonstrates a question with hints, based on using the <tt class="tt">hintfn</tt> method. </p>
<customresponse cfn="test_str" expect="python">
<script type="text/python" system_path="python_lib">
def test_str(expect, ans):
print expect, ans
ans = ans.strip("'")
@@ -36,12 +35,9 @@ data: |
hint = "&lt;font color='blue'&gt;Hint: {0}&lt;/font&gt;".format(hint)
new_cmap.set_hint_and_mode(aid,hint,'always')
</script>
<label>
What is the best programming language that exists today? You may enter your answer in upper or lower case, with or without quotes.
<customresponse cfn="test_str" expect="python">
<textline correct_answer="python"/>
<hintgroup hintfn="hint_fn"/>
</customresponse>
</label>
</text>
<label>What is the best programming language that exists today? You may enter your answer in upper or lower case, with or without quotes.</label>
<textline correct_answer="python"/>
<hintgroup hintfn="hint_fn"/>
</customresponse>
</text>
</problem>

View File

@@ -49,12 +49,11 @@ metadata:
markdown: !!null
data: |
<problem>
<text>
<p>
<h4>Problem With Adaptive Hint</h4>
</p>
<p>
This problem demonstrates a question with hints, based on using the <tt class="tt">hintfn</tt> method. </p>
<text>
<p><h4>Problem With Adaptive Hint</h4></p>
<p>This problem demonstrates a question with hints, based on using the <tt class="tt">hintfn</tt> method.</p>
<customresponse cfn="test_str" expect="python">
<script type="text/python" system_path="python_lib">
def test_str(expect, ans):
print expect, ans
@@ -81,12 +80,9 @@ data: |
hint = "&lt;font color='blue'&gt;Hint: {0}&lt;/font&gt;".format(hint)
new_cmap.set_hint_and_mode(aid,hint,'always')
</script>
<label>
What is the best programming language that exists today? You may enter your answer in upper or lower case, with or without quotes.
<customresponse cfn="test_str" expect="python">
<p>What is the best programming language that exists today? You may enter your answer in upper or lower case, with or without quotes.</p>
<textline correct_answer="python"/>
<hintgroup hintfn="hint_fn"/>
</customresponse>
</label>
</text>
</customresponse>
</text>
</problem>

View File

@@ -2,42 +2,20 @@
metadata:
display_name: Text Input
markdown: |
In text input problems, also known as "fill-in-the-blank" problems, learners enter text into a response field. The text can include letters and characters such as punctuation marks. The text that the learner enters must match your specified answer text exactly. You can specify more than one correct answer. Learners must enter a response that matches one of the correct answers exactly.
When you add the problem, be sure to select Settings to specify a Display Name and other values that apply.
You can use this template as a guide to the simple editor markdown and OLX markup to use for text input problems. Edit this component to replace this template with your own assessment.
You can use the following example problem as a model.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
>>What was the first post-secondary school in China to allow both male and female students?<<
= the correct answer
or= optional acceptable variant of the correct answer
= Nanjing Higher Normal Institute
or= National Central University
or= Nanjing University
[explanation]
Nanjing Higher Normal Institute first admitted female students in 1920.
[explanation]
data: |
<problem>
<p>In text input problems, also known as "fill-in-the-blank" problems,
learners enter text into a response field. The text that the learner enters
must match your specified answer text exactly. You can specify more than
one correct answer. Learners must enter a response that matches one of the
correct answers exactly.</p>
<p>When you add the problem, be sure to select <strong>Settings</strong>
to specify a <strong>Display Name</strong> and other values that apply.</p>
<p> You can use the following example problem as a model.</p>
<p>What was the first post-secondary school in China to allow both male and female students?</p>
<stringresponse answer="Nanjing Higher Normal Institute" type="ci" >
<additional_answer>National Central University</additional_answer>
<additional_answer>Nanjing University</additional_answer>
<textline label="What was the first post-secondary school in China to allow both male and female students?" size="40"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Nanjing Higher Normal Institute first admitted female students in 1920.</p>
</div>
</solution>
</problem>
data: |
<problem>
<stringresponse answer="the correct answer" type="ci">
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for text input problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<additional_answer answer="optional acceptable variant of the correct answer"/>
<textline size="20"/>
</stringresponse>
</problem>

View File

@@ -1,54 +1,33 @@
---
metadata:
display_name: Text Input with Hints and Feedback
markdown: |
markdown: |
You can provide feedback for the correct answer in text input problems, as well as for specific incorrect answers.
You can use this template as a guide to the simple editor markdown and OLX markup to use for text input with hints and feedback problems. Edit this component to replace this template with your own assessment.
Use feedback on expected incorrect answers to address common misconceptions and to provide guidance on how to arrive at the correct answer.
>>Add the question text, or prompt, here. This text is required.||You can add an optional tip or note related to the prompt like this. <<
Be sure to select Settings to specify a Display Name and other values that apply.
= the correct answer {{You can specify optional feedback like this, which appears after this answer is submitted.}}
or= optional acceptable variant of the correct answer
not= optional incorrect answer such as a frequent misconception {{You can specify optional feedback for none, a subset, or all of the answers.}}
Use the following example problem as a model.
>>Which U.S. state has the largest land area?<<
=Alaska {{Alaska is 576,400 square miles, more than double the land area
of the second largest state, Texas.}}
not=Texas {{While many people think Texas is the largest state, it is actually the second largest, with 261,797 square miles.}}
not=California {{California is the third largest state, with 155,959 square miles.}}
||Consider the square miles, not population.||
||Consider all 50 states, not just the continental United States.||
||You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.||
||If you add more than one hint, a different hint appears each time learners select the hint button.||
hinted: true
data: |
<problem>
<p>You can provide feedback for the correct answer in text input problems, as well as for specific incorrect answers.</p>
<p>Use feedback on expected incorrect answers to address common misconceptions and to provide guidance on how to arrive at the correct answer.</p>
<p>Use the following example problem as a model.</p>
<p>Which U.S. state has the largest land area?</p>
<stringresponse answer="Alaska" type="ci" >
<correcthint>Alaska is 576,400 square miles, more than double the land area of the second largest state, Texas.</correcthint>
<stringequalhint answer="Texas">While many people think Texas is the largest state, it is actually the second largest, with 261,797 square miles.</stringequalhint>
<stringequalhint answer="California">California is the third largest state, with 155,959 square miles.</stringequalhint>
<textline label="Which U.S. state has the largest land area?" size="20"/>
</stringresponse>
<demandhint>
<hint>Consider the square miles, not population.</hint>
<hint>Consider all 50 states, not just the continental United States.</hint>
</demandhint>
</problem>
data: |
<problem>
<stringresponse answer="the correct answer" type="ci">
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for text input with hints and feedback problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this.</description>
<correcthint>You can specify optional feedback like this, which appears after this answer is submitted.</correcthint>
<additional_answer answer="optional acceptable variant of the correct answer"/>
<stringequalhint answer="optional incorrect answer such as a frequent misconception">You can specify optional feedback for none, a subset, or all of the answers.</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>You can add an optional hint like this. Problems that have a hint include a hint button, and this text appears the first time learners select the button.</hint>
<hint>If you add more than one hint, a different hint appears each time learners select the hint button.</hint>
</demandhint>
</problem>

View File

@@ -1762,7 +1762,7 @@ class CapaDescriptorTest(unittest.TestCase):
<p>The following languages are in the Indo-European family:</p>
<choiceresponse>
<checkboxgroup label="The following languages are in the Indo-European family:">
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
<choice correct="true">Marathi</choice>
@@ -1797,7 +1797,7 @@ class CapaDescriptorTest(unittest.TestCase):
<optionresponse>
<optioninput label="lbl" options="('India','Spain','China','Bermuda')" correct="India"></optioninput>
<optioninput options="('India','Spain','China','Bermuda')" correct="India"></optioninput>
</optionresponse>
<solution>
@@ -1822,7 +1822,7 @@ class CapaDescriptorTest(unittest.TestCase):
<p>Which of the following countries has the largest population?</p>
<multiplechoiceresponse>
<choicegroup label="Which of the following countries has the largest population?" type="MultipleChoice">
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil
<choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint>
</choice>
@@ -1866,14 +1866,13 @@ class CapaDescriptorTest(unittest.TestCase):
<p>How many miles away from Earth is the sun? Use scientific notation to answer.</p>
<numericalresponse answer="9.3*10^7">
<formulaequationinput label="How many miles away from Earth is the sun?
Use scientific notation to answer." />
<formulaequationinput/>
</numericalresponse>
<p>The square of what number is -100?</p>
<numericalresponse answer="10*i">
<formulaequationinput label="The square of what number is -100?" />
<formulaequationinput/>
</numericalresponse>
<solution>
@@ -1906,8 +1905,7 @@ class CapaDescriptorTest(unittest.TestCase):
<stringresponse answer="Nanjing Higher Normal Institute" type="ci" >
<additional_answer answer="National Central University"></additional_answer>
<additional_answer answer="Nanjing University"></additional_answer>
<textline label="What was the first post-secondary school in China to allow both male and female
students?" size="20"/>
<textline size="20"/>
</stringresponse>
<solution>
@@ -1939,7 +1937,7 @@ class CapaDescriptorTest(unittest.TestCase):
<p>Which of the following is a fruit? Check all that apply.</p>
<choiceresponse>
<checkboxgroup label="Which of the following is a fruit? Check all that apply.">
<checkboxgroup>
<choice correct="true">apple
<choicehint selected="true">You are correct that an apple is a fruit because it is the fertilized
ovary that comes from an apple tree and contains seeds.</choicehint>
@@ -1987,7 +1985,7 @@ class CapaDescriptorTest(unittest.TestCase):
<p> A/an ________ is a vegetable.</p>
<optionresponse>
<optioninput label=" A/an ________ is a vegetable.">
<optioninput>
<option correct="False">apple <optionhint>An apple is the fertilized ovary that comes from an apple
tree and contains seeds, meaning it is a fruit.</optionhint></option>
<option correct="False">pumpkin <optionhint>A pumpkin is the fertilized ovary of a squash plant and
@@ -2019,7 +2017,7 @@ class CapaDescriptorTest(unittest.TestCase):
<p>Which of the following is a vegetable?</p>
<multiplechoiceresponse>
<choicegroup label="Which of the following is a vegetable?" type="MultipleChoice">
<choicegroup type="MultipleChoice">
<choice correct="false">apple <choicehint>An apple is the fertilized ovary that comes from an apple
tree and contains seeds, meaning it is a fruit.</choicehint></choice>
<choice correct="false">pumpkin <choicehint>A pumpkin is the fertilized ovary of a squash plant and
@@ -2056,8 +2054,7 @@ class CapaDescriptorTest(unittest.TestCase):
<p>What is the arithmetic mean for the following set of numbers? (1, 5, 6, 3, 5)</p>
<numericalresponse answer="4">
<formulaequationinput label="What is the arithmetic mean for the following set of numbers?
(1, 5, 6, 3, 5)" />
<formulaequationinput/>
<correcthint>The mean for this set of numbers is 20 / 5, which equals 4.</correcthint>
</numericalresponse>
<solution>
@@ -2098,7 +2095,7 @@ class CapaDescriptorTest(unittest.TestCase):
second largest, with 261,797 square miles.</stringequalhint>
<stringequalhint answer="California">California is the third largest state, with 155,959 square miles.
</stringequalhint>
<textline label="Which U.S. state has the largest land area?" size="20"/>
<textline size="20"/>
</stringresponse>
<demandhint>
@@ -2175,7 +2172,7 @@ class CapaDescriptorTest(unittest.TestCase):
</choicegroup>
</multiplechoiceresponse>
<optionresponse>
<optioninput label="Option" options="('1','2')" correct="2"></optioninput>
<optioninput options="('1','2')" correct="2"></optioninput>
</optionresponse>
</problem>
""")
@@ -2557,12 +2554,13 @@ class TestProblemCheckTracking(unittest.TestCase):
def test_choice_answer_text(self):
xml = """\
<problem display_name="Multiple Choice Questions">
<p>What color is the open ocean on a sunny day?</p>
<optionresponse>
<optioninput options="('yellow','blue','green')" correct="blue" label="What color is the open ocean on a sunny day?"/>
<label>What color is the open ocean on a sunny day?</label>
<optioninput options="('yellow','blue','green')" correct="blue"/>
</optionresponse>
<p>Which piece of furniture is built for sitting?</p>
<multiplechoiceresponse>
<label>Which piece of furniture is built for sitting?</label>
<choicegroup type="MultipleChoice">
<choice correct="false"><text>a table</text></choice>
<choice correct="false"><text>a desk</text></choice>
@@ -2570,9 +2568,10 @@ class TestProblemCheckTracking(unittest.TestCase):
<choice correct="false"><text>a bookshelf</text></choice>
</choicegroup>
</multiplechoiceresponse>
<p>Which of the following are musical instruments?</p>
<choiceresponse>
<checkboxgroup label="Which of the following are musical instruments?">
<label>Which of the following are musical instruments?</label>
<checkboxgroup>
<choice correct="true">a piano</choice>
<choice correct="false">a tree</choice>
<choice correct="true">a guitar</choice>
@@ -2604,7 +2603,7 @@ class TestProblemCheckTracking(unittest.TestCase):
'variant': '',
},
factory.answer_key(3): {
'question': '',
'question': 'Which piece of furniture is built for sitting?',
'answer': u'<text>a table</text>',
'response_type': 'multiplechoiceresponse',
'input_type': 'choicegroup',
@@ -2662,19 +2661,19 @@ class TestProblemCheckTracking(unittest.TestCase):
})
def test_multiple_inputs(self):
group_label = 'Choose the correct color'
input1_label = 'What color is the sky?'
input2_label = 'What color are pine needles?'
factory = self.capa_factory_for_problem_xml("""\
<problem display_name="Multiple Inputs">
<p>Choose the correct color</p>
<optionresponse>
<p>What color is the sky?</p>
<optioninput options="('yellow','blue','green')" correct="blue"/>
<p>What color are pine needles?</p>
<optioninput options="('yellow','blue','green')" correct="green"/>
<label>{}</label>
<optioninput options="('yellow','blue','green')" correct="blue" label="{}"/>
<optioninput options="('yellow','blue','green')" correct="green" label="{}"/>
</optionresponse>
</problem>
""")
""".format(group_label, input1_label, input2_label))
module = factory.create()
answer_input_dict = {
factory.input_key(2, 1): 'blue',
factory.input_key(2, 2): 'yellow',
@@ -2683,7 +2682,8 @@ class TestProblemCheckTracking(unittest.TestCase):
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2, 1): {
'question': '',
'group_label': group_label,
'question': input1_label,
'answer': 'blue',
'response_type': 'optionresponse',
'input_type': 'optioninput',
@@ -2691,7 +2691,8 @@ class TestProblemCheckTracking(unittest.TestCase):
'variant': '',
},
factory.answer_key(2, 2): {
'question': '',
'group_label': group_label,
'question': input2_label,
'answer': 'yellow',
'response_type': 'optionresponse',
'input_type': 'optioninput',
@@ -2702,11 +2703,14 @@ class TestProblemCheckTracking(unittest.TestCase):
def test_optioninput_extended_xml(self):
"""Test the new XML form of writing with <option> tag instead of options= attribute."""
group_label = 'Are you the Gatekeeper?'
input1_label = 'input 1 label'
input2_label = 'input 2 label'
factory = self.capa_factory_for_problem_xml("""\
<problem display_name="Woo Hoo">
<p>Are you the Gatekeeper?</p>
<optionresponse>
<optioninput>
<label>{}</label>
<optioninput label="{}">
<option correct="True" label="Good Job">
apple
<optionhint>
@@ -2721,7 +2725,7 @@ class TestProblemCheckTracking(unittest.TestCase):
</option>
</optioninput>
<optioninput>
<optioninput label="{}">
<option correct="True">
apple
<optionhint>
@@ -2737,7 +2741,7 @@ class TestProblemCheckTracking(unittest.TestCase):
</optioninput>
</optionresponse>
</problem>
""")
""".format(group_label, input1_label, input2_label))
module = factory.create()
answer_input_dict = {
@@ -2748,7 +2752,8 @@ class TestProblemCheckTracking(unittest.TestCase):
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2, 1): {
'question': '',
'group_label': group_label,
'question': input1_label,
'answer': 'apple',
'response_type': 'optionresponse',
'input_type': 'optioninput',
@@ -2756,7 +2761,8 @@ class TestProblemCheckTracking(unittest.TestCase):
'variant': '',
},
factory.answer_key(2, 2): {
'question': '',
'group_label': group_label,
'question': input2_label,
'answer': 'cucumber',
'response_type': 'optionresponse',
'input_type': 'optioninput',

View File

@@ -86,6 +86,15 @@
};
}
};
},
toXMLEqual: function() {
return {
compare: function(actual, expected) {
return {
pass: actual.replace(/\s+/g, '') === expected.replace(/\s+/g, '')
};
}
};
}
});
});

View File

@@ -0,0 +1,106 @@
/**
* pretty-data - nodejs plugin to pretty-print or minify data in XML, JSON and CSS formats.
*
* Version - 0.40.0
* Copyright (c) 2012 Vadim Kiryukhin
* vkiryukhin @ gmail.com
* http://www.eslinstructor.net/pretty-data/
*
*
* Code extracted for xml formatting only
*/
/* eslint-disable */
(function (root, factory){
if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define([], function (){
return (root.PrettyPrint = factory());
});
} else {
// Browser globals
root.PrettyPrint = factory();
}
}(this, function () {
function PrettyPrint(){
var maxdeep = 100, // nesting level
ix = 0;
this.shift = ['\n']; // array of shifts
this.step = ' '; // 2 spaces
// initialize array with shifts //
for (ix = 0; ix < maxdeep; ix++) {
this.shift.push(this.shift[ix] + this.step);
}
}
PrettyPrint.prototype.xml = function (text) {
var ar = text.replace(/>\s{0,}</g, "><")
.replace(/</g, "~::~<")
.replace(/xmlns\:/g, "~::~xmlns:")
.replace(/xmlns\=/g, "~::~xmlns=")
.split('~::~'),
len = ar.length,
inComment = false,
deep = 0,
str = '',
ix = 0;
for (ix = 0; ix < len; ix++) {
// start comment or <![CDATA[...]]> or <!DOCTYPE //
if (ar[ix].search(/<!/) > -1) {
str += this.shift[deep] + ar[ix];
inComment = true;
// end comment or <![CDATA[...]]> //
if (ar[ix].search(/-->/) > -1 || ar[ix].search(/\]>/) > -1 || ar[ix].search(/!DOCTYPE/) > -1) {
inComment = false;
}
} else
// end comment or <![CDATA[...]]> //
if (ar[ix].search(/-->/) > -1 || ar[ix].search(/\]>/) > -1) {
str += ar[ix];
inComment = false;
} else
// <elm></elm> //
if (/^<\w/.exec(ar[ix - 1]) && /^<\/\w/.exec(ar[ix]) &&
/^<[\w:\-\.\,]+/.exec(ar[ix - 1]) == /^<\/[\w:\-\.\,]+/.exec(ar[ix])[0].replace('/', '')) {
str += ar[ix];
if (!inComment) deep--;
} else
// <elm> //
if (ar[ix].search(/<\w/) > -1 && ar[ix].search(/<\//) == -1 && ar[ix].search(/\/>/) == -1) {
str = !inComment ? str += this.shift[deep++] + ar[ix] : str += ar[ix];
} else
// <elm>...</elm> //
if (ar[ix].search(/<\w/) > -1 && ar[ix].search(/<\//) > -1) {
str = !inComment ? str += this.shift[deep] + ar[ix] : str += ar[ix];
} else
// </elm> //
if (ar[ix].search(/<\//) > -1) {
str = !inComment ? str += this.shift[--deep] + ar[ix] : str += ar[ix];
} else
// <elm/> //
if (ar[ix].search(/\/>/) > -1) {
str = !inComment ? str += this.shift[deep] + ar[ix] : str += ar[ix];
} else
// <? xml ... ?> //
if (ar[ix].search(/<\?/) > -1) {
str += this.shift[deep] + ar[ix];
} else
// xmlns //
if (ar[ix].search(/xmlns\:/) > -1 || ar[ix].search(/xmlns\=/) > -1) {
str += this.shift[deep] + ar[ix];
}
else {
str += ar[ix];
}
}
return (str[0] == '\n') ? str.slice(1) : str;
};
return new PrettyPrint();
}));

View File

@@ -0,0 +1,20 @@
describe('XML Formatting Lib', function() {
'use strict';
it('correctly format the xml', function() {
var rawXml = '<breakfast><food><name>Belgian Waffles</name><price>$5.95</price></food></breakfast>',
expectedXml = '<breakfast>\n <food>\n <name>Belgian Waffles</name>' +
'\n <price>$5.95</price>\n </food>\n</breakfast>';
expect(window.PrettyPrint.xml(rawXml)).toEqual(expectedXml);
});
it('correctly handles the whitespaces and newlines', function() {
var rawXml = '<breakfast> <food> <name>Belgian Waffles</name>' +
'\n\n\n<price>$5.95</price></food> </breakfast>',
expectedXml = '<breakfast>\n <food>\n <name>Belgian Waffles</name>' +
'\n <price>$5.95</price>\n </food>\n</breakfast>';
expect(window.PrettyPrint.xml(rawXml)).toEqual(expectedXml);
});
});

View File

@@ -28,6 +28,7 @@ var options = {
{pattern: 'js/vendor/URI.min.js', included: true},
{pattern: 'js/test/add_ajax_prefix.js', included: true},
{pattern: 'js/test/i18n.js', included: true},
{pattern: 'js/lib/pretty-print.js', included: true},
{pattern: 'common/js/vendor/underscore.js', included: true},
{pattern: 'common/js/vendor/underscore.string.js', included: true},

View File

@@ -104,7 +104,7 @@ class ProblemPage(PageObject):
"""
Fill in the answer to a numerical problem.
"""
self.q(css='div.problem section.inputtype input').fill(text)
self.q(css='div.problem div.inputtype input').fill(text)
self.wait_for_element_invisibility('.loading', 'wait for loading icon to disappear')
self.wait_for_ajax()
@@ -129,11 +129,16 @@ class ProblemPage(PageObject):
self.q(css='div.problem button.reset').click()
self.wait_for_ajax()
def click_show_hide_button(self):
""" Click the Show/Hide button. """
self.q(css='div.problem div.action .show').click()
self.wait_for_ajax()
def wait_for_status_icon(self):
"""
wait for status icon
"""
self.wait_for_element_visibility('div.problem section.inputtype div .status', 'wait for status icon')
self.wait_for_element_visibility('div.problem div.inputtype div .status', 'wait for status icon')
def wait_for_expected_status(self, status_selector, message):
"""
@@ -170,19 +175,19 @@ class ProblemPage(PageObject):
"""
Is there a "correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.correct span.status").is_present()
return self.q(css="div.problem div.inputtype div.correct span.status").is_present()
def simpleprob_is_partially_correct(self):
"""
Is there a "partially correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.partially-correct span.status").is_present()
return self.q(css="div.problem div.inputtype div.partially-correct span.status").is_present()
def simpleprob_is_incorrect(self):
"""
Is there an "incorrect" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.incorrect span.status").is_present()
return self.q(css="div.problem div.inputtype div.incorrect span.status").is_present()
def click_clarification(self, index=0):
"""
@@ -199,3 +204,34 @@ class ProblemPage(PageObject):
"""
self.wait_for_element_visibility('body > .tooltip', 'A tooltip is visible.')
return self.q(css='body > .tooltip').text[0]
def is_solution_tag_present(self):
"""
Check if solution/explanation is shown.
"""
solution_selector = '.solution-span div.detailed-solution'
return self.q(css=solution_selector).is_present()
def is_correct_choice_highlighted(self, correct_choices):
"""
Check if correct answer/choice highlighted for choice group.
"""
xpath = '//fieldset/div[contains(@class, "field")][{0}]/label[contains(@class, "choicegroup_correct")]'
for choice in correct_choices:
if not self.q(xpath=xpath.format(choice)).is_present():
return False
return True
@property
def problem_question(self):
"""
Return the question text of the problem.
"""
return self.q(css="div.problem .wrapper-problem-response legend").text[0]
@property
def problem_question_descriptions(self):
"""
Return a list of question descriptions of the problem.
"""
return self.q(css="div.problem .wrapper-problem-response .question-description").text

View File

@@ -218,11 +218,11 @@ class CertificateProgressPageTest(UniqueCourseTest):
self.course_nav.q(css='select option[value="{}"]'.format('blue')).first.click()
# Select correct radio button for the answer
self.course_nav.q(css='fieldset label:nth-child(3) input').nth(0).click()
self.course_nav.q(css='fieldset div.field:nth-child(3) input').nth(0).click()
# Select correct radio buttons for the answer
self.course_nav.q(css='fieldset label:nth-child(1) input').nth(1).click()
self.course_nav.q(css='fieldset label:nth-child(3) input').nth(1).click()
self.course_nav.q(css='fieldset div.field:nth-child(1) input').nth(1).click()
self.course_nav.q(css='fieldset div.field:nth-child(3) input').nth(1).click()
# Submit the answer
self.course_nav.q(css='button.check.Check').click()

View File

@@ -71,9 +71,9 @@ class EntranceExamPassTest(EntranceExamTest):
"""
xml = dedent("""
<problem>
<p>What is height of eiffel tower without the antenna?.</p>
<multiplechoiceresponse>
<choicegroup label="What is height of eiffel tower without the antenna?" type="MultipleChoice">
<label>What is height of eiffel tower without the antenna?.</label>
<choicegroup type="MultipleChoice">
<choice correct="false">324 meters<choicehint>Antenna is 24 meters high</choicehint></choice>
<choice correct="true">300 meters</choice>
<choice correct="false">224 meters</choice>

View File

@@ -4,6 +4,7 @@ Bok choy acceptance tests for problems in the LMS
See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from nose.plugins.attrib import attr
from textwrap import dedent
from common.test.acceptance.tests.helpers import UniqueCourseTest
@@ -51,7 +52,7 @@ class ProblemsTest(UniqueCourseTest):
email=self.email,
password=self.password,
course_id=self.course_id,
staff=False
staff=True
).visit()
def get_problem(self):
@@ -77,7 +78,8 @@ class ProblemClarificationTest(ProblemsTest):
<clarification>Return on Investment <strong>(per year)</strong></clarification> over 20 years.
</p>
<numericalresponse answer="6.5">
<textline label="Enter the annual ROI" trailing_text="%" />
<label>Enter the annual ROI</label>
<textline trailing_text="%" />
</numericalresponse>
</text>
</problem>
@@ -263,7 +265,8 @@ class ProblemWithMathjax(ProblemsTest):
<problem>
<p>Check mathjax has rendered [mathjax]E=mc^2[/mathjax]</p>
<multiplechoiceresponse>
<choicegroup label="Answer this?" type="MultipleChoice">
<label>Answer this?</label>
<choicegroup type="MultipleChoice">
<choice correct="true">Choice1 <choicehint>Correct choice message</choicehint></choice>
<choice correct="false">Choice2<choicehint>Wrong choice message</choicehint></choice>
</choicegroup>
@@ -310,7 +313,8 @@ class ProblemPartialCredit(ProblemsTest):
<problem>
<p>The answer is 1. Partial credit for -1.</p>
<numericalresponse answer="1" partial_credit="list">
<formulaequationinput label="How many miles away from Earth is the sun? Use scientific notation to answer." />
<label>How many miles away from Earth is the sun? Use scientific notation to answer.</label>
<formulaequationinput/>
<responseparam type="tolerance" default="0.01" />
<responseparam partial_answers="-1" />
</numericalresponse>
@@ -343,9 +347,9 @@ class LogoutDuringAnswering(ProblemsTest):
"""
xml = dedent("""
<problem>
<p>The answer is 1</p>
<numericalresponse answer="1">
<formulaequationinput label="where are the songs of spring?" />
<label>The answer is 1</label>
<formulaequationinput/>
<responseparam type="tolerance" default="0.01" />
</numericalresponse>
</problem>
@@ -412,3 +416,187 @@ class LogoutDuringAnswering(ProblemsTest):
self.assertTrue(problem_page.is_browser_on_page())
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
class ProblemQuestionDescriptionTest(ProblemsTest):
"""TestCase Class to verify question and description rendering."""
descriptions = [
"A vegetable is an edible part of a plant in tuber form.",
"A fruit is a fertilized ovary of a plant and contains seeds."
]
def get_problem(self):
"""
Create a problem with question and description.
"""
xml = dedent("""
<problem>
<choiceresponse>
<label>Eggplant is a _____?</label>
<description>{}</description>
<description>{}</description>
<checkboxgroup>
<choice correct="true">vegetable</choice>
<choice correct="false">fruit</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(*self.descriptions))
return XBlockFixtureDesc('problem', 'Label with Description', data=xml)
def test_question_with_description(self):
"""
Scenario: Test that question and description are rendered as expected.
Given I am enrolled in a course.
When I visit a unit page with a CAPA question.
Then label and description should be rendered correctly.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
problem_page.wait_for_element_visibility(problem_page.CSS_PROBLEM_HEADER, 'wait for problem header')
self.assertEqual(problem_page.problem_name, 'Label with Description')
self.assertEqual(problem_page.problem_question, 'Eggplant is a _____?')
self.assertEqual(problem_page.problem_question_descriptions, self.descriptions)
class CAPAProblemA11yBaseTestMixin(object):
"""Base TestCase Class to verify CAPA problem accessibility."""
def test_a11y(self):
"""
Verifies that there are no accessibility issues for a particular problem type
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Set the scope to the problem question
problem_page.a11y_audit.config.set_scope(
include=['section.wrapper-problem-response']
)
# Run the accessibility audit.
problem_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class CAPAProblemChoiceA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for checkboxes and multiplechoice CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<choiceresponse>
<label>question 1 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<checkboxgroup>
<choice correct="true">True</choice>
<choice correct="false">False</choice>
</checkboxgroup>
</choiceresponse>
<multiplechoiceresponse>
<label>question 2 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<choicegroup type="MultipleChoice">
<choice correct="false">Alpha <choicehint>A hint</choicehint></choice>
<choice correct="true">Beta</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemTextInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify TextInput problem accessibility."""
def get_problem(self):
"""
TextInput problem XML.
"""
xml = dedent("""
<problem>
<stringresponse answer="fight" type="ci">
<label>who wishes to _____ must first count the cost.</label>
<description>Appear weak when you are strong, and strong when you are weak.</description>
<description>In the midst of chaos, there is also opportunity.</description>
<textline size="40"/>
</stringresponse>
<stringresponse answer="force" type="ci">
<label>A leader leads by example not by _____.</label>
<description>The supreme art of war is to subdue the enemy without fighting.</description>
<description>Great results, can be achieved with small forces.</description>
<textline size="40"/>
</stringresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'TEXTINPUT PROBLEM', data=xml)
@attr('a11y')
class CAPAProblemDropDownA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for dropdowns(optioninput) CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<optionresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for
dropdown problems. Edit this component to replace this template with your own assessment.</p>
<label>Which of the following is a fruit</label>
<description>Choose wisely</description>
<optioninput>
<option correct="False">radish</option>
<option correct="True">appple</option>
<option correct="False">carrot</option>
</optioninput>
</optionresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemNumericalInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests NumericalInput accessibility."""
def get_problem(self):
"""NumericalInput problem XML."""
xml = dedent("""
<problem>
<numericalresponse answer="10*i">
<label>The square of what number is -100?</label>
<description>Use scientific notation to answer.</description>
<formulaequationinput/>
</numericalresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'NUMERICALINPUT PROBLEM', data=xml)
@attr('a11y')
class ProblemMathExpressionInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests MathExpressionInput accessibility."""
def get_problem(self):
"""MathExpressionInput problem XML."""
xml = dedent(r"""
<problem>
<script type="loncapa/python">
derivative = "n*x^(n-1)"
</script>
<formularesponse type="ci" samples="x,n@1,2:3,4#10" answer="$derivative">
<label>Let \( x\) be a variable, and let \( n\) be an arbitrary constant. What is the derivative of \( x^n\)?</label>
<description>Enter the equation</description>
<responseparam type="tolerance" default="0.00001"/>
<formulaequationinput size="40"/>
</formularesponse>
</problem>""")
return XBlockFixtureDesc('problem', 'MATHEXPRESSIONINPUT PROBLEM', data=xml)

View File

@@ -324,7 +324,8 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
'question_text': 'The correct answer is Choice 0 and Choice 2',
'choice_type': 'checkbox',
'choices': [True, False, True, False],
'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3']
'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
'explanation_text': 'This is explanation text'
}
def setUp(self, *args, **kwargs):
@@ -332,15 +333,6 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for CheckboxProblemTypeTest
"""
super(CheckboxProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-allowed-attr', # TODO: AC-251
'aria-valid-attr', # TODO: AC-251
'aria-roles', # TODO: AC-251
'checkboxgroup', # TODO: AC-251
]
})
def answer_problem(self, correct):
"""
@@ -352,6 +344,30 @@ class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
else:
self.problem_page.click_choice("choice_1")
@attr('shard_7')
def test_can_show_hide_answer(self):
"""
Scenario: Verifies that show/hide answer button is working as expected.
Given that I am on courseware page
And I can see a CAPA problem with show answer button
When I click "Show Answer" button
Then I should see "Hide Answer" text on button
And I should see question's solution
And I should see correct choices highlighted
When I click "Hide Answer" button
Then I should see "Show Answer" text on button
And I should not see question's solution
And I should not see correct choices highlighted
"""
self.problem_page.click_show_hide_button()
self.assertTrue(self.problem_page.is_solution_tag_present())
self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3]))
self.problem_page.click_show_hide_button()
self.assertFalse(self.problem_page.is_solution_tag_present())
self.assertFalse(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3]))
class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
@@ -378,13 +394,6 @@ class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for MultipleChoiceProblemTypeTest
"""
super(MultipleChoiceProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-valid-attr', # TODO: AC-251
'radiogroup', # TODO: AC-251
]
})
def answer_problem(self, correct):
"""
@@ -422,13 +431,6 @@ class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for RadioProblemTypeTest
"""
super(RadioProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-valid-attr', # TODO: AC-292
'radiogroup', # TODO: AC-292
]
})
def answer_problem(self, correct):
"""
@@ -460,12 +462,6 @@ class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for DropDownProblemTypeTest
"""
super(DropDownProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-291
]
})
def answer_problem(self, correct):
"""
@@ -503,12 +499,6 @@ class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for StringProblemTypeTest
"""
super(StringProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-290
]
})
def answer_problem(self, correct):
"""
@@ -545,12 +535,6 @@ class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for NumericalProblemTypeTest
"""
super(NumericalProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-289
]
})
def answer_problem(self, correct):
"""
@@ -589,12 +573,6 @@ class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for FormulaProblemTypeTest
"""
super(FormulaProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-288
]
})
def answer_problem(self, correct):
"""
@@ -614,10 +592,10 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
factory = CustomResponseXMLFactory()
factory_kwargs = {
'question_text': 'Enter two integers that sum to 10.',
'cfn': 'test_add_to_ten',
'expect': '10',
'num_inputs': 2,
'group_label': 'Enter two integers that sum to 10.',
'script': textwrap.dedent("""
def test_add_to_ten(expect,ans):
try:
@@ -640,12 +618,6 @@ class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
Additional setup for ScriptProblemTypeTest
"""
super(ScriptProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-287
]
})
def answer_problem(self, correct):
"""
@@ -798,13 +770,6 @@ class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMix
Additional setup for RadioTextProblemTypeTest
"""
super(RadioTextProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-285
'radiogroup', # TODO: AC-285
]
})
class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin):
@@ -831,13 +796,6 @@ class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTest
Additional setup for CheckboxTextProblemTypeTest
"""
super(CheckboxTextProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-284
'checkboxgroup', # TODO: AC-284
]
})
class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
@@ -885,9 +843,9 @@ class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
}
status_indicators = {
'correct': ['span div.correct'],
'incorrect': ['span div.incorrect'],
'unanswered': ['span div.unanswered'],
'correct': ['div.capa_inputtype div.correct'],
'incorrect': ['div.capa_inputtype div.incorrect'],
'unanswered': ['div.capa_inputtype div.unanswered'],
}
def setUp(self, *args, **kwargs):

View File

@@ -31,7 +31,7 @@ var options = {
{pattern: 'common/js/xblock/*.js', included: true},
{pattern: 'xmodule_js/common_static/js/src/logger.js', included: true},
{pattern: 'xmodule_js/common_static/js/test/i18n.js', included: true},
{pattern: 'xmodule_js/common_static/js/vendor/CodeMirror/codemirror.js', included: true},
{pattern: 'xmodule_js/common_static/js/vendor/codemirror-compressed.js', included: true},
{pattern: 'xmodule_js/common_static/js/vendor/jquery.cookie.js', included: true},
{pattern: 'xmodule_js/common_static/js/vendor/flot/jquery.flot.js', included: true},
{pattern: 'xmodule_js/common_static/coffee/src/jquery.immediateDescendents.js', included: true},

View File

@@ -12,9 +12,7 @@ from openedx.core.djangolib.markup import HTML
<div class="problem-progress"></div>
<div class="problem">
<div aria-live="polite">
${ HTML(problem['html']) }
</div>
${ HTML(problem['html']) }
<div class="action">
<input type="hidden" name="problem_id" value="${ problem['name'] }" />
% if demand_hint_possible: