Converted the `annotationinput` tests to the new response-type testing format.
This commit is contained in:
@@ -666,3 +666,39 @@ class StringResponseXMLFactory(ResponseXMLFactory):
|
||||
|
||||
def create_input_element(self, **kwargs):
    """Build the <textline> input element used by string responses."""
    input_xml = ResponseXMLFactory.textline_input_xml(**kwargs)
    return input_xml
|
||||
class AnnotationResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <annotationresponse> XML trees """

    def create_response_element(self, **kwargs):
        """ Create an (empty) <annotationresponse> element """
        return etree.Element("annotationresponse")

    def create_input_element(self, **kwargs):
        """ Create an <annotationinput> element.

        Recognized kwargs (all optional, with demo defaults):
            title: title text of the annotation
            text: body text of the annotation
            comment: instructor commentary text
            comment_prompt: prompt shown above the comment box
            tag_prompt: prompt shown above the tag options
            options: list of (description, correctness) tuples, where
                correctness is one of 'correct', 'incorrect', or
                'partially-correct'
        """
        title = kwargs.get('title', 'super cool annotation')
        text = kwargs.get('text', 'texty text')
        comment = kwargs.get('comment', 'blah blah erudite comment blah blah')
        comment_prompt = kwargs.get('comment_prompt', 'type a commentary below')
        tag_prompt = kwargs.get('tag_prompt', 'select one tag')
        options = kwargs.get('options', [
            ('green', 'correct'),
            ('eggs', 'incorrect'),
            ('ham', 'partially-correct')
        ])

        # Create the <annotationinput> element
        input_element = etree.Element("annotationinput")

        # BUG FIX: the values pulled out of kwargs above were previously
        # never used, so every child element was created with empty text.
        # Attach each value to its corresponding child.
        etree.SubElement(input_element, 'title').text = title
        etree.SubElement(input_element, 'text').text = text
        etree.SubElement(input_element, 'comment').text = comment
        etree.SubElement(input_element, 'comment_prompt').text = comment_prompt
        etree.SubElement(input_element, 'tag_prompt').text = tag_prompt

        # One <option choice="..."> child per (description, correctness) pair
        options_element = etree.SubElement(input_element, 'options')
        for (description, correctness) in options:
            option_element = etree.SubElement(options_element, 'option',
                                              {'choice': correctness})
            option_element.text = description

        return input_element
|
||||
@@ -1,17 +0,0 @@
|
||||
<!-- Sample <annotationresponse> problem fixture: a single annotation with a
     title, text, comment, prompts, and three tag options covering each
     correctness value ('correct', 'incorrect', 'partially-correct'). -->
<problem display_name="Exercise 1">
    <annotationresponse>
        <annotationinput>
            <title>the title</title>
            <text>the text</text>
            <comment>the comment</comment>
            <comment_prompt>Type a commentary below:</comment_prompt>
            <tag_prompt>Select one or more tags:</tag_prompt>
            <options>
                <option choice="correct">green</option>
                <option choice="incorrect">eggs</option>
                <option choice="partially-correct">ham</option>
            </options>
        </annotationinput>
    </annotationresponse>
    <solution>Instructor text here...</solution>
</problem>
||||
@@ -772,3 +772,40 @@ class SchematicResponseTest(ResponseTest):
|
||||
# (That is, our script verifies that the context
|
||||
# is what we expect)
|
||||
self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
|
||||
|
||||
class AnnotationResponseTest(ResponseTest):
    """Grading tests for <annotationresponse> problems."""
    from response_xml_factory import AnnotationResponseXMLFactory
    xml_factory_class = AnnotationResponseXMLFactory

    def test_grade(self):
        """Grade a range of student answer payloads, including malformed ones.

        Correctness/points expectations: selecting the 'correct' tag earns
        2 points, 'partially-correct' earns 1, and anything else (wrong tag,
        multiple tags, empty, or malformed JSON) earns 0 and is 'incorrect'.
        """
        (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect')

        answer_id = '1_2_1'
        options = (('x', correct), ('y', partially), ('z', incorrect))

        # FIX: named function instead of a lambda bound to a name (PEP 8 E731).
        def make_answer(option_ids):
            """Build the raw answer dict a student submission would produce."""
            return {answer_id: json.dumps({'options': option_ids})}

        tests = [
            {'correctness': correct, 'points': 2, 'answers': make_answer([0])},
            {'correctness': partially, 'points': 1, 'answers': make_answer([1])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer('')},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer(None)},
            {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
        ]

        # FIX: the enumerate() index was bound but never used; iterate directly.
        for test in tests:
            expected_correctness = test['correctness']
            expected_points = test['points']
            answers = test['answers']

            # Build a fresh problem per case so grading state cannot leak
            # between iterations.
            problem = self.build_problem(options=options)
            correct_map = problem.grade_answers(answers)
            actual_correctness = correct_map.get_correctness(answer_id)
            actual_points = correct_map.get_npoints(answer_id)

            self.assertEqual(expected_correctness, actual_correctness,
                             msg="%s should be marked %s" % (answer_id, expected_correctness))
            self.assertEqual(expected_points, actual_points,
                             msg="%s should have %d points" % (answer_id, expected_points))
Reference in New Issue
Block a user