class AnnotationResponseXMLFactory(ResponseXMLFactory):
    """Factory for creating <annotationresponse> XML trees."""

    def create_response_element(self, **kwargs):
        """Create an <annotationresponse> element."""
        return etree.Element("annotationresponse")

    def create_input_element(self, **kwargs):
        """Create an <annotationinput> element.

        Recognized kwargs (all optional, with defaults):
            title (str): title of the annotation.
            text (str): the passage being annotated.
            comment (str): instructor comment text.
            comment_prompt (str): prompt shown above the comment box.
            tag_prompt (str): prompt shown above the tag choices.
            options (list of (str, str)): (description, correctness) pairs,
                where correctness is one of 'correct', 'incorrect', or
                'partially-correct'.
        """
        title = kwargs.get('title', 'super cool annotation')
        text = kwargs.get('text', 'texty text')
        comment = kwargs.get('comment', 'blah blah erudite comment blah blah')
        comment_prompt = kwargs.get('comment_prompt', 'type a commentary below')
        tag_prompt = kwargs.get('tag_prompt', 'select one tag')
        options = kwargs.get('options', [
            ('green', 'correct'),
            ('eggs', 'incorrect'),
            ('ham', 'partially-correct'),
        ])

        # Create the <annotationinput> element
        input_element = etree.Element("annotationinput")

        # Bug fix: the original created empty child elements and silently
        # discarded the kwarg values above; populate each child's text.
        for (tag, value) in [
            ('title', title),
            ('text', text),
            ('comment', comment),
            ('comment_prompt', comment_prompt),
            ('tag_prompt', tag_prompt),
        ]:
            etree.SubElement(input_element, tag).text = value

        options_element = etree.SubElement(input_element, 'options')
        for (description, correctness) in options:
            # Each <option> carries its correctness in the 'choice' attribute.
            option_element = etree.SubElement(
                options_element, 'option', {'choice': correctness}
            )
            option_element.text = description

        return input_element
class AnnotationResponseTest(ResponseTest):
    """Tests grading of <annotationresponse> problems."""
    from response_xml_factory import AnnotationResponseXMLFactory
    xml_factory_class = AnnotationResponseXMLFactory

    def test_grade(self):
        """Grade a range of student selections against a three-option
        annotation problem and check both correctness and points."""
        (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect')

        answer_id = '1_2_1'
        options = (('x', correct), ('y', partially), ('z', incorrect))

        def make_answer(option_ids):
            """Build a student answer dict: the response arrives as a
            JSON-encoded payload keyed by the answer id."""
            return {answer_id: json.dumps({'options': option_ids})}

        # Each case: expected correctness, expected points, and the raw answer.
        # Multiple/empty/None/malformed selections are all graded incorrect.
        tests = [
            {'correctness': correct, 'points': 2, 'answers': make_answer([0])},
            {'correctness': partially, 'points': 1, 'answers': make_answer([1])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer('')},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer(None)},
            {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
        ]

        for test in tests:
            expected_correctness = test['correctness']
            expected_points = test['points']
            answers = test['answers']

            problem = self.build_problem(options=options)
            correct_map = problem.grade_answers(answers)
            actual_correctness = correct_map.get_correctness(answer_id)
            actual_points = correct_map.get_npoints(answer_id)

            self.assertEqual(expected_correctness, actual_correctness,
                msg="%s should be marked %s" % (answer_id, expected_correctness))
            self.assertEqual(expected_points, actual_points,
                msg="%s should have %d points" % (answer_id, expected_points))