Add annotationresponse grade test
This commit is contained in:
@@ -1848,8 +1848,7 @@ class AnnotationResponse(LoncapaResponse):
|
||||
Checking of annotation responses.
|
||||
|
||||
The response contains both a comment (student commentary) and an option (student tag).
|
||||
Only the tag is currently graded. Answers may be incorrect, partially correct, or correct
|
||||
and are scored accordingly.
|
||||
Only the tag is currently graded. Answers may be incorrect, partially correct, or correct.
|
||||
'''
|
||||
response_tag = 'annotationresponse'
|
||||
allowed_inputfields = ['annotationinput']
|
||||
|
||||
17
common/lib/capa/capa/tests/test_files/annotationresponse.xml
Normal file
17
common/lib/capa/capa/tests/test_files/annotationresponse.xml
Normal file
@@ -0,0 +1,17 @@
|
||||
<!-- Test fixture for AnnotationResponse grading: one annotation input whose
     three tag options are declared correct / incorrect / partially-correct.
     NOTE: option order matters — the Python test addresses options by index
     (0, 1, 2), so do not reorder them. -->
<problem display_name="Exercise 1">
    <annotationresponse>
        <annotationinput>
            <title>the title</title>
            <text>the text</text>
            <comment>the comment</comment>
            <comment_prompt>Type a commentary below:</comment_prompt>
            <tag_prompt>Select one or more tags:</tag_prompt>
            <options>
                <option choice="correct">green</option>
                <option choice="incorrect">eggs</option>
                <option choice="partially-correct">ham</option>
            </options>
        </annotationinput>
    </annotationresponse>
    <solution>Instructor text here...</solution>
</problem>
|
||||
@@ -428,4 +428,16 @@ class JavascriptResponseTest(unittest.TestCase):
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
|
||||
class AnnotationResponseTest(unittest.TestCase):
    """Tests grading of annotation responses.

    Only the student's tag selection is graded; the fixture declares one
    option for each correctness level (correct / incorrect /
    partially-correct).
    """
    # Removed a stray `pass` statement that was left in the class body.

    def test_grade(self):
        """Each tag option grades to its declared correctness level."""
        # Load the XML fixture that lives next to this test module.
        annotationresponse_file = os.path.dirname(__file__) + "/test_files/annotationresponse.xml"
        test_lcp = lcp.LoncapaProblem(open(annotationresponse_file).read(), '1', system=test_system)

        # Option indices 0/1/2 match the fixture's declared choice values.
        answers_for = {
            'correct': {'1_2_1': json.dumps({'options': [0]})},
            'incorrect': {'1_2_1': json.dumps({'options': [1]})},
            'partially-correct': {'1_2_1': json.dumps({'options': [2]})},
        }

        # Iterate items() directly instead of keys() + a second dict lookup.
        for expected_correctness, answers in answers_for.items():
            actual_correctness = test_lcp.grade_answers(answers).get_correctness('1_2_1')
            self.assertEquals(expected_correctness, actual_correctness)
|
||||
Reference in New Issue
Block a user