Allow problem tests to run even if problems can't give us answers to check.
A customresponse can't tell us what its answer is: there can be a wide range of acceptable answers, because submissions are verified by a small program rather than compared against a fixed value. So when running tests against problems, we simply ignore the fields we can't generate answers for.
committed by Calen Pennington
parent 2c6efbfd5d
commit 8131cb3334
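For context, a minimal sketch (hypothetical, not the actual capa implementation) of why a customresponse has no single retrievable answer: correctness is decided by an author-supplied checking program, so the set of accepted answers can't be enumerated up front. The check_answer function below is made up for illustration.

    def check_answer(submission):
        """Hypothetical customresponse-style grader: accepts any pair of
        numbers that sums to 10."""
        try:
            a, b = (float(x) for x in submission.split(','))
        except ValueError:
            return False
        return a + b == 10

    # "3,7", "5,5", and "-2,12" are all correct; there is no one
    # canonical answer a test harness could ask the problem for.
    assert check_answer("3,7")
    assert not check_answer("4,4")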
@@ -200,6 +200,24 @@ class LoncapaProblem(object):
         return answer_map
 
+    def get_answer_ids(self):
+        """Return the IDs of all the responses -- these are the keys used for
+        the dicts returned by grade_answers and get_question_answers. (Though
+        get_question_answers may only return a subset of these.)"""
+        answer_ids = []
+        context = self.extract_context(self.tree)
+        problems_simple = self.extract_problems(self.tree)
+        for response in problems_simple:
+            responder = response_types[response.tag](response, self.context)
+            if hasattr(responder, "answer_id"):
+                answer_ids.append(responder.answer_id)
+            # customresponse types can have multiple answer_ids
+            elif hasattr(responder, "answer_ids"):
+                answer_ids.extend(responder.answer_ids)
+
+        return answer_ids
+
+
     # ======= Private ========
 
     def extract_context(self, tree, seed=struct.unpack('i', os.urandom(4))[0]):  # private
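A sketch of the duck-typing the new method relies on: most responders expose a single answer_id, while customresponse-style responders expose a list in answer_ids. The responder classes below are hypothetical stand-ins, not the real capa responsetypes.

    class NumericalResponder:              # hypothetical stand-in
        answer_id = "q1_2_1"

    class CustomResponder:                 # hypothetical stand-in
        answer_ids = ["q2_2_1", "q2_2_2"]

    def collect_answer_ids(responders):
        ids = []
        for responder in responders:
            if hasattr(responder, "answer_id"):
                ids.append(responder.answer_id)
            elif hasattr(responder, "answer_ids"):
                ids.extend(responder.answer_ids)
        return ids

    print(collect_answer_ids([NumericalResponder(), CustomResponder()]))
    # ['q1_2_1', 'q2_2_1', 'q2_2_2']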
@@ -69,18 +69,29 @@ def test_problem(problem):
     string "a or d".
     - L1-e00.xml
     """
-    answers = problem.get_question_answers()
-    log.debug(answers)
-    if answers:
+    # These are actual answers we get from the responsetypes
+    real_answers = problem.get_question_answers()
+
+    # all_answers is real_answers + blanks for other answer_ids for which the
+    # responsetypes can't provide us pre-canned answers (customresponse)
+    all_answer_ids = problem.get_answer_ids()
+    all_answers = dict((answer_id, real_answers.get(answer_id, ""))
+                       for answer_id in all_answer_ids)
+
+    log.debug(real_answers)
+    if real_answers:
         try:
-            results = problem.grade_answers(answers)
-            log.debug(results)
-            assert(all(result == 'correct' for result in results.values()))
+            real_results = dict((answer_id, result) for answer_id, result
+                                in problem.grade_answers(all_answers).items()
+                                if answer_id in real_answers)
+            log.debug(real_results)
+            assert(all(result == 'correct'
+                       for answer_id, result in real_results.items()))
         except AssertionError:
             log.error("The following generated answers were not accepted:")
-            for question_id, result in sorted(results.items()):
+            for question_id, result in sorted(real_results.items()):
                 if result != 'correct':
-                    log.error("  {0} = {1}".format(question_id, answers[question_id]))
+                    log.error("  {0} = {1}".format(question_id, real_answers[question_id]))
         except Exception as ex:
             log.exception(ex)
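A standalone sketch of the padding-and-filtering pattern the test now uses (the ids and grading results below are made up): blanks are submitted for the answer_ids we can't generate answers for, and those ids are dropped again before asserting correctness.

    real_answers = {"q1_2_1": "42"}         # canned answers from responsetypes
    all_answer_ids = ["q1_2_1", "q2_2_1"]   # q2_2_1 is a customresponse

    # Pad with blanks so grading sees every field.
    all_answers = dict((answer_id, real_answers.get(answer_id, ""))
                       for answer_id in all_answer_ids)
    # {'q1_2_1': '42', 'q2_2_1': ''}

    # Pretend output of problem.grade_answers(all_answers).
    graded = {"q1_2_1": "correct", "q2_2_1": "incorrect"}

    # Only the ids we actually generated answers for are checked.
    real_results = dict((answer_id, result)
                        for answer_id, result in graded.items()
                        if answer_id in real_answers)
    assert all(result == 'correct' for result in real_results.values())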