Merge pull request #40 from edx/feature/brian/regrade-celery-2
Feature/brian/regrade celery 2
@@ -5,6 +5,13 @@ These are notable changes in edx-platform. This is a rolling list of changes,
in roughly chronological order, most recent first. Add your entries at or near
the top. Include a label indicating the component affected.

LMS: Problem rescoring. Added options on the Grades tab of the
Instructor Dashboard to allow all students' submissions for a
particular problem to be rescored. Also supports resetting all
students' number of attempts to zero. Provides a list of background
tasks that are currently running for the course, and an option to
see a history of background tasks for a given problem.

LMS: Forums. Added handling for case where discussion module can get `None` as
value of lms.start in `lms/djangoapps/django_comment_client/utils.py`

@@ -1,13 +1,11 @@
import json
import logging
import os
import pytz
import datetime
import dateutil.parser

from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
@@ -22,6 +20,7 @@ LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', '


def log_event(event):
    """Write tracking event to log file, and optionally to TrackingLog model."""
    event_str = json.dumps(event)
    log.info(event_str[:settings.TRACK_MAX_EVENT])
    if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
@@ -34,6 +33,11 @@ def log_event(event):


def user_track(request):
    """
    Log when GET call to "event" URL is made by a user.

    GET call should provide "event_type", "event", and "page" arguments.
    """
    try:  # TODO: Do the same for many of the optional META parameters
        username = request.user.username
    except:
@@ -50,7 +54,6 @@ def user_track(request):
    except:
        agent = ''

    # TODO: Move a bunch of this into log_event
    event = {
        "username": username,
        "session": scookie,
@@ -68,6 +71,7 @@ def user_track(request):


def server_track(request, event_type, event, page=None):
    """Log events related to server requests."""
    try:
        username = request.user.username
    except:
@@ -95,9 +99,52 @@ def server_track(request, event_type, event, page=None):
    log_event(event)


def task_track(request_info, task_info, event_type, event, page=None):
    """
    Logs tracking information for events occurring within celery tasks.

    The `event_type` is a string naming the particular event being logged,
    while `event` is a dict containing whatever additional contextual information
    is desired.

    The `request_info` is a dict containing information about the original
    task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
    While the dict is required, the values in it are not, so that {} can be
    passed in.

    In addition, a `task_info` dict provides more information about the current
    task, to be stored with the `event` dict. This may also be an empty dict.

    The `page` parameter is optional, and allows the name of the page to
    be provided.
    """

    # supplement event information with additional information
    # about the task in which it is running.
    full_event = dict(event, **task_info)

    # All fields must be specified, in case the tracking information is
    # also saved to the TrackingLog model. Get values from the task-level
    # information, or just add placeholder values.
    event = {
        "username": request_info.get('username', 'unknown'),
        "ip": request_info.get('ip', 'unknown'),
        "event_source": "task",
        "event_type": event_type,
        "event": full_event,
        "agent": request_info.get('agent', 'unknown'),
        "page": page,
        "time": datetime.datetime.utcnow().isoformat(),
        "host": request_info.get('host', 'unknown')
    }

    log_event(event)
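
# Example usage (illustrative sketch -- all values below are hypothetical):
#
#   request_info = {'username': 'staff', 'ip': '10.0.0.1', 'agent': 'celery', 'host': 'worker-1'}
#   task_info = {'task_id': '1234-abcd'}
#   task_track(request_info, task_info, 'problem_rescore',
#              {'problem': 'i4x://edX/demo/problem/p1'}, page='x_module')
#
# Both dicts may be empty ({}); missing keys fall back to the 'unknown' placeholders.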


@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
    """View to output contents of TrackingLog model. For staff use only."""
    if not request.user.is_staff:
        return redirect('/')
    nlen = 100

@@ -15,25 +15,22 @@ This is used by capa_module.

from datetime import datetime
import logging
import math
import numpy
import os.path
import re
import sys

from lxml import etree
from xml.sax.saxutils import unescape
from copy import deepcopy

from .correctmap import CorrectMap
import inputtypes
import customrender
from .util import contextualize_text, convert_files_to_filenames
import xqueue_interface
from capa.correctmap import CorrectMap
import capa.inputtypes as inputtypes
import capa.customrender as customrender
from capa.util import contextualize_text, convert_files_to_filenames
import capa.xqueue_interface as xqueue_interface

# to be replaced with auto-registering
import responsetypes
import safe_exec
import capa.responsetypes as responsetypes
from capa.safe_exec import safe_exec

# dict of tagname, Response Class -- this should come from auto-registering
response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
@@ -46,8 +43,8 @@ response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]

# special problem tags which should be turned into innocuous HTML
html_transforms = {'problem': {'tag': 'div'},
                   "text": {'tag': 'span'},
                   "math": {'tag': 'span'},
                   'text': {'tag': 'span'},
                   'math': {'tag': 'span'},
                   }
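
# e.g. <problem>...</problem> is emitted as <div>...</div>, and <text>/<math>
# become <span>, so stripped problem XML still renders as plain HTML.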

# These should be removed from HTML output, including all subelements
@@ -134,7 +131,6 @@ class LoncapaProblem(object):

        self.extracted_tree = self._extract_html(self.tree)


    def do_reset(self):
        '''
        Reset internal state to unfinished, with no answers
@@ -175,7 +171,7 @@ class LoncapaProblem(object):
        Return the maximum score for this problem.
        '''
        maxscore = 0
        for response, responder in self.responders.iteritems():
        for responder in self.responders.values():
            maxscore += responder.get_max_score()
        return maxscore

@@ -220,7 +216,7 @@ class LoncapaProblem(object):
    def ungraded_response(self, xqueue_msg, queuekey):
        '''
        Handle any responses from the xqueue that do not contain grades
        Will try to pass the queue message to all inputtypes that can handle ungraded responses
        Will try to pass the queue message to all inputtypes that can handle ungraded responses

        Does not return any value
        '''
@@ -230,7 +226,6 @@ class LoncapaProblem(object):
        if hasattr(the_input, 'ungraded_response'):
            the_input.ungraded_response(xqueue_msg, queuekey)


    def is_queued(self):
        '''
        Returns True if any part of the problem has been submitted to an external queue
@@ -238,7 +233,6 @@ class LoncapaProblem(object):
        '''
        return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)


    def get_recentmost_queuetime(self):
        '''
        Returns a DateTime object that represents the timestamp of the most recent
@@ -256,11 +250,11 @@ class LoncapaProblem(object):

        return max(queuetimes)


    def grade_answers(self, answers):
        '''
        Grade student responses. Called by capa_module.check_problem.
        answers is a dict of all the entries from request.POST, but with the first part

        `answers` is a dict of all the entries from request.POST, but with the first part
        of each key removed (the string before the first "_").

        Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
@@ -270,24 +264,72 @@ class LoncapaProblem(object):

        # if answers include File objects, convert them to filenames.
        self.student_answers = convert_files_to_filenames(answers)
        return self._grade_answers(answers)

    def supports_rescoring(self):
        """
        Checks that the current problem definition permits rescoring.

        More precisely, it checks that there are no response types in
        the current problem that are not fully supported (yet) for rescoring.

        This includes responsetypes for which the student's answer
        is not properly stored in state, i.e. file submissions. At present,
        we have no way to know if an existing response was actually a real
        answer or merely the filename of a file submitted as an answer.

        It turns out that because rescoring is a background task, limiting
        it to responsetypes that don't support file submissions also means
        that the responsetypes are synchronous. This is convenient as it
        permits rescoring to be complete when the rescoring call returns.
        """
        return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())

    def rescore_existing_answers(self):
        """
        Rescore student responses. Called by capa_module.rescore_problem.
        """
        return self._grade_answers(None)
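
    # Together these support the rescoring flow (sketch; `problem` is hypothetical):
    #
    #   if problem.supports_rescoring():
    #       new_cmap = problem.rescore_existing_answers()  # re-grades persisted answers
    #
    # Problems with 'filesubmission' inputs are excluded because the stored
    # "answer" may be just the filename of an earlier upload.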

    def _grade_answers(self, student_answers):
        """
        Internal grading call used for checking new 'student_answers' and also
        rescoring existing student_answers.

        For new student_answers being graded, `student_answers` is a dict of all the
        entries from request.POST, but with the first part of each key removed
        (the string before the first "_"). Thus, for example,
        input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.

        For rescoring, `student_answers` is None.

        Calls the Response for each question in this problem, to do the actual grading.
        """
        # old CorrectMap
        oldcmap = self.correct_map

        # start new with empty CorrectMap
        newcmap = CorrectMap()
        # log.debug('Responders: %s' % self.responders)

        # Call each responsetype instance to do actual grading
        for responder in self.responders.values():
            # File objects are passed only if responsetype explicitly allows for file
            # submissions
            if 'filesubmission' in responder.allowed_inputfields:
                results = responder.evaluate_answers(answers, oldcmap)
            # File objects are passed only if responsetype explicitly allows
            # for file submissions. But we have no way of knowing if
            # student_answers contains a proper answer or the filename of
            # an earlier submission, so for now skip these entirely.
            # TODO: figure out where to get file submissions when rescoring.
            if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
                raise Exception("Cannot rescore problems with possible file submissions")

            # use 'student_answers' only if it is provided, and if it might contain a file
            # submission that would not exist in the persisted "student_answers".
            if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
                results = responder.evaluate_answers(student_answers, oldcmap)
            else:
                results = responder.evaluate_answers(convert_files_to_filenames(answers), oldcmap)
                results = responder.evaluate_answers(self.student_answers, oldcmap)
            newcmap.update(results)

        self.correct_map = newcmap
        # log.debug('%s: in grade_answers, answers=%s, cmap=%s' % (self,answers,newcmap))
        return newcmap
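
    # Both public entry points funnel into _grade_answers (sketch, hypothetical values):
    #
    #   problem.grade_answers({'ID123': '42'})   # new submission: answers dict provided
    #   problem.rescore_existing_answers()       # rescoring: student_answers is None,
    #                                            # so persisted self.student_answers is used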

    def get_question_answers(self):
@@ -331,7 +373,6 @@ class LoncapaProblem(object):
        html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
        return html


    def handle_input_ajax(self, get):
        '''
        InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data
@@ -348,8 +389,6 @@ class LoncapaProblem(object):
            log.warning("Could not find matching input for id: %s" % input_id)
            return {}



    # ======= Private Methods Below ========

    def _process_includes(self):
@@ -359,16 +398,16 @@ class LoncapaProblem(object):
        '''
        includes = self.tree.findall('.//include')
        for inc in includes:
            file = inc.get('file')
            if file is not None:
            filename = inc.get('file')
            if filename is not None:
                try:
                    # open using ModuleSystem OSFS filestore
                    ifp = self.system.filestore.open(file)
                    ifp = self.system.filestore.open(filename)
                except Exception as err:
                    log.warning('Error %s in problem xml include: %s' % (
                        err, etree.tostring(inc, pretty_print=True)))
                    log.warning('Cannot find file %s in %s' % (
                        file, self.system.filestore))
                        filename, self.system.filestore))
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): need real error handling, display to users
                    if not self.system.get('DEBUG'):
@@ -381,7 +420,7 @@ class LoncapaProblem(object):
                except Exception as err:
                    log.warning('Error %s in problem xml include: %s' % (
                        err, etree.tostring(inc, pretty_print=True)))
                    log.warning('Cannot parse XML in %s' % (file))
                    log.warning('Cannot parse XML in %s' % (filename))
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): same as above
                    if not self.system.get('DEBUG'):
@@ -389,11 +428,11 @@ class LoncapaProblem(object):
                else:
                    continue

                # insert new XML into tree in place of inlcude
                # insert new XML into tree in place of include
                parent = inc.getparent()
                parent.insert(parent.index(inc), incxml)
                parent.remove(inc)
                log.debug('Included %s into %s' % (file, self.problem_id))
                log.debug('Included %s into %s' % (filename, self.problem_id))

    def _extract_system_path(self, script):
        """
@@ -463,7 +502,7 @@ class LoncapaProblem(object):

        if all_code:
            try:
                safe_exec.safe_exec(
                safe_exec(
                    all_code,
                    context,
                    random_seed=self.seed,
@@ -519,18 +558,18 @@ class LoncapaProblem(object):
            value = ""
            if self.student_answers and problemid in self.student_answers:
                value = self.student_answers[problemid]

            if input_id not in self.input_state:
                self.input_state[input_id] = {}

            # do the rendering
            state = {'value': value,
                     'status': status,
                     'id': input_id,
                     'input_state': self.input_state[input_id],
                     'feedback': {'message': msg,
                                  'hint': hint,
                                  'hintmode': hintmode, }}
                     'status': status,
                     'id': input_id,
                     'input_state': self.input_state[input_id],
                     'feedback': {'message': msg,
                                  'hint': hint,
                                  'hintmode': hintmode, }}

            input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)
            # save the input type so that we can make ajax calls on it if we need to
@@ -554,7 +593,7 @@ class LoncapaProblem(object):
            for item in problemtree:
                item_xhtml = self._extract_html(item)
                if item_xhtml is not None:
                    tree.append(item_xhtml)
                    tree.append(item_xhtml)

            if tree.tag in html_transforms:
                tree.tag = html_transforms[problemtree.tag]['tag']

@@ -4,7 +4,6 @@ Tests of responsetypes

from datetime import datetime
import json
from nose.plugins.skip import SkipTest
import os
import random
import unittest
@@ -56,9 +55,18 @@ class ResponseTest(unittest.TestCase):
        self.assertEqual(result, 'incorrect',
                         msg="%s should be marked incorrect" % str(input_str))

    def _get_random_number_code(self):
        """Returns code to be used to generate a random result."""
        return "str(random.randint(0, 1e9))"

    def _get_random_number_result(self, seed_value):
        """Returns a result that should be generated using the random_number_code."""
        rand = random.Random(seed_value)
        return str(rand.randint(0, 1e9))
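
    # Sketch of how the two helpers pair up in a test (hypothetical `problem`):
    #
    #   script = "ans = " + self._get_random_number_code()       # runs with the problem's seed
    #   expected = self._get_random_number_result(problem.seed)  # same seed, same value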


class MultiChoiceResponseTest(ResponseTest):
    from response_xml_factory import MultipleChoiceResponseXMLFactory
    from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
    xml_factory_class = MultipleChoiceResponseXMLFactory

    def test_multiple_choice_grade(self):
@@ -80,7 +88,7 @@ class MultiChoiceResponseTest(ResponseTest):


class TrueFalseResponseTest(ResponseTest):
    from response_xml_factory import TrueFalseResponseXMLFactory
    from capa.tests.response_xml_factory import TrueFalseResponseXMLFactory
    xml_factory_class = TrueFalseResponseXMLFactory

    def test_true_false_grade(self):
@@ -120,7 +128,7 @@ class TrueFalseResponseTest(ResponseTest):


class ImageResponseTest(ResponseTest):
    from response_xml_factory import ImageResponseXMLFactory
    from capa.tests.response_xml_factory import ImageResponseXMLFactory
    xml_factory_class = ImageResponseXMLFactory

    def test_rectangle_grade(self):
@@ -184,7 +192,7 @@ class ImageResponseTest(ResponseTest):


class SymbolicResponseTest(ResponseTest):
    from response_xml_factory import SymbolicResponseXMLFactory
    from capa.tests.response_xml_factory import SymbolicResponseXMLFactory
    xml_factory_class = SymbolicResponseXMLFactory

    def test_grade_single_input(self):
@@ -224,8 +232,8 @@ class SymbolicResponseTest(ResponseTest):

    def test_complex_number_grade(self):
        problem = self.build_problem(math_display=True,
                                     expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
                                     options=["matrix", "imaginary"])
                                     expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
                                     options=["matrix", "imaginary"])

        # For LaTeX-style inputs, symmath_check() will try to contact
        # a server to convert the input to MathML.
@@ -312,16 +320,16 @@ class SymbolicResponseTest(ResponseTest):
        # Should not allow multiple inputs, since we specify
        # only one "expect" value
        with self.assertRaises(Exception):
            problem = self.build_problem(math_display=True,
                                         expect="2*x+3*y",
                                         num_inputs=3)
            self.build_problem(math_display=True,
                               expect="2*x+3*y",
                               num_inputs=3)

    def _assert_symbolic_grade(self, problem,
                               student_input,
                               dynamath_input,
                               expected_correctness):
                               student_input,
                               dynamath_input,
                               expected_correctness):
        input_dict = {'1_2_1': str(student_input),
                      '1_2_1_dynamath': str(dynamath_input)}
                      '1_2_1_dynamath': str(dynamath_input)}

        correct_map = problem.grade_answers(input_dict)

@@ -330,7 +338,7 @@ class SymbolicResponseTest(ResponseTest):


class OptionResponseTest(ResponseTest):
    from response_xml_factory import OptionResponseXMLFactory
    from capa.tests.response_xml_factory import OptionResponseXMLFactory
    xml_factory_class = OptionResponseXMLFactory

    def test_grade(self):
@@ -350,7 +358,7 @@ class FormulaResponseTest(ResponseTest):
    """
    Test the FormulaResponse class
    """
    from response_xml_factory import FormulaResponseXMLFactory
    from capa.tests.response_xml_factory import FormulaResponseXMLFactory
    xml_factory_class = FormulaResponseXMLFactory

    def test_grade(self):
@@ -570,7 +578,7 @@ class FormulaResponseTest(ResponseTest):


class StringResponseTest(ResponseTest):
    from response_xml_factory import StringResponseXMLFactory
    from capa.tests.response_xml_factory import StringResponseXMLFactory
    xml_factory_class = StringResponseXMLFactory

    def test_case_sensitive(self):
@@ -647,19 +655,18 @@ class StringResponseTest(ResponseTest):
                                     hintfn="gimme_a_random_hint",
                                     script=textwrap.dedent("""
                                     def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap):
                                         answer = str(random.randint(0, 1e9))
                                         answer = {code}
                                         new_cmap.set_hint_and_mode(answer_ids[0], answer, "always")

                                     """)
                                     """.format(code=self._get_random_number_code()))
                                     )
        correct_map = problem.grade_answers({'1_2_1': '2'})
        hint = correct_map.get_hint('1_2_1')
        r = random.Random(problem.seed)
        self.assertEqual(hint, str(r.randint(0, 1e9)))
        self.assertEqual(hint, self._get_random_number_result(problem.seed))


class CodeResponseTest(ResponseTest):
    from response_xml_factory import CodeResponseXMLFactory
    from capa.tests.response_xml_factory import CodeResponseXMLFactory
    xml_factory_class = CodeResponseXMLFactory

    def setUp(self):
@@ -673,6 +680,7 @@ class CodeResponseTest(ResponseTest):

    @staticmethod
    def make_queuestate(key, time):
        """Create queuestate dict"""
        timestr = datetime.strftime(time, dateformat)
        return {'key': key, 'time': timestr}
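
    # Sketch of the resulting dict (the exact time string depends on `dateformat`):
    #
    #   make_queuestate(1000, datetime.now())
    #   # => {'key': 1000, 'time': '<now, formatted with dateformat>'}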

@@ -710,7 +718,7 @@ class CodeResponseTest(ResponseTest):
        old_cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuekey = 1000 + i
            queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now())
            queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now())
            old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))

        # Message format common to external graders
@@ -771,7 +779,7 @@ class CodeResponseTest(ResponseTest):
        for i, answer_id in enumerate(answer_ids):
            queuekey = 1000 + i
            latest_timestamp = datetime.now()
            queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp)
            queuestate = CodeResponseTest.make_queuestate(queuekey, latest_timestamp)
            cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
        self.problem.correct_map.update(cmap)

@@ -796,7 +804,7 @@ class CodeResponseTest(ResponseTest):


class ChoiceResponseTest(ResponseTest):
    from response_xml_factory import ChoiceResponseXMLFactory
    from capa.tests.response_xml_factory import ChoiceResponseXMLFactory
    xml_factory_class = ChoiceResponseXMLFactory

    def test_radio_group_grade(self):
@@ -828,7 +836,7 @@ class ChoiceResponseTest(ResponseTest):


class JavascriptResponseTest(ResponseTest):
    from response_xml_factory import JavascriptResponseXMLFactory
    from capa.tests.response_xml_factory import JavascriptResponseXMLFactory
    xml_factory_class = JavascriptResponseXMLFactory

    def test_grade(self):
@@ -858,7 +866,7 @@ class JavascriptResponseTest(ResponseTest):
        system.can_execute_unsafe_code = lambda: False

        with self.assertRaises(LoncapaProblemError):
            problem = self.build_problem(
            self.build_problem(
                system=system,
                generator_src="test_problem_generator.js",
                grader_src="test_problem_grader.js",
@@ -869,7 +877,7 @@ class JavascriptResponseTest(ResponseTest):


class NumericalResponseTest(ResponseTest):
    from response_xml_factory import NumericalResponseXMLFactory
    from capa.tests.response_xml_factory import NumericalResponseXMLFactory
    xml_factory_class = NumericalResponseXMLFactory

    def test_grade_exact(self):
@@ -961,7 +969,7 @@ class NumericalResponseTest(ResponseTest):


class CustomResponseTest(ResponseTest):
    from response_xml_factory import CustomResponseXMLFactory
    from capa.tests.response_xml_factory import CustomResponseXMLFactory
    xml_factory_class = CustomResponseXMLFactory

    def test_inline_code(self):
@@ -1000,15 +1008,14 @@ class CustomResponseTest(ResponseTest):

    def test_inline_randomization(self):
        # Make sure the seed from the problem gets fed into the script execution.
        inline_script = """messages[0] = str(random.randint(0, 1e9))"""
        inline_script = "messages[0] = {code}".format(code=self._get_random_number_code())
        problem = self.build_problem(answer=inline_script)

        input_dict = {'1_2_1': '0'}
        correctmap = problem.grade_answers(input_dict)

        input_msg = correctmap.get_msg('1_2_1')
        r = random.Random(problem.seed)
        self.assertEqual(input_msg, str(r.randint(0, 1e9)))
        self.assertEqual(input_msg, self._get_random_number_result(problem.seed))

    def test_function_code_single_input(self):
        # For function code, we pass in these arguments:
@@ -1241,25 +1248,23 @@ class CustomResponseTest(ResponseTest):
    def test_setup_randomization(self):
        # Ensure that the problem setup script gets the random seed from the problem.
        script = textwrap.dedent("""
            num = random.randint(0, 1e9)
            """)
            num = {code}
            """.format(code=self._get_random_number_code()))
        problem = self.build_problem(script=script)
        r = random.Random(problem.seed)
        self.assertEqual(r.randint(0, 1e9), problem.context['num'])
        self.assertEqual(problem.context['num'], self._get_random_number_result(problem.seed))

    def test_check_function_randomization(self):
        # The check function should get random-seeded from the problem.
        script = textwrap.dedent("""
            def check_func(expect, answer_given):
                return {'ok': True, 'msg': str(random.randint(0, 1e9))}
            """)
                return {{'ok': True, 'msg': {code} }}
            """.format(code=self._get_random_number_code()))

        problem = self.build_problem(script=script, cfn="check_func", expect="42")
        input_dict = {'1_2_1': '42'}
        correct_map = problem.grade_answers(input_dict)
        msg = correct_map.get_msg('1_2_1')
        r = random.Random(problem.seed)
        self.assertEqual(msg, str(r.randint(0, 1e9)))
        self.assertEqual(msg, self._get_random_number_result(problem.seed))

    def test_module_imports_inline(self):
        '''
@@ -1320,7 +1325,7 @@ class CustomResponseTest(ResponseTest):


class SchematicResponseTest(ResponseTest):
    from response_xml_factory import SchematicResponseXMLFactory
    from capa.tests.response_xml_factory import SchematicResponseXMLFactory
    xml_factory_class = SchematicResponseXMLFactory

    def test_grade(self):
@@ -1349,11 +1354,10 @@ class SchematicResponseTest(ResponseTest):

    def test_check_function_randomization(self):
        # The check function should get a random seed from the problem.
        script = "correct = ['correct' if (submission[0]['num'] == random.randint(0, 1e9)) else 'incorrect']"
        script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format(code=self._get_random_number_code())
        problem = self.build_problem(answer=script)

        r = random.Random(problem.seed)
        submission_dict = {'num': r.randint(0, 1e9)}
        submission_dict = {'num': self._get_random_number_result(problem.seed)}
        input_dict = {'1_2_1': json.dumps(submission_dict)}
        correct_map = problem.grade_answers(input_dict)

@@ -1372,7 +1376,7 @@ class SchematicResponseTest(ResponseTest):


class AnnotationResponseTest(ResponseTest):
    from response_xml_factory import AnnotationResponseXMLFactory
    from capa.tests.response_xml_factory import AnnotationResponseXMLFactory
    xml_factory_class = AnnotationResponseXMLFactory

    def test_grade(self):
@@ -1393,7 +1397,7 @@ class AnnotationResponseTest(ResponseTest):
            {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
        ]

        for (index, test) in enumerate(tests):
        for test in tests:
            expected_correctness = test['correctness']
            expected_points = test['points']
            answers = test['answers']

@@ -424,7 +424,7 @@ class CapaModule(CapaFields, XModule):

        # If we cannot construct the problem HTML,
        # then generate an error message instead.
        except Exception, err:
        except Exception as err:
            html = self.handle_problem_html_error(err)

        # The convention is to pass the name of the check button
@@ -655,7 +655,7 @@ class CapaModule(CapaFields, XModule):
    @staticmethod
    def make_dict_of_responses(get):
        '''Make dictionary of student responses (aka "answers")
        get is POST dictionary (Djano QueryDict).
        get is POST dictionary (Django QueryDict).

        The *get* dict has keys of the form 'x_y', which are mapped
        to key 'y' in the returned dict. For example,
@@ -739,13 +739,13 @@ class CapaModule(CapaFields, XModule):
        # Too late. Cannot submit
        if self.closed():
            event_info['failure'] = 'closed'
            self.system.track_function('save_problem_check_fail', event_info)
            self.system.track_function('problem_check_fail', event_info)
            raise NotFoundError('Problem is closed')

        # Problem submitted. Student should reset before checking again
        if self.done and self.rerandomize == "always":
            event_info['failure'] = 'unreset'
            self.system.track_function('save_problem_check_fail', event_info)
            self.system.track_function('problem_check_fail', event_info)
            raise NotFoundError('Problem must be reset before it can be checked again')

        # Problem queued. Students must wait a specified waittime before they are allowed to submit
@@ -759,6 +759,8 @@ class CapaModule(CapaFields, XModule):

        try:
            correct_map = self.lcp.grade_answers(answers)
            self.attempts = self.attempts + 1
            self.lcp.done = True
            self.set_state_from_lcp()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
@@ -778,17 +780,13 @@ class CapaModule(CapaFields, XModule):

            return {'success': msg}

        except Exception, err:
        except Exception as err:
            if self.system.DEBUG:
                msg = "Error checking problem: " + str(err)
                msg += '\nTraceback:\n' + traceback.format_exc()
                return {'success': msg}
            raise

        self.attempts = self.attempts + 1
        self.lcp.done = True

        self.set_state_from_lcp()
        self.publish_grade()

        # success = correct if ALL questions in this problem are correct
@@ -802,7 +800,7 @@ class CapaModule(CapaFields, XModule):
        event_info['correct_map'] = correct_map.get_dict()
        event_info['success'] = success
        event_info['attempts'] = self.attempts
        self.system.track_function('save_problem_check', event_info)
        self.system.track_function('problem_check', event_info)

        if hasattr(self.system, 'psychometrics_handler'):  # update PsychometricsData using callback
            self.system.psychometrics_handler(self.get_state_for_lcp())
@@ -814,12 +812,92 @@ class CapaModule(CapaFields, XModule):
            'contents': html,
        }

    def rescore_problem(self):
        """
        Checks whether the existing answers to a problem are correct.

        This is called when the correct answer to a problem has been changed,
        and the grade should be re-evaluated.

        Returns a dict with one key:
            {'success' : 'correct' | 'incorrect' | AJAX alert msg string }

        Raises NotFoundError if called on a problem that has not yet been
        answered, or NotImplementedError if it's a problem that cannot be rescored.

        Returns the error messages for exceptions occurring while performing
        the rescoring, rather than throwing them.
        """
        event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.url()}

        if not self.lcp.supports_rescoring():
            event_info['failure'] = 'unsupported'
            self.system.track_function('problem_rescore_fail', event_info)
            raise NotImplementedError("Problem's definition does not support rescoring")

        if not self.done:
            event_info['failure'] = 'unanswered'
            self.system.track_function('problem_rescore_fail', event_info)
            raise NotFoundError('Problem must be answered before it can be graded again')

        # get old score, for comparison:
        orig_score = self.lcp.get_score()
        event_info['orig_score'] = orig_score['score']
        event_info['orig_total'] = orig_score['total']

        try:
            correct_map = self.lcp.rescore_existing_answers()

        except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
            log.warning("Input error in capa_module:problem_rescore", exc_info=True)
            event_info['failure'] = 'input_error'
            self.system.track_function('problem_rescore_fail', event_info)
            return {'success': u"Error: {0}".format(inst.message)}

        except Exception as err:
            event_info['failure'] = 'unexpected'
            self.system.track_function('problem_rescore_fail', event_info)
            if self.system.DEBUG:
                msg = u"Error checking problem: {0}".format(err.message)
                msg += u'\nTraceback:\n' + traceback.format_exc()
                return {'success': msg}
            raise

        # rescoring should have no effect on attempts, so don't
        # need to increment here, or mark done. Just save.
        self.set_state_from_lcp()

        self.publish_grade()

        new_score = self.lcp.get_score()
        event_info['new_score'] = new_score['score']
        event_info['new_total'] = new_score['total']

        # success = correct if ALL questions in this problem are correct
        success = 'correct'
        for answer_id in correct_map:
            if not correct_map.is_correct(answer_id):
                success = 'incorrect'

        # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
        # 'success' will always be incorrect
        event_info['correct_map'] = correct_map.get_dict()
        event_info['success'] = success
        event_info['attempts'] = self.attempts
        self.system.track_function('problem_rescore', event_info)

        # psychometrics should be called on rescoring requests in the same way as check-problem
        if hasattr(self.system, 'psychometrics_handler'):  # update PsychometricsData using callback
            self.system.psychometrics_handler(self.get_state_for_lcp())

        return {'success': success}
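
    # Possible return values (sketch):
    #
    #   {'success': 'correct'}      -- every answer correct after rescoring
    #   {'success': 'incorrect'}    -- at least one answer incorrect
    #   {'success': 'Error: ...'}   -- input/response errors are reported, not raised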

    def save_problem(self, get):
        '''
        """
        Save the passed in answers.
        Returns a dict { 'success' : bool, ['error' : error-msg]},
        with the error key only present if success is False.
        '''
        Returns a dict { 'success' : bool, 'msg' : message }
        The message is informative on success, and an error message on failure.
        """
        event_info = dict()
        event_info['state'] = self.lcp.get_state()
        event_info['problem_id'] = self.location.url()

@@ -19,6 +19,7 @@ from django.http import QueryDict

from . import test_system
from pytz import UTC
from capa.correctmap import CorrectMap


class CapaFactory(object):
@@ -597,6 +598,85 @@ class CapaModuleTest(unittest.TestCase):
        # Expect that the problem was NOT reset
        self.assertTrue('success' in result and not result['success'])

    def test_rescore_problem_correct(self):

        module = CapaFactory.create(attempts=1, done=True)

        # Simulate that all answers are marked correct, no matter
        # what the input is, by patching LoncapaResponse.evaluate_answers()
        with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
            mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
            result = module.rescore_problem()

        # Expect that the problem is marked correct
        self.assertEqual(result['success'], 'correct')

        # Expect that we get no HTML
        self.assertFalse('contents' in result)

        # Expect that the number of attempts is not incremented
        self.assertEqual(module.attempts, 1)

    def test_rescore_problem_incorrect(self):
        # make sure it also works when attempts have been reset,
        # so add this to the test:
        module = CapaFactory.create(attempts=0, done=True)

        # Simulate that all answers are marked incorrect, no matter
        # what the input is, by patching LoncapaResponse.evaluate_answers()
        with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
            mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
            result = module.rescore_problem()

        # Expect that the problem is marked incorrect
        self.assertEqual(result['success'], 'incorrect')

        # Expect that the number of attempts is not incremented
        self.assertEqual(module.attempts, 0)

    def test_rescore_problem_not_done(self):
        # Simulate that the problem is NOT done
        module = CapaFactory.create(done=False)

        # Try to rescore the problem, and get exception
        with self.assertRaises(xmodule.exceptions.NotFoundError):
            module.rescore_problem()

    def test_rescore_problem_not_supported(self):
        module = CapaFactory.create(done=True)

        # Try to rescore the problem, and get exception
        with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
            mock_supports_rescoring.return_value = False
            with self.assertRaises(NotImplementedError):
                module.rescore_problem()

    def _rescore_problem_error_helper(self, exception_class):
        """Helper to allow testing all errors that rescoring might return."""
        # Create the module
        module = CapaFactory.create(attempts=1, done=True)

        # Simulate answering a problem that raises the exception
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = exception_class(u'test error \u03a9')
            result = module.rescore_problem()

        # Expect an AJAX alert message in 'success'
        expected_msg = u'Error: test error \u03a9'
        self.assertEqual(result['success'], expected_msg)

        # Expect that the number of attempts is NOT incremented
        self.assertEqual(module.attempts, 1)

    def test_rescore_problem_student_input_error(self):
        self._rescore_problem_error_helper(StudentInputError)

    def test_rescore_problem_problem_error(self):
        self._rescore_problem_error_helper(LoncapaProblemError)

    def test_rescore_problem_response_error(self):
        self._rescore_problem_error_helper(ResponseError)

    def test_save_problem(self):
        module = CapaFactory.create(done=False)


@@ -20,7 +20,7 @@ from . import test_system

class DummySystem(ImportSystem):

    @patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
    @patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
    def __init__(self, load_error_modules):

        xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules)
@@ -41,7 +41,8 @@ class DummySystem(ImportSystem):
    )

    def render_template(self, template, context):
        raise Exception("Shouldn't be called")
        raise Exception("Shouldn't be called")


class ConditionalFactory(object):
    """
@@ -93,7 +94,7 @@ class ConditionalFactory(object):
        # return dict:
        return {'cond_module': cond_module,
                'source_module': source_module,
                'child_module': child_module }
                'child_module': child_module}


class ConditionalModuleBasicTest(unittest.TestCase):
@@ -109,12 +110,11 @@ class ConditionalModuleBasicTest(unittest.TestCase):
        '''verify that get_icon_class works independent of condition satisfaction'''
        modules = ConditionalFactory.create(self.test_system)
        for attempted in ["false", "true"]:
            for icon_class in [ 'other', 'problem', 'video']:
            for icon_class in ['other', 'problem', 'video']:
                modules['source_module'].is_attempted = attempted
                modules['child_module'].get_icon_class = lambda: icon_class
                self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)


    def test_get_html(self):
        modules = ConditionalFactory.create(self.test_system)
        # because test_system returns the repr of the context dict passed to render_template,
@@ -224,4 +224,3 @@ class ConditionalModuleXmlTest(unittest.TestCase):
        print "post-attempt ajax: ", ajax
        html = ajax['html']
        self.assertTrue(any(['This is a secret' in item for item in html]))


@@ -4,9 +4,9 @@ WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,

1. Go to the mitx dir
1. Go to the edx-platform dir
2. ./manage.py schemamigration courseware --auto description_of_your_change
3. Add the migration file created in mitx/courseware/migrations/
3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/


ASSUMPTIONS: modules have unique IDs, even across different module_types
@@ -17,6 +17,7 @@ from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver


class StudentModule(models.Model):
    """
    Keeps student state for a particular module in a particular course.

@@ -121,7 +121,7 @@ def toc_for_course(user, request, course, active_chapter, active_section, model_


def get_module(user, request, location, model_data_cache, course_id,
               position=None, not_found_ok = False, wrap_xmodule_display=True,
               position=None, not_found_ok=False, wrap_xmodule_display=True,
               grade_bucket_type=None, depth=0):
    """
    Get an instance of the xmodule class identified by location,
@@ -161,16 +161,49 @@ def get_module(user, request, location, model_data_cache, course_id,
        return None


def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
                              position=None, wrap_xmodule_display=True, grade_bucket_type=None):
    """
    Actually implement get_module. See docstring there for details.
def get_xqueue_callback_url_prefix(request):
    """
    Calculates default prefix based on request, but allows override via settings

    This is separated from get_module_for_descriptor so that it can be called
    by the LMS before submitting background tasks to run. The xqueue callbacks
    should go back to the LMS, not to the worker.
    """
    prefix = '{proto}://{host}'.format(
        proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),
        host=request.get_host()
    )
    return settings.XQUEUE_INTERFACE.get('callback_url', prefix)
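
# Sketch (hypothetical host): a request to https://lms.example.com with no
# X-Forwarded-Proto header and no 'callback_url' override in XQUEUE_INTERFACE
# yields the prefix 'https://lms.example.com'.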


def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
                              position=None, wrap_xmodule_display=True, grade_bucket_type=None):
    """
    Implements get_module, extracting out the request-specific functionality.

    See get_module() docstring for further details.
    """
    # allow course staff to masquerade as student
    if has_access(user, descriptor, 'staff', course_id):
        setup_masquerade(request, True)

    track_function = make_track_function(request)
    xqueue_callback_url_prefix = get_xqueue_callback_url_prefix(request)

    return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
                                              track_function, xqueue_callback_url_prefix,
                                              position, wrap_xmodule_display, grade_bucket_type)


def get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
                                       track_function, xqueue_callback_url_prefix,
                                       position=None, wrap_xmodule_display=True, grade_bucket_type=None):
    """
    Actually implement get_module, without requiring a request.

    See get_module() docstring for further details.
    """

    # Short circuit--if the user shouldn't have access, bail without doing any work
    if not has_access(user, descriptor, 'load', course_id):
        return None
@@ -186,19 +219,13 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours

    def make_xqueue_callback(dispatch='score_update'):
        # Fully qualified callback URL for external queueing system
        xqueue_callback_url = '{proto}://{host}'.format(
            host=request.get_host(),
            proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http')
        )
        xqueue_callback_url = settings.XQUEUE_INTERFACE.get('callback_url',xqueue_callback_url)  # allow override

        xqueue_callback_url += reverse('xqueue_callback',
                                       kwargs=dict(course_id=course_id,
                                                   userid=str(user.id),
                                                   id=descriptor.location.url(),
                                                   dispatch=dispatch),
                                       )
        return xqueue_callback_url
        relative_xqueue_callback_url = reverse('xqueue_callback',
                                               kwargs=dict(course_id=course_id,
                                                           userid=str(user.id),
                                                           id=descriptor.location.url(),
                                                           dispatch=dispatch),
                                               )
        return xqueue_callback_url_prefix + relative_xqueue_callback_url
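
        # Sketch (hypothetical values): with prefix 'https://lms.example.com', the
        # callback becomes 'https://lms.example.com' + reverse('xqueue_callback', ...),
        # so background workers post results back to the LMS rather than to themselves.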
|
||||
|
||||
# Default queuename is course-specific and is derived from the course that
|
||||
# contains the current module.
|
||||
@@ -211,20 +238,20 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
|
||||
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
|
||||
}
|
||||
|
||||
#This is a hacky way to pass settings to the combined open ended xmodule
|
||||
#It needs an S3 interface to upload images to S3
|
||||
#It needs the open ended grading interface in order to get peer grading to be done
|
||||
#this first checks to see if the descriptor is the correct one, and only sends settings if it is
|
||||
# This is a hacky way to pass settings to the combined open ended xmodule
|
||||
# It needs an S3 interface to upload images to S3
|
||||
# It needs the open ended grading interface in order to get peer grading to be done
|
||||
# this first checks to see if the descriptor is the correct one, and only sends settings if it is
|
||||
|
||||
#Get descriptor metadata fields indicating needs for various settings
|
||||
# Get descriptor metadata fields indicating needs for various settings
|
||||
needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False)
|
||||
needs_s3_interface = getattr(descriptor, "needs_s3_interface", False)
|
||||
|
||||
#Initialize interfaces to None
|
||||
# Initialize interfaces to None
|
||||
open_ended_grading_interface = None
|
||||
s3_interface = None
|
||||
|
||||
#Create interfaces if needed
|
||||
# Create interfaces if needed
|
||||
if needs_open_ended_interface:
|
||||
open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE
|
||||
open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING
|
||||
@@ -238,10 +265,15 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
|
||||
|
||||
def inner_get_module(descriptor):
|
||||
"""
|
||||
Delegate to get_module. It does an access check, so may return None
|
||||
Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
|
||||
|
||||
Because it does an access check, it may return None.
|
||||
"""
|
||||
return get_module_for_descriptor(user, request, descriptor,
|
||||
model_data_cache, course_id, position)
|
||||
# TODO: fix this so that make_xqueue_callback uses the descriptor passed into
|
||||
# inner_get_module, not the parent's callback. Add it as an argument....
|
||||
return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
|
||||
track_function, make_xqueue_callback,
|
||||
position, wrap_xmodule_display, grade_bucket_type)
|
||||
|
||||
def xblock_model_data(descriptor):
|
||||
return DbModel(
|
||||
@@ -291,7 +323,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
|
||||
# TODO (cpennington): When modules are shared between courses, the static
|
||||
# prefix is going to have to be specific to the module, not the directory
|
||||
# that the xml was loaded from
|
||||
system = ModuleSystem(track_function=make_track_function(request),
|
||||
system = ModuleSystem(track_function=track_function,
|
||||
render_template=render_to_string,
|
||||
ajax_url=ajax_url,
|
||||
xqueue=xqueue,
|
||||
|
||||
@@ -11,21 +11,22 @@ from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
|
||||
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
|
||||
from xmodule.modulestore.tests.factories import CourseFactory
|
||||
|
||||
|
||||
class ProgressTestCase(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
def setUp(self):
|
||||
|
||||
self.mockuser1 = MagicMock()
|
||||
self.mockuser0 = MagicMock()
|
||||
self.course = MagicMock()
|
||||
self.mockuser1.is_authenticated.return_value = True
|
||||
self.mockuser0.is_authenticated.return_value = False
|
||||
self.course.id = 'edX/full/6.002_Spring_2012'
|
||||
self.tab = {'name': 'same'}
|
||||
self.active_page1 = 'progress'
|
||||
self.active_page0 = 'stagnation'
|
||||
self.mockuser1 = MagicMock()
|
||||
self.mockuser0 = MagicMock()
|
||||
self.course = MagicMock()
|
||||
self.mockuser1.is_authenticated.return_value = True
|
||||
self.mockuser0.is_authenticated.return_value = False
|
||||
self.course.id = 'edX/full/6.002_Spring_2012'
|
||||
self.tab = {'name': 'same'}
|
||||
self.active_page1 = 'progress'
|
||||
self.active_page0 = 'stagnation'
|
||||
|
||||
def test_progress(self):
|
||||
def test_progress(self):
|
||||
|
||||
self.assertEqual(tabs._progress(self.tab, self.mockuser0, self.course,
|
||||
self.active_page0), [])
|
||||
@@ -34,8 +35,8 @@ class ProgressTestCase(TestCase):
|
||||
self.active_page1)[0].name, 'same')
|
||||
|
||||
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
|
||||
self.active_page1)[0].link,
|
||||
reverse('progress', args = [self.course.id]))
|
||||
self.active_page1)[0].link,
|
||||
reverse('progress', args=[self.course.id]))
|
||||
|
||||
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
|
||||
self.active_page0)[0].is_active, False)
|
||||
@@ -63,15 +64,15 @@ class WikiTestCase(TestCase):
|
||||
'same')
|
||||
|
||||
self.assertEqual(tabs._wiki(self.tab, self.user,
|
||||
self.course, self.active_page1)[0].link,
|
||||
self.course, self.active_page1)[0].link,
|
||||
reverse('course_wiki', args=[self.course.id]))
|
||||
|
||||
self.assertEqual(tabs._wiki(self.tab, self.user,
|
||||
self.course, self.active_page1)[0].is_active,
|
||||
self.course, self.active_page1)[0].is_active,
|
||||
True)
|
||||
|
||||
self.assertEqual(tabs._wiki(self.tab, self.user,
|
||||
self.course, self.active_page0)[0].is_active,
|
||||
self.course, self.active_page0)[0].is_active,
|
||||
False)
|
||||
|
||||
@override_settings(WIKI_ENABLED=False)
|
||||
@@ -129,14 +130,13 @@ class StaticTabTestCase(TestCase):
|
||||
|
||||
self.assertEqual(tabs._static_tab(self.tabby, self.user,
|
||||
self.course, self.active_page1)[0].link,
|
||||
reverse('static_tab', args = [self.course.id,
|
||||
self.tabby['url_slug']]))
|
                         reverse('static_tab', args=[self.course.id,
                                                     self.tabby['url_slug']]))

        self.assertEqual(tabs._static_tab(self.tabby, self.user,
                                          self.course, self.active_page1)[0].is_active,
                         True)


        self.assertEqual(tabs._static_tab(self.tabby, self.user,
                                          self.course, self.active_page0)[0].is_active,
                         False)
@@ -183,7 +183,7 @@ class TextbooksTestCase(TestCase):

        self.assertEqual(tabs._textbooks(self.tab, self.mockuser1,
                                         self.course, self.active_page1)[1].name,
                         'Topology')
                         'Topology')

        self.assertEqual(tabs._textbooks(self.tab, self.mockuser1,
                                         self.course, self.active_page1)[1].link,
@@ -206,6 +206,7 @@ class TextbooksTestCase(TestCase):
        self.assertEqual(tabs._textbooks(self.tab, self.mockuser0,
                                         self.course, self.active_pageX), [])


class KeyCheckerTestCase(TestCase):

    def setUp(self):
@@ -223,39 +224,36 @@ class KeyCheckerTestCase(TestCase):

class NullValidatorTestCase(TestCase):

    def setUp(self):
    def setUp(self):

        self.d = {}
        self.dummy = {}

    def test_null_validator(self):

        self.assertIsNone(tabs.null_validator(self.d))
    def test_null_validator(self):
        self.assertIsNone(tabs.null_validator(self.dummy))


class ValidateTabsTestCase(TestCase):

    def setUp(self):

        self.courses = [MagicMock() for i in range(0,5)]
        self.courses = [MagicMock() for i in range(0, 5)]

        self.courses[0].tabs = None

        self.courses[1].tabs = [{'type':'courseware'}, {'type': 'fax'}]
        self.courses[1].tabs = [{'type': 'courseware'}, {'type': 'fax'}]

        self.courses[2].tabs = [{'type':'shadow'}, {'type': 'course_info'}]
        self.courses[2].tabs = [{'type': 'shadow'}, {'type': 'course_info'}]

        self.courses[3].tabs = [{'type':'courseware'},{'type':'course_info', 'name': 'alice'},
                                {'type': 'wiki', 'name':'alice'}, {'type':'discussion', 'name': 'alice'},
                                {'type':'external_link', 'name': 'alice', 'link':'blink'},
                                {'type':'textbooks'}, {'type':'progress', 'name': 'alice'},
                                {'type':'static_tab', 'name':'alice', 'url_slug':'schlug'},
                                {'type': 'staff_grading'}]

        self.courses[4].tabs = [{'type':'courseware'},{'type': 'course_info'}, {'type': 'flying'}]
        self.courses[3].tabs = [{'type': 'courseware'}, {'type': 'course_info', 'name': 'alice'},
                                {'type': 'wiki', 'name': 'alice'}, {'type': 'discussion', 'name': 'alice'},
                                {'type': 'external_link', 'name': 'alice', 'link': 'blink'},
                                {'type': 'textbooks'}, {'type': 'progress', 'name': 'alice'},
                                {'type': 'static_tab', 'name': 'alice', 'url_slug': 'schlug'},
                                {'type': 'staff_grading'}]

        self.courses[4].tabs = [{'type': 'courseware'}, {'type': 'course_info'}, {'type': 'flying'}]

    def test_validate_tabs(self):

        self.assertIsNone(tabs.validate_tabs(self.courses[0]))
        self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[1])
        self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[2])
@@ -268,15 +266,15 @@ class DiscussionLinkTestCase(ModuleStoreTestCase):

    def setUp(self):
        self.tabs_with_discussion = [
            {'type':'courseware'},
            {'type':'course_info'},
            {'type':'discussion'},
            {'type':'textbooks'},
            {'type': 'courseware'},
            {'type': 'course_info'},
            {'type': 'discussion'},
            {'type': 'textbooks'},
        ]
        self.tabs_without_discussion = [
            {'type':'courseware'},
            {'type':'course_info'},
            {'type':'textbooks'},
            {'type': 'courseware'},
            {'type': 'course_info'},
            {'type': 'textbooks'},
        ]

    @staticmethod
@@ -10,7 +10,6 @@ import os
import re
import requests
from requests.status_codes import codes
import urllib
from collections import OrderedDict

from StringIO import StringIO
@@ -20,8 +19,10 @@ from django.contrib.auth.models import User, Group
from django.http import HttpResponse
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from mitxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
import xmodule.graders as xmgraders
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError

from courseware import grades
from courseware.access import (has_access, get_access_group_name,
@@ -33,13 +34,18 @@ from django_comment_common.models import (Role,
                                          FORUM_ROLE_MODERATOR,
                                          FORUM_ROLE_COMMUNITY_TA)
from django_comment_client.utils import has_forum_access
from instructor.offline_gradecalc import student_grades, offline_grades_available
from instructor_task.api import (get_running_instructor_tasks,
                                 get_instructor_task_history,
                                 submit_rescore_problem_for_all_students,
                                 submit_rescore_problem_for_student,
                                 submit_reset_problem_attempts_for_all_students)
from instructor_task.views import get_task_completion_info
from mitxmako.shortcuts import render_to_response
from psychometrics import psychoanalyze
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from xmodule.modulestore.django import modulestore
import xmodule.graders as xmgraders
import track.views

from .offline_gradecalc import student_grades, offline_grades_available

log = logging.getLogger(__name__)

@@ -68,6 +74,7 @@ def instructor_dashboard(request, course_id):
    msg = ''
    problems = []
    plots = []
    datatable = {}

    # the instructor dashboard page is modal: grades, psychometrics, admin
    # keep that state in request.session (defaults to grades mode)
@@ -78,26 +85,29 @@ def instructor_dashboard(request, course_id):
    idash_mode = request.session.get('idash_mode', 'Grades')

    # assemble some course statistics for output to instructor
    datatable = {'header': ['Statistic', 'Value'],
                 'title': 'Course Statistics At A Glance',
                 }
    data = [['# Enrolled', CourseEnrollment.objects.filter(course_id=course_id).count()]]
    data += compute_course_stats(course).items()
    if request.user.is_staff:
        for field in course.fields:
            if getattr(field.scope, 'user', False):
                continue

            data.append([field.name, json.dumps(field.read_json(course))])
        for namespace in course.namespaces:
            for field in getattr(course, namespace).fields:
    def get_course_stats_table():
        datatable = {'header': ['Statistic', 'Value'],
                     'title': 'Course Statistics At A Glance',
                     }
        data = [['# Enrolled', CourseEnrollment.objects.filter(course_id=course_id).count()]]
        data += compute_course_stats(course).items()
        if request.user.is_staff:
            for field in course.fields:
                if getattr(field.scope, 'user', False):
                    continue

                data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))])
    datatable['data'] = data
                data.append([field.name, json.dumps(field.read_json(course))])
            for namespace in course.namespaces:
                for field in getattr(course, namespace).fields:
                    if getattr(field.scope, 'user', False):
                        continue

                    data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))])
        datatable['data'] = data
        return datatable

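For reference, the "datatable" used throughout this dashboard is just a plain dict; a minimal sketch of the shape get_course_stats_table returns (the numbers are invented for illustration):

    # Hypothetical shape only; values are made up.
    datatable = {
        'header': ['Statistic', 'Value'],
        'title': 'Course Statistics At A Glance',
        'data': [['# Enrolled', 1234],
                 ['total_problems', 42]],
    }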
    def return_csv(fn, datatable, fp=None):
        """Outputs a CSV file from the contents of a datatable."""
        if fp is None:
            response = HttpResponse(mimetype='text/csv')
            response['Content-Disposition'] = 'attachment; filename={0}'.format(fn)
@@ -111,12 +121,15 @@ def instructor_dashboard(request, course_id):
        return response

    def get_staff_group(course):
        """Get or create the staff access group"""
        return get_group(course, 'staff')

    def get_instructor_group(course):
        """Get or create the instructor access group"""
        return get_group(course, 'instructor')

    def get_group(course, groupname):
        """Get or create an access group"""
        grpname = get_access_group_name(course, groupname)
        try:
            group = Group.objects.get(name=grpname)
@@ -136,6 +149,39 @@ def instructor_dashboard(request, course_id):
        (group, _) = Group.objects.get_or_create(name=name)
        return group

    def get_module_url(urlname):
        """
        Construct full URL for a module from its urlname.

        Form is either urlname or modulename/urlname. If no modulename
        is provided, "problem" is assumed.
        """
        # tolerate an XML suffix in the urlname
        if urlname[-4:] == ".xml":
            urlname = urlname[:-4]

        # implement default
        if '/' not in urlname:
            urlname = "problem/" + urlname

        # complete the url using information about the current course:
        (org, course_name, _) = course_id.split("/")
        return "i4x://" + org + "/" + course_name + "/" + urlname

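To illustrate the mapping get_module_url performs, hypothetical calls (the course id below is invented):

    # Assuming course_id = "MITx/6.002x/2013_Spring" inside instructor_dashboard:
    get_module_url("quiz1.xml")       # -> "i4x://MITx/6.002x/problem/quiz1"
    get_module_url("video/lecture1")  # -> "i4x://MITx/6.002x/video/lecture1"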
    def get_student_from_identifier(unique_student_identifier):
        """Gets a student object using either an email address or username"""
        msg = ""
        try:
            if "@" in unique_student_identifier:
                student = User.objects.get(email=unique_student_identifier)
            else:
                student = User.objects.get(username=unique_student_identifier)
            msg += "Found a single student. "
        except User.DoesNotExist:
            student = None
            msg += "<font color='red'>Couldn't find student with that email or username. </font>"
        return msg, student

    # process actions from form POST
    action = request.POST.get('action', '')
    use_offline = request.POST.get('use_offline_grades', False)
@@ -205,88 +251,138 @@ def instructor_dashboard(request, course_id):
        track.views.server_track(request, action, {}, page='idashboard')
        msg += dump_grading_context(course)

    elif "Reset student's attempts" in action or "Delete student state for problem" in action:
    elif "Rescore ALL students' problem submissions" in action:
        problem_urlname = request.POST.get('problem_for_all_students', '')
        problem_url = get_module_url(problem_urlname)
        try:
            instructor_task = submit_rescore_problem_for_all_students(request, course_id, problem_url)
            if instructor_task is None:
                msg += '<font color="red">Failed to create a background task for rescoring "{0}".</font>'.format(problem_url)
            else:
                track_msg = 'rescore problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
                track.views.server_track(request, track_msg, {}, page='idashboard')
        except ItemNotFoundError as e:
            msg += '<font color="red">Failed to create a background task for rescoring "{0}": problem not found.</font>'.format(problem_url)
        except Exception as e:
            log.error("Encountered exception from rescore: {0}".format(e))
            msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(problem_url, e.message)

    elif "Reset ALL students' attempts" in action:
        problem_urlname = request.POST.get('problem_for_all_students', '')
        problem_url = get_module_url(problem_urlname)
        try:
            instructor_task = submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
            if instructor_task is None:
                msg += '<font color="red">Failed to create a background task for resetting "{0}".</font>'.format(problem_url)
            else:
                track_msg = 'reset problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
                track.views.server_track(request, track_msg, {}, page='idashboard')
        except ItemNotFoundError as e:
            log.error('Failure to reset: unknown problem "{0}"'.format(e))
            msg += '<font color="red">Failed to create a background task for resetting "{0}": problem not found.</font>'.format(problem_url)
        except Exception as e:
            log.error("Encountered exception from reset: {0}".format(e))
            msg += '<font color="red">Failed to create a background task for resetting "{0}": {1}.</font>'.format(problem_url, e.message)

    elif "Show Background Task History for Student" in action:
        # put this before the non-student case, since the use of "in" will cause this to be missed
        unique_student_identifier = request.POST.get('unique_student_identifier', '')
        message, student = get_student_from_identifier(unique_student_identifier)
        if student is None:
            msg += message
        else:
            problem_urlname = request.POST.get('problem_for_student', '')
            problem_url = get_module_url(problem_urlname)
            message, datatable = get_background_task_table(course_id, problem_url, student)
            msg += message

    elif "Show Background Task History" in action:
        problem_urlname = request.POST.get('problem_for_all_students', '')
        problem_url = get_module_url(problem_urlname)
        message, datatable = get_background_task_table(course_id, problem_url)
        msg += message

    elif ("Reset student's attempts" in action or
          "Delete student state for module" in action or
          "Rescore student's problem submission" in action):
        # get the form data
        unique_student_identifier = request.POST.get('unique_student_identifier', '')
        problem_to_reset = request.POST.get('problem_to_reset', '')

        if problem_to_reset[-4:] == ".xml":
            problem_to_reset = problem_to_reset[:-4]

        problem_urlname = request.POST.get('problem_for_student', '')
        module_state_key = get_module_url(problem_urlname)
        # try to uniquely id student by email address or username
        try:
            if "@" in unique_student_identifier:
                student_to_reset = User.objects.get(email=unique_student_identifier)
            else:
                student_to_reset = User.objects.get(username=unique_student_identifier)
            msg += "Found a single student to reset. "
        except:
            student_to_reset = None
            msg += "<font color='red'>Couldn't find student with that email or username. </font>"

        if student_to_reset is not None:
        message, student = get_student_from_identifier(unique_student_identifier)
        msg += message
        student_module = None
        if student is not None:
            # find the module in question
            if '/' not in problem_to_reset:  # allow state of modules other than problem to be reset
                problem_to_reset = "problem/" + problem_to_reset  # but problem is the default
            try:
                (org, course_name, _) = course_id.split("/")
                module_state_key = "i4x://" + org + "/" + course_name + "/" + problem_to_reset
                module_to_reset = StudentModule.objects.get(student_id=student_to_reset.id,
                                                            course_id=course_id,
                                                            module_state_key=module_state_key)
                msg += "Found module to reset. "
            except Exception:
                student_module = StudentModule.objects.get(student_id=student.id,
                                                           course_id=course_id,
                                                           module_state_key=module_state_key)
                msg += "Found module. "
            except StudentModule.DoesNotExist:
                msg += "<font color='red'>Couldn't find module with that urlname. </font>"

        if "Delete student state for problem" in action:
            # delete the state
            try:
                module_to_reset.delete()
                msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key
            except:
                msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_to_reset)
        else:
            # modify the problem's state
            try:
                # load the state json
                problem_state = json.loads(module_to_reset.state)
                old_number_of_attempts = problem_state["attempts"]
                problem_state["attempts"] = 0
        if student_module is not None:
            if "Delete student state for module" in action:
                # delete the state
                try:
                    student_module.delete()
                    msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key
                    track_format = 'delete student module state for problem {problem} for student {student} in {course}'
                    track_msg = track_format.format(problem=problem_url, student=unique_student_identifier, course=course_id)
                    track.views.server_track(request, track_msg, {}, page='idashboard')
                except:
                    msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_urlname)
            elif "Reset student's attempts" in action:
                # modify the problem's state
                try:
                    # load the state json
                    problem_state = json.loads(student_module.state)
                    old_number_of_attempts = problem_state["attempts"]
                    problem_state["attempts"] = 0

                # save
                module_to_reset.state = json.dumps(problem_state)
                module_to_reset.save()
                track.views.server_track(request,
                                         '{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'.format(
                                             old_attempts=old_number_of_attempts,
                                             student=student_to_reset,
                                             problem=module_to_reset.module_state_key,
                                             instructor=request.user,
                                             course=course_id),
                                         {},
                                         page='idashboard')
                msg += "<font color='green'>Module state successfully reset!</font>"
            except:
                msg += "<font color='red'>Couldn't reset module state. </font>"
                    # save
                    student_module.state = json.dumps(problem_state)
                    student_module.save()
                    track_format = '{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'
                    track_msg = track_format.format(old_attempts=old_number_of_attempts,
                                                    student=student,
                                                    problem=student_module.module_state_key,
                                                    instructor=request.user,
                                                    course=course_id)
                    track.views.server_track(request, track_msg, {}, page='idashboard')
                    msg += "<font color='green'>Module state successfully reset!</font>"
                except:
                    msg += "<font color='red'>Couldn't reset module state. </font>"
            else:
                # "Rescore student's problem submission" case
                try:
                    instructor_task = submit_rescore_problem_for_student(request, course_id, module_state_key, student)
                    if instructor_task is None:
                        msg += '<font color="red">Failed to create a background task for rescoring "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
                    else:
                        track_msg = 'rescore problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
                        track.views.server_track(request, track_msg, {}, page='idashboard')
                except Exception as e:
                    log.exception("Encountered exception from rescore: {0}".format(e))
                    msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(module_state_key, e.message)

    elif "Get link to student's progress page" in action:
        unique_student_identifier = request.POST.get('unique_student_identifier', '')
        try:
            if "@" in unique_student_identifier:
                student_to_reset = User.objects.get(email=unique_student_identifier)
            else:
                student_to_reset = User.objects.get(username=unique_student_identifier)
            progress_url = reverse('student_progress', kwargs={'course_id': course_id, 'student_id': student_to_reset.id})
        # try to uniquely id student by email address or username
        message, student = get_student_from_identifier(unique_student_identifier)
        msg += message
        if student is not None:
            progress_url = reverse('student_progress', kwargs={'course_id': course_id, 'student_id': student.id})
            track.views.server_track(request,
                                     '{instructor} requested progress page for {student} in {course}'.format(
                                         student=student_to_reset,
                                         student=student,
                                         instructor=request.user,
                                         course=course_id),
                                     {},
                                     page='idashboard')
            msg += "<a href='{0}' target='_blank'> Progress page for username: {1} with email address: {2}</a>.".format(progress_url, student_to_reset.username, student_to_reset.email)
        except:
            msg += "<font color='red'>Couldn't find student with that username. </font>"
            msg += "<a href='{0}' target='_blank'> Progress page for username: {1} with email address: {2}</a>.".format(progress_url, student.username, student.email)

    #----------------------------------------
    # export grades to remote gradebook
@@ -427,7 +523,7 @@ def instructor_dashboard(request, course_id):
        if problem_to_dump[-4:] == ".xml":
            problem_to_dump = problem_to_dump[:-4]
        try:
            (org, course_name, run) = course_id.split("/")
            (org, course_name, _) = course_id.split("/")
            module_state_key = "i4x://" + org + "/" + course_name + "/problem/" + problem_to_dump
            smdat = StudentModule.objects.filter(course_id=course_id,
                                                 module_state_key=module_state_key)
@@ -625,6 +721,16 @@ def instructor_dashboard(request, course_id):
    if use_offline:
        msg += "<br/><font color='orange'>Grades from %s</font>" % offline_grades_available(course_id)

    # generate list of pending background tasks
    if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
        instructor_tasks = get_running_instructor_tasks(course_id)
    else:
        instructor_tasks = None

    # display course stats only if there is no other table to display:
    course_stats = None
    if not datatable:
        course_stats = get_course_stats_table()
    #----------------------------------------
    # context for rendering

@@ -634,12 +740,13 @@ def instructor_dashboard(request, course_id):
        'instructor_access': instructor_access,
        'forum_admin_access': forum_admin_access,
        'datatable': datatable,
        'course_stats': course_stats,
        'msg': msg,
        'modeflag': {idash_mode: 'selectedmode'},
        'problems': problems,  # psychometrics
        'plots': plots,  # psychometrics
        'course_errors': modulestore().get_item_errors(course.location),

        'instructor_tasks': instructor_tasks,
        'djangopid': os.getpid(),
        'mitx_version': getattr(settings, 'MITX_VERSION_STRING', ''),
        'offline_grade_log': offline_grades_available(course_id),
@@ -1030,7 +1137,7 @@ def _do_unenroll_students(course_id, students):
    """Do the actual work of un-enrolling multiple students, presented as a string
    of emails separated by commas or returns"""

    old_students, old_students_lc = get_and_clean_student_list(students)
    old_students, _ = get_and_clean_student_list(students)
    status = dict([x, 'unprocessed'] for x in old_students)

    for student in old_students:
@@ -1054,7 +1161,7 @@ def _do_unenroll_students(course_id, students):
            try:
                ce[0].delete()
                status[student] = "un-enrolled"
            except Exception as err:
            except Exception:
                if not isok:
                    status[student] = "Error! Failed to un-enroll"

@@ -1113,11 +1220,11 @@ def get_answers_distribution(request, course_id):


def compute_course_stats(course):
    '''
    """
    Compute course statistics, including number of problems, videos, html.

    course is a CourseDescriptor from the xmodule system.
    '''
    """

    # walk the course by using get_children() until we come to the leaves; count the
    # number of different leaf types
@@ -1137,10 +1244,10 @@ def compute_course_stats(course):


def dump_grading_context(course):
    '''
    """
    Dump information about course grading context (eg which problems are graded in what assignments)
    Very useful for debugging grading_policy.json and policy.json
    '''
    """
    msg = "-----------------------------------------------------------------------------\n"
    msg += "Course grader:\n"

@@ -1164,10 +1271,10 @@ def dump_grading_context(course):
        msg += "--> Section %s:\n" % (gs)
        for sec in gsvals:
            s = sec['section_descriptor']
            format = getattr(s.lms, 'format', None)
            grade_format = getattr(s.lms, 'grade_format', None)
            aname = ''
            if format in graders:
                g = graders[format]
            if grade_format in graders:
                g = graders[grade_format]
                aname = '%s %02d' % (g.short_label, g.index)
                g.index += 1
            elif s.display_name in graders:
@@ -1176,8 +1283,73 @@ def dump_grading_context(course):
            notes = ''
            if getattr(s, 'score_by_attempt', False):
                notes = ', score by attempt!'
            msg += " %s (format=%s, Assignment=%s%s)\n" % (s.display_name, format, aname, notes)
            msg += " %s (grade_format=%s, Assignment=%s%s)\n" % (s.display_name, grade_format, aname, notes)
    msg += "all descriptors:\n"
    msg += "length=%d\n" % len(gc['all_descriptors'])
    msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')
    return msg


def get_background_task_table(course_id, problem_url, student=None):
    """
    Construct the "datatable" structure to represent background task history.

    Filters the background task history to the specified course and problem.
    If a student is provided, filters to only those tasks for which that student
    was specified.

    Returns a tuple of (msg, datatable), where the msg is a possible error message,
    and the datatable is the datatable to be used for display.
    """
    history_entries = get_instructor_task_history(course_id, problem_url, student)
    datatable = {}
    msg = ""
    # first check to see if there is any history at all
    # (note that we don't have to check that the arguments are valid; it
    # just won't find any entries.)
    if history_entries.count() == 0:
        if student is not None:
            template = '<font color="red">Failed to find any background tasks for course "{course}", module "{problem}" and student "{student}".</font>'
            msg += template.format(course=course_id, problem=problem_url, student=student.username)
        else:
            msg += '<font color="red">Failed to find any background tasks for course "{course}" and module "{problem}".</font>'.format(course=course_id, problem=problem_url)
    else:
        datatable['header'] = ["Task Type",
                               "Task Id",
                               "Requester",
                               "Submitted",
                               "Duration (sec)",
                               "Task State",
                               "Task Status",
                               "Task Output"]

        datatable['data'] = []
        for instructor_task in history_entries:
            # get duration info, if known:
            duration_sec = 'unknown'
            if hasattr(instructor_task, 'task_output') and instructor_task.task_output is not None:
                task_output = json.loads(instructor_task.task_output)
                if 'duration_ms' in task_output:
                    duration_sec = int(task_output['duration_ms'] / 1000.0)
            # get progress status message:
            success, task_message = get_task_completion_info(instructor_task)
            status = "Complete" if success else "Incomplete"
            # generate row for this task:
            row = [str(instructor_task.task_type),
                   str(instructor_task.task_id),
                   str(instructor_task.requester),
                   instructor_task.created.isoformat(' '),
                   duration_sec,
                   str(instructor_task.task_state),
                   status,
                   task_message]
            datatable['data'].append(row)

        if student is not None:
            datatable['title'] = "{course_id} > {location} > {student}".format(course_id=course_id,
                                                                               location=problem_url,
                                                                               student=student.username)
        else:
            datatable['title'] = "{course_id} > {location}".format(course_id=course_id, location=problem_url)

    return msg, datatable

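For illustration, one row of the resulting history datatable might look like this, matching the header above (all values invented):

    ['rescore_problem', '2519ff31-22d9-4a62-91e2-55495895b355', 'staff_user',
     '2013-04-30 17:52:41.204652', 52, 'SUCCESS', 'Complete',
     'hypothetical status message from get_task_completion_info']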
0  lms/djangoapps/instructor_task/__init__.py  Normal file

164  lms/djangoapps/instructor_task/api.py  Normal file
@@ -0,0 +1,164 @@
"""
|
||||
API for submitting background tasks by an instructor for a course.
|
||||
|
||||
Also includes methods for getting information about tasks that have
|
||||
already been submitted, filtered either by running state or input
|
||||
arguments.
|
||||
|
||||
"""
|
||||
|
||||
from celery.states import READY_STATES
|
||||
|
||||
from xmodule.modulestore.django import modulestore
|
||||
|
||||
from instructor_task.models import InstructorTask
|
||||
from instructor_task.tasks import (rescore_problem,
|
||||
reset_problem_attempts,
|
||||
delete_problem_state)
|
||||
|
||||
from instructor_task.api_helper import (check_arguments_for_rescoring,
|
||||
encode_problem_and_student_input,
|
||||
submit_task)
|
||||
|
||||
|
||||
def get_running_instructor_tasks(course_id):
|
||||
"""
|
||||
Returns a query of InstructorTask objects of running tasks for a given course.
|
||||
|
||||
Used to generate a list of tasks to display on the instructor dashboard.
|
||||
"""
|
||||
instructor_tasks = InstructorTask.objects.filter(course_id=course_id)
|
||||
# exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
|
||||
for state in READY_STATES:
|
||||
instructor_tasks = instructor_tasks.exclude(task_state=state)
|
||||
return instructor_tasks.order_by('-id')
|
||||
|
||||
|
def get_instructor_task_history(course_id, problem_url, student=None):
    """
    Returns a query of InstructorTask objects of historical tasks for a given course,
    that match a particular problem and optionally a student.
    """
    _, task_key = encode_problem_and_student_input(problem_url, student)

    instructor_tasks = InstructorTask.objects.filter(course_id=course_id, task_key=task_key)
    return instructor_tasks.order_by('-id')


def submit_rescore_problem_for_student(request, course_id, problem_url, student):
    """
    Request a problem to be rescored as a background task.

    The problem will be rescored for the specified student only. Parameters are the `course_id`,
    the `problem_url`, and the `student` as a User object.
    The url must specify the location of the problem, using i4x-type notation.

    ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
    if the problem is already being rescored for this student, or NotImplementedError if
    the problem doesn't support rescoring.

    This method makes sure the InstructorTask entry is committed.
    When called from any view that is wrapped by TransactionMiddleware,
    and thus in a "commit-on-success" transaction, an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save here. Any future database operations will take place in a
    separate transaction.

    """
    # check arguments: let exceptions return up to the caller.
    check_arguments_for_rescoring(course_id, problem_url)

    task_type = 'rescore_problem'
    task_class = rescore_problem
    task_input, task_key = encode_problem_and_student_input(problem_url, student)
    return submit_task(request, task_type, task_class, course_id, task_input, task_key)


def submit_rescore_problem_for_all_students(request, course_id, problem_url):
    """
    Request a problem to be rescored as a background task.

    The problem will be rescored for all students who have accessed the
    particular problem in a course and have provided and checked an answer.
    Parameters are the `course_id` and the `problem_url`.
    The url must specify the location of the problem, using i4x-type notation.

    ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
    if the problem is already being rescored, or NotImplementedError if the problem doesn't
    support rescoring.

    This method makes sure the InstructorTask entry is committed.
    When called from any view that is wrapped by TransactionMiddleware,
    and thus in a "commit-on-success" transaction, an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save here. Any future database operations will take place in a
    separate transaction.
    """
    # check arguments: let exceptions return up to the caller.
    check_arguments_for_rescoring(course_id, problem_url)

    # check to see if task is already running, and reserve it otherwise
    task_type = 'rescore_problem'
    task_class = rescore_problem
    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, task_type, task_class, course_id, task_input, task_key)


def submit_reset_problem_attempts_for_all_students(request, course_id, problem_url):
    """
    Request to have attempts reset for a problem as a background task.

    The problem's attempts will be reset for all students who have accessed the
    particular problem in a course. Parameters are the `course_id` and
    the `problem_url`. The url must specify the location of the problem,
    using i4x-type notation.

    ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
    if the problem is already being reset.

    This method makes sure the InstructorTask entry is committed.
    When called from any view that is wrapped by TransactionMiddleware,
    and thus in a "commit-on-success" transaction, an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save here. Any future database operations will take place in a
    separate transaction.
    """
    # check arguments: make sure that the problem_url is defined
    # (since that's currently typed in). If the corresponding module descriptor doesn't exist,
    # an exception will be raised. Let it pass up to the caller.
    modulestore().get_instance(course_id, problem_url)

    task_type = 'reset_problem_attempts'
    task_class = reset_problem_attempts
    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, task_type, task_class, course_id, task_input, task_key)


def submit_delete_problem_state_for_all_students(request, course_id, problem_url):
    """
    Request to have state deleted for a problem as a background task.

    The problem's state will be deleted for all students who have accessed the
    particular problem in a course. Parameters are the `course_id` and
    the `problem_url`. The url must specify the location of the problem,
    using i4x-type notation.

    ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
    if the particular problem's state is already being deleted.

    This method makes sure the InstructorTask entry is committed.
    When called from any view that is wrapped by TransactionMiddleware,
    and thus in a "commit-on-success" transaction, an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save here. Any future database operations will take place in a
    separate transaction.
    """
    # check arguments: make sure that the problem_url is defined
    # (since that's currently typed in). If the corresponding module descriptor doesn't exist,
    # an exception will be raised. Let it pass up to the caller.
    modulestore().get_instance(course_id, problem_url)

    task_type = 'delete_problem_state'
    task_class = delete_problem_state
    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, task_type, task_class, course_id, task_input, task_key)
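As a usage sketch (a hypothetical caller; the view name, course id, and problem URL below are invented), a view would submit a rescore using this API and handle the documented exceptions:

    from instructor_task.api import submit_rescore_problem_for_all_students
    from instructor_task.api_helper import AlreadyRunningError

    def my_rescore_view(request):
        # invented identifiers, for illustration only
        course_id = "MITx/6.002x/2013_Spring"
        problem_url = "i4x://MITx/6.002x/problem/quiz1"
        try:
            instructor_task = submit_rescore_problem_for_all_students(request, course_id, problem_url)
        except AlreadyRunningError:
            pass  # a task for this problem is already in flight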
266  lms/djangoapps/instructor_task/api_helper.py  Normal file
@@ -0,0 +1,266 @@
import hashlib
import json
import logging

from django.db import transaction

from celery.result import AsyncResult
from celery.states import READY_STATES, SUCCESS, FAILURE, REVOKED

from courseware.module_render import get_xqueue_callback_url_prefix

from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask, PROGRESS


log = logging.getLogger(__name__)


class AlreadyRunningError(Exception):
    """Exception indicating that a background task is already running"""
    pass


def _task_is_running(course_id, task_type, task_key):
    """Checks if a particular task is already running"""
    runningTasks = InstructorTask.objects.filter(course_id=course_id, task_type=task_type, task_key=task_key)
    # exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
    for state in READY_STATES:
        runningTasks = runningTasks.exclude(task_state=state)
    return len(runningTasks) > 0


@transaction.autocommit
def _reserve_task(course_id, task_type, task_key, task_input, requester):
    """
    Creates a database entry to indicate that a task is in progress.

    Throws AlreadyRunningError if the task is already in progress.
    Includes the creation of an arbitrary value for task_id, to be
    submitted with the task call to celery.

    Autocommit annotation makes sure the database entry is committed.
    When called from any view that is wrapped by TransactionMiddleware,
    and thus in a "commit-on-success" transaction, this autocommit here
    will cause any pending transaction to be committed by a successful
    save here. Any future database operations will take place in a
    separate transaction.

    Note that there is a chance of a race condition here, when two users
    try to run the same task at almost exactly the same time. One user
    could be after the check and before the create when the second user
    gets to the check. At that point, both users are able to run their
    tasks simultaneously. This is deemed a small enough risk to not
    put in further safeguards.
    """

    if _task_is_running(course_id, task_type, task_key):
        raise AlreadyRunningError("requested task is already running")

    # Create log entry now, so that future requests will know it's running.
    return InstructorTask.create(course_id, task_type, task_key, task_input, requester)


def _get_xmodule_instance_args(request):
    """
    Calculate parameters needed for instantiating xmodule instances.

    The `request_info` will be passed to a tracking log function, to provide information
    about the source of the task request. The `xqueue_callback_url_prefix` is used to
    permit old-style xqueue callbacks directly to the appropriate module in the LMS.
    """
    request_info = {'username': request.user.username,
                    'ip': request.META['REMOTE_ADDR'],
                    'agent': request.META.get('HTTP_USER_AGENT', ''),
                    'host': request.META['SERVER_NAME'],
                    }

    xmodule_instance_args = {'xqueue_callback_url_prefix': get_xqueue_callback_url_prefix(request),
                             'request_info': request_info,
                             }
    return xmodule_instance_args


def _update_instructor_task(instructor_task, task_result):
    """
    Updates and possibly saves an InstructorTask entry based on a task Result.

    Used when updated status is requested.

    The `instructor_task` that is passed in is updated in-place, but
    is usually not saved. In general, tasks that have finished (either with
    success or failure) should have their entries updated by the task itself,
    so are not updated here. Tasks that are still running are not updated
    while they run. So the one exception to the no-save rule is tasks that
    are in a "revoked" state. This may mean that the task never had the
    opportunity to update the InstructorTask entry.

    Calculates json to store in "task_output" field of the `instructor_task`,
    as well as updating the task_state.

    For a successful task, the json contains the output of the task result.
    For a failed task, the json contains "exception", "message", and "traceback"
    keys. A revoked task just has a "message" stating it was revoked.
    """
    # Pull values out of the result object as close to each other as possible.
    # If we wait and check the values later, the values for the state and result
    # are more likely to have changed. Pull the state out first, and
    # then code assuming that the result may not exactly match the state.
    task_id = task_result.task_id
    result_state = task_result.state
    returned_result = task_result.result
    result_traceback = task_result.traceback

    # Don't update the InstructorTask entry if we don't have to:
    entry_needs_saving = False
    task_output = None

    if result_state in [PROGRESS, SUCCESS]:
        # construct a status message directly from the task result's result:
        # it needs to go back with the entry passed in.
        log.info("background task (%s), state %s: result: %s", task_id, result_state, returned_result)
        task_output = InstructorTask.create_output_for_success(returned_result)
    elif result_state == FAILURE:
        # on failure, the result's result contains the exception that caused the failure
        exception = returned_result
        traceback = result_traceback if result_traceback is not None else ''
        log.warning("background task (%s) failed: %s %s", task_id, returned_result, traceback)
        task_output = InstructorTask.create_output_for_failure(exception, result_traceback)
    elif result_state == REVOKED:
        # on revocation, the result's result doesn't contain anything
        # but we cannot rely on the worker thread to set this status,
        # so we set it here.
        entry_needs_saving = True
        log.warning("background task (%s) revoked.", task_id)
        task_output = InstructorTask.create_output_for_revoked()

    # save progress and state into the entry, even if it's not being saved:
    # when celery is run in "ALWAYS_EAGER" mode, progress needs to go back
    # with the entry passed in.
    instructor_task.task_state = result_state
    if task_output is not None:
        instructor_task.task_output = task_output

    if entry_needs_saving:
        instructor_task.save()


def get_updated_instructor_task(task_id):
    """
    Returns InstructorTask object corresponding to a given `task_id`.

    If the InstructorTask thinks the task is still running, then
    the task's result is checked to return an updated state and output.
    """
    # First check if the task_id is known
    try:
        instructor_task = InstructorTask.objects.get(task_id=task_id)
    except InstructorTask.DoesNotExist:
        log.warning("query for InstructorTask status failed: task_id=(%s) not found", task_id)
        return None

    # if the task is not already known to be done, then we need to query
    # the underlying task's result object:
    if instructor_task.task_state not in READY_STATES:
        result = AsyncResult(task_id)
        _update_instructor_task(instructor_task, result)

    return instructor_task


def get_status_from_instructor_task(instructor_task):
    """
    Get the status for a given InstructorTask entry.

    Returns a dict, with the following keys:
      'task_id': id assigned by LMS and used by celery.
      'task_state': state of task as stored in celery's result store.
      'in_progress': boolean indicating if task is still running.
      'task_progress': dict containing progress information. This includes:
          'attempted': number of attempts made
          'updated': number of attempts that "succeeded"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages. Should be past-tense.
          'duration_ms': how long the task has (or had) been running.
          'exception': name of exception class raised in failed tasks.
          'message': returned for failed and revoked tasks.
          'traceback': optional, returned if task failed and produced a traceback.

    """
    status = {}

    if instructor_task is not None:
        # status basic information matching what's stored in InstructorTask:
        status['task_id'] = instructor_task.task_id
        status['task_state'] = instructor_task.task_state
        status['in_progress'] = instructor_task.task_state not in READY_STATES
        if instructor_task.task_output is not None:
            status['task_progress'] = json.loads(instructor_task.task_output)

    return status


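For illustration, the status dict for a finished rescore might look like this (all values invented):

    # Hypothetical return value of get_status_from_instructor_task:
    {'task_id': '2519ff31-22d9-4a62-91e2-55495895b355',
     'task_state': 'SUCCESS',
     'in_progress': False,
     'task_progress': {'attempted': 120, 'updated': 118, 'total': 120,
                       'action_name': 'rescored', 'duration_ms': 52000}}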
def check_arguments_for_rescoring(course_id, problem_url):
    """
    Do simple checks on the descriptor to confirm that it supports rescoring.

    Confirms first that the problem_url is defined (since that's currently typed
    in). An ItemNotFoundException is raised if the corresponding module
    descriptor doesn't exist. NotImplementedError is raised if the
    corresponding module doesn't support rescoring calls.
    """
    descriptor = modulestore().get_instance(course_id, problem_url)
    if not hasattr(descriptor, 'module_class') or not hasattr(descriptor.module_class, 'rescore_problem'):
        msg = "Specified module does not support rescoring."
        raise NotImplementedError(msg)


def encode_problem_and_student_input(problem_url, student=None):
    """
    Encode problem_url and optional student into task_key and task_input values.

    `problem_url` is the full URL of the problem.
    `student` is the user object of the student.
    """
    if student is not None:
        task_input = {'problem_url': problem_url, 'student': student.username}
        task_key_stub = "{student}_{problem}".format(student=student.id, problem=problem_url)
    else:
        task_input = {'problem_url': problem_url}
        task_key_stub = "_{problem}".format(problem=problem_url)

    # create the key value by using MD5 hash:
    task_key = hashlib.md5(task_key_stub).hexdigest()

    return task_input, task_key


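To make the key derivation concrete, a short sketch (the student id and problem URL are invented):

    import hashlib
    stub = "42_i4x://MITx/6.002x/problem/quiz1"  # "{student.id}_{problem_url}" form
    task_key = hashlib.md5(stub).hexdigest()     # 32-character hex string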
def submit_task(request, task_type, task_class, course_id, task_input, task_key):
    """
    Helper method to submit a task.

    Reserves the requested task, based on the `course_id`, `task_type`, and `task_key`,
    checking to see if the task is already running. The `task_input` is also passed so that
    it can be stored in the resulting InstructorTask entry. Arguments are extracted from
    the `request` provided by the originating server request. Then the task is submitted to run
    asynchronously, using the specified `task_class` and using the task_id constructed for it.

    `AlreadyRunningError` is raised if the task is already running.

    The _reserve_task method makes sure the InstructorTask entry is committed.
    When called from any view that is wrapped by TransactionMiddleware,
    and thus in a "commit-on-success" transaction, an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save here. Any future database operations will take place in a
    separate transaction.

    """
    # check to see if task is already running, and reserve it otherwise:
    instructor_task = _reserve_task(course_id, task_type, task_key, task_input, request.user)

    # submit task:
    task_id = instructor_task.task_id
    task_args = [instructor_task.id, _get_xmodule_instance_args(request)]
    task_class.apply_async(task_args, task_id=task_id)

    return instructor_task
86  lms/djangoapps/instructor_task/migrations/0001_initial.py  Normal file
@@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'InstructorTask'
        db.create_table('instructor_task_instructortask', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
            ('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
            ('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('instructor_task', ['InstructorTask'])


    def backwards(self, orm):
        # Deleting model 'InstructorTask'
        db.delete_table('instructor_task_instructortask')


    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'instructor_task.instructortask': {
            'Meta': {'object_name': 'InstructorTask'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
            'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
            'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['instructor_task']
156  lms/djangoapps/instructor_task/models.py  Normal file
@@ -0,0 +1,156 @@
"""
|
||||
WE'RE USING MIGRATIONS!
|
||||
|
||||
If you make changes to this model, be sure to create an appropriate migration
|
||||
file and check it in at the same time as your model changes. To do that,
|
||||
|
||||
1. Go to the edx-platform dir
|
||||
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
|
||||
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
|
||||
|
||||
|
||||
ASSUMPTIONS: modules have unique IDs, even across different module_types
|
||||
|
||||
"""
|
||||
from uuid import uuid4
|
||||
import json
|
||||
|
||||
from django.contrib.auth.models import User
|
||||
from django.db import models, transaction
|
||||
|
||||
|
||||
# define custom states used by InstructorTask
|
||||
QUEUING = 'QUEUING'
|
||||
PROGRESS = 'PROGRESS'
|
||||
|
||||
|
||||
class InstructorTask(models.Model):
|
||||
"""
|
||||
Stores information about background tasks that have been submitted to
|
||||
perform work by an instructor (or course staff).
|
||||
Examples include grading and rescoring.
|
||||
|
||||
`task_type` identifies the kind of task being performed, e.g. rescoring.
|
||||
`course_id` uses the course run's unique id to identify the course.
|
||||
`task_key` stores relevant input arguments encoded into key value for testing to see
|
||||
if the task is already running (together with task_type and course_id).
|
||||
`task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
|
||||
Examples include url of problem being rescored, id of student if only one student being rescored.
|
||||
|
||||
`task_id` stores the id used by celery for the background task.
|
||||
`task_state` stores the last known state of the celery task
|
||||
`task_output` stores the output of the celery task.
|
||||
Format is a JSON-serialized dict. Content varies by task_type and task_state.
|
||||
|
||||
`requester` stores id of user who submitted the task
|
||||
`created` stores date that entry was first created
|
||||
`updated` stores date that entry was last modified
|
||||
"""
    task_type = models.CharField(max_length=50, db_index=True)
    course_id = models.CharField(max_length=255, db_index=True)
    task_key = models.CharField(max_length=255, db_index=True)
    task_input = models.CharField(max_length=255)
    task_id = models.CharField(max_length=255, db_index=True)  # max_length from celery_taskmeta
    task_state = models.CharField(max_length=50, null=True, db_index=True)  # max_length from celery_taskmeta
    task_output = models.CharField(max_length=1024, null=True)
    requester = models.ForeignKey(User, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True)
    updated = models.DateTimeField(auto_now=True)

    def __repr__(self):
        return 'InstructorTask<%r>' % ({
            'task_type': self.task_type,
            'course_id': self.course_id,
            'task_input': self.task_input,
            'task_id': self.task_id,
            'task_state': self.task_state,
            'task_output': self.task_output,
        },)

    def __unicode__(self):
        return unicode(repr(self))

    @classmethod
    def create(cls, course_id, task_type, task_key, task_input, requester):
        # create the task_id here, and pass it into celery:
        task_id = str(uuid4())

        json_task_input = json.dumps(task_input)

        # check the length of task_input, and raise an exception if it's too long:
        if len(json_task_input) > 255:
            fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
            msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
            raise ValueError(msg)

        # create the task, then save it:
        instructor_task = cls(course_id=course_id,
                              task_type=task_type,
                              task_id=task_id,
                              task_key=task_key,
                              task_input=json_task_input,
                              task_state=QUEUING,
                              requester=requester)
        instructor_task.save_now()

        return instructor_task

    @transaction.autocommit
    def save_now(self):
        """Writes InstructorTask immediately, ensuring the transaction is committed."""
        self.save()

    @staticmethod
    def create_output_for_success(returned_result):
        """
        Converts successful result to output format.

        Raises a ValueError exception if the output is too long.
        """
        # In future, there should be a check here that the resulting JSON
        # will fit in the column. In the meantime, just raise an exception.
        json_output = json.dumps(returned_result)
        if len(json_output) > 1023:
            raise ValueError("Length of task output is too long: {0}".format(json_output))
        return json_output

    @staticmethod
    def create_output_for_failure(exception, traceback_string):
        """
        Converts failed result information to output format.

        Traceback information is truncated or not included if it would result in an output string
        that would not fit in the database. If the output is still too long, then the
        exception message is also truncated.

        Truncation is indicated by adding "..." to the end of the value.
        """
        tag = '...'
        task_progress = {'exception': type(exception).__name__, 'message': str(exception.message)}
        if traceback_string is not None:
            # truncate any traceback that goes into the InstructorTask model:
            task_progress['traceback'] = traceback_string
        json_output = json.dumps(task_progress)
        # if the resulting output is too long, then first shorten the
        # traceback, and then the message, until it fits.
        too_long = len(json_output) - 1023
        if too_long > 0:
            if traceback_string is not None:
                if too_long >= len(traceback_string) - len(tag):
                    # remove the traceback entry entirely (so no key or value)
                    del task_progress['traceback']
                    too_long -= (len(traceback_string) + len('traceback'))
                else:
                    # truncate the traceback:
                    task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
                    too_long = 0
            if too_long > 0:
                # we need to shorten the message:
                task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
            json_output = json.dumps(task_progress)
        return json_output

    @staticmethod
    def create_output_for_revoked():
        """Creates standard message to store in output format for revoked tasks."""
        return json.dumps({'message': 'Task revoked before running'})
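
For reference, a minimal sketch of the truncation behavior above (illustrative
only; the 1023-character cap mirrors the task_output column width, and the
`exc` and `long_traceback` values here are made up):

    exc = ValueError('boom')
    long_traceback = 'x' * 5000
    output = InstructorTask.create_output_for_failure(exc, long_traceback)
    assert len(output) <= 1023
    assert json.loads(output)['traceback'].endswith('...')
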
lms/djangoapps/instructor_task/tasks.py (new file, 97 lines)
@@ -0,0 +1,97 @@
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.

At present, these tasks all operate on StudentModule objects in one way or another,
so they share a visitor architecture. Each task defines an "update function" that
takes a module_descriptor, a particular StudentModule object, and xmodule_instance_args.

A task may optionally specify a "filter function" that takes a query for StudentModule
objects, and adds additional filter clauses.

A task also passes through "xmodule_instance_args", which are used to provide
information to our code that instantiates xmodule instances.

The task definition then calls the traversal function, passing in the three arguments
above, along with the id value for an InstructorTask object. The InstructorTask
object contains a 'task_input' column, which holds a JSON-encoded dict containing
a problem URL and optionally a student. These are used to set up the initial value
of the query for traversing StudentModule objects.

"""
from celery import task
from instructor_task.tasks_helper import (update_problem_module_state,
                                          rescore_problem_module_state,
                                          reset_attempts_module_state,
                                          delete_problem_module_state)


@task
def rescore_problem(entry_id, xmodule_instance_args):
    """Rescores a problem in a course, for all students or one specific student.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem to be rescored. (required)

      'student': the identifier (username or email) of a particular user whose
          problem submission should be rescored. If not specified, all problem
          submissions for the problem will be rescored.

    `xmodule_instance_args` provides information needed by _get_module_instance_for_task()
    to instantiate an xmodule instance.
    """
    action_name = 'rescored'
    update_fcn = rescore_problem_module_state
    filter_fcn = lambda modules_to_update: modules_to_update.filter(state__contains='"done": true')
    return update_problem_module_state(entry_id,
                                       update_fcn, action_name, filter_fcn=filter_fcn,
                                       xmodule_instance_args=xmodule_instance_args)


@task
def reset_problem_attempts(entry_id, xmodule_instance_args):
    """Resets problem attempts to zero for a particular problem for all students in a course.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem whose attempts should be reset. (required)

    `xmodule_instance_args` provides information needed by _get_module_instance_for_task()
    to instantiate an xmodule instance.
    """
    action_name = 'reset'
    update_fcn = reset_attempts_module_state
    return update_problem_module_state(entry_id,
                                       update_fcn, action_name, filter_fcn=None,
                                       xmodule_instance_args=xmodule_instance_args)


@task
def delete_problem_state(entry_id, xmodule_instance_args):
    """Deletes problem state entirely for all students on a particular problem in a course.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem whose state should be deleted. (required)

    `xmodule_instance_args` provides information needed by _get_module_instance_for_task()
    to instantiate an xmodule instance.
    """
    action_name = 'deleted'
    update_fcn = delete_problem_module_state
    return update_problem_module_state(entry_id,
                                       update_fcn, action_name, filter_fcn=None,
                                       xmodule_instance_args=xmodule_instance_args)
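
The submission side is not part of this file (it lives in instructor_task.api);
a minimal sketch of the wiring implied by the signatures above, where
`problem_url`, `course_id`, `task_key`, `requester`, and `xmodule_instance_args`
are assumed to be defined by the caller:

    task_input = {'problem_url': problem_url}   # add a 'student' key to limit to one user
    entry = InstructorTask.create(course_id, 'rescore_problem', task_key, task_input, requester)
    # reuse the entry's pre-generated task_id so celery's id matches the model's:
    rescore_problem.apply_async((entry.id, xmodule_instance_args), task_id=entry.task_id)
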
lms/djangoapps/instructor_task/tasks_helper.py (new file, 388 lines)
@@ -0,0 +1,388 @@
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.

"""

import json
from time import time
from sys import exc_info
from traceback import format_exc

from celery import current_task
from celery.utils.log import get_task_logger
from celery.signals import worker_process_init
from celery.states import SUCCESS, FAILURE

from django.contrib.auth.models import User
from django.db import transaction
from dogapi import dog_stats_api

from xmodule.modulestore.django import modulestore

import mitxmako.middleware as middleware
from track.views import task_track

from courseware.models import StudentModule
from courseware.model_data import ModelDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_task.models import InstructorTask, PROGRESS

# define different loggers for use within tasks and on client side
TASK_LOG = get_task_logger(__name__)

# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'


def initialize_mako(sender=None, conf=None, **kwargs):
    """
    Get Mako templates to work on the celery worker server's worker thread.

    The initialization of Mako templating is usually done when Django is
    initializing middleware packages as part of processing a server request.
    When this is run on a celery worker server, no such initialization is
    called.

    To make sure that we don't load this twice (just in case), we look for the
    result: the defining of the lookup paths for templates.
    """
    if 'main' not in middleware.lookup:
        TASK_LOG.info("Initializing Mako middleware explicitly")
        middleware.MakoMiddleware()

# Actually make the call to define the hook:
worker_process_init.connect(initialize_mako)


class UpdateProblemModuleStateError(Exception):
    """
    Error signaling a fatal condition while updating problem modules.

    Used when the current module cannot be processed and no more
    modules should be attempted.
    """
    pass


def _get_current_task():
    """Stub to make it easier to test without actually running Celery."""
    return current_task
def _perform_module_state_update(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn,
                                 xmodule_instance_args):
    """
    Performs a generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, the update is performed on modules for all students on the
    specified problem.

    If `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed three arguments: the module_descriptor for the module pointed to by the
    module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
    passed through. If the value returned by the update function evaluates to a boolean True,
    the update is successful; False indicates the update on the particular student module failed.
    A raised exception indicates a fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

      'attempted': number of attempts made
      'updated': number of attempts that "succeeded"
      'total': number of possible subtasks to attempt
      'action_name': user-visible verb to use in status messages. Should be past-tense.
          Pass-through of input `action_name`.
      'duration_ms': how long the task has (or had) been running.

    Because this is run inside a task, it does not catch exceptions. These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.

    """
    # get start time for task:
    start_time = time()

    # find the problem descriptor:
    module_descriptor = modulestore().get_instance(course_id, module_state_key)

    # find the student modules in question:
    modules_to_update = StudentModule.objects.filter(course_id=course_id,
                                                     module_state_key=module_state_key)

    # give the option of updating an individual student. If not specified,
    # then update all students who have responded to the problem so far.
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student,
        # and let it throw an exception if none is found.
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        else:
            student = User.objects.get(username=student_identifier)

    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    # perform the main loop
    num_updated = 0
    num_attempted = 0
    num_total = modules_to_update.count()

    def get_task_progress():
        """Return a dict containing info about the current task."""
        current_time = time()
        progress = {'action_name': action_name,
                    'attempted': num_attempted,
                    'updated': num_updated,
                    'total': num_total,
                    'duration_ms': int((current_time - start_time) * 1000),
                    }
        return progress

    task_progress = get_task_progress()
    _get_current_task().update_state(state=PROGRESS, meta=task_progress)
    for module_to_update in modules_to_update:
        num_attempted += 1
        # There is no try here: if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=['action:{name}'.format(name=action_name)]):
            if update_fcn(module_descriptor, module_to_update, xmodule_instance_args):
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                num_updated += 1

        # update task status:
        task_progress = get_task_progress()
        _get_current_task().update_state(state=PROGRESS, meta=task_progress)

    return task_progress
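
# For illustration only (not part of this module): the contracts that
# `update_fcn` and `filter_fcn` must satisfy, with hypothetical names.
#
#     def noop_update(module_descriptor, student_module, xmodule_instance_args):
#         """An update_fcn: a truthy return value counts as an update."""
#         return student_module.state is not None
#
#     def done_only(modules_to_update):
#         """A filter_fcn: narrows the queryset before the main loop runs."""
#         return modules_to_update.filter(state__contains='"done": true')
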

def update_problem_module_state(entry_id, update_fcn, action_name, filter_fcn,
                                xmodule_instance_args):
    """
    Performs a generic update by visiting StudentModule instances with the update_fcn provided.

    The `entry_id` is the primary key for the InstructorTask entry representing the task. This function
    updates the entry on success and failure of the _perform_module_state_update function it
    wraps. It sets the entry's value for task_state based on what Celery would set it to once
    the task returns to Celery: FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to _perform_module_state_update, and are documented there.

    If no exceptions are raised, a dict containing the task's result is returned, with the following keys:

      'attempted': number of attempts made
      'updated': number of attempts that "succeeded"
      'total': number of possible subtasks to attempt
      'action_name': user-visible verb to use in status messages. Should be past-tense.
          Pass-through of input `action_name`.
      'duration_ms': how long the task has (or had) been running.

    Before returning, this is also JSON-serialized and stored in the task_output column of the InstructorTask entry.

    If an exception is raised internally, it is caught and recorded in the InstructorTask entry.
    This is also a JSON-serialized dict, stored in the task_output column, containing the following keys:

      'exception': type of exception object
      'message': error message from exception object
      'traceback': traceback information (truncated if necessary)

    Once the exception is caught, it is raised again and allowed to pass up to the
    task-running level, so that it can also set the failure modes and capture the error trace in the
    result object that Celery creates.

    """

    # get the InstructorTask to be updated. If this fails, then let the exception propagate to Celery.
    # There's no point in catching it here.
    entry = InstructorTask.objects.get(pk=entry_id)

    # get inputs to use in this task from the entry:
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)
    module_state_key = task_input.get('problem_url')
    student_ident = task_input['student'] if 'student' in task_input else None

    fmt = 'Starting to update problem modules as task "{task_id}": course "{course_id}" problem "{state_key}": nothing {action} yet'
    TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, action=action_name))

    # add task_id to xmodule_instance_args, so that it can be output with tracking info:
    if xmodule_instance_args is not None:
        xmodule_instance_args['task_id'] = task_id

    # Now that we have an entry we can try to catch failures:
    task_progress = None
    try:
        # Check that the task_id submitted in the InstructorTask matches the current task
        # that is running.
        request_task_id = _get_current_task().request.id
        if task_id != request_task_id:
            fmt = 'Requested task "{task_id}" did not match actual task "{actual_id}"'
            message = fmt.format(task_id=task_id, actual_id=request_task_id)
            TASK_LOG.error(message)
            raise UpdateProblemModuleStateError(message)

        # Now do the work:
        with dog_stats_api.timer('instructor_tasks.module.time.overall', tags=['action:{name}'.format(name=action_name)]):
            task_progress = _perform_module_state_update(course_id, module_state_key, student_ident, update_fcn,
                                                         action_name, filter_fcn, xmodule_instance_args)
        # If we get here, we assume we've succeeded, so update the InstructorTask entry in anticipation.
        # But we do this within the try, in case creating the task_output causes an exception to be
        # raised.
        entry.task_output = InstructorTask.create_output_for_success(task_progress)
        entry.task_state = SUCCESS
        entry.save_now()

    except Exception:
        # try to write out the failure to the entry before failing
        _, exception, traceback = exc_info()
        traceback_string = format_exc() if traceback is not None else ''
        TASK_LOG.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string)
        entry.task_output = InstructorTask.create_output_for_failure(exception, traceback_string)
        entry.task_state = FAILURE
        entry.save_now()
        raise

    # log and exit, returning task_progress info as task result:
    fmt = 'Finishing task "{task_id}": course "{course_id}" problem "{state_key}": final: {progress}'
    TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, progress=task_progress))
    return task_progress
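
# Illustrative task_output values (JSON-decoded) that this function stores,
# based on the docstring above; the numbers are made up:
#     success: {'attempted': 5, 'updated': 4, 'total': 5,
#               'action_name': 'rescored', 'duration_ms': 1234}
#     failure: {'exception': 'ZeroDivisionError',
#               'message': 'bad things happened', 'traceback': '...'}
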

def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Gets task_id from the `xmodule_instance_args` dict, or returns a default value if missing."""
    return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID


def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None):
    """
    Instantiates an xmodule instance for the given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    model_data_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)

    # get request-related tracking information from args passthrough, and supplement with task-specific
    # information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}

    def make_track_function():
        '''
        Make a tracking function that logs what happened.

        For insertion into ModuleSystem, and used by CapaModule, which will
        provide the event_type (as string) and event (as dict) as arguments.
        The request_info and task_info (and page) are provided here.
        '''
        return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')

    xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
        if xmodule_instance_args is not None else ''

    return get_module_for_descriptor_internal(student, module_descriptor, model_data_cache, course_id,
                                              make_track_function(), xqueue_callback_url_prefix,
                                              grade_bucket_type=grade_bucket_type)
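
# Illustrative shape of `xmodule_instance_args` as consumed above; the dict
# itself may be None, each key is optional, and the URL value is hypothetical:
#     {'request_info': {...},  # passed through to task_track
#      'xqueue_callback_url_prefix': 'https://lms.example.com',
#      'task_id': '...'}  # added by update_problem_module_state
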
@transaction.autocommit
def rescore_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Raises an exception if the rescoring is fatal and the enclosing loop should be aborted.
    In particular, raises UpdateProblemModuleStateError if the module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns True if the problem was successfully rescored for the given student, and False
    if the problem encountered some kind of error in rescoring.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    module_state_key = student_module.module_state_key
    instance = _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args, grade_bucket_type='rescore')

    if instance is None:
        # Either permissions just changed, or someone is trying to be clever
        # and load something they shouldn't have access to.
        msg = "No module {loc} for student {student}--access denied?".format(loc=module_state_key,
                                                                             student=student)
        TASK_LOG.debug(msg)
        raise UpdateProblemModuleStateError(msg)

    if not hasattr(instance, 'rescore_problem'):
        # This should also not happen, since it should already be checked in the caller,
        # but check here to be sure.
        msg = "Specified problem does not support rescoring."
        raise UpdateProblemModuleStateError(msg)

    result = instance.rescore_problem()
    if 'success' not in result:
        # don't consider these fatal, but False means that the individual call didn't complete:
        TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
                         "unexpected response {msg}".format(msg=result, course=course_id, loc=module_state_key, student=student))
        return False
    elif result['success'] not in ['correct', 'incorrect']:
        TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
                         "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
        return False
    else:
        TASK_LOG.debug(u"successfully processed rescore call for course {course}, problem {loc} and student {student}: "
                       "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
        return True
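
# Shapes of instance.rescore_problem() results, as handled above (illustrative):
#     {'success': 'correct'}        # counted as updated
#     {'success': 'incorrect'}      # also counted as updated
#     {'success': 'Error: ...'}     # logged as a warning; returns False
#     {}                            # unexpected response; returns False
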
@transaction.autocommit
def reset_attempts_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
    """
    Resets problem attempts to zero for the specified `student_module`.

    Always returns True, indicating success, unless it raises an exception due to a database error.
    """
    problem_state = json.loads(student_module.state) if student_module.state else {}
    if 'attempts' in problem_state:
        old_number_of_attempts = problem_state["attempts"]
        if old_number_of_attempts > 0:
            problem_state["attempts"] = 0
            # convert back to json and save
            student_module.state = json.dumps(problem_state)
            student_module.save()
            # get request-related tracking information from args passthrough,
            # and supplement with task-specific information:
            request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
            task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
            event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
            task_track(request_info, task_info, 'problem_reset_attempts', event_info, page='x_module_task')

    # consider the reset to be successful, even if no update was performed. (It's just "optimized".)
    return True
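
# Illustrative effect on StudentModule.state (JSON), assuming three prior attempts:
#     before: {"attempts": 3, "done": true, ...}
#     after:  {"attempts": 0, "done": true, ...}
# with a tracking event_info of {"old_attempts": 3, "new_attempts": 0}.
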
@transaction.autocommit
def delete_problem_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
    """
    Deletes the StudentModule entry.

    Always returns True, indicating success, unless it raises an exception due to a database error.
    """
    student_module.delete()
    # get request-related tracking information from args passthrough,
    # and supplement with task-specific information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
    task_track(request_info, task_info, 'problem_delete_state', {}, page='x_module_task')
    return True
lms/djangoapps/instructor_task/tests/__init__.py (new file, 0 lines)

lms/djangoapps/instructor_task/tests/factories.py (new file, 19 lines)
@@ -0,0 +1,19 @@
import json

from factory import DjangoModelFactory, SubFactory
from student.tests.factories import UserFactory as StudentUserFactory
from instructor_task.models import InstructorTask
from celery.states import PENDING


class InstructorTaskFactory(DjangoModelFactory):
    FACTORY_FOR = InstructorTask

    task_type = 'rescore_problem'
    course_id = "MITx/999/Robot_Super_Course"
    task_input = json.dumps({})
    task_key = None
    task_id = None
    task_state = PENDING
    task_output = None
    requester = SubFactory(StudentUserFactory)
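
A minimal usage sketch (the real pattern appears in _create_entry in
test_base.py below; uuid4 is assumed to be imported, and the requester is
supplied by the SubFactory unless overridden):

    entry = InstructorTaskFactory.create(task_key='', task_id=str(uuid4()))
    assert entry.task_state == PENDING
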
lms/djangoapps/instructor_task/tests/test_api.py (new file, 138 lines)
@@ -0,0 +1,138 @@
"""
Tests for LMS instructor background task queue management
"""

from xmodule.modulestore.exceptions import ItemNotFoundError

from courseware.tests.factories import UserFactory

from instructor_task.api import (get_running_instructor_tasks,
                                 get_instructor_task_history,
                                 submit_rescore_problem_for_all_students,
                                 submit_rescore_problem_for_student,
                                 submit_reset_problem_attempts_for_all_students,
                                 submit_delete_problem_state_for_all_students)

from instructor_task.api_helper import AlreadyRunningError
from instructor_task.models import InstructorTask, PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
                                             InstructorTaskModuleTestCase,
                                             TEST_COURSE_ID)


class InstructorTaskReportTest(InstructorTaskTestCase):
    """
    Tests API and view methods that involve the reporting of status for background tasks.
    """

    def test_get_running_instructor_tasks(self):
        # when fetching running tasks, we get all running tasks, and only running tasks
        for _ in range(1, 5):
            self._create_failure_entry()
            self._create_success_entry()
        progress_task_ids = [self._create_progress_entry().task_id for _ in range(1, 5)]
        task_ids = [instructor_task.task_id for instructor_task in get_running_instructor_tasks(TEST_COURSE_ID)]
        self.assertEquals(set(task_ids), set(progress_task_ids))

    def test_get_instructor_task_history(self):
        # when fetching historical tasks, we get all tasks, including running tasks
        expected_ids = []
        for _ in range(1, 5):
            expected_ids.append(self._create_failure_entry().task_id)
            expected_ids.append(self._create_success_entry().task_id)
            expected_ids.append(self._create_progress_entry().task_id)
        task_ids = [instructor_task.task_id for instructor_task
                    in get_instructor_task_history(TEST_COURSE_ID, self.problem_url)]
        self.assertEquals(set(task_ids), set(expected_ids))

class InstructorTaskSubmitTest(InstructorTaskModuleTestCase):
    """Tests API methods that involve the submission of background tasks."""

    def setUp(self):
        self.initialize_course()
        self.student = UserFactory.create(username="student", email="student@edx.org")
        self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")

    def test_submit_nonexistent_modules(self):
        # confirm that a rescore of a non-existent module raises an exception
        problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
        course_id = self.course.id
        request = None
        with self.assertRaises(ItemNotFoundError):
            submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
        with self.assertRaises(ItemNotFoundError):
            submit_rescore_problem_for_all_students(request, course_id, problem_url)
        with self.assertRaises(ItemNotFoundError):
            submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
        with self.assertRaises(ItemNotFoundError):
            submit_delete_problem_state_for_all_students(request, course_id, problem_url)

    def test_submit_nonrescorable_modules(self):
        # confirm that a rescore of an existent but unscorable module raises an exception
        # (Note that it is easier to test a scoreable but non-rescorable module in test_tasks,
        # where we are creating real modules.)
        problem_url = self.problem_section.location.url()
        course_id = self.course.id
        request = None
        with self.assertRaises(NotImplementedError):
            submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
        with self.assertRaises(NotImplementedError):
            submit_rescore_problem_for_all_students(request, course_id, problem_url)

    def _test_submit_with_long_url(self, task_function, student=None):
        problem_url_name = 'x' * 255
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        with self.assertRaises(ValueError):
            if student is not None:
                task_function(self.create_task_request(self.instructor), self.course.id, location, student)
            else:
                task_function(self.create_task_request(self.instructor), self.course.id, location)

    def test_submit_rescore_all_with_long_url(self):
        self._test_submit_with_long_url(submit_rescore_problem_for_all_students)

    def test_submit_rescore_student_with_long_url(self):
        self._test_submit_with_long_url(submit_rescore_problem_for_student, self.student)

    def test_submit_reset_all_with_long_url(self):
        self._test_submit_with_long_url(submit_reset_problem_attempts_for_all_students)

    def test_submit_delete_all_with_long_url(self):
        self._test_submit_with_long_url(submit_delete_problem_state_for_all_students)

    def _test_submit_task(self, task_function, student=None):
        # tests submit, and then tests a second identical submission.
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        if student is not None:
            instructor_task = task_function(self.create_task_request(self.instructor),
                                            self.course.id, location, student)
        else:
            instructor_task = task_function(self.create_task_request(self.instructor),
                                            self.course.id, location)

        # test resubmitting, by updating the existing record:
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        instructor_task.task_state = PROGRESS
        instructor_task.save()

        with self.assertRaises(AlreadyRunningError):
            if student is not None:
                task_function(self.create_task_request(self.instructor), self.course.id, location, student)
            else:
                task_function(self.create_task_request(self.instructor), self.course.id, location)

    def test_submit_rescore_all(self):
        self._test_submit_task(submit_rescore_problem_for_all_students)

    def test_submit_rescore_student(self):
        self._test_submit_task(submit_rescore_problem_for_student, self.student)

    def test_submit_reset_all(self):
        self._test_submit_task(submit_reset_problem_attempts_for_all_students)

    def test_submit_delete_all(self):
        self._test_submit_task(submit_delete_problem_state_for_all_students)
lms/djangoapps/instructor_task/tests/test_base.py (new file, 211 lines)
@@ -0,0 +1,211 @@
"""
Base test classes for LMS instructor-initiated background tasks

"""
import json
from uuid import uuid4
from mock import Mock

from celery.states import SUCCESS, FAILURE

from django.test.testcases import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings

from capa.tests.response_xml_factory import OptionResponseXMLFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase

from student.tests.factories import CourseEnrollmentFactory, UserFactory
from courseware.model_data import StudentModule
from courseware.tests.tests import LoginEnrollmentTestCase, TEST_DATA_MONGO_MODULESTORE

from instructor_task.api_helper import encode_problem_and_student_input
from instructor_task.models import PROGRESS, QUEUING
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.views import instructor_task_status


TEST_COURSE_ORG = 'edx'
TEST_COURSE_NAME = 'Test Course'
TEST_COURSE_NUMBER = '1.23x'
TEST_SECTION_NAME = "Problem"
TEST_COURSE_ID = 'edx/1.23x/test_course'

TEST_FAILURE_MESSAGE = 'task failed horribly'
TEST_FAILURE_EXCEPTION = 'RandomCauseError'

OPTION_1 = 'Option 1'
OPTION_2 = 'Option 2'

class InstructorTaskTestCase(TestCase):
    """
    Tests API and view methods that involve the reporting of status for background tasks.
    """
    def setUp(self):
        self.student = UserFactory.create(username="student", email="student@edx.org")
        self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
        self.problem_url = InstructorTaskTestCase.problem_location("test_urlname")

    @staticmethod
    def problem_location(problem_url_name):
        """
        Create an internal location for a test problem.
        """
        return "i4x://{org}/{number}/problem/{problem_url_name}".format(org='edx',
                                                                        number='1.23x',
                                                                        problem_url_name=problem_url_name)

    def _create_entry(self, task_state=QUEUING, task_output=None, student=None):
        """Creates an InstructorTask entry for testing."""
        task_id = str(uuid4())
        progress_json = json.dumps(task_output) if task_output is not None else None
        task_input, task_key = encode_problem_and_student_input(self.problem_url, student)

        instructor_task = InstructorTaskFactory.create(course_id=TEST_COURSE_ID,
                                                       requester=self.instructor,
                                                       task_input=json.dumps(task_input),
                                                       task_key=task_key,
                                                       task_id=task_id,
                                                       task_state=task_state,
                                                       task_output=progress_json)
        return instructor_task

    def _create_failure_entry(self):
        """Creates an InstructorTask entry representing a failed task."""
        # view task entry for task failure
        progress = {'message': TEST_FAILURE_MESSAGE,
                    'exception': TEST_FAILURE_EXCEPTION,
                    }
        return self._create_entry(task_state=FAILURE, task_output=progress)

    def _create_success_entry(self, student=None):
        """Creates an InstructorTask entry representing a successful task."""
        return self._create_progress_entry(student, task_state=SUCCESS)

    def _create_progress_entry(self, student=None, task_state=PROGRESS):
        """Creates an InstructorTask entry representing a task in progress."""
        progress = {'attempted': 3,
                    'updated': 2,
                    'total': 5,
                    'action_name': 'rescored',
                    }
        return self._create_entry(task_state=task_state, task_output=progress, student=student)

@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class InstructorTaskModuleTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    Base test class for InstructorTask-related tests that require
    the setup of a course and problem in order to access StudentModule state.
    """
    course = None
    current_user = None

    def initialize_course(self):
        """Create a course in the store, with a chapter and section."""
        self.module_store = modulestore()

        # Create the course
        self.course = CourseFactory.create(org=TEST_COURSE_ORG,
                                           number=TEST_COURSE_NUMBER,
                                           display_name=TEST_COURSE_NAME)

        # Add a chapter to the course
        chapter = ItemFactory.create(parent_location=self.course.location,
                                     display_name=TEST_SECTION_NAME)

        # add a sequence to the course to which the problems can be added
        self.problem_section = ItemFactory.create(parent_location=chapter.location,
                                                  template='i4x://edx/templates/sequential/Empty',
                                                  display_name=TEST_SECTION_NAME)

    @staticmethod
    def get_user_email(username):
        """Generate an email address based on the username."""
        return '{0}@test.com'.format(username)

    def login_username(self, username):
        """Log in the user, given the `username`."""
        if self.current_user != username:
            self.login(InstructorTaskModuleTestCase.get_user_email(username), "test")
            self.current_user = username

    def _create_user(self, username, is_staff=False):
        """Creates a user and enrolls them in the test course."""
        email = InstructorTaskModuleTestCase.get_user_email(username)
        thisuser = UserFactory.create(username=username, email=email, is_staff=is_staff)
        CourseEnrollmentFactory.create(user=thisuser, course_id=self.course.id)
        return thisuser

    def create_instructor(self, username):
        """Creates an instructor for the test course."""
        return self._create_user(username, is_staff=True)

    def create_student(self, username):
        """Creates a student for the test course."""
        return self._create_user(username, is_staff=False)

    @staticmethod
    def problem_location(problem_url_name):
        """
        Create an internal location for a test problem.
        """
        if "i4x:" in problem_url_name:
            return problem_url_name
        else:
            return "i4x://{org}/{number}/problem/{problem_url_name}".format(org=TEST_COURSE_ORG,
                                                                            number=TEST_COURSE_NUMBER,
                                                                            problem_url_name=problem_url_name)

    def define_option_problem(self, problem_url_name):
        """Create the problem definition so the answer is Option 1."""
        factory = OptionResponseXMLFactory()
        factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_1),
                        'options': [OPTION_1, OPTION_2],
                        'correct_option': OPTION_1,
                        'num_responses': 2}
        problem_xml = factory.build_xml(**factory_args)
        ItemFactory.create(parent_location=self.problem_section.location,
                           template="i4x://edx/templates/problem/Blank_Common_Problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def redefine_option_problem(self, problem_url_name):
        """Change the problem definition so the answer is Option 2."""
        factory = OptionResponseXMLFactory()
        factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_2),
                        'options': [OPTION_1, OPTION_2],
                        'correct_option': OPTION_2,
                        'num_responses': 2}
        problem_xml = factory.build_xml(**factory_args)
        location = InstructorTaskTestCase.problem_location(problem_url_name)
        self.module_store.update_item(location, problem_xml)

    def get_student_module(self, username, descriptor):
        """Get the StudentModule object for the test course, given the `username` and the problem's `descriptor`."""
        return StudentModule.objects.get(course_id=self.course.id,
                                         student=User.objects.get(username=username),
                                         module_type=descriptor.location.category,
                                         module_state_key=descriptor.location.url(),
                                         )

    @staticmethod
    def get_task_status(task_id):
        """Use the api method to fetch task status, using a mock request."""
        mock_request = Mock()
        mock_request.REQUEST = {'task_id': task_id}
        response = instructor_task_status(mock_request)
        status = json.loads(response.content)
        return status

    def create_task_request(self, requester_username):
        """Generate a request that can be used for submitting tasks."""
        request = Mock()
        request.user = User.objects.get(username=requester_username)
        request.get_host = Mock(return_value="testhost")
        request.META = {'REMOTE_ADDR': '0:0:0:0', 'SERVER_NAME': 'testhost'}
        request.is_secure = Mock(return_value=False)
        return request
lms/djangoapps/instructor_task/tests/test_integration.py (new file, 475 lines)
@@ -0,0 +1,475 @@
"""
Integration tests for LMS instructor-initiated background tasks

Runs tasks on answers to course problems to validate that code
paths actually work.

"""
import logging
import json
from mock import patch
import textwrap

from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse

from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
                                             CustomResponseXMLFactory)
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore.exceptions import ItemNotFoundError

from courseware.model_data import StudentModule

from instructor_task.api import (submit_rescore_problem_for_all_students,
                                 submit_rescore_problem_for_student,
                                 submit_reset_problem_attempts_for_all_students,
                                 submit_delete_problem_state_for_all_students)
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TEST_COURSE_ORG, TEST_COURSE_NUMBER,
                                             OPTION_1, OPTION_2)
from capa.responsetypes import StudentInputError


log = logging.getLogger(__name__)

class TestIntegrationTask(InstructorTaskModuleTestCase):
    """
    Base class to provide general methods used for "integration" testing of particular tasks.
    """

    def submit_student_answer(self, username, problem_url_name, responses):
        """
        Use the ajax interface to submit a student answer.

        Assumes the input list of responses has two values.
        """
        def get_input_id(response_id):
            """Creates an input id using information about the test course and the current problem."""
            # Note that this is a capa-specific convention. The form is a version of the problem's
            # URL, modified so that it can be easily stored in html, prepended with "input_" and
            # appended with a sequence identifier for the particular response the input goes to.
            return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(),
                                                              TEST_COURSE_NUMBER.replace('.', '_'),
                                                              problem_url_name, response_id)

        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('modx_dispatch',
                           kwargs={'course_id': self.course.id,
                                   'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                   'dispatch': 'problem_check', })

        # we assume we have two responses, so assign them the correct identifiers.
        resp = self.client.post(modx_url, {
            get_input_id('2_1'): responses[0],
            get_input_id('3_1'): responses[1],
        })
        return resp

    def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
        """Confirm that expected values are stored in InstructorTask on task failure."""
        instructor_task = InstructorTask.objects.get(id=entry_id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, task_type)
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'ZeroDivisionError')
        self.assertEqual(status['message'], expected_message)
        # check status returned:
        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], expected_message)

class TestRescoringTask(TestIntegrationTask):
    """
    Integration-style tests for rescoring problems in a background task.

    Exercises real problems with a minimum of patching.
    """

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        self.create_student('u1')
        self.create_student('u2')
        self.create_student('u3')
        self.create_student('u4')
        self.logout()

    def render_problem(self, username, problem_url_name):
        """
        Use the ajax interface to request html for a problem.
        """
        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('modx_dispatch',
                           kwargs={'course_id': self.course.id,
                                   'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                   'dispatch': 'problem_get', })
        resp = self.client.post(modx_url, {})
        return resp

    def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):
        """
        Check that the StudentModule state contains the expected values.

        The student module is found for the test course, given the `username` and problem `descriptor`.

        Values checked include the number of attempts, the score, and the max score for a problem.
        """
        module = self.get_student_module(username, descriptor)
        self.assertEqual(module.grade, expected_score)
        self.assertEqual(module.max_grade, expected_max_score)
        state = json.loads(module.state)
        attempts = state['attempts']
        self.assertEqual(attempts, expected_attempts)
        if attempts > 0:
            self.assertTrue('correct_map' in state)
            self.assertTrue('student_answers' in state)
            self.assertGreater(len(state['correct_map']), 0)
            self.assertGreater(len(state['student_answers']), 0)

    def submit_rescore_all_student_answers(self, instructor, problem_url_name):
        """Submits the particular problem for rescoring."""
        return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id,
                                                       InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
        """Submits the particular problem for rescoring for a particular student."""
        return submit_rescore_problem_for_student(self.create_task_request(instructor), self.course.id,
                                                  InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                                  student)

    def test_rescoring_option_problem(self):
        """Run the rescore scenario on an option problem."""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])

        self.check_state('u1', descriptor, 2, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # update the data in the problem definition
        self.redefine_option_problem(problem_url_name)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade:
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 2, 2, 1)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 2, 2, 1)

    def test_rescoring_failure(self):
        """Simulate a failure in rescoring a problem."""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode."""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt). That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- it doesn't really matter what problem it is, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # check instructor_task returned
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['updated'], 0)
        self.assertEqual(status['total'], 1)

    def define_code_response_problem(self, problem_url_name):
        """
        Define an arbitrary code-response problem.

        We'll end up mocking its evaluation later.
        """
        factory = CodeResponseXMLFactory()
        grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
        problem_xml = factory.build_xml(initial_display="def square(x):",
                                        answer_display="answer",
                                        grader_payload=grader_payload,
                                        num_responses=2)
        ItemFactory.create(parent_location=self.problem_section.location,
                           template="i4x://edx/templates/problem/Blank_Common_Problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def test_rescoring_code_problem(self):
        """Run the rescore scenario on a problem with a code submission."""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])

        instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'], "Problem's definition does not support rescoring")

        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], "Problem's definition does not support rescoring")

    def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        The generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, the definition of correctness is changed (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
        if redefine:
            self.module_store.update_item(InstructorTaskModuleTestCase.problem_location(problem_url_name), problem_xml)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               template="i4x://edx/templates/problem/Blank_Common_Problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})

    def test_rescoring_randomized_problem(self):
        """Run the rescore scenario on a custom problem that uses randomization."""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        # run with more than one user
        userlist = ['u1', 'u2', 'u3', 'u4']
        for username in userlist:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(username, problem_url_name, [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(username, descriptor, 0, 1, 1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
            answer = correct_map.values()[0]['msg']
            self.submit_student_answer(username, problem_url_name, [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(username, descriptor, 1, 1, 2)

        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 1, 1, 2)

        # rescore the problem for only one student -- only that student's grade should change
|
||||
# (and none of the attempts):
|
||||
self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
|
||||
for username in userlist:
|
||||
self.check_state(username, descriptor, 0 if username == 'u1' else 1, 1, 2)
|
||||
|
||||
# rescore the problem for all students
|
||||
self.submit_rescore_all_student_answers('instructor', problem_url_name)
|
||||
|
||||
# all grades should change to being wrong (with no change in attempts)
|
||||
for username in userlist:
|
||||
self.check_state(username, descriptor, 0, 1, 2)
|
||||
|
||||
|
||||
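# For reference, the two JSON documents stored on an InstructorTask row, as
# exercised by the assertions above.  The values are illustrative only (the
# problem_url format is assumed to follow the i4x convention used in these
# tests):
#
#     task_input:  {"problem_url": "i4x://<org>/<course>/problem/H1P1"}
#                  (a "student" key is added only for single-student tasks)
#     task_output: {"attempted": 1, "updated": 0, "total": 1,
#                   "action_name": "rescored", "duration_ms": 53}
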
class TestResetAttemptsTask(TestIntegrationTask):
    """
    Integration-style tests for resetting problem attempts in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def get_num_attempts(self, username, descriptor):
        """returns number of attempts stored for `username` on problem `descriptor` for test course"""
        module = self.get_student_module(username, descriptor)
        state = json.loads(module.state)
        return state['attempts']

    def reset_problem_attempts(self, instructor, problem_url_name):
        """Submits the current problem for resetting"""
        return submit_reset_problem_attempts_for_all_students(self.create_task_request(instructor), self.course.id,
                                                              InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def test_reset_attempts_on_problem(self):
        """Run reset-attempts scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        num_attempts = 3
        # first store answers for each of the separate users:
        for _ in range(num_attempts):
            for username in self.userlist:
                self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])

        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), num_attempts)

        self.reset_problem_attempts('instructor', problem_url_name)

        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), 0)

    def test_reset_failure(self):
        """Simulate a failure in resetting attempts on a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.save') as mock_save:
            mock_save.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)

    def test_reset_non_problem(self):
        """confirm that a non-problem can still be successfully reset"""
        problem_url_name = self.problem_section.location.url()
        instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)


class TestDeleteProblemTask(TestIntegrationTask):
    """
    Integration-style tests for deleting problem state in a background task.

    Exercises real problems with a minimum of patching.
    """
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def delete_problem_state(self, instructor, problem_url_name):
        """Submits the current problem for deletion"""
        return submit_delete_problem_state_for_all_students(self.create_task_request(instructor), self.course.id,
                                                            InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def test_delete_problem_state(self):
        """Run delete-state scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        # first store answers for each of the separate users:
        for username in self.userlist:
            self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
        # confirm that state exists:
        for username in self.userlist:
            self.assertTrue(self.get_student_module(username, descriptor) is not None)
        # run delete task:
        self.delete_problem_state('instructor', problem_url_name)
        # confirm that no state can be found:
        for username in self.userlist:
            with self.assertRaises(StudentModule.DoesNotExist):
                self.get_student_module(username, descriptor)

    def test_delete_failure(self):
        """Simulate a failure in deleting state of a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.delete') as mock_delete:
            mock_delete.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.delete_problem_state('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)

    def test_delete_non_problem(self):
        """confirm that a non-problem can still be successfully deleted"""
        problem_url_name = self.problem_section.location.url()
        instructor_task = self.delete_problem_state('instructor', problem_url_name)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)
332 lms/djangoapps/instructor_task/tests/test_tasks.py Normal file
@@ -0,0 +1,332 @@
"""
Unit tests for LMS instructor-initiated background tasks.

Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import json
from uuid import uuid4

from mock import Mock, patch

from celery.states import SUCCESS, FAILURE

from xmodule.modulestore.exceptions import ItemNotFoundError

from courseware.model_data import StudentModule
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory

from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import InstructorTaskModuleTestCase, TEST_COURSE_ORG, TEST_COURSE_NUMBER
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.tasks import rescore_problem, reset_problem_attempts, delete_problem_state
from instructor_task.tasks_helper import UpdateProblemModuleStateError, update_problem_module_state


PROBLEM_URL_NAME = "test_urlname"


class TestTaskFailure(Exception):
    pass


class TestInstructorTasks(InstructorTaskModuleTestCase):
    def setUp(self):
        super(InstructorTaskModuleTestCase, self).setUp()
        self.initialize_course()
        self.instructor = self.create_instructor('instructor')
        self.problem_url = InstructorTaskModuleTestCase.problem_location(PROBLEM_URL_NAME)

    def _create_input_entry(self, student_ident=None):
        """Creates an InstructorTask entry for testing."""
        task_id = str(uuid4())
        task_input = {'problem_url': self.problem_url}
        if student_ident is not None:
            task_input['student'] = student_ident

        instructor_task = InstructorTaskFactory.create(course_id=self.course.id,
                                                       requester=self.instructor,
                                                       task_input=json.dumps(task_input),
                                                       task_key='dummy value',
                                                       task_id=task_id)
        return instructor_task

    def _get_xmodule_instance_args(self):
        """
        Calculate dummy values for parameters needed for instantiating xmodule instances.
        """
        return {'xqueue_callback_url_prefix': 'dummy_value',
                'request_info': {},
                }

    def _run_task_with_mock_celery(self, task_function, entry_id, task_id, expected_failure_message=None):
        """Run a task function with `_get_current_task` patched to return a mock celery task."""
        self.current_task = Mock()
        self.current_task.request = Mock()
        self.current_task.request.id = task_id
        self.current_task.update_state = Mock()
        if expected_failure_message is not None:
            self.current_task.update_state.side_effect = TestTaskFailure(expected_failure_message)
        with patch('instructor_task.tasks_helper._get_current_task') as mock_get_task:
            mock_get_task.return_value = self.current_task
            return task_function(entry_id, self._get_xmodule_instance_args())

    def _test_missing_current_task(self, task_function):
        # run without (mock) celery running
        task_entry = self._create_input_entry()
        with self.assertRaises(UpdateProblemModuleStateError):
            task_function(task_entry.id, self._get_xmodule_instance_args())

    def test_rescore_missing_current_task(self):
        self._test_missing_current_task(rescore_problem)

    def test_reset_missing_current_task(self):
        self._test_missing_current_task(reset_problem_attempts)

    def test_delete_missing_current_task(self):
        self._test_missing_current_task(delete_problem_state)

    def _test_undefined_problem(self, task_function):
        # run with celery, but no problem defined
        task_entry = self._create_input_entry()
        with self.assertRaises(ItemNotFoundError):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)

    def test_rescore_undefined_problem(self):
        self._test_undefined_problem(rescore_problem)

    def test_reset_undefined_problem(self):
        self._test_undefined_problem(reset_problem_attempts)

    def test_delete_undefined_problem(self):
        self._test_undefined_problem(delete_problem_state)

    def _test_run_with_task(self, task_function, action_name, expected_num_updated):
        # run with some StudentModules for the problem
        task_entry = self._create_input_entry()
        status = self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
        # check return value
        self.assertEquals(status.get('attempted'), expected_num_updated)
        self.assertEquals(status.get('updated'), expected_num_updated)
        self.assertEquals(status.get('total'), expected_num_updated)
        self.assertEquals(status.get('action_name'), action_name)
        self.assertGreater(status.get('duration_ms'), 0)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(json.loads(entry.task_output), status)
        self.assertEquals(entry.task_state, SUCCESS)

    def _test_run_with_no_state(self, task_function, action_name):
        # run with no StudentModules for the problem
        self.define_option_problem(PROBLEM_URL_NAME)
        self._test_run_with_task(task_function, action_name, 0)

    def test_rescore_with_no_state(self):
        self._test_run_with_no_state(rescore_problem, 'rescored')

    def test_reset_with_no_state(self):
        self._test_run_with_no_state(reset_problem_attempts, 'reset')

    def test_delete_with_no_state(self):
        self._test_run_with_no_state(delete_problem_state, 'deleted')

    def _create_students_with_state(self, num_students, state=None):
        """Create students, a problem, and StudentModule objects for testing"""
        self.define_option_problem(PROBLEM_URL_NAME)
        students = [
            UserFactory.create(username='robot%d' % i, email='robot+test+%d@edx.org' % i)
            for i in xrange(num_students)
        ]
        for student in students:
            StudentModuleFactory.create(course_id=self.course.id,
                                        module_state_key=self.problem_url,
                                        student=student,
                                        state=state)
        return students

    def _assert_num_attempts(self, students, num_attempts):
        """Check that the number of attempts stored for each student matches `num_attempts`."""
        for student in students:
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.problem_url)
            state = json.loads(module.state)
            self.assertEquals(state['attempts'], num_attempts)

    def test_reset_with_some_state(self):
        initial_attempts = 3
        input_state = json.dumps({'attempts': initial_attempts})
        num_students = 10
        students = self._create_students_with_state(num_students, input_state)
        # check that entries were set correctly
        self._assert_num_attempts(students, initial_attempts)
        # run the task
        self._test_run_with_task(reset_problem_attempts, 'reset', num_students)
        # check that entries were reset
        self._assert_num_attempts(students, 0)

    def test_delete_with_some_state(self):
        # This will create StudentModule entries -- we don't have to worry about
        # the state inside them.
        num_students = 10
        students = self._create_students_with_state(num_students)
        # check that entries were created correctly
        for student in students:
            StudentModule.objects.get(course_id=self.course.id,
                                      student=student,
                                      module_state_key=self.problem_url)
        self._test_run_with_task(delete_problem_state, 'deleted', num_students)
        # confirm that no state can be found anymore:
        for student in students:
            with self.assertRaises(StudentModule.DoesNotExist):
                StudentModule.objects.get(course_id=self.course.id,
                                          student=student,
                                          module_state_key=self.problem_url)

    def _test_reset_with_student(self, use_email):
        # run with some StudentModules for the problem
        num_students = 10
        initial_attempts = 3
        input_state = json.dumps({'attempts': initial_attempts})
        students = self._create_students_with_state(num_students, input_state)
        # check that entries were set correctly
        for student in students:
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.problem_url)
            state = json.loads(module.state)
            self.assertEquals(state['attempts'], initial_attempts)

        if use_email:
            student_ident = students[3].email
        else:
            student_ident = students[3].username
        task_entry = self._create_input_entry(student_ident)

        status = self._run_task_with_mock_celery(reset_problem_attempts, task_entry.id, task_entry.task_id)
        # check return value
        self.assertEquals(status.get('attempted'), 1)
        self.assertEquals(status.get('updated'), 1)
        self.assertEquals(status.get('total'), 1)
        self.assertEquals(status.get('action_name'), 'reset')
        self.assertGreater(status.get('duration_ms'), 0)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(json.loads(entry.task_output), status)
        self.assertEquals(entry.task_state, SUCCESS)
        # check that the correct entry was reset
        for index, student in enumerate(students):
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.problem_url)
            state = json.loads(module.state)
            if index == 3:
                self.assertEquals(state['attempts'], 0)
            else:
                self.assertEquals(state['attempts'], initial_attempts)

    def test_reset_with_student_username(self):
        self._test_reset_with_student(False)

    def test_reset_with_student_email(self):
        self._test_reset_with_student(True)

    def _test_run_with_failure(self, task_function, expected_message):
        # run with no StudentModules for the problem,
        # because we will fail before entering the loop.
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'TestTaskFailure')
        self.assertEquals(output['message'], expected_message)

    def test_rescore_with_failure(self):
        self._test_run_with_failure(rescore_problem, 'We expected this to fail')

    def test_reset_with_failure(self):
        self._test_run_with_failure(reset_problem_attempts, 'We expected this to fail')

    def test_delete_with_failure(self):
        self._test_run_with_failure(delete_problem_state, 'We expected this to fail')

    def _test_run_with_long_error_msg(self, task_function):
        # run with an error message that is so long it will require
        # truncation (as well as the jettisoning of the traceback).
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        expected_message = "x" * 1500
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'TestTaskFailure')
        self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + "...")
        self.assertTrue('traceback' not in output)

    def test_rescore_with_long_error_msg(self):
        self._test_run_with_long_error_msg(rescore_problem)

    def test_reset_with_long_error_msg(self):
        self._test_run_with_long_error_msg(reset_problem_attempts)

    def test_delete_with_long_error_msg(self):
        self._test_run_with_long_error_msg(delete_problem_state)

    def _test_run_with_short_error_msg(self, task_function):
        # run with an error message that is short enough to fit
        # in the output, but long enough that the traceback won't.
        # Confirm that the traceback is truncated.
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        expected_message = "x" * 900
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'TestTaskFailure')
        self.assertEquals(output['message'], expected_message)
        self.assertEquals(output['traceback'][-3:], "...")

    def test_rescore_with_short_error_msg(self):
        self._test_run_with_short_error_msg(rescore_problem)

    def test_reset_with_short_error_msg(self):
        self._test_run_with_short_error_msg(reset_problem_attempts)

    def test_delete_with_short_error_msg(self):
        self._test_run_with_short_error_msg(delete_problem_state)
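
    # The long/short error-message tests above pin down what these tests expect
    # of the (not shown) failure-serialization code in tasks_helper: the
    # json-encoded task_output must stay under 1024 characters, the traceback
    # is the first thing truncated (or dropped entirely), and an over-long
    # message is cut and suffixed with "...".  A hypothetical sketch of that
    # policy, for orientation only (names and budget arithmetic are assumed,
    # not the actual implementation):
    #
    #     def _serialize_failure(exception_name, message, traceback, limit=1023):
    #         output = {'exception': exception_name, 'message': message}
    #         room = limit - len(json.dumps(output))
    #         if room > 3 and traceback is not None:
    #             # keep as much of the traceback as fits, marking the cut:
    #             output['traceback'] = traceback if len(traceback) <= room \
    #                 else traceback[:room - 3] + "..."
    #         elif room < 0:
    #             # not even the message fits: truncate it, appending "...":
    #             output['message'] = message[:room - 3] + "..."
    #         return json.dumps(output)
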
    def test_successful_result_too_long(self):
        # while we don't expect the existing tasks to generate output that is too
        # long, we can test that the framework will handle such an occurrence.
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        action_name = 'x' * 1000
        update_fcn = lambda (_module_descriptor, _student_module, _xmodule_instance_args): True
        task_function = (lambda entry_id, xmodule_instance_args:
                         update_problem_module_state(entry_id,
                                                     update_fcn, action_name, filter_fcn=None,
                                                     xmodule_instance_args=None))

        with self.assertRaises(ValueError):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'ValueError')
        self.assertTrue("Length of task output is too long" in output['message'])
        self.assertTrue('traceback' not in output)
266 lms/djangoapps/instructor_task/tests/test_views.py Normal file
@@ -0,0 +1,266 @@
"""
Tests for LMS instructor background task queue management
"""
import json
from celery.states import SUCCESS, FAILURE, REVOKED, PENDING

from mock import Mock, patch

from django.utils.datastructures import MultiValueDict

from instructor_task.models import PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
                                             TEST_FAILURE_MESSAGE,
                                             TEST_FAILURE_EXCEPTION)
from instructor_task.views import instructor_task_status, get_task_completion_info


class InstructorTaskReportTest(InstructorTaskTestCase):
    """
    Tests API and view methods that involve the reporting of status for background tasks.
    """

    def _get_instructor_task_status(self, task_id):
        """Returns status corresponding to task_id via api method."""
        request = Mock()
        request.REQUEST = {'task_id': task_id}
        return instructor_task_status(request)

    def test_instructor_task_status(self):
        instructor_task = self._create_failure_entry()
        task_id = instructor_task.task_id
        request = Mock()
        request.REQUEST = {'task_id': task_id}
        response = instructor_task_status(request)
        output = json.loads(response.content)
        self.assertEquals(output['task_id'], task_id)

    def test_missing_instructor_task_status(self):
        task_id = "missing_id"
        request = Mock()
        request.REQUEST = {'task_id': task_id}
        response = instructor_task_status(request)
        output = json.loads(response.content)
        self.assertEquals(output, {})

    def test_instructor_task_status_list(self):
        # Fetch status for existing tasks by arg list, as if called from ajax.
        # Note that ajax does something funny with the marshalling of
        # list data, so the key value has "[]" appended to it.
        task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 5)]
        request = Mock()
        request.REQUEST = MultiValueDict({'task_ids[]': task_ids})
        response = instructor_task_status(request)
        output = json.loads(response.content)
        self.assertEquals(len(output), len(task_ids))
        for task_id in task_ids:
            self.assertEquals(output[task_id]['task_id'], task_id)

    def test_get_status_from_failure(self):
        # get status for a task that has already failed
        instructor_task = self._create_failure_entry()
        task_id = instructor_task.task_id
        response = self._get_instructor_task_status(task_id)
        output = json.loads(response.content)
        self.assertEquals(output['message'], TEST_FAILURE_MESSAGE)
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_id'], task_id)
        self.assertEquals(output['task_state'], FAILURE)
        self.assertFalse(output['in_progress'])
        expected_progress = {'exception': TEST_FAILURE_EXCEPTION,
                             'message': TEST_FAILURE_MESSAGE}
        self.assertEquals(output['task_progress'], expected_progress)

    def test_get_status_from_success(self):
        # get status for a task that has already succeeded
        instructor_task = self._create_success_entry()
        task_id = instructor_task.task_id
        response = self._get_instructor_task_status(task_id)
        output = json.loads(response.content)
        self.assertEquals(output['message'], "Problem rescored for 2 of 3 students (out of 5)")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_id'], task_id)
        self.assertEquals(output['task_state'], SUCCESS)
        self.assertFalse(output['in_progress'])
        expected_progress = {'attempted': 3,
                             'updated': 2,
                             'total': 5,
                             'action_name': 'rescored'}
        self.assertEquals(output['task_progress'], expected_progress)

    def _test_get_status_from_result(self, task_id, mock_result):
        """
        Provides mock result to caller of instructor_task_status, and returns resulting output.
        """
        with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
            mock_result_ctor.return_value = mock_result
            response = self._get_instructor_task_status(task_id)
        output = json.loads(response.content)
        self.assertEquals(output['task_id'], task_id)
        return output

    def test_get_status_to_pending(self):
        # get status for a task that hasn't begun to run yet
        instructor_task = self._create_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = PENDING
        output = self._test_get_status_from_result(task_id, mock_result)
        for key in ['message', 'succeeded', 'task_progress']:
            self.assertTrue(key not in output)
        self.assertEquals(output['task_state'], 'PENDING')
        self.assertTrue(output['in_progress'])

    def test_update_progress_to_progress(self):
        # view task entry for task in progress
        instructor_task = self._create_progress_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = PROGRESS
        mock_result.result = {'attempted': 5,
                              'updated': 4,
                              'total': 10,
                              'action_name': 'rescored'}
        output = self._test_get_status_from_result(task_id, mock_result)
        self.assertEquals(output['message'], "Progress: rescored 4 of 5 so far (out of 10)")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], PROGRESS)
        self.assertTrue(output['in_progress'])
        self.assertEquals(output['task_progress'], mock_result.result)

    def test_update_progress_to_failure(self):
        # view task entry for task in progress that later fails
        instructor_task = self._create_progress_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = FAILURE
        mock_result.result = NotImplementedError("This task later failed.")
        mock_result.traceback = "random traceback"
        output = self._test_get_status_from_result(task_id, mock_result)
        self.assertEquals(output['message'], "This task later failed.")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], FAILURE)
        self.assertFalse(output['in_progress'])
        expected_progress = {'exception': 'NotImplementedError',
                             'message': "This task later failed.",
                             'traceback': "random traceback"}
        self.assertEquals(output['task_progress'], expected_progress)

    def test_update_progress_to_revoked(self):
        # view task entry for task in progress that is later revoked
        instructor_task = self._create_progress_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = REVOKED
        output = self._test_get_status_from_result(task_id, mock_result)
        self.assertEquals(output['message'], "Task revoked before running")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], REVOKED)
        self.assertFalse(output['in_progress'])
        expected_progress = {'message': "Task revoked before running"}
        self.assertEquals(output['task_progress'], expected_progress)

    def _get_output_for_task_success(self, attempted, updated, total, student=None):
        """Returns the output of instructor_task_status() for a task that succeeded with the given progress counts."""
        # view task entry for task in progress
        instructor_task = self._create_progress_entry(student)
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = SUCCESS
        mock_result.result = {'attempted': attempted,
                              'updated': updated,
                              'total': total,
                              'action_name': 'rescored'}
        output = self._test_get_status_from_result(task_id, mock_result)
        return output

    def test_update_progress_to_success(self):
        output = self._get_output_for_task_success(10, 8, 10)
        self.assertEquals(output['message'], "Problem rescored for 8 of 10 students")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], SUCCESS)
        self.assertFalse(output['in_progress'])
        expected_progress = {'attempted': 10,
                             'updated': 8,
                             'total': 10,
                             'action_name': 'rescored'}
        self.assertEquals(output['task_progress'], expected_progress)

    def test_success_messages(self):
        output = self._get_output_for_task_success(0, 0, 10)
        self.assertEqual(output['message'], "Unable to find any students with submissions to be rescored (out of 10)")
        self.assertFalse(output['succeeded'])

        output = self._get_output_for_task_success(10, 0, 10)
        self.assertEqual(output['message'], "Problem failed to be rescored for any of 10 students")
        self.assertFalse(output['succeeded'])

        output = self._get_output_for_task_success(10, 8, 10)
        self.assertEqual(output['message'], "Problem rescored for 8 of 10 students")
        self.assertFalse(output['succeeded'])

        output = self._get_output_for_task_success(9, 8, 10)
        self.assertEqual(output['message'], "Problem rescored for 8 of 9 students (out of 10)")
        self.assertFalse(output['succeeded'])

        output = self._get_output_for_task_success(10, 10, 10)
        self.assertEqual(output['message'], "Problem successfully rescored for 10 students")
        self.assertTrue(output['succeeded'])

        output = self._get_output_for_task_success(0, 0, 1, student=self.student)
        self.assertTrue("Unable to find submission to be rescored for student" in output['message'])
        self.assertFalse(output['succeeded'])

        output = self._get_output_for_task_success(1, 0, 1, student=self.student)
        self.assertTrue("Problem failed to be rescored for student" in output['message'])
        self.assertFalse(output['succeeded'])

        output = self._get_output_for_task_success(1, 1, 1, student=self.student)
        self.assertTrue("Problem successfully rescored for student" in output['message'])
        self.assertTrue(output['succeeded'])

    def test_get_info_for_queuing_task(self):
        # get status for a task that is still running:
        instructor_task = self._create_entry()
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No status information available")

    def test_get_info_for_missing_output(self):
        # check for missing task_output
        instructor_task = self._create_success_entry()
        instructor_task.task_output = None
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No status information available")

    def test_get_info_for_broken_output(self):
        # check for non-JSON task_output
        instructor_task = self._create_success_entry()
        instructor_task.task_output = "{ bad"
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No parsable status information available")

    def test_get_info_for_empty_output(self):
        # check for JSON task_output with missing keys
        instructor_task = self._create_success_entry()
        instructor_task.task_output = "{}"
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No progress status information available")

    def test_get_info_for_broken_input(self):
        # check for non-JSON task_input, but then just ignore it
        instructor_task = self._create_success_entry()
        instructor_task.task_input = "{ bad"
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "Problem rescored for 2 of 3 students (out of 5)")
172 lms/djangoapps/instructor_task/views.py Normal file
@@ -0,0 +1,172 @@
import json
import logging

from django.http import HttpResponse

from celery.states import FAILURE, REVOKED, READY_STATES

from instructor_task.api_helper import (get_status_from_instructor_task,
                                        get_updated_instructor_task)
from instructor_task.models import PROGRESS


log = logging.getLogger(__name__)

# return status for completed tasks and tasks in progress
STATES_WITH_STATUS = [state for state in READY_STATES] + [PROGRESS]
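
# PROGRESS is a custom task state (alongside celery's built-in READY_STATES).
# For illustration -- this is an assumption about the task side, not code in
# this module -- a running task would surface such progress through celery's
# standard update_state() API:
#
#     from celery import current_task
#     current_task.update_state(state=PROGRESS,
#                               meta={'attempted': 5, 'updated': 4, 'total': 10,
#                                     'action_name': 'rescored'})
#
# and that meta dict is what shows up below as 'task_progress'.
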
def _get_instructor_task_status(task_id):
    """
    Returns status for a specific task.

    Written as an internal method here (rather than as a helper)
    so that get_task_completion_info() can be called without
    causing a circular dependency (since it's also called directly).
    """
    instructor_task = get_updated_instructor_task(task_id)
    status = get_status_from_instructor_task(instructor_task)
    if instructor_task is not None and instructor_task.task_state in STATES_WITH_STATUS:
        succeeded, message = get_task_completion_info(instructor_task)
        status['message'] = message
        status['succeeded'] = succeeded
    return status


def instructor_task_status(request):
    """
    View method that returns the status of a course-related task or tasks.

    Status is returned as a JSON-serialized dict, wrapped as the content of an HttpResponse.

    The task_id can be specified to this view in one of two ways:

    * by making a request containing 'task_id' as a parameter with a single value.
      Returns a dict containing status information for the specified task_id.

    * by making a request containing 'task_ids[]' as a parameter,
      with a list of task_id values.
      Returns a dict of dicts, with the task_id as key, and the corresponding
      dict containing status information for the specified task_id.

      Task_id values that are unrecognized are skipped.

    The dict with status information for a task contains the following keys:
      'message': on complete tasks, status message reporting on final progress,
          or providing exception message if failed.  For tasks in progress,
          indicates the current progress.
      'succeeded': on complete tasks or tasks in progress, boolean value indicating if the
          task outcome was successful:  did it achieve what it set out to do.
          This is in contrast with a successful task_state, which indicates that the
          task merely completed.
      'task_id': id assigned by LMS and used by celery.
      'task_state': state of task as stored in celery's result store.
      'in_progress': boolean indicating if task is still running.
      'task_progress': dict containing progress information.  This includes:
          'attempted': number of attempts made
          'updated': number of attempts that "succeeded"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
          'duration_ms': how long the task has (or had) been running.
          'exception': name of exception class raised in failed tasks.
          'message': returned for failed and revoked tasks.
          'traceback': optional, returned if task failed and produced a traceback.
    """
    output = {}
    if 'task_id' in request.REQUEST:
        task_id = request.REQUEST['task_id']
        output = _get_instructor_task_status(task_id)
    elif 'task_ids[]' in request.REQUEST:
        tasks = request.REQUEST.getlist('task_ids[]')
        for task_id in tasks:
            task_output = _get_instructor_task_status(task_id)
            if task_output is not None:
                output[task_id] = task_output

    return HttpResponse(json.dumps(output, indent=4))
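
# A representative response body for a single in-progress rescore task, using
# illustrative values consistent with the docstring above (the task_id shown
# is fabricated):
#
#     {
#         "task_id": "92d340d2-4b21-4b13-a48c-4b61e367a1c8",
#         "task_state": "PROGRESS",
#         "in_progress": true,
#         "message": "Progress: rescored 4 of 5 so far (out of 10)",
#         "succeeded": false,
#         "task_progress": {"attempted": 5, "updated": 4, "total": 10,
#                           "action_name": "rescored", "duration_ms": 4213}
#     }
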
def get_task_completion_info(instructor_task):
    """
    Construct progress message from progress information in InstructorTask entry.

    Returns a (boolean, message string) pair, where the boolean indicates
    whether the task completed without incident.  (It is possible for a
    task to attempt many sub-tasks, such as rescoring many students' problem
    responses, and while the task runs to completion, some of the students'
    responses could not be rescored.)

    Used for providing messages to instructor_task_status(), as well as
    external calls for providing course task submission history information.
    """
    succeeded = False

    if instructor_task.task_state not in STATES_WITH_STATUS:
        return (succeeded, "No status information available")

    # we're more surprised if there is no output for a completed task, but just warn:
    if instructor_task.task_output is None:
        log.warning("No task_output information found for instructor_task {0}".format(instructor_task.task_id))
        return (succeeded, "No status information available")

    try:
        task_output = json.loads(instructor_task.task_output)
    except ValueError:
        fmt = "No parsable task_output information found for instructor_task {0}: {1}"
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
        return (succeeded, "No parsable status information available")

    if instructor_task.task_state in [FAILURE, REVOKED]:
        return (succeeded, task_output.get('message', 'No message provided'))

    if any([key not in task_output for key in ['action_name', 'attempted', 'updated', 'total']]):
        fmt = "Invalid task_output information found for instructor_task {0}: {1}"
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
        return (succeeded, "No progress status information available")

    action_name = task_output['action_name']
    num_attempted = task_output['attempted']
    num_updated = task_output['updated']
    num_total = task_output['total']

    student = None
    try:
        task_input = json.loads(instructor_task.task_input)
    except ValueError:
        fmt = "No parsable task_input information found for instructor_task {0}: {1}"
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input))
    else:
        student = task_input.get('student')

    if instructor_task.task_state == PROGRESS:
        # special message for providing progress updates:
        msg_format = "Progress: {action} {updated} of {attempted} so far"
    elif student is not None:
        if num_attempted == 0:
            msg_format = "Unable to find submission to be {action} for student '{student}'"
        elif num_updated == 0:
            msg_format = "Problem failed to be {action} for student '{student}'"
        else:
            succeeded = True
            msg_format = "Problem successfully {action} for student '{student}'"
    elif num_attempted == 0:
        msg_format = "Unable to find any students with submissions to be {action}"
    elif num_updated == 0:
        msg_format = "Problem failed to be {action} for any of {attempted} students"
    elif num_updated == num_attempted:
        succeeded = True
        msg_format = "Problem successfully {action} for {attempted} students"
    else:  # num_updated < num_attempted
        msg_format = "Problem {action} for {updated} of {attempted} students"

    if student is None and num_attempted != num_total:
        msg_format += " (out of {total})"

    # fill in the formatted message with the task's progress values:
    message = msg_format.format(action=action_name, updated=num_updated,
                                attempted=num_attempted, total=num_total,
                                student=student)
    return (succeeded, message)
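
# Worked example: for a completed rescore-all task whose task_output is
# {"attempted": 3, "updated": 2, "total": 5, "action_name": "rescored"} and
# whose task_input has no 'student', the num_updated < num_attempted branch
# selects "Problem {action} for {updated} of {attempted} students", and since
# num_attempted != num_total the " (out of {total})" suffix is appended,
# yielding:
#
#     (False, "Problem rescored for 2 of 3 students (out of 5)")
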
@@ -122,7 +122,10 @@ MITX_FEATURES = {
    'USE_CUSTOM_THEME': False,

    # Do autoplay videos for students
    'AUTOPLAY_VIDEOS': True
    'AUTOPLAY_VIDEOS': True,

    # Enable instructor dash to submit background tasks
    'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
}

# Used for A/B testing
@@ -691,6 +694,7 @@ INSTALLED_APPS = (
    'util',
    'certificates',
    'instructor',
    'instructor_task',
    'open_ended_grading',
    'psychometrics',
    'licenses',
100 lms/static/js/pending_tasks.js Normal file
@@ -0,0 +1,100 @@
// Define an InstructorTaskProgress object for updating a table on the instructor
// dashboard that shows the background tasks currently running for the
// instructor's course. Any tasks that were running when the page is
// first displayed are passed in as instructor_tasks, and populate the "Pending Instructor
// Task" table. The InstructorTaskProgress is bound to this table, and periodically
// polls the LMS to see if any of the tasks has completed. Once a task is complete,
// it is not included in any further polling.

(function() {

  var __bind = function(fn, me){ return function(){ return fn.apply(me, arguments); }; };

  this.InstructorTaskProgress = (function() {

    function InstructorTaskProgress(element) {
      this.update_progress = __bind(this.update_progress, this);
      this.get_status = __bind(this.get_status, this);
      this.element = element;
      this.entries = $(element).find('.task-progress-entry');
      if (window.queuePollerID) {
        window.clearTimeout(window.queuePollerID);
      }
      // Hardcode the initial delay before the first refresh to one second:
      window.queuePollerID = window.setTimeout(this.get_status, 1000);
    }

    InstructorTaskProgress.prototype.$ = function(selector) {
      return $(selector, this.element);
    };

    InstructorTaskProgress.prototype.update_progress = function(response) {
      var _this = this;
      // Response should be a dict with an entry for each requested task_id,
      // with a "task_state" and "in_progress" key, and optionally a "message"
      // and a "task_progress.duration_ms" key.
      var something_in_progress = false;
      for (var task_id in response) {
        var task_dict = response[task_id];
        // find the corresponding entry, and update it:
        var entry = $(_this.element).find('[data-task-id="' + task_id + '"]');
        entry.find('.task-state').text(task_dict.task_state);
        var duration_value = (task_dict.task_progress && task_dict.task_progress.duration_ms
                              && Math.round(task_dict.task_progress.duration_ms / 1000)) || 'unknown';
        entry.find('.task-duration').text(duration_value);
        var progress_value = task_dict.message || '';
        entry.find('.task-progress').text(progress_value);
        // if the task is complete, then change the entry so it won't
        // be queried again.  Otherwise set a flag.
        if (task_dict.in_progress === true) {
          something_in_progress = true;
        } else {
          entry.data('inProgress', "False");
        }
      }

      // if some entries are still incomplete, then repoll:
      // Hardcode the refresh interval to be every five seconds.
      // TODO: allow the refresh interval to be set.  (And if it is disabled,
      // then don't set the timeout at all.)
      if (something_in_progress) {
        window.queuePollerID = window.setTimeout(_this.get_status, 5000);
      } else {
        delete window.queuePollerID;
      }
    };
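
    // For reference, the status endpoint's response is keyed by task_id; the
    // values below are illustrative, matching the keys update_progress reads
    // (the task_id shown is fabricated):
    //
    //     {"92d340d2-4b21-4b13-a48c-4b61e367a1c8":
    //         {"task_state": "PROGRESS",
    //          "in_progress": true,
    //          "message": "Progress: rescored 4 of 5 so far (out of 10)",
    //          "task_progress": {"duration_ms": 4213}}}
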
    InstructorTaskProgress.prototype.get_status = function() {
      var _this = this;
      var task_ids = [];

      // Construct the array of ids to get status for, by
      // including the subset of entries that are still in progress.
      // (Entries are marked with data('inProgress', "False") once complete.)
      this.entries.each(function(idx, element) {
        var task_id = $(element).data('taskId');
        var in_progress = $(element).data('inProgress');
        if (in_progress !== "False") {
          task_ids.push(task_id);
        }
      });

      // Make call to get status for these ids.
      // Note that the keyname here ends up with "[]" being appended
      // in the POST parameter that shows up on the Django server.
      // TODO: add error handler.
      var ajax_url = '/instructor_task_status/';
      var data = {'task_ids': task_ids};
      $.post(ajax_url, data).done(this.update_progress);
    };

    return InstructorTaskProgress;
  })();

}).call(this);

// once the page is rendered, create the progress object
var instructorTaskProgress;
$(document).ready(function() {
  instructorTaskProgress = new InstructorTaskProgress($('#task-progress-wrapper'));
});
@@ -9,7 +9,9 @@
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-1.1.1.min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-world-mill-en.js')}"></script>
<script type="text/javascript" src="${static.url('js/course_groups/cohorts.js')}"></script>

%if instructor_tasks is not None:
  <script type="text/javascript" src="${static.url('js/pending_tasks.js')}"></script>
%endif
</%block>

<%include file="/courseware/course_navigation.html" args="active_page='instructor'" />
@@ -193,20 +195,78 @@ function goto( mode)
  </ul>
  <hr width="40%" style="align:left">

%endif
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
  <h2>Course-specific grade adjustment</h2>

  <p>
    Specify a particular problem in the course here by its url:
    <input type="text" name="problem_for_all_students" size="60">
  </p>
  <p>
    You may use just the "urlname" if it is a problem, or "modulename/urlname" if it is not.
    (For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
    then just provide the <tt>problemname</tt>.
    If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
    provide <tt>notaproblem/someothername</tt>.)
  </p>
  <p>
    Then select an action:
    <input type="submit" name="action" value="Reset ALL students' attempts">
    <input type="submit" name="action" value="Rescore ALL students' problem submissions">
  </p>
  <p>These actions run in the background, and status for active tasks will appear in a table below.
    To see status for all tasks submitted for this problem, click on this button:
  </p>
  <p>
    <input type="submit" name="action" value="Show Background Task History">
  </p>

  <hr width="40%" style="align:left">
%endif

<h2>Student-specific grade inspection and adjustment</h2>
<p>edX email address or their username: </p>
<p><input type="text" name="unique_student_identifier"> <input type="submit" name="action" value="Get link to student's progress page"></p>
<p>and, if you want to reset the number of attempts for a problem, the urlname of that problem
(e.g. if the location is <tt>i4x://university/course/problem/problemname</tt>, then the urlname is <tt>problemname</tt>).</p>
<p> <input type="text" name="problem_to_reset" size="60"> <input type="submit" name="action" value="Reset student's attempts"> </p>
<p>
  Specify the edX email address or username of a student here:
  <input type="text" name="unique_student_identifier">
</p>
<p>
  Click this, and a link to the student's progress page will appear below:
  <input type="submit" name="action" value="Get link to student's progress page">
</p>
<p>
  Specify a particular problem in the course here by its url:
  <input type="text" name="problem_for_student" size="60">
</p>
<p>
  You may use just the "urlname" if it is a problem, or "modulename/urlname" if it is not.
  (For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
  then just provide the <tt>problemname</tt>.
  If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
  provide <tt>notaproblem/someothername</tt>.)
</p>
<p>
  Then select an action:
  <input type="submit" name="action" value="Reset student's attempts">
  %if settings.MITX_FEATURES.get('ENABLE_COURSE_BACKGROUND_TASKS'):
    <input type="submit" name="action" value="Rescore student's problem submission">
  %endif
</p>

%if instructor_access:
  <p> You may also delete the entire state of a student for a problem:
  <input type="submit" name="action" value="Delete student state for problem"> </p>
  <p>To delete the state of other XBlocks specify modulename/urlname, e.g.
  <tt>combinedopenended/Humanities_SA_Peer</tt></p>
  <p>
    You may also delete the entire state of a student for the specified module:
    <input type="submit" name="action" value="Delete student state for module">
  </p>
%endif
%if settings.MITX_FEATURES.get('ENABLE_COURSE_BACKGROUND_TASKS'):
  <p>Rescoring runs in the background, and status for active tasks will appear in a table below.
    To see status for all tasks submitted for this course and student, click on this button:
  </p>
  <p>
    <input type="submit" name="action" value="Show Background Task History for Student">
  </p>
%endif

%endif
@@ -234,6 +294,7 @@ function goto( mode)

##-----------------------------------------------------------------------------
%if modeflag.get('Admin'):

%if instructor_access:
  <hr width="40%" style="align:left">
  <p>
@@ -373,6 +434,7 @@ function goto( mode)
%if msg:
  <p></p><p>${msg}</p>
%endif

##-----------------------------------------------------------------------------

%if modeflag.get('Analytics'):
@@ -559,6 +621,69 @@ function goto( mode)
  </p>
%endif

## Output tasks in progress

%if instructor_tasks is not None and len(instructor_tasks) > 0:
  <hr width="100%">
  <h2>Pending Instructor Tasks</h2>
  <div id="task-progress-wrapper">
    <table class="stat_table">
      <tr>
        <th>Task Type</th>
        <th>Task Inputs</th>
        <th>Task Id</th>
        <th>Requester</th>
        <th>Submitted</th>
        <th>Task State</th>
        <th>Duration (sec)</th>
        <th>Task Progress</th>
      </tr>
      %for tasknum, instructor_task in enumerate(instructor_tasks):
        <tr id="task-progress-entry-${tasknum}" class="task-progress-entry"
            data-task-id="${instructor_task.task_id}"
            data-in-progress="true">
          <td>${instructor_task.task_type}</td>
          <td>${instructor_task.task_input}</td>
          <td class="task-id">${instructor_task.task_id}</td>
          <td>${instructor_task.requester}</td>
          <td>${instructor_task.created}</td>
          <td class="task-state">${instructor_task.task_state}</td>
          <td class="task-duration">unknown</td>
          <td class="task-progress">unknown</td>
        </tr>
      %endfor
    </table>
  </div>
  <br/>

%endif

##-----------------------------------------------------------------------------

%if course_stats and modeflag.get('Psychometrics') is None:

  <br/>
  <br/>
  <p>
  <hr width="100%">
  <h2>${course_stats['title'] | h}</h2>
  <table class="stat_table">
    <tr>
      %for hname in course_stats['header']:
        <th>${hname | h}</th>
      %endfor
    </tr>
    %for row in course_stats['data']:
      <tr>
        %for value in row:
          <td>${value | h}</td>
        %endfor
      </tr>
    %endfor
  </table>
  </p>
%endif

##-----------------------------------------------------------------------------
%if modeflag.get('Psychometrics'):

@@ -394,6 +394,11 @@ if settings.MITX_FEATURES.get('ENABLE_SERVICE_STATUS'):
    url(r'^status/', include('service_status.urls')),
)

if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
    urlpatterns += (
        url(r'^instructor_task_status/$', 'instructor_task.views.instructor_task_status', name='instructor_task_status'),
    )
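
# With this url pattern installed, the dashboard poller in pending_tasks.js
# POSTs to /instructor_task_status/ with a "task_ids[]" parameter; a single
# task can equally be queried directly -- illustrative request, fabricated id:
#
#     GET /instructor_task_status/?task_id=92d340d2-4b21-4b13-a48c-4b61e367a1c8
#
# The response is the JSON status document described in instructor_task/views.py.
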
# FoldIt views
urlpatterns += (
    # The path is hardcoded into their app...