Merge pull request #1094 from MITx/feature/victor/instructor-grading
Feature/victor/instructor grading
0 .gitmodules vendored
@@ -36,7 +36,7 @@ file and check it in at the same time as your model changes. To do that,
3. Add the migration file created in mitx/common/djangoapps/student/migrations/
"""
from datetime import datetime
from hashlib import sha1
import hashlib
import json
import logging
import uuid
@@ -197,14 +197,13 @@ def unique_id_for_user(user):
    """
    Return a unique id for a user, suitable for inserting into
    e.g. personalized survey links.

    Currently happens to be implemented as a sha1 hash of the username
    (and thus assumes that usernames don't change).
    """
    # Using the user id as the salt because it's sort of random, and is already
    # in the db.
    salt = str(user.id)
    return sha1(salt + user.username).hexdigest()
    # include the secret key as a salt, and to make the ids unique across
    # different LMS installs.
    h = hashlib.md5()
    h.update(settings.SECRET_KEY)
    h.update(str(user.id))
    return h.hexdigest()


## TODO: Should be renamed to generic UserGroup, and possibly

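The new scheme replaces the sha1-of-username id with a hash salted by the install's SECRET_KEY, as the hunk above shows. A minimal standalone sketch of that logic (the secret key and user id values here are invented for illustration):

```python
import hashlib

def anonymous_id(secret_key, user_id):
    # Salt with the secret key so ids differ across LMS installs,
    # then mix in the numeric user id; the md5 hex digest is the id.
    h = hashlib.md5()
    h.update(secret_key)
    h.update(str(user_id))
    return h.hexdigest()

print anonymous_id('dev-secret-key', 42)  # stable 32-char hex string
```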
@@ -4,6 +4,11 @@ import json


def expect_json(view_function):
    """
    View decorator for simplifying handling of requests that expect json. If the request's
    CONTENT_TYPE is application/json, parses the json dict from request.body, and updates
    request.POST with the contents.
    """
    @wraps(view_function)
    def expect_json_with_cloned_request(request, *args, **kwargs):
        # cdodge: fix postback errors in CMS. The POST 'content-type' header can include additional information
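For context, a hedged sketch of a view using the decorator (the view and field names are invented, not part of this diff):

```python
import json
from django.http import HttpResponse

@expect_json
def save_settings(request):
    # With CONTENT_TYPE 'application/json', expect_json has already
    # parsed request.body and folded the dict into request.POST, so
    # JSON clients and form clients take the same code path.
    name = request.POST.get('name', '')
    return HttpResponse(json.dumps({'success': True, 'name': name}),
                        mimetype="application/json")
```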
@@ -53,7 +53,7 @@ response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
solution_tags = ['solution']

# these get captured as student responses
response_properties = ["codeparam", "responseparam", "answer"]
response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]

# special problem tags which should be turned into innocuous HTML
html_transforms = {'problem': {'tag': 'div'},
@@ -72,7 +72,7 @@ global_context = {'random': random,
                  'miller': chem.miller}

# These should be removed from HTML output, including all subelements
html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup"]
html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric"]

log = logging.getLogger('mitx.' + __name__)

@@ -733,3 +733,53 @@ class ChemicalEquationInput(InputTypeBase):
        return {'previewer': '/static/js/capa/chemical_equation_preview.js',}

registry.register(ChemicalEquationInput)

#-----------------------------------------------------------------------------

class OpenEndedInput(InputTypeBase):
    """
    A text area input for code--uses codemirror, does syntax highlighting, special tab handling,
    etc.
    """

    template = "openendedinput.html"
    tags = ['openendedinput']

    # pulled out for testing
    submitted_msg = ("Feedback not yet available. Reload to check again. "
                     "Once the problem is graded, this message will be "
                     "replaced with the grader's feedback")

    @classmethod
    def get_attributes(cls):
        """
        Convert options to a convenient format.
        """
        return [Attribute('rows', '30'),
                Attribute('cols', '80'),
                Attribute('hidden', ''),
                ]

    def setup(self):
        """
        Implement special logic: handle queueing state, and default input.
        """
        # if no student input yet, then use the default input given by the problem
        if not self.value:
            self.value = self.xml.text

        # Check if problem has been queued
        self.queue_len = 0
        # Flag indicating that the problem has been queued, 'msg' is length of queue
        if self.status == 'incomplete':
            self.status = 'queued'
            self.queue_len = self.msg
            self.msg = self.submitted_msg

    def _extra_context(self):
        """Add queue_len to the template context."""
        return {'queue_len': self.queue_len,}

registry.register(OpenEndedInput)

#-----------------------------------------------------------------------------

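To illustrate get_attributes, a hedged sketch of how the declared defaults resolve for a tag that sets only some of them (the tag and resulting context are assumptions based on how Attribute defaults behave, not shown in the diff):

```python
# Given <openendedinput rows="20"/> in the problem XML, resolving the
# three declared Attributes would yield a rendering context roughly like:
context = {
    'rows': '20',    # taken from the tag
    'cols': '80',    # Attribute default
    'hidden': '',    # Attribute default
}
```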
@@ -8,21 +8,23 @@ Used by capa_problem.py
'''

# standard library imports
import abc
import cgi
import hashlib
import inspect
import json
import logging
import numbers
import numpy
import os
import random
import re
import requests
import traceback
import hashlib
import abc
import os
import subprocess
import traceback
import xml.sax.saxutils as saxutils

from collections import namedtuple
from shapely.geometry import Point, MultiPoint

# specific library imports
@@ -1101,6 +1103,15 @@ class SymbolicResponse(CustomResponse):

#-----------------------------------------------------------------------------

"""
valid: Flag indicating valid score_msg format (Boolean)
correct: Correctness of submission (Boolean)
points: Points to be assigned (numeric, can be float)
msg: Message from grader to display to student (string)
"""
ScoreMessage = namedtuple('ScoreMessage',
                          ['valid', 'correct', 'points', 'msg'])


class CodeResponse(LoncapaResponse):
    """
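A hedged example of building a ScoreMessage from a decoded grader reply (the reply values are invented for illustration):

```python
import json
from collections import namedtuple

ScoreMessage = namedtuple('ScoreMessage', ['valid', 'correct', 'points', 'msg'])

reply = json.loads('{"correct": true, "score": 2, "msg": "Nice work"}')
score = ScoreMessage(valid=True, correct=reply['correct'],
                     points=reply['score'], msg=reply['msg'])
# score.points == 2, score.correct is True
```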
@@ -1140,7 +1151,7 @@ class CodeResponse(LoncapaResponse):
        else:
            self._parse_coderesponse_xml(codeparam)

    def _parse_coderesponse_xml(self,codeparam):
    def _parse_coderesponse_xml(self, codeparam):
        '''
        Parse the new CodeResponse XML format. When successful, sets:
            self.initial_display
@@ -1152,17 +1163,9 @@ class CodeResponse(LoncapaResponse):
        grader_payload = grader_payload.text if grader_payload is not None else ''
        self.payload = {'grader_payload': grader_payload}

        answer_display = codeparam.find('answer_display')
        if answer_display is not None:
            self.answer = answer_display.text
        else:
            self.answer = 'No answer provided.'

        initial_display = codeparam.find('initial_display')
        if initial_display is not None:
            self.initial_display = initial_display.text
        else:
            self.initial_display = ''
        self.initial_display = find_with_default(codeparam, 'initial_display', '')
        self.answer = find_with_default(codeparam, 'answer_display',
                                        'No answer provided.')

    def _parse_externalresponse_xml(self):
        '''
@@ -1728,9 +1731,9 @@ class ImageResponse(LoncapaResponse):

    Regions is list of lists [region1, region2, region3, ...] where regionN
    is disordered list of points: [[1,1], [100,100], [50,50], [20, 70]].

    If there is only one region in the list, simpler notation can be used:
    regions="[[10,10], [30,30], [10, 30], [30, 10]]" (without explicitly
    setting outer list)

    Returns:
@@ -1812,6 +1815,339 @@ class ImageResponse(LoncapaResponse):
        return (dict([(ie.get('id'), ie.get('rectangle')) for ie in self.ielements]),
                dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))
#-----------------------------------------------------------------------------

class OpenEndedResponse(LoncapaResponse):
    """
    Grade student open ended responses using an external grading system,
    accessed through the xqueue system.

    Expects 'xqueue' dict in ModuleSystem with the following keys that are
    needed by OpenEndedResponse:

        system.xqueue = { 'interface': XqueueInterface object,
                          'callback_url': Per-StudentModule callback URL
                                          where results are posted (string),
                        }

    External requests are only submitted for student submission grading
    (i.e. not for getting reference answers)

    By default, uses the OpenEndedResponse.DEFAULT_QUEUE queue.
    """

    DEFAULT_QUEUE = 'open-ended'
    response_tag = 'openendedresponse'
    allowed_inputfields = ['openendedinput']
    max_inputfields = 1

    def setup_response(self):
        '''
        Configure OpenEndedResponse from XML.
        '''
        xml = self.xml
        self.url = xml.get('url', None)
        self.queue_name = xml.get('queuename', self.DEFAULT_QUEUE)

        # The openendedparam tag encapsulates all grader settings
        oeparam = self.xml.find('openendedparam')
        prompt = self.xml.find('prompt')
        rubric = self.xml.find('openendedrubric')
        self._parse(oeparam, prompt, rubric)

    @staticmethod
    def stringify_children(node):
        """
        Modified from stringify_children in xmodule. Didn't import directly
        in order to avoid capa depending on xmodule (that dependency seems to
        be avoided in the code).
        """
        parts = [node.text]
        for p in node.getchildren():
            parts.append(etree.tostring(p, with_tail=True, encoding='unicode'))

        return ' '.join(parts)

    def _parse(self, oeparam, prompt, rubric):
        '''
        Parse OpenEndedResponse XML:
            self.initial_display
            self.payload - dict containing keys --
            'grader' : path to grader settings file, 'problem_id' : id of the problem

            self.answer - What to display when show answer is clicked
        '''
        # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
        prompt_string = self.stringify_children(prompt)
        rubric_string = self.stringify_children(rubric)

        grader_payload = oeparam.find('grader_payload')
        grader_payload = grader_payload.text if grader_payload is not None else ''

        # Update grader payload with student id. If grader payload is not json, error.
        try:
            parsed_grader_payload = json.loads(grader_payload)
            # NOTE: self.system.location is valid because the capa_module
            # __init__ adds it (easiest way to get problem location into
            # response types)
        except (TypeError, ValueError):
            log.exception("Grader payload %r is not a json object!", grader_payload)
            # fall back to an empty dict so the update below still succeeds
            parsed_grader_payload = {}
        parsed_grader_payload.update({
            'location' : self.system.location,
            'course_id' : self.system.course_id,
            'prompt' : prompt_string,
            'rubric' : rubric_string,
        })
        updated_grader_payload = json.dumps(parsed_grader_payload)

        self.payload = {'grader_payload': updated_grader_payload}

        self.initial_display = find_with_default(oeparam, 'initial_display', '')
        self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
        try:
            self.max_score = int(find_with_default(oeparam, 'max_score', 1))
        except ValueError:
            self.max_score = 1

    def get_score(self, student_answers):

        try:
            submission = student_answers[self.answer_id]
        except KeyError:
            msg = ('Cannot get student answer for answer_id: {0}. student_answers {1}'
                   .format(self.answer_id, student_answers))
            log.exception(msg)
            raise LoncapaProblemError(msg)

        # Prepare xqueue request
        #------------------------------------------------------------

        qinterface = self.system.xqueue['interface']
        qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)

        anonymous_student_id = self.system.anonymous_student_id

        # Generate header
        queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
                                                 anonymous_student_id +
                                                 self.answer_id)

        xheader = xqueue_interface.make_xheader(lms_callback_url=self.system.xqueue['callback_url'],
                                                lms_key=queuekey,
                                                queue_name=self.queue_name)

        self.context.update({'submission': submission})

        contents = self.payload.copy()

        # Metadata related to the student submission revealed to the external grader
        student_info = {'anonymous_student_id': anonymous_student_id,
                        'submission_time': qtime,
                        }

        # Update contents with student response and student info
        contents.update({
            'student_info': json.dumps(student_info),
            'student_response': submission,
            'max_score' : self.max_score
        })

        # Submit request. When successful, 'msg' is the prior length of the queue
        (error, msg) = qinterface.send_to_queue(header=xheader,
                                                body=json.dumps(contents))

        # State associated with the queueing request
        queuestate = {'key': queuekey,
                      'time': qtime,}

        cmap = CorrectMap()
        if error:
            cmap.set(self.answer_id, queuestate=None,
                     msg='Unable to deliver your submission to grader. (Reason: {0}.)'
                         ' Please try again later.'.format(msg))
        else:
            # Queueing mechanism flags:
            # 1) Backend: Non-null CorrectMap['queuestate'] indicates that
            #    the problem has been queued
            # 2) Frontend: correctness='incomplete' eventually trickles down
            #    through inputtypes.textbox and .filesubmission to inform the
            #    browser that the submission is queued (and it could e.g. poll)
            cmap.set(self.answer_id, queuestate=queuestate,
                     correctness='incomplete', msg=msg)

        return cmap

    def update_score(self, score_msg, oldcmap, queuekey):
        log.debug(score_msg)
        score_msg = self._parse_score_msg(score_msg)
        if not score_msg.valid:
            oldcmap.set(self.answer_id,
                        msg='Invalid grader reply. Please contact the course staff.')
            return oldcmap

        correctness = 'correct' if score_msg.correct else 'incorrect'

        # TODO: Find out how this is used elsewhere, if at all
        self.context['correct'] = correctness

        # Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
        # does not match, we keep waiting for the score_msg whose key actually matches
        if oldcmap.is_right_queuekey(self.answer_id, queuekey):
            # Sanity check on returned points
            points = score_msg.points
            if points < 0:
                points = 0

            # Queuestate is consumed, so reset it to None
            oldcmap.set(self.answer_id, npoints=points, correctness=correctness,
                        msg=score_msg.msg.replace('&nbsp;', '&#160;'), queuestate=None)
        else:
            log.debug('OpenEndedResponse: queuekey {0} does not match for answer_id={1}.'.format(
                queuekey, self.answer_id))

        return oldcmap

    def get_answers(self):
        anshtml = '<span class="openended-answer"><pre><code>{0}</code></pre></span>'.format(self.answer)
        return {self.answer_id: anshtml}

    def get_initial_display(self):
        return {self.answer_id: self.initial_display}

    def _convert_longform_feedback_to_html(self, response_items):
        """
        Take in a dictionary, and return html strings for display to student.
        Input:
            response_items: Dictionary with keys success, feedback.
                if success is True, feedback should be a dictionary, with keys for
                   types of feedback, and the corresponding feedback values.
                if success is False, feedback is actually an error string.

                NOTE: this will need to change when we integrate peer grading, because
                that will have more complex feedback.

        Output:
            String -- html that can be displayed to the student.
        """

        # We want to display available feedback in a particular order.
        # This dictionary specifies which goes first--lower first.
        priorities = {# These go at the start of the feedback
                      'spelling': 0,
                      'grammar': 1,
                      # needs to be after all the other feedback
                      'markup_text': 3}

        default_priority = 2

        def get_priority(elt):
            """
            Args:
                elt: a tuple of feedback-type, feedback
            Returns:
                the priority for this feedback type
            """
            return priorities.get(elt[0], default_priority)

        def format_feedback(feedback_type, value):
            return """
            <div class="{feedback_type}">
            {value}
            </div>
            """.format(feedback_type=feedback_type, value=value)

        # TODO (vshnayder): design and document the details of this format so
        # that we can do proper escaping here (e.g. are the graders allowed to
        # include HTML?)

        for tag in ['success', 'feedback']:
            if tag not in response_items:
                return format_feedback('errors', 'Error getting feedback')

        feedback_items = response_items['feedback']
        try:
            feedback = json.loads(feedback_items)
        except (TypeError, ValueError):
            log.exception("feedback_items have invalid json %r", feedback_items)
            return format_feedback('errors', 'Could not parse feedback')

        if response_items['success']:
            if len(feedback) == 0:
                return format_feedback('errors', 'No feedback available')

            feedback_lst = sorted(feedback.items(), key=get_priority)
            return u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
        else:
            return format_feedback('errors', response_items['feedback'])


    def _format_feedback(self, response_items):
        """
        Input:
            Dictionary called feedback. Must contain keys seen below.
        Output:
            Return error message or feedback template
        """

        feedback = self._convert_longform_feedback_to_html(response_items)

        if not response_items['success']:
            return self.system.render_template("open_ended_error.html",
                                               {'errors' : feedback})

        feedback_template = self.system.render_template("open_ended_feedback.html", {
            'grader_type': response_items['grader_type'],
            'score': response_items['score'],
            'feedback': feedback,
        })

        return feedback_template


    def _parse_score_msg(self, score_msg):
        """
        Grader reply is a JSON-dump of the following dict
            { 'correct': True/False,
              'score': Numeric value (floating point is okay) to assign to answer
              'msg': grader_msg
              'feedback' : feedback from grader
            }

        Returns (valid_score_msg, correct, score, msg):
            valid_score_msg: Flag indicating valid score_msg format (Boolean)
            correct:         Correctness of submission (Boolean)
            score:           Points to be assigned (numeric, can be float)
        """
        fail = ScoreMessage(valid=False, correct=False, points=0, msg='')
        try:
            score_result = json.loads(score_msg)
        except (TypeError, ValueError):
            log.error("External grader message should be a JSON-serialized dict."
                      " Received score_msg = {0}".format(score_msg))
            return fail

        if not isinstance(score_result, dict):
            log.error("External grader message should be a JSON-serialized dict."
                      " Received score_result = {0}".format(score_result))
            return fail

        for tag in ['score', 'feedback', 'grader_type', 'success']:
            if tag not in score_result:
                log.error("External grader message is missing required tag: {0}"
                          .format(tag))
                return fail

        feedback = self._format_feedback(score_result)

        # HACK: for now, just assume it's correct if you got more than 2/3.
        # Also assumes that score_result['score'] is an integer.
        score_ratio = int(score_result['score']) / float(self.max_score)
        correct = (score_ratio >= 0.66)

        # Currently ignore msg and only return feedback (which takes the place of msg)
        return ScoreMessage(valid=True, correct=correct,
                            points=score_result['score'], msg=feedback)

#-----------------------------------------------------------------------------
# TEMPORARY: List of all response subclasses
# FIXME: To be replaced by auto-registration

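A hedged example of a grader reply that _parse_score_msg would accept, with all four required tags present (the values are invented):

```python
import json

reply = json.dumps({
    'score': 2,
    'feedback': json.dumps({'spelling': 'Two misspellings found.',
                            'grammar': 'Grammar is fine.'}),
    'grader_type': 'ML',
    'success': True,
})
# With max_score = 2, _parse_score_msg(reply) returns a ScoreMessage with
# valid=True, points=2, and correct=True (2 / 2.0 >= 0.66).
```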
@@ -1828,4 +2164,5 @@ __all__ = [CodeResponse,
           ChoiceResponse,
           MultipleChoiceResponse,
           TrueFalseResponse,
           JavascriptResponse]
           JavascriptResponse,
           OpenEndedResponse]

29 common/lib/capa/capa/templates/openendedinput.html Normal file
@@ -0,0 +1,29 @@
<section id="openended_${id}" class="openended">
  <textarea rows="${rows}" cols="${cols}" name="input_${id}" id="input_${id}"
    % if hidden:
      style="display:none;"
    % endif
  >${value|h}</textarea>

  <div class="grader-status">
    % if status == 'unsubmitted':
      <span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
    % elif status == 'correct':
      <span class="correct" id="status_${id}">Correct</span>
    % elif status == 'incorrect':
      <span class="incorrect" id="status_${id}">Incorrect</span>
    % elif status == 'queued':
      <span class="grading" id="status_${id}">Submitted for grading</span>
    % endif

    % if hidden:
      <div style="display:none;" name="${hidden}" inputid="input_${id}" />
    % endif
  </div>

  <span id="answer_${id}"></span>

  <div class="external-grader-message">
    ${msg|n}
  </div>
</section>
@@ -65,3 +65,25 @@ def is_file(file_to_test):
    Duck typing to check if 'file_to_test' is a File object
    '''
    return all(hasattr(file_to_test, method) for method in ['read', 'name'])


def find_with_default(node, path, default):
    """
    Look for a child of node using the given path, and return its text if found.
    Otherwise returns default.

    Arguments:
        node: lxml node
        path: xpath search expression
        default: value to return if nothing found

    Returns:
        node.find(path).text if the find succeeds, default otherwise.

    """
    v = node.find(path)
    if v is not None:
        return v.text
    else:
        return default

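A hedged usage sketch for find_with_default (the XML fragment is invented for illustration):

```python
from lxml import etree

node = etree.fromstring(
    "<codeparam><initial_display>print 'hi'</initial_display></codeparam>")

initial = find_with_default(node, 'initial_display', '')  # "print 'hi'"
answer = find_with_default(node, 'answer_display', 'No answer provided.')  # default
```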
@@ -49,6 +49,7 @@ def parse_xreply(xreply):

    return_code = xreply['return_code']
    content = xreply['content']

    return (return_code, content)


@@ -80,7 +81,11 @@ class XQueueInterface(object):

        # Log in, then try again
        if error and (msg == 'login_required'):
            self._login()
            (error, content) = self._login()
            if error != 0:
                # when the login fails
                log.debug("Failed to login to queue: %s", content)
                return (error, content)
        if files_to_upload is not None:
            # Need to rewind file pointers
            for f in files_to_upload:
@@ -146,6 +146,11 @@ class CapaModule(XModule):
        else:
            self.seed = None

        # Need the problem location in openendedresponse to send out. Adding
        # it to the system here seems like the least clunky way to get it
        # there.
        self.system.set('location', self.location.url())

        try:
            # TODO (vshnayder): move as much as possible of this work and error
            # checking to descriptor load time
@@ -121,16 +121,6 @@ section.problem {
    }
  }

  &.processing {
    p.status {
      @include inline-block();
      background: url('../images/spinner.gif') center center no-repeat;
      height: 20px;
      width: 20px;
      text-indent: -9999px;
    }
  }

  &.correct, &.ui-icon-check {
    p.status {
      @include inline-block();
@@ -266,6 +256,11 @@ section.problem {
      margin: -7px 7px 0 0;
    }

    .grading {
      text-indent: 0px;
      margin: 0px 7px 0 0;
    }

    p {
      line-height: 20px;
      text-transform: capitalize;
@@ -685,6 +680,21 @@ section.problem {
          color: #B00;
        }
      }

      .markup-text {
        margin: 5px;
        padding: 20px 0px 15px 50px;
        border-top: 1px solid #DDD;
        border-left: 20px solid #FAFAFA;

        bs {
          color: #BB0000;
        }

        bg {
          color: #BDA046;
        }
      }
    }
  }
}

@@ -339,6 +339,12 @@ class ModuleStore(object):
        '''
        raise NotImplementedError

    def get_course(self, course_id):
        '''
        Look for a specific course id. Returns the course descriptor, or None if not found.
        '''
        raise NotImplementedError

    def get_parent_locations(self, location):
        '''Find all locations that are the parents of this location. Needed
        for path_to_location().
@@ -399,3 +405,10 @@ class ModuleStoreBase(ModuleStore):

        errorlog = self._get_errorlog(location)
        return errorlog.errors

    def get_course(self, course_id):
        """Default impl--linear search through course list"""
        for c in self.get_courses():
            if c.id == course_id:
                return c
        return None

@@ -373,6 +373,14 @@ class SelfAssessmentModule(XModule):
    def save_answer(self, get):
        """
        After the answer is submitted, show the rubric.

        Args:
            get: the GET dictionary passed to the ajax request. Should contain
                a key 'student_answer'

        Returns:
            Dictionary with keys 'success' and either 'error' (if not success),
            or 'rubric_html' (if success).
        """
        # Check to see if attempts are less than max
        if self.attempts > self.max_attempts:

@@ -10,7 +10,7 @@ from xmodule.progress import Progress
from xmodule.exceptions import NotFoundError
from pkg_resources import resource_string

log = logging.getLogger("mitx.common.lib.seq_module")
log = logging.getLogger(__name__)

# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'

@@ -809,7 +809,8 @@ class ModuleSystem(object):
                 debug=False,
                 xqueue=None,
                 node_path="",
                 anonymous_student_id=''):
                 anonymous_student_id='',
                 course_id=None):
        '''
        Create a closure around the system environment.

@@ -844,6 +845,8 @@ class ModuleSystem(object):
        ajax results.

        anonymous_student_id - Used for tracking modules with student id

        course_id - the course_id containing this module
        '''
        self.ajax_url = ajax_url
        self.xqueue = xqueue
@@ -856,6 +859,7 @@ class ModuleSystem(object):
        self.replace_urls = replace_urls
        self.node_path = node_path
        self.anonymous_student_id = anonymous_student_id
        self.course_id = course_id
        self.user_is_staff = user is not None and user.is_staff

    def get(self, attr):

@@ -67,6 +67,15 @@ To run a single nose test:

Very handy: if you uncomment the `--pdb` argument in `NOSE_ARGS` in `lms/envs/test.py`, it will drop you into pdb on error. This lets you go up and down the stack and see what the values of the variables are. Check out http://docs.python.org/library/pdb.html

## Testing using queue servers

When testing problems that use a queue server on AWS (e.g. sandbox-xqueue.edx.org), you'll need to run your server on your public IP, like so:

`django-admin.py runserver --settings=lms.envs.dev --pythonpath=. 0.0.0.0:8000`

When you connect to the LMS, you need to use the public IP. Use `ifconfig` to figure out the number, and connect e.g. to `http://18.3.4.5:8000/`


## Content development

If you change course content while running the LMS in dev mode, there is no need to restart to refresh the modulestore.

@@ -418,6 +418,10 @@ If you want to customize the courseware tabs displayed for your course, specify
* "external_link". Parameters "name", "link".
* "textbooks". No parameters--generates tab names from book titles.
* "progress". Parameter "name".
* "static_tab". Parameters "name", 'url_slug'--will look for tab contents in
  'tabs/{course_url_name}/{tab url_slug}.html'
* "staff_grading". No parameters. If specified, displays the staff grading tab for instructors (see the sketch after this list).

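For illustration, a hedged sketch of what a tabs list using the types above might look like in a course policy file (the names and ordering here are invented):

```json
"tabs": [
    {"type": "progress", "name": "Progress"},
    {"type": "static_tab", "name": "Syllabus", "url_slug": "syllabus"},
    {"type": "staff_grading"}
]
```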
# Tips for content developers

@@ -429,9 +433,7 @@ before the week 1 material to make it easy to find in the file.

* Come up with a consistent pattern for url_names, so that it's easy to know where to look for any piece of content. It will also help to come up with a standard way of splitting your content files. As a point of departure, we suggest splitting chapters, sequences, html, and problems into separate files.

* A heads up: our content management system will allow you to develop content through a web browser, but will be backed by this same xml at first. Once that happens, every element will be in its own file to make access and updates faster.

* Prefer the most "semantic" name for containers: e.g., use problemset rather than vertical for a problem set. That way, if we decide to display problem sets differently, we don't have to change the xml.
* Prefer the most "semantic" name for containers: e.g., use problemset rather than sequential for a problem set. That way, if we decide to display problem sets differently, we don't have to change the xml.

# Other file locations (info and about)


@@ -34,7 +34,8 @@ def has_access(user, obj, action):

    user: a Django user object. May be anonymous.

    obj: The object to check access for. For now, a module or descriptor.
    obj: The object to check access for. A module, descriptor, location, or
        certain special strings (e.g. 'global')

    action: A string specifying the action that the client is trying to perform.


@@ -1,4 +1,3 @@
import hashlib
import json
import logging
import pyparsing
@@ -20,6 +19,7 @@ from mitxmako.shortcuts import render_to_string
from models import StudentModule, StudentModuleCache
from psychometrics.psychoanalyze import make_psychometrics_data_update_handler
from static_replace import replace_urls
from student.models import unique_id_for_user
from xmodule.errortracker import exc_info_to_str
from xmodule.exceptions import NotFoundError
from xmodule.modulestore import Location
@@ -152,12 +152,6 @@ def _get_module(user, request, location, student_module_cache, course_id, positi
    if not has_access(user, descriptor, 'load'):
        return None

    # Anonymized student identifier
    h = hashlib.md5()
    h.update(settings.SECRET_KEY)
    h.update(str(user.id))
    anonymous_student_id = h.hexdigest()

    # Only check the cache if this module can possibly have state
    instance_module = None
    shared_module = None
@@ -230,7 +224,8 @@ def _get_module(user, request, location, student_module_cache, course_id, positi
                          # by the replace_static_urls code below
                          replace_urls=replace_urls,
                          node_path=settings.NODE_PATH,
                          anonymous_student_id=anonymous_student_id,
                          anonymous_student_id=unique_id_for_user(user),
                          course_id=course_id,
                          )
    # pass position specified in URL to module through ModuleSystem
    system.set('position', position)

@@ -36,7 +36,7 @@ CourseTab = namedtuple('CourseTab', 'name link is_active')
# wrong. (e.g. "is there a 'name' field?). Validators can assume
# that the type field is valid.
#
# - a function that takes a config, a user, and a course, and active_page and
# - a function that takes a config, a user, and a course, an active_page and
#   return a list of CourseTabs. (e.g. "return a CourseTab with specified
#   name, link to courseware, and is_active=True/False"). The function can
#   assume that it is only called with configs of the appropriate type that
@@ -97,6 +97,14 @@ def _textbooks(tab, user, course, active_page):
            for index, textbook in enumerate(course.textbooks)]
    return []


def _staff_grading(tab, user, course, active_page):
    if has_access(user, course, 'staff'):
        link = reverse('staff_grading', args=[course.id])
        return [CourseTab('Staff grading', link, active_page == "staff_grading")]
    return []


#### Validators


@@ -132,6 +140,7 @@ VALID_TAB_TYPES = {
    'textbooks': TabImpl(null_validator, _textbooks),
    'progress': TabImpl(need_name, _progress),
    'static_tab': TabImpl(key_checker(['name', 'url_slug']), _static_tab),
    'staff_grading': TabImpl(null_validator, _staff_grading),
    }



@@ -193,13 +193,27 @@ class PageLoader(ActivateLoginTestCase):

    def check_for_get_code(self, code, url):
        """
        Check that we got the expected code. Hacks around our broken 404
        handling.
        Check that we got the expected code when accessing url via GET.
        Returns the response.
        """
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, code,
                         "got code {0} for url '{1}'. Expected code {2}"
                         .format(resp.status_code, url, code))
        return resp


    def check_for_post_code(self, code, url, data={}):
        """
        Check that we got the expected code when accessing url via POST.
        Returns the response.
        """
        resp = self.client.post(url, data)
        self.assertEqual(resp.status_code, code,
                         "got code {0} for url '{1}'. Expected code {2}"
                         .format(resp.status_code, url, code))
        return resp


    def check_pages_load(self, course_name, data_dir, modstore):
@@ -286,14 +300,10 @@ class TestNavigation(PageLoader):

    def setUp(self):
        xmodule.modulestore.django._MODULESTORES = {}
        courses = modulestore().get_courses()

        def find_course(course_id):
            """Assumes the course is present"""
            return [c for c in courses if c.id==course_id][0]

        self.full = find_course("edX/full/6.002_Spring_2012")
        self.toy = find_course("edX/toy/2012_Fall")
        # Assume courses are there
        self.full = modulestore().get_course("edX/full/6.002_Spring_2012")
        self.toy = modulestore().get_course("edX/toy/2012_Fall")

        # Create two accounts
        self.student = 'view@test.com'
@@ -344,14 +354,9 @@ class TestViewAuth(PageLoader):

    def setUp(self):
        xmodule.modulestore.django._MODULESTORES = {}
        courses = modulestore().get_courses()

        def find_course(course_id):
            """Assumes the course is present"""
            return [c for c in courses if c.id==course_id][0]

        self.full = find_course("edX/full/6.002_Spring_2012")
        self.toy = find_course("edX/toy/2012_Fall")
        self.full = modulestore().get_course("edX/full/6.002_Spring_2012")
        self.toy = modulestore().get_course("edX/toy/2012_Fall")

        # Create two accounts
        self.student = 'view@test.com'
@@ -629,46 +634,46 @@ class TestCourseGrader(PageLoader):

            return [c for c in courses if c.id==course_id][0]

        self.graded_course = find_course("edX/graded/2012_Fall")


        # create a test student
        self.student = 'view@test.com'
        self.password = 'foo'
        self.create_account('u1', self.student, self.password)
        self.activate_user(self.student)
        self.enroll(self.graded_course)


        self.student_user = user(self.student)


        self.factory = RequestFactory()


    def get_grade_summary(self):
        student_module_cache = StudentModuleCache.cache_for_descriptor_descendents(
            self.graded_course.id, self.student_user, self.graded_course)

        fake_request = self.factory.get(reverse('progress',
                                        kwargs={'course_id': self.graded_course.id}))

        return grades.grade(self.student_user, fake_request,
                            self.graded_course, student_module_cache)

    def get_homework_scores(self):
        return self.get_grade_summary()['totaled_scores']['Homework']

    def get_progress_summary(self):
        student_module_cache = StudentModuleCache.cache_for_descriptor_descendents(
            self.graded_course.id, self.student_user, self.graded_course)


        fake_request = self.factory.get(reverse('progress',
                                        kwargs={'course_id': self.graded_course.id}))

        progress_summary = grades.progress_summary(self.student_user, fake_request,
        return grades.grade(self.student_user, fake_request,
                            self.graded_course, student_module_cache)

    def get_homework_scores(self):
        return self.get_grade_summary()['totaled_scores']['Homework']

    def get_progress_summary(self):
        student_module_cache = StudentModuleCache.cache_for_descriptor_descendents(
            self.graded_course.id, self.student_user, self.graded_course)

        fake_request = self.factory.get(reverse('progress',
                                        kwargs={'course_id': self.graded_course.id}))

        progress_summary = grades.progress_summary(self.student_user, fake_request,
                                                   self.graded_course, student_module_cache)
        return progress_summary


    def check_grade_percent(self, percent):
        grade_summary = self.get_grade_summary()
        self.assertEqual(percent, grade_summary['percent'])

        self.assertEqual(grade_summary['percent'], percent)

    def submit_question_answer(self, problem_url_name, responses):
        """
        The field names of a problem are hard to determine. This method only works
@@ -678,96 +683,96 @@ class TestCourseGrader(PageLoader):
        input_i4x-edX-graded-problem-H1P3_2_2
        """
        problem_location = "i4x://edX/graded/problem/{0}".format(problem_url_name)

        modx_url = reverse('modx_dispatch',

        modx_url = reverse('modx_dispatch',
                           kwargs={
                               'course_id' : self.graded_course.id,
                               'location' : problem_location,
                               'dispatch' : 'problem_check', }
                           )


        resp = self.client.post(modx_url, {
            'input_i4x-edX-graded-problem-{0}_2_1'.format(problem_url_name): responses[0],
            'input_i4x-edX-graded-problem-{0}_2_2'.format(problem_url_name): responses[1],
        })
        print "modx_url", modx_url, "responses", responses
        print "resp", resp


        return resp


    def problem_location(self, problem_url_name):
        return "i4x://edX/graded/problem/{0}".format(problem_url_name)


    def reset_question_answer(self, problem_url_name):
        problem_location = self.problem_location(problem_url_name)

        modx_url = reverse('modx_dispatch',

        modx_url = reverse('modx_dispatch',
                           kwargs={
                               'course_id' : self.graded_course.id,
                               'location' : problem_location,
                               'dispatch' : 'problem_reset', }
                           )


        resp = self.client.post(modx_url)
        return resp

        return resp

    def test_get_graded(self):
        #### Check that the grader shows we have 0% in the course
        self.check_grade_percent(0)


        #### Submit the answers to a few problems as ajax calls
        def earned_hw_scores():
            """Global scores, each Score is a Problem Set"""
            return [s.earned for s in self.get_homework_scores()]


        def score_for_hw(hw_url_name):
            hw_section = [section for section
                          in self.get_progress_summary()[0]['sections']
                          if section.get('url_name') == hw_url_name][0]
            return [s.earned for s in hw_section['scores']]


        # Only get half of the first problem correct
        self.submit_question_answer('H1P1', ['Correct', 'Incorrect'])
        self.check_grade_percent(0.06)
        self.assertEqual(earned_hw_scores(), [1.0, 0, 0])   # Order matters
        self.assertEqual(score_for_hw('Homework1'), [1.0, 0.0])


        # Get both parts of the first problem correct
        self.reset_question_answer('H1P1')
        self.submit_question_answer('H1P1', ['Correct', 'Correct'])
        self.check_grade_percent(0.13)
        self.assertEqual(earned_hw_scores(), [2.0, 0, 0])
        self.assertEqual(score_for_hw('Homework1'), [2.0, 0.0])


        # This problem is shown in an ABTest
        self.submit_question_answer('H1P2', ['Correct', 'Correct'])
        self.check_grade_percent(0.25)
        self.assertEqual(earned_hw_scores(), [4.0, 0.0, 0])
        self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])

        self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])

        # This problem is hidden in an ABTest. Getting it correct doesn't change total grade
        self.submit_question_answer('H1P3', ['Correct', 'Correct'])
        self.check_grade_percent(0.25)
        self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0])


        # On the second homework, we only answer half of the questions.
        # Then it will be dropped when homework three becomes the higher percent
        # This problem is also weighted to be 4 points (instead of default of 2)
        # If the problem was unweighted the percent would have been 0.38 so we
        # If the problem was unweighted the percent would have been 0.38 so we
        # know it works.
        self.submit_question_answer('H2P1', ['Correct', 'Correct'])
        self.check_grade_percent(0.42)
        self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0])

        self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0])

        # Third homework
        self.submit_question_answer('H3P1', ['Correct', 'Correct'])
        self.check_grade_percent(0.42)   # Score didn't change
        self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0])

        self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0])

        self.submit_question_answer('H3P2', ['Correct', 'Correct'])
        self.check_grade_percent(0.5)   # Now homework2 dropped. Score changes
        self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0])

        self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0])

        # Now we answer the final question (worth half of the grade)
        self.submit_question_answer('FinalQuestion', ['Correct', 'Correct'])
        self.check_grade_percent(1.0)   # Hooray! We got 100%

25 lms/djangoapps/instructor/grading.py Normal file
@@ -0,0 +1,25 @@
"""
LMS part of instructor grading:

- views + ajax handling
- calls the instructor grading service
"""

import json
import logging

log = logging.getLogger(__name__)


class StaffGrading(object):
    """
    Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
    """
    def __init__(self, course):
        self.course = course

    def get_html(self):
        return "<b>Instructor grading!</b>"
        # context = {}
        # return render_to_string('courseware/instructor_grading_view.html', context)

291 lms/djangoapps/instructor/staff_grading_service.py Normal file
@@ -0,0 +1,291 @@
"""
This module provides views that proxy to the staff grading backend service.
"""

import json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
import sys

from django.conf import settings
from django.http import HttpResponse, Http404

from courseware.access import has_access
from util.json_request import expect_json
from xmodule.course_module import CourseDescriptor

log = logging.getLogger(__name__)


class GradingServiceError(Exception):
    pass


class MockStaffGradingService(object):
    """
    A simple mockup of a staff grading service, for testing.
    """
    def __init__(self):
        self.cnt = 0

    def get_next(self, course_id, grader_id):
        self.cnt += 1
        return json.dumps({'success': True,
                           'submission_id': self.cnt,
                           'submission': 'Test submission {cnt}'.format(cnt=self.cnt),
                           'max_score': 2 + self.cnt % 3,
                           'rubric': 'A rubric'})

    def save_grade(self, course_id, grader_id, submission_id, score, feedback):
        return self.get_next(course_id, grader_id)


class StaffGradingService(object):
    """
    Interface to staff grading backend.
    """
    def __init__(self, config):
        self.username = config['username']
        self.password = config['password']
        self.url = config['url']

        self.login_url = self.url + '/login/'
        self.get_next_url = self.url + '/get_next_submission/'
        self.save_grade_url = self.url + '/save_grade/'

        self.session = requests.session()


    def _login(self):
        """
        Log into the staff grading service.

        Raises requests.exceptions.HTTPError if something goes wrong.

        Returns the decoded json dict of the response.
        """
        response = self.session.post(self.login_url,
                                     {'username': self.username,
                                      'password': self.password,})

        response.raise_for_status()

        return response.json


    def _try_with_login(self, operation):
        """
        Call operation(), which should return a requests response object. If
        the request fails with a 'login_required' error, call _login() and try
        the operation again.

        Returns the result of operation(). Does not catch exceptions.
        """
        response = operation()
        if (response.json
            and response.json.get('success') == False
            and response.json.get('error') == 'login_required'):
            # apparently we aren't logged in. Try to fix that.
            r = self._login()
            if r and not r.get('success'):
                log.warning("Couldn't log into staff_grading backend. Response: %s",
                            r)
            # try again
            return operation()

        return response


    def get_next(self, course_id, grader_id):
        """
        Get the next thing to grade.

        Args:
            course_id: course id to get submission for
            grader_id: who is grading this? The anonymous user_id of the grader.

        Returns:
            json string with the response from the service. (Deliberately not
            writing out the fields here--see the docs on the staff_grading view
            in the grading_controller repo)

        Raises:
            GradingServiceError: something went wrong with the connection.
        """
        op = lambda: self.session.get(self.get_next_url,
                                      allow_redirects=False,
                                      params={'course_id': course_id,
                                              'grader_id': grader_id})
        try:
            r = self._try_with_login(op)
        except (RequestException, ConnectionError, HTTPError) as err:
            # reraise as promised GradingServiceError, but preserve stacktrace.
            raise GradingServiceError, str(err), sys.exc_info()[2]

        return r.text


    def save_grade(self, course_id, grader_id, submission_id, score, feedback):
        """
        Save a score and feedback for a submission.

        Returns:
            json dict with keys
                'success': bool
                'error': error msg, if something went wrong.

        Raises:
            GradingServiceError if there's a problem connecting.
        """
        try:
            data = {'course_id': course_id,
                    'submission_id': submission_id,
                    'score': score,
                    'feedback': feedback,
                    'grader_id': grader_id}

            op = lambda: self.session.post(self.save_grade_url, data=data,
                                           allow_redirects=False)
            r = self._try_with_login(op)
        except (RequestException, ConnectionError, HTTPError) as err:
            # reraise as promised GradingServiceError, but preserve stacktrace.
            raise GradingServiceError, str(err), sys.exc_info()[2]

        return r.text

# don't initialize until grading_service() is called--means that just
# importing this file doesn't create objects that may not have the right config
_service = None

def grading_service():
    """
    Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True,
    returns a mock one, otherwise a real one.

    Caches the result, so changing the setting after the first call to this
    function will have no effect.
    """
    global _service
    if _service is not None:
        return _service

    if settings.MOCK_STAFF_GRADING:
        _service = MockStaffGradingService()
    else:
        _service = StaffGradingService(settings.STAFF_GRADING_INTERFACE)

    return _service

def _err_response(msg):
    """
    Return a HttpResponse with a json dump with success=False, and the given error message.
    """
    return HttpResponse(json.dumps({'success': False, 'error': msg}),
                        mimetype="application/json")


def _check_access(user, course_id):
    """
    Raise 404 if user doesn't have staff access to course_id
    """
    course_location = CourseDescriptor.id_to_location(course_id)
    if not has_access(user, course_location, 'staff'):
        raise Http404

    return


def get_next(request, course_id):
    """
    Get the next thing to grade for course_id.

    Returns a json dict with the following keys:

    'success': bool

    'submission_id': a unique identifier for the submission, to be passed back
        with the grade.

    'submission': the submission, rendered as read-only html for grading

    'rubric': the rubric, also rendered as html.

    'message': if there was no submission available, but nothing went wrong,
        there will be a message field.

    'error': if success is False, will have an error message with more info.
    """
    _check_access(request.user, course_id)

    return HttpResponse(_get_next(course_id, request.user.id),
                        mimetype="application/json")


def _get_next(course_id, grader_id):
    """
    Implementation of get_next (also called from save_grade) -- returns a json string
    """

    try:
        return grading_service().get_next(course_id, grader_id)
    except GradingServiceError:
        log.exception("Error from grading service. server url: {0}"
                      .format(grading_service().url))
        return json.dumps({'success': False,
                           'error': 'Could not connect to grading service'})


@expect_json
def save_grade(request, course_id):
    """
    Save the grade and feedback for a submission, and, if all goes well, return
    the next thing to grade.

    Expects the following POST parameters:
    'score': int
    'feedback': string
    'submission_id': int

    Returns the same thing as get_next, except that additional error messages
    are possible if something goes wrong with saving the grade.
    """
    _check_access(request.user, course_id)

    if request.method != 'POST':
        raise Http404

    required = set(['score', 'feedback', 'submission_id'])
    actual = set(request.POST.keys())
    missing = required - actual
    if len(missing) != 0:
        return _err_response('Missing required keys {0}'.format(
            ', '.join(missing)))

    grader_id = request.user.id
    p = request.POST

    try:
        result_json = grading_service().save_grade(course_id,
                                                   grader_id,
                                                   p['submission_id'],
                                                   p['score'],
                                                   p['feedback'])
    except GradingServiceError:
        log.exception("Error saving grade")
        return _err_response('Could not connect to grading service')

    try:
        result = json.loads(result_json)
    except ValueError:
        log.exception("save_grade returned broken json: %s", result_json)
        return _err_response('Grading service returned malformed data.')

    if not result.get('success', False):
        log.warning('Got success=False from grading service. Response: %s', result_json)
        return _err_response('Grading service failed')

    # Ok, save_grade seemed to work. Get the next submission to grade.
    return HttpResponse(_get_next(course_id, grader_id),
                        mimetype="application/json")

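A hedged sketch of exercising the proxy end to end against the mock backend (the course id and grader id are invented; assumes settings.MOCK_STAFF_GRADING is True so grading_service() returns the mock):

```python
import json

service = grading_service()  # MockStaffGradingService under MOCK_STAFF_GRADING
reply = json.loads(service.get_next('edX/toy/2012_Fall', grader_id=42))
if reply['success']:
    service.save_grade('edX/toy/2012_Fall', 42, reply['submission_id'],
                       score=1, feedback='Clear argument; minor spelling issues.')
```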
@@ -8,15 +8,24 @@ Notes for running by hand:
|
||||
django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/instructor
|
||||
"""
|
||||
|
||||
import courseware.tests.tests as ct
|
||||
|
||||
import json
|
||||
|
||||
from nose import SkipTest
|
||||
from mock import patch, Mock
|
||||
|
||||
from override_settings import override_settings
|
||||
|
||||
from django.contrib.auth.models import \
|
||||
Group # Need access to internal func to put users in the right group
|
||||
# Need access to internal func to put users in the right group
|
||||
from django.contrib.auth.models import Group
|
||||
|
||||
from django.core.urlresolvers import reverse
|
||||
from django_comment_client.models import Role, FORUM_ROLE_ADMINISTRATOR, \
|
||||
FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_STUDENT
|
||||
from django_comment_client.utils import has_forum_access
|
||||
|
||||
from instructor import staff_grading_service
|
||||
from courseware.access import _course_staff_group_name
|
||||
import courseware.tests.tests as ct
|
||||
from xmodule.modulestore.django import modulestore
|
||||
@@ -31,14 +40,9 @@ class TestInstructorDashboardGradeDownloadCSV(ct.PageLoader):
|
||||
|
||||
def setUp(self):
|
||||
xmodule.modulestore.django._MODULESTORES = {}
|
||||
courses = modulestore().get_courses()
|
||||
|
||||
def find_course(name):
|
||||
"""Assumes the course is present"""
|
||||
return [c for c in courses if c.location.course==name][0]
|
||||
|
||||
self.full = find_course("full")
|
||||
self.toy = find_course("toy")
|
||||
self.full = modulestore().get_course("edX/full/6.002_Spring_2012")
|
||||
self.toy = modulestore().get_course("edX/toy/2012_Fall")
|
||||
|
||||
# Create two accounts
|
||||
self.student = 'view@test.com'
|
||||
@@ -49,9 +53,12 @@ class TestInstructorDashboardGradeDownloadCSV(ct.PageLoader):
|
||||
self.activate_user(self.student)
|
||||
self.activate_user(self.instructor)
|
||||
|
||||
group_name = _course_staff_group_name(self.toy.location)
|
||||
g = Group.objects.create(name=group_name)
|
||||
g.user_set.add(ct.user(self.instructor))
|
||||
def make_instructor(course):
|
||||
group_name = _course_staff_group_name(course.location)
|
||||
g = Group.objects.create(name=group_name)
|
||||
g.user_set.add(ct.user(self.instructor))
|
||||
|
||||
make_instructor(self.toy)
|
||||
|
||||
self.logout()
|
||||
self.login(self.instructor, self.password)
@@ -67,18 +74,21 @@ class TestInstructorDashboardGradeDownloadCSV(ct.PageLoader):

        self.assertEqual(response['Content-Type'],'text/csv',msg)

        cdisp = response['Content-Disposition'].replace('TT_2012','2012')  # jenkins course_id is TT_2012_Fall instead of 2012_Fall?
        msg += "cdisp = '{0}'\n".format(cdisp)
        self.assertEqual(cdisp,'attachment; filename=grades_edX/toy/2012_Fall.csv',msg)
        cdisp = response['Content-Disposition']
        msg += "Content-Disposition = '%s'\n" % cdisp
        self.assertEqual(cdisp, 'attachment; filename=grades_{0}.csv'.format(course.id), msg)

        body = response.content.replace('\r','')
        msg += "body = '{0}'\n".format(body)

        # All the not-actually-in-the-course hw and labs come from the
        # default grading policy string in graders.py
        expected_body = '''"ID","Username","Full Name","edX email","External email","HW 01","HW 02","HW 03","HW 04","HW 05","HW 06","HW 07","HW 08","HW 09","HW 10","HW 11","HW 12","HW Avg","Lab 01","Lab 02","Lab 03","Lab 04","Lab 05","Lab 06","Lab 07","Lab 08","Lab 09","Lab 10","Lab 11","Lab 12","Lab Avg","Midterm","Final"
"2","u2","Fred Weasley","view2@test.com","","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0.0","0.0"
'''
        self.assertEqual(body, expected_body, msg)
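(The expected CSV above is one long literal, which is hard to eyeball. A quick sanity-check sketch, using only the stdlib and not part of this PR, that the header and data row have matching column counts:)

# Sketch: check the expected CSV literal is well-formed before comparing bodies.
import csv, StringIO

rows = list(csv.reader(StringIO.StringIO(expected_body)))
print [len(r) for r in rows]  # header and data-row column counts should line up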



FORUM_ROLES = [ FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA ]
FORUM_ADMIN_ACTION_SUFFIX = { FORUM_ROLE_ADMINISTRATOR : 'admin', FORUM_ROLE_MODERATOR : 'moderator', FORUM_ROLE_COMMUNITY_TA : 'community TA'}
FORUM_ADMIN_USER = { FORUM_ROLE_ADMINISTRATOR : 'forumadmin', FORUM_ROLE_MODERATOR : 'forummoderator', FORUM_ROLE_COMMUNITY_TA : 'forummoderator'}
@@ -89,22 +99,22 @@ def action_name(operation, rolename):
    else:
        return '{0} forum {1}'.format(operation, FORUM_ADMIN_ACTION_SUFFIX[rolename])


_mock_service = staff_grading_service.MockStaffGradingService()

@override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE)
class TestInstructorDashboardForumAdmin(ct.PageLoader):
    '''
    Check for change in forum admin role memberships
    '''


    def setUp(self):
        xmodule.modulestore.django._MODULESTORES = {}
        courses = modulestore().get_courses()

        def find_course(name):
            """Assumes the course is present"""
            return [c for c in courses if c.location.course==name][0]

        self.full = find_course("full")
        self.toy = find_course("toy")
        self.course_id = "edX/toy/2012_Fall"
        self.toy = modulestore().get_course(self.course_id)

        # Create two accounts
        self.student = 'view@test.com'
@@ -123,6 +133,8 @@ class TestInstructorDashboardForumAdmin(ct.PageLoader):
        self.login(self.instructor, self.password)
        self.enroll(self.toy)



    def initialize_roles(self, course_id):
        self.admin_role = Role.objects.get_or_create(name=FORUM_ROLE_ADMINISTRATOR, course_id=course_id)[0]
        self.moderator_role = Role.objects.get_or_create(name=FORUM_ROLE_MODERATOR, course_id=course_id)[0]
@@ -209,3 +221,74 @@ class TestInstructorDashboardForumAdmin(ct.PageLoader):
        added_roles.sort()
        roles = ', '.join(added_roles)
        self.assertTrue(response.content.find('<td>{0}</td>'.format(roles))>=0, 'not finding roles "{0}"'.format(roles))


@override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE)
class TestStaffGradingService(ct.PageLoader):
    '''
    Check that staff grading service proxy works. Basically just checking the
    access control and error handling logic -- all the actual work is on the
    backend.
    '''
    def setUp(self):
        xmodule.modulestore.django._MODULESTORES = {}

        self.student = 'view@test.com'
        self.instructor = 'view2@test.com'
        self.password = 'foo'
        self.create_account('u1', self.student, self.password)
        self.create_account('u2', self.instructor, self.password)
        self.activate_user(self.student)
        self.activate_user(self.instructor)

        self.course_id = "edX/toy/2012_Fall"
        self.toy = modulestore().get_course(self.course_id)
        def make_instructor(course):
            group_name = _course_staff_group_name(course.location)
            g = Group.objects.create(name=group_name)
            g.user_set.add(ct.user(self.instructor))

        make_instructor(self.toy)

        self.mock_service = staff_grading_service.grading_service()

        self.logout()

    def test_access(self):
        """
        Make sure only staff have access.
        """
        self.login(self.student, self.password)

        # both get and post should return 404
        for view_name in ('staff_grading_get_next', 'staff_grading_save_grade'):
            url = reverse(view_name, kwargs={'course_id': self.course_id})
            self.check_for_get_code(404, url)
            self.check_for_post_code(404, url)


    def test_get_next(self):
        self.login(self.instructor, self.password)

        url = reverse('staff_grading_get_next', kwargs={'course_id': self.course_id})

        r = self.check_for_get_code(200, url)
        d = json.loads(r.content)
        self.assertTrue(d['success'])
        self.assertEquals(d['submission_id'], self.mock_service.cnt)


    def test_save_grade(self):
        self.login(self.instructor, self.password)

        url = reverse('staff_grading_save_grade', kwargs={'course_id': self.course_id})

        data = {'score': '12',
                'feedback': 'great!',
                'submission_id': '123'}
        r = self.check_for_post_code(200, url, data)
        d = json.loads(r.content)
        self.assertTrue(d['success'], str(d))
        self.assertEquals(d['submission_id'], self.mock_service.cnt)
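(These tests run against `staff_grading_service.MockStaffGradingService`, enabled via `MOCK_STAFF_GRADING` in the test settings below. Its implementation isn't shown in this diff; from the assertions on `self.mock_service.cnt`, it presumably looks something like this hypothetical sketch:)

# Hypothetical sketch -- the real class lives in instructor/staff_grading_service.py.
import json

class MockStaffGradingService(object):
    """Fake grading backend: counts requests and echoes canned data."""
    def __init__(self):
        self.cnt = 0

    def get_next(self, course_id, grader_id):
        self.cnt += 1
        return json.dumps({'success': True,
                           'submission_id': self.cnt,
                           'submission': 'Test submission %s' % self.cnt,
                           'rubric': 'A rubric',
                           'prompt': 'A prompt',
                           'max_score': 4,
                           'ml_error_info': ''})

    def save_grade(self, course_id, grader_id, submission_id, score, feedback):
        # Saving a grade just hands back the next submission, like the real service.
        return self.get_next(course_id, grader_id)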



@@ -12,6 +12,7 @@ from django.http import HttpResponse
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from mitxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse

from courseware import grades
from courseware.access import has_access, get_access_group_name
@@ -27,7 +28,10 @@ from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundErr
from xmodule.modulestore.search import path_to_location
import track.views

log = logging.getLogger("mitx.courseware")
from .grading import StaffGrading


log = logging.getLogger(__name__)

template_imports = {'urllib': urllib}

@@ -87,7 +91,7 @@ def instructor_dashboard(request, course_id):
    try:
        group = Group.objects.get(name=staffgrp)
    except Group.DoesNotExist:
        group = Group(name=staffgrp)  # create the group
        group = Group(name=staffgrp)  # create the group
        group.save()
    return group

@@ -377,7 +381,7 @@ def get_student_grade_summary_data(request, course, course_id, get_grades=True,
    enrolled_students = User.objects.filter(courseenrollment__course_id=course_id).prefetch_related("groups").order_by('username')

    header = ['ID', 'Username', 'Full Name', 'edX email', 'External email']
    if get_grades:
    if get_grades and enrolled_students.count() > 0:
        # just to construct the header
        gradeset = grades.grade(enrolled_students[0], request, course, keep_raw_scores=get_raw_scores)
        # log.debug('student {0} gradeset {1}'.format(enrolled_students[0], gradeset))
@@ -409,6 +413,29 @@ def get_student_grade_summary_data(request, course, course_id, get_grades=True,
    return datatable



@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
    """
    Show the instructor grading interface.
    """
    course = get_course_with_access(request.user, course_id, 'staff')

    grading = StaffGrading(course)

    ajax_url = reverse('staff_grading', kwargs={'course_id': course_id})
    if not ajax_url.endswith('/'):
        ajax_url += '/'

    return render_to_response('instructor/staff_grading.html', {
        'view_html': grading.get_html(),
        'course': course,
        'course_id': course_id,
        'ajax_url': ajax_url,
        # Checked above
        'staff_access': True, })
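(`StaffGrading` comes from the new `courseware/grading.py`, which this excerpt doesn't show. Judging only from the view above, the interface it needs is tiny, roughly the following; both the constructor body and `get_html` here are assumptions:)

# Assumed minimal interface, inferred from the view above -- not the actual implementation.
class StaffGrading(object):
    """Wraps the course-level state needed to render the grading UI."""
    def __init__(self, course):
        self.course = course

    def get_html(self):
        # The real version presumably renders course-specific markup here.
        return ''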


@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def gradebook(request, course_id):
    """

@@ -76,5 +76,8 @@ DATABASES = AUTH_TOKENS['DATABASES']

XQUEUE_INTERFACE = AUTH_TOKENS['XQUEUE_INTERFACE']

STAFF_GRADING_INTERFACE = AUTH_TOKENS.get('STAFF_GRADING_INTERFACE')


PEARSON_TEST_USER = "pearsontest"
PEARSON_TEST_PASSWORD = AUTH_TOKENS.get("PEARSON_TEST_PASSWORD")

@@ -322,6 +322,13 @@ WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2

################################# Staff grading config #####################

STAFF_GRADING_INTERFACE = None
# Used for testing, debugging
MOCK_STAFF_GRADING = False


################################# Jasmine ###################################
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'

@@ -406,6 +413,9 @@ main_vendor_js = [

discussion_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/discussion/**/*.coffee'))

staff_grading_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/staff_grading/**/*.coffee'))


# Load javascript from all of the available xmodules, and
# prep it for use in pipeline js
from xmodule.x_module import XModuleDescriptor
@@ -468,7 +478,8 @@ with open(module_styles_path, 'w') as module_styles:

PIPELINE_JS = {
    'application': {
        # Application will contain all paths not in courseware_only_js
        # Application will contain all paths not in courseware_only_js or
        # discussion_js or staff_grading_js
        'source_filenames': [
            pth.replace(COMMON_ROOT / 'static/', '')
            for pth
@@ -476,7 +487,9 @@ PIPELINE_JS = {
        ] + [
            pth.replace(PROJECT_ROOT / 'static/', '')
            for pth in sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/**/*.coffee'))\
            if pth not in courseware_only_js and pth not in discussion_js
            if (pth not in courseware_only_js and
                pth not in discussion_js and
                pth not in staff_grading_js)
        ] + [
            'js/form.ext.js',
            'js/my_courses_dropdown.js',
@@ -505,7 +518,12 @@ PIPELINE_JS = {
    'discussion' : {
        'source_filenames': [pth.replace(PROJECT_ROOT / 'static/', '') for pth in discussion_js],
        'output_filename': 'js/discussion.js'
    },
    'staff_grading' : {
        'source_filenames': [pth.replace(PROJECT_ROOT / 'static/', '') for pth in staff_grading_js],
        'output_filename': 'js/staff_grading.js'
    }

}

PIPELINE_DISABLE_WRAPPER = True

@@ -39,7 +39,7 @@ DATABASES = {
}

CACHES = {
    # This is the cache used for most things.
    # This is the cache used for most things.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
@@ -102,7 +102,13 @@ SUBDOMAIN_BRANDING = {

COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE"

################################# Staff grading config #####################

STAFF_GRADING_INTERFACE = {
    'url': 'http://127.0.0.1:3033/staff_grading',
    'username': 'lms',
    'password': 'abcd',
}

################################ LMS Migration #################################
MITX_FEATURES['ENABLE_LMS_MIGRATION'] = True

@@ -65,6 +65,10 @@ XQUEUE_INTERFACE = {
}
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5  # seconds


# Don't rely on a real staff grading backend
MOCK_STAFF_GRADING = True
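(`MOCK_STAFF_GRADING = True` here pairs with the `grading_service()` factory the tests call. The actual factory lives in `instructor/staff_grading_service.py` and is not shown in this excerpt; a plausible shape for the dispatch, with `StaffGradingService` as an assumed name for the real backend client, is:)

# Hypothetical sketch of the factory implied by MOCK_STAFF_GRADING.
from django.conf import settings

def grading_service():
    if settings.MOCK_STAFF_GRADING:
        return MockStaffGradingService()
    # STAFF_GRADING_INTERFACE carries url/username/password, per envs/dev.py above.
    return StaffGradingService(settings.STAFF_GRADING_INTERFACE)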

# TODO (cpennington): We need to figure out how envs/test.py can inject things
# into common.py so that we don't have to repeat this sort of thing
STATICFILES_DIRS = [
@@ -99,7 +103,7 @@ DATABASES = {
}

CACHES = {
    # This is the cache used for most things.
    # This is the cache used for most things.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',

@@ -1,63 +0,0 @@
/* IE 6 & 7 */

/* Proper fixed width for dashboard in IE6 */

.dashboard #content {
    *width: 768px;
}

.dashboard #content-main {
    *width: 535px;
}

/* IE 6 ONLY */

/* Keep header from flowing off the page */

#container {
    _position: static;
}

/* Put the right sidebars back on the page */

.colMS #content-related {
    _margin-right: 0;
    _margin-left: 10px;
    _position: static;
}

/* Put the left sidebars back on the page */

.colSM #content-related {
    _margin-right: 10px;
    _margin-left: -115px;
    _position: static;
}

.form-row {
    _height: 1%;
}

/* Fix right margin for changelist filters in IE6 */

#changelist-filter ul {
    _margin-right: -10px;
}

/* IE ignores min-height, but treats height as if it were min-height */

.change-list .filtered {
    _height: 400px;
}

/* IE doesn't know alpha transparency in PNGs */

.inline-deletelink {
    background: transparent url(../img/inline-delete-8bit.png) no-repeat;
}

/* IE7 doesn't support inline-block */
.change-list ul.toplinks li {
    zoom: 1;
    *display: inline;
}
lms/static/coffee/src/staff_grading/staff_grading.coffee (new file, 266 lines)
@@ -0,0 +1,266 @@
# wrap everything in a class in case we want to use it inside xmodules later

get_random_int: (min, max) ->
  return Math.floor(Math.random() * (max - min + 1)) + min

# states
state_grading = "grading"
state_graded = "graded"
state_no_data = "no_data"
state_error = "error"

class StaffGradingBackend
  constructor: (ajax_url, mock_backend) ->
    @ajax_url = ajax_url
    @mock_backend = mock_backend
    if @mock_backend
      @mock_cnt = 0

  mock: (cmd, data) ->
    # Return a mock response to cmd and data
    # should take a location as an argument
    if cmd == 'get_next'
      @mock_cnt++
      response =
        success: true
        problem_name: 'Problem 1'
        num_left: 3
        num_total: 5
        prompt: 'This is a fake prompt'
        submission: 'submission! ' + @mock_cnt
        rubric: 'A rubric! ' + @mock_cnt
        submission_id: @mock_cnt
        max_score: 2 + @mock_cnt % 3
        ml_error_info : 'ML accuracy info: ' + @mock_cnt

    else if cmd == 'save_grade'
      console.log("eval: #{data.score} pts, Feedback: #{data.feedback}")
      response =
        @mock('get_next', {})
    # get_problem_list
    # sends in a course_id and a grader_id
    # should get back a list of problem_ids, problem_names, num_left, num_total
    else if cmd == 'get_problem_list'
      response =
        success: true
        problem_list: [
          {location: 'i4x://MITx/3.091x/problem/open_ended_demo', \
            problem_name: "Problem 1", num_left: 3, num_total: 5},
          {location: 'i4x://MITx/3.091x/problem/open_ended_demo', \
            problem_name: "Problem 2", num_left: 1, num_total: 5}
        ]
    else
      response =
        success: false
        error: 'Unknown command ' + cmd

    if @mock_cnt % 5 == 0
      response =
        success: true
        message: 'No more submissions'


    if @mock_cnt % 7 == 0
      response =
        success: false
        error: 'An error for testing'

    return response


  post: (cmd, data, callback) ->
    if @mock_backend
      callback(@mock(cmd, data))
    else
      # TODO: replace with postWithPrefix when that's loaded
      $.post(@ajax_url + cmd, data, callback)


class StaffGrading
  constructor: (backend) ->
    @backend = backend

    # all the jquery selectors
    @error_container = $('.error-container')
    @message_container = $('.message-container')

    @prompt_container = $('.prompt-container')
    @prompt_wrapper = $('.prompt-wrapper')

    @submission_container = $('.submission-container')
    @submission_wrapper = $('.submission-wrapper')

    @rubric_container = $('.rubric-container')
    @rubric_wrapper = $('.rubric-wrapper')

    @feedback_area = $('.feedback-area')
    @score_selection_container = $('.score-selection-container')
    @submit_button = $('.submit-button')
    @ml_error_info_container = $('.ml-error-info-container')

    # model state
    @state = state_no_data
    @submission_id = null
    @prompt = ''
    @submission = ''
    @rubric = ''
    @error_msg = ''
    @message = ''
    @max_score = 0
    @ml_error_info = ''

    @score = null

    # action handlers
    @submit_button.click @submit

    # render initial state
    @render_view()

    # send initial request automatically
    @get_next_submission()


  setup_score_selection: =>
    # first, get rid of all the old inputs, if any.
    @score_selection_container.html('Choose score: ')

    # Now create new labels and inputs for each possible score.
    for score in [0..@max_score]
      id = 'score-' + score
      label = """<label for="#{id}">#{score}</label>"""

      input = """
              <input type="radio" name="score-selection" id="#{id}" value="#{score}"/>
              """  # " fix broken parsing in emacs
      @score_selection_container.append(input + label)

    # And now hook up an event handler again
    $("input[name='score-selection']").change @graded_callback


  set_button_text: (text) =>
    @submit_button.attr('value', text)

  graded_callback: (event) =>
    @score = event.target.value
    @state = state_graded
    @render_view()

  ajax_callback: (response) =>
    # always clear out errors and messages on transition.
    @error_msg = ''
    @message = ''

    if response.success
      if response.submission
        @data_loaded(response.prompt, response.submission, response.rubric, response.submission_id, response.max_score, response.ml_error_info)
      else
        @no_more(response.message)
    else
      @error(response.error)

    @render_view()

  get_next_submission: () ->
    @backend.post('get_next', {}, @ajax_callback)

  submit_and_get_next: () ->
    data =
      score: @score
      feedback: @feedback_area.val()
      submission_id: @submission_id

    @backend.post('save_grade', data, @ajax_callback)

  error: (msg) ->
    @error_msg = msg
    @state = state_error

  data_loaded: (prompt, submission, rubric, submission_id, max_score, ml_error_info) ->
    @prompt = prompt
    @submission = submission
    @rubric = rubric
    @submission_id = submission_id
    @feedback_area.val('')
    @max_score = max_score
    @score = null
    @ml_error_info = ml_error_info
    @state = state_grading
    if not @max_score?
      @error("No max score specified for submission.")

  no_more: (message) ->
    @prompt = null
    @submission = null
    @rubric = null
    @ml_error_info = null
    @submission_id = null
    @message = message
    @score = null
    @max_score = 0
    @state = state_no_data

  render_view: () ->
    # make the view elements match the state. Idempotent.
    show_grading_elements = false
    show_submit_button = true

    @message_container.html(@message)
    if @backend.mock_backend
      @message_container.append("<p>NOTE: Mocking backend.</p>")

    @error_container.html(@error_msg)

    if @state == state_error
      @set_button_text('Try loading again')

    else if @state == state_grading
      @ml_error_info_container.html(@ml_error_info)
      @prompt_container.html(@prompt)
      @submission_container.html(@submission)
      @rubric_container.html(@rubric)
      show_grading_elements = true

      # no submit button until user picks grade.
      show_submit_button = false

      @setup_score_selection()

    else if @state == state_graded
      show_grading_elements = true
      @set_button_text('Submit')

    else if @state == state_no_data
      @message_container.html(@message)
      @set_button_text('Re-check for submissions')

    else
      @error('System got into invalid state ' + @state)

    @submit_button.toggle(show_submit_button)
    @prompt_wrapper.toggle(show_grading_elements)
    @submission_wrapper.toggle(show_grading_elements)
    @rubric_wrapper.toggle(show_grading_elements)
    @ml_error_info_container.toggle(show_grading_elements)


  submit: (event) =>
    event.preventDefault()

    if @state == state_error
      @get_next_submission()
    else if @state == state_graded
      @submit_and_get_next()
    else if @state == state_no_data
      @get_next_submission()
    else
      @error('System got into invalid state for submission: ' + @state)


# for now, just create an instance and load it...
mock_backend = false
ajax_url = $('.staff-grading').data('ajax_url')
backend = new StaffGradingBackend(ajax_url, mock_backend)

$(document).ready(() -> new StaffGrading(backend))
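(Reading `ajax_callback` and `data_loaded` above gives the JSON contract the server-side `get_next` handler has to honor. As a plain-Python sketch of a conforming response, where only the keys are fixed by the frontend and every value is illustrative:)

# Shape of a successful get_next response, as consumed by ajax_callback above.
response = {
    'success': True,
    'prompt': '<p>Explain the diffusion result.</p>',
    'submission': 'student answer text',
    'rubric': '<ul><li>clarity</li></ul>',
    'submission_id': 42,
    'max_score': 4,
    'ml_error_info': '',
}
# When there is nothing left to grade, the backend instead sends:
empty = {'success': True, 'message': 'No more submissions'}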
lms/static/coffee/src/staff_grading/test_grading.html (new file, 45 lines)
@@ -0,0 +1,45 @@
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<html> <head>
<title></title>
<!-- <script src="http://code.jquery.com/jquery-latest.js"></script> -->
<script src="../../../admin/js/jquery.min.js"></script>
<script type="text/javascript" src="staff_grading.js"></script>
</head>

<body>
<div class="staff-grading" data-ajax_url="/some_url/">
  <h1>Staff grading</h1>

  <div class="error-container">
  </div>

  <div class="message-container">
  </div>

  <section class="submission-wrapper">
    <h3>Submission</h3>
    <div class="submission-container">
    </div>
  </section>

  <section class="rubric-wrapper">
    <h3>Rubric</h3>
    <div class="rubric-container">
    </div>

    <div class="evaluation">
      <textarea name="feedback" placeholder="Feedback for student..."
                class="feedback-area" cols="70" rows="10"></textarea>
      <p class="score-selection-container">
      </p>
    </div>

  </section>

  <div class="submission">
    <input type="button" value="Submit" class="submit-button" name="show"/>
  </div>

</div>

</body> </html>
@@ -43,6 +43,7 @@
@import "course/profile";
@import "course/gradebook";
@import "course/tabs";
@import "course/staff_grading";

// instructor
@import "course/instructor/instructor";

lms/static/sass/course/_staff_grading.scss (new file, 29 lines)
@@ -0,0 +1,29 @@
div.staff-grading {
  textarea.feedback-area {
    height: 100px;
    margin: 20px;
  }

  div {
    margin: 10px;
  }

  label {
    margin: 10px;
    padding: 5px;
    display: inline-block;
    min-width: 50px;
    background-color: #CCC;
    font-size: 1.5em;
  }

  /* Toggled State */
  input[type=radio]:checked + label {
    background: #666;
    color: white;
  }

  input[name='score-selection'] {
    display: none;
  }
}
lms/templates/instructor/staff_grading.html (new file, 64 lines)
@@ -0,0 +1,64 @@
<%inherit file="/main.html" />
<%block name="bodyclass">${course.css_class}</%block>
<%namespace name='static' file='/static_content.html'/>

<%block name="headextra">
  <%static:css group='course'/>
</%block>

<%block name="title"><title>${course.number} Staff Grading</title></%block>

<%include file="/courseware/course_navigation.html" args="active_page='staff_grading'" />

<%block name="js_extra">
  <%static:js group='staff_grading'/>
</%block>

<section class="container">

  <div class="staff-grading" data-ajax_url="${ajax_url}">
    <h1>Staff grading</h1>

    <div class="error-container">
    </div>

    <div class="message-container">
    </div>

    <div class="ml-error-info-container">
    </div>

    <section class="prompt-wrapper">
      <h3>Question prompt</h3>
      <div class="prompt-container">
      </div>
    </section>

    <section class="submission-wrapper">
      <h3>Submission</h3>
      <div class="submission-container">
      </div>
    </section>

    <section class="rubric-wrapper">
      <h3>Rubric</h3>
      <div class="rubric-container">
      </div>

      <div class="evaluation">

        <textarea name="feedback" placeholder="Feedback for student..."
                  class="feedback-area" cols="70" rows="10"></textarea>
        <p class="score-selection-container">
        </p>
      </div>

    </section>

    <div class="submission">
      <input type="button" value="Submit" class="submit-button" name="show"/>
    </div>

  </div>

</section>
lms/templates/open_ended_error.html (new file, 12 lines)
@@ -0,0 +1,12 @@
<section>
  <div class="shortform">
    <div class="result-errors">
      There was an error with your submission. Please contact course staff.
    </div>
  </div>
  <div class="longform">
    <div class="result-errors">
      ${errors}
    </div>
  </div>
</section>
lms/templates/open_ended_feedback.html (new file, 16 lines)
@@ -0,0 +1,16 @@
<section>
  <header>Feedback</header>
  <div class="shortform">
    <div class="result-output">
      <p>Score: ${score}</p>
      % if grader_type == "ML":
        <p>Check below for full feedback:</p>
      % endif
    </div>
  </div>
  <div class="longform">
    <div class="result-output">
      ${ feedback | n}
    </div>
  </div>
</section>
@@ -165,7 +165,7 @@ if settings.COURSEWARE_ENABLED:
    # input types system so that previews can be context-specific.
    # Unfortunately, we don't have time to think through the right way to do
    # that (and implement it), and it's not a terrible thing to provide a
    # generic chemican-equation rendering service.
    # generic chemical-equation rendering service.
    url(r'^preview/chemcalc', 'courseware.module_render.preview_chemcalc',
        name='preview_chemcalc'),

@@ -234,6 +234,12 @@ if settings.COURSEWARE_ENABLED:
        'instructor.views.grade_summary', name='grade_summary'),
    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/enroll_students$',
        'instructor.views.enroll_students', name='enroll_students'),
    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/staff_grading$',
        'instructor.views.staff_grading', name='staff_grading'),
    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/staff_grading/get_next$',
        'instructor.staff_grading_service.get_next', name='staff_grading_get_next'),
    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/staff_grading/save_grade$',
        'instructor.staff_grading_service.save_grade', name='staff_grading_save_grade'),
)

# discussion forums live within courseware, so courseware must be enabled first
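(Taken together, the three routes above are the whole staff-grading surface area. A quick way to poke at them from a Django shell, sketched with the test client; the staff account name and password here are assumptions, not anything defined in this PR:)

# Sketch: exercising the new endpoints with Django's test client.
# Assumes a user "staff@test.com"/"foo" exists and is in the course staff group.
from django.test.client import Client

c = Client()
c.login(username='staff@test.com', password='foo')

course_id = 'edX/toy/2012_Fall'
r = c.get('/courses/%s/staff_grading/get_next' % course_id)
print r.content  # JSON: success, submission, rubric, submission_id, max_score, ...

r = c.post('/courses/%s/staff_grading/save_grade' % course_id,
           {'score': '2', 'feedback': 'nice', 'submission_id': '1'})
print r.content  # JSON for the next submission to grade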