Merge branch 'feature/cale/cms-master' of github.com:MITx/mitx into feature/cdodge/import-grading-policy
This commit is contained in:
@@ -27,7 +27,7 @@ def get_course_location_for_item(location):
|
||||
raise BaseException('Could not find course at {0}'.format(course_search_location))
|
||||
|
||||
if found_cnt > 1:
|
||||
raise BaseException('Found more than one course at {0}. There should only be one!!!'.format(course_search_location))
|
||||
raise BaseException('Found more than one course at {0}. There should only be one!!! Dump = {1}'.format(course_search_location, courses))
|
||||
|
||||
location = courses[0].location
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import sys
|
||||
import time
|
||||
import tarfile
|
||||
import shutil
|
||||
import tempfile
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
from uuid import uuid4
|
||||
@@ -947,6 +948,12 @@ def create_new_course(request):
|
||||
if existing_course is not None:
|
||||
return HttpResponse(json.dumps({'ErrMsg': 'There is already a course defined with this name.'}))
|
||||
|
||||
course_search_location = ['i4x', dest_location.org, dest_location.course, 'course', None]
|
||||
courses = modulestore().get_items(course_search_location)
|
||||
|
||||
if len(courses) > 0:
|
||||
return HttpResponse(json.dumps({'ErrMsg': 'There is already a course defined with the same organization and course number.'}))
|
||||
|
||||
new_course = modulestore('direct').clone_item(template, dest_location)
|
||||
|
||||
if display_name is not None:
|
||||
@@ -982,7 +989,11 @@ def import_course(request, org, course, name):
|
||||
|
||||
data_root = path(settings.GITHUB_REPO_ROOT)
|
||||
|
||||
temp_filepath = data_root / filename
|
||||
course_dir = data_root / "{0}-{1}-{2}".format(org, course, name)
|
||||
if not course_dir.isdir():
|
||||
os.mkdir(course_dir)
|
||||
|
||||
temp_filepath = course_dir / filename
|
||||
|
||||
logging.debug('importing course to {0}'.format(temp_filepath))
|
||||
|
||||
@@ -992,32 +1003,42 @@ def import_course(request, org, course, name):
|
||||
temp_file.write(chunk)
|
||||
temp_file.close()
|
||||
|
||||
# @todo: don't assume the top-level directory that was unziped was the same name (but without .tar.gz)
|
||||
course_dir = filename.replace('.tar.gz', '')
|
||||
|
||||
tf = tarfile.open(temp_filepath)
|
||||
if (data_root / course_dir).isdir():
|
||||
shutil.rmtree(data_root / course_dir)
|
||||
tf.extractall(data_root + '/')
|
||||
tf.extractall(course_dir + '/')
|
||||
|
||||
os.remove(temp_filepath) # remove the .tar.gz file
|
||||
# find the 'course.xml' file
|
||||
|
||||
for r,d,f in os.walk(course_dir):
|
||||
for files in f:
|
||||
if files == 'course.xml':
|
||||
break
|
||||
if files == 'course.xml':
|
||||
break
|
||||
|
||||
with open(data_root / course_dir / 'course.xml', 'r') as course_file:
|
||||
if files != 'course.xml':
|
||||
return HttpResponse(json.dumps({'ErrMsg': 'Could not find the course.xml file in the package.'}))
|
||||
|
||||
logging.debug('found course.xml at {0}'.format(r))
|
||||
|
||||
if r != course_dir:
|
||||
for fname in os.listdir(r):
|
||||
shutil.move(r/fname, course_dir)
|
||||
|
||||
with open(course_dir / 'course.xml', 'r') as course_file:
|
||||
course_data = etree.parse(course_file, parser=edx_xml_parser)
|
||||
course_data_root = course_data.getroot()
|
||||
course_data_root.set('org', org)
|
||||
course_data_root.set('course', course)
|
||||
course_data_root.set('url_name', name)
|
||||
|
||||
with open(data_root / course_dir / 'course.xml', 'w') as course_file:
|
||||
with open(course_dir / 'course.xml', 'w') as course_file:
|
||||
course_data.write(course_file)
|
||||
|
||||
module_store, course_items = import_from_xml(modulestore('direct'), settings.GITHUB_REPO_ROOT,
|
||||
[course_dir], load_error_modules=False, static_content_store=contentstore())
|
||||
|
||||
# remove content directory - we *shouldn't* need this any longer :-)
|
||||
shutil.rmtree(data_root / course_dir)
|
||||
# we can blow this away when we're done importing.
|
||||
shutil.rmtree(course_dir)
|
||||
|
||||
logging.debug('new course at {0}'.format(course_items[0].location))
|
||||
|
||||
|
||||
@@ -10,7 +10,8 @@ var $spinner;
|
||||
$(document).ready(function() {
|
||||
$body = $('body');
|
||||
$modal = $('.history-modal');
|
||||
$modalCover = $('.modal-cover');
|
||||
$modalCover = $('<div class="modal-cover">');
|
||||
$body.append($modalCover);
|
||||
$newComponentItem = $('.new-component-item');
|
||||
$newComponentTypePicker = $('.new-component');
|
||||
$newComponentTemplatePickers = $('.new-component-templates');
|
||||
@@ -102,8 +103,35 @@ $(document).ready(function() {
|
||||
// pretty wacky stuff to happen
|
||||
$('.file-input').bind('change', startUpload);
|
||||
$('.upload-modal .choose-file-button').bind('click', showFileSelectionMenu);
|
||||
|
||||
$body.on('click', '.section-published-date .edit-button', editSectionPublishDate);
|
||||
$body.on('click', '.section-published-date .schedule-button', editSectionPublishDate);
|
||||
$body.on('click', '.edit-subsection-publish-settings .save-button', saveSetSectionScheduleDate);
|
||||
$body.on('click', '.edit-subsection-publish-settings .cancel-button', hideModal)
|
||||
$body.on('change', '.edit-subsection-publish-settings .start-date', function() {
|
||||
if($('.edit-subsection-publish-settings').find('.start-time').val() == '') {
|
||||
$('.edit-subsection-publish-settings').find('.start-time').val('12:00am');
|
||||
}
|
||||
});
|
||||
$('.edit-subsection-publish-settings').on('change', '.start-date, .start-time', function() {
|
||||
$('.edit-subsection-publish-settings').find('.save-button').show();
|
||||
});
|
||||
});
|
||||
|
||||
function editSectionPublishDate(e) {
|
||||
e.preventDefault();
|
||||
$modal = $('.edit-subsection-publish-settings').show();
|
||||
$modal = $('.edit-subsection-publish-settings').show();
|
||||
$modal.attr('data-id', $(this).attr('data-id'));
|
||||
$modal.find('.start-date').val($(this).attr('data-date'));
|
||||
$modal.find('.start-time').val($(this).attr('data-time'));
|
||||
if($modal.find('.start-date').val() == '' && $modal.find('.start-time').val() == '') {
|
||||
$modal.find('.save-button').hide();
|
||||
}
|
||||
$modal.find('.section-name').html('"' + $(this).closest('.courseware-section').find('.section-name-span').text() + '"');
|
||||
$modalCover.show();
|
||||
}
|
||||
|
||||
function showImportSubmit(e) {
|
||||
var filepath = $(this).val();
|
||||
if(filepath.substr(filepath.length - 6, 6) == 'tar.gz') {
|
||||
@@ -387,7 +415,9 @@ function _deleteItem($el) {
|
||||
|
||||
function showUploadModal(e) {
|
||||
e.preventDefault();
|
||||
$('.upload-modal').show();
|
||||
$modal = $('.upload-modal').show();
|
||||
$('.file-input').bind('change', startUpload);
|
||||
$('.upload-modal .choose-file-button').bind('click', showFileSelectionMenu);
|
||||
$modalCover.show();
|
||||
}
|
||||
|
||||
@@ -448,8 +478,10 @@ function markAsLoaded() {
|
||||
}
|
||||
|
||||
function hideModal(e) {
|
||||
e.preventDefault();
|
||||
$('.modal').hide();
|
||||
if(e) {
|
||||
e.preventDefault();
|
||||
}
|
||||
$modal.hide();
|
||||
$modalCover.hide();
|
||||
}
|
||||
|
||||
@@ -686,7 +718,7 @@ function saveEditSectionName(e) {
|
||||
e.preventDefault();
|
||||
|
||||
id = $(this).closest("section.courseware-section").data("id");
|
||||
display_name = $(this).prev('.edit-section-name').val();
|
||||
display_name = $.trim($(this).prev('.edit-section-name').val());
|
||||
|
||||
if (display_name == '') {
|
||||
alert("You must specify a name before saving.")
|
||||
@@ -726,15 +758,15 @@ function cancelSetSectionScheduleDate(e) {
|
||||
function saveSetSectionScheduleDate(e) {
|
||||
e.preventDefault();
|
||||
|
||||
input_date = $(this).siblings('input.date').val();
|
||||
input_time = $(this).siblings('input.time').val();
|
||||
input_date = $('.edit-subsection-publish-settings .start-date').val();
|
||||
input_time = $('.edit-subsection-publish-settings .start-time').val();
|
||||
|
||||
start = getEdxTimeFromDateTimeVals(input_date, input_time);
|
||||
|
||||
id = $(this).closest("section.courseware-section").data("id");
|
||||
id = $modal.attr('data-id');
|
||||
var $_this = $(this);
|
||||
|
||||
// call into server to commit the new order
|
||||
// call into server to commit the new order
|
||||
$.ajax({
|
||||
url: "/save_item",
|
||||
type: "POST",
|
||||
@@ -743,7 +775,18 @@ function saveSetSectionScheduleDate(e) {
|
||||
data:JSON.stringify({ 'id' : id, 'metadata' : {'start' : start}, 'data': null, 'children' : null})
|
||||
}).success(function()
|
||||
{
|
||||
alert('Your changes have been saved.');
|
||||
location.reload();
|
||||
var $thisSection = $('.courseware-section[data-id="' + id + '"]');
|
||||
$thisSection.find('.section-published-date').html('<span class="published-status"><strong>Will Release:</strong> ' + input_date + ' at ' + input_time + '</span><a href="#" class="edit-button" data-date="' + input_date + '" data-time="' + input_time + '" data-id="' + id + '">Edit</a>');
|
||||
$thisSection.find('.section-published-date').animate({
|
||||
'background-color': 'rgb(182,37,104)'
|
||||
}, 300).animate({
|
||||
'background-color': '#edf1f5'
|
||||
}, 300).animate({
|
||||
'background-color': 'rgb(182,37,104)'
|
||||
}, 300).animate({
|
||||
'background-color': '#edf1f5'
|
||||
}, 300);
|
||||
|
||||
hideModal();
|
||||
});
|
||||
}
|
||||
|
||||
@@ -28,14 +28,70 @@ input.courseware-unit-search-input {
|
||||
|
||||
&.collapsed {
|
||||
padding-bottom: 0;
|
||||
}
|
||||
|
||||
header {
|
||||
height: 47px;
|
||||
label {
|
||||
float: left;
|
||||
line-height: 29px;
|
||||
}
|
||||
|
||||
.datepair {
|
||||
float: left;
|
||||
margin-left: 10px;
|
||||
}
|
||||
|
||||
.section-published-date {
|
||||
position: absolute;
|
||||
top: 19px;
|
||||
right: 90px;
|
||||
padding: 4px 10px;
|
||||
border-radius: 3px;
|
||||
background: $lightGrey;
|
||||
text-align: right;
|
||||
|
||||
.published-status {
|
||||
font-size: 12px;
|
||||
margin-right: 15px;
|
||||
|
||||
strong {
|
||||
font-weight: 700;
|
||||
}
|
||||
}
|
||||
|
||||
h4 {
|
||||
display: none !important;
|
||||
|
||||
.schedule-button {
|
||||
@include blue-button;
|
||||
}
|
||||
|
||||
.edit-button {
|
||||
@include blue-button;
|
||||
}
|
||||
|
||||
.schedule-button,
|
||||
.edit-button {
|
||||
font-size: 11px;
|
||||
padding: 3px 15px 5px;
|
||||
}
|
||||
}
|
||||
|
||||
.datepair .date,
|
||||
.datepair .time {
|
||||
padding-left: 0;
|
||||
padding-right: 0;
|
||||
border: none;
|
||||
background: none;
|
||||
@include box-shadow(none);
|
||||
font-size: 13px;
|
||||
font-weight: 700;
|
||||
color: $blue;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.datepair .date {
|
||||
width: 80px;
|
||||
}
|
||||
|
||||
.datepair .time {
|
||||
width: 65px;
|
||||
}
|
||||
|
||||
&.collapsed .subsection-list,
|
||||
@@ -45,15 +101,15 @@ input.courseware-unit-search-input {
|
||||
}
|
||||
|
||||
header {
|
||||
height: 67px;
|
||||
height: 75px;
|
||||
|
||||
.item-details {
|
||||
float: left;
|
||||
padding: 10px 0 0;
|
||||
padding: 21px 0 0;
|
||||
}
|
||||
|
||||
.item-actions {
|
||||
margin-top: 11px;
|
||||
margin-top: 21px;
|
||||
margin-right: 12px;
|
||||
|
||||
.edit-button,
|
||||
@@ -64,7 +120,7 @@ input.courseware-unit-search-input {
|
||||
|
||||
.expand-collapse-icon {
|
||||
float: left;
|
||||
margin: 16px 6px 16px 16px;
|
||||
margin: 29px 6px 16px 16px;
|
||||
@include transition(none);
|
||||
}
|
||||
|
||||
@@ -74,11 +130,37 @@ input.courseware-unit-search-input {
|
||||
}
|
||||
|
||||
h3 {
|
||||
font-size: 16px;
|
||||
font-size: 19px;
|
||||
font-weight: 700;
|
||||
color: $blue;
|
||||
}
|
||||
|
||||
.section-name-span {
|
||||
cursor: pointer;
|
||||
@include transition(color .15s);
|
||||
|
||||
&:hover {
|
||||
color: $orange;
|
||||
}
|
||||
}
|
||||
|
||||
.section-name-edit {
|
||||
input {
|
||||
font-size: 16px;
|
||||
}
|
||||
|
||||
.save-button {
|
||||
@include blue-button;
|
||||
padding: 7px 20px 7px;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
.cancel-button {
|
||||
@include white-button;
|
||||
padding: 7px 20px 7px;
|
||||
}
|
||||
}
|
||||
|
||||
h4 {
|
||||
font-size: 12px;
|
||||
color: #878e9d;
|
||||
@@ -161,3 +243,57 @@ input.courseware-unit-search-input {
|
||||
.preview {
|
||||
background: url(../img/preview.jpg) center top no-repeat;
|
||||
}
|
||||
|
||||
.edit-subsection-publish-settings {
|
||||
display: none;
|
||||
position: fixed;
|
||||
top: 100px;
|
||||
left: 50%;
|
||||
z-index: 99999;
|
||||
width: 600px;
|
||||
margin-left: -300px;
|
||||
background: #fff;
|
||||
text-align: center;
|
||||
|
||||
.settings {
|
||||
padding: 40px;
|
||||
}
|
||||
|
||||
h3 {
|
||||
font-size: 34px;
|
||||
font-weight: 300;
|
||||
}
|
||||
|
||||
.picker {
|
||||
margin: 30px 0 65px;
|
||||
}
|
||||
|
||||
.description {
|
||||
margin-top: 30px;
|
||||
font-size: 14px;
|
||||
line-height: 20px;
|
||||
}
|
||||
|
||||
strong {
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.start-date,
|
||||
.start-time {
|
||||
font-size: 19px;
|
||||
}
|
||||
|
||||
.save-button {
|
||||
@include blue-button;
|
||||
margin-right: 10px;
|
||||
}
|
||||
|
||||
.cancel-button {
|
||||
@include white-button;
|
||||
}
|
||||
|
||||
.save-button,
|
||||
.cancel-button {
|
||||
font-size: 16px;
|
||||
}
|
||||
}
|
||||
@@ -21,7 +21,7 @@
|
||||
%if allow_actions:
|
||||
<div class="new-user-form">
|
||||
<label>email: </label><input type="text" id="email" class="email-input" autocomplete="off" placeholder="email@example.com">
|
||||
<a href="#" id="add_user" class="add-button">save</a>
|
||||
<a href="#" id="add_user" class="add-button">add user</a>
|
||||
<a href="#" class="cancel-button">cancel</a>
|
||||
</div>
|
||||
%endif
|
||||
|
||||
@@ -56,6 +56,20 @@
|
||||
</%block>
|
||||
|
||||
<%block name="content">
|
||||
<div class="edit-subsection-publish-settings">
|
||||
<div class="settings">
|
||||
<h3>Section Release Date</h3>
|
||||
<div class="picker datepair">
|
||||
<input class="start-date date" type="text" name="start_date" value="" placeholder="MM/DD/YYYY" class="date" size='15' autocomplete="off"/>
|
||||
<input class="start-time time" type="text" name="start_time" value="" placeholder="HH:MM" class="time" size='10' autocomplete="off"/>
|
||||
<div class="description">
|
||||
<p>On the date set above, this section – <strong class="section-name"></strong> – will be released to students along with the 5 subsections within it. Any units marked private will only be visible to admins.</p>
|
||||
</div>
|
||||
</div>
|
||||
<a href="#" class="save-button">Save</a><a href="#" class="cancel-button">Cancel</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="main-wrapper">
|
||||
<div class="inner-wrapper">
|
||||
<h1>Courseware</h1>
|
||||
@@ -74,23 +88,19 @@
|
||||
<a href="#" class="save-button edit-section-name-save">Save</a><a href="#" class="cancel-button edit-section-name-cancel">Cancel</a>
|
||||
</div>
|
||||
</h3>
|
||||
<h4 class='section-published-date'>
|
||||
<div class="section-published-date">
|
||||
<%
|
||||
start_date = datetime.fromtimestamp(mktime(section.start)) if section.start is not None else None
|
||||
start_date_str = start_date.strftime('%m/%d/%Y') if start_date is not None else ''
|
||||
start_time_str = start_date.strftime('%H:%M') if start_date is not None else ''
|
||||
%>
|
||||
%if start_date is None:
|
||||
<strong>Unscheduled:</strong>
|
||||
<a href="#" class="set-publish-date">click here to set</a>
|
||||
<span class="published-status">This section has not been released.</span>
|
||||
<a href="#" class="schedule-button" data-date="" data-time="" data-id="${section.location}">Schedule</a>
|
||||
%else:
|
||||
<strong>${start_date_str} at ${start_time_str}</strong> <a href="#" class="set-publish-date">click here to edit</a>
|
||||
<span class="published-status"><strong>Will Release:</strong> ${start_date_str} at ${start_time_str}</span>
|
||||
<a href="#" class="edit-button" data-date="${start_date_str}" data-time="${start_time_str}" data-id="${section.location}">Edit</a>
|
||||
%endif
|
||||
</h4>
|
||||
<div class="datepair" data-language="javascript" style="display: none">
|
||||
<input type="text" name="start_date" value="${start_date_str}" placeholder="MM/DD/YYYY" class="date" size='15' autocomplete="off"/>
|
||||
<input type="text" name="start_time" value="${start_time_str}" placeholder="HH:MM" class="time" size='10' autocomplete="off"/>
|
||||
<a href="#" class="save-button edit-section-start-save">Save</a><a href="#" class="cancel-button edit-section-start-cancel">Cancel</a>
|
||||
</div>
|
||||
</div>
|
||||
<div class="item-actions">
|
||||
|
||||
123
common/djangoapps/external_auth/djangostore.py
Normal file
123
common/djangoapps/external_auth/djangostore.py
Normal file
@@ -0,0 +1,123 @@
|
||||
"""A openid store using django cache"""
|
||||
|
||||
from openid.store.interface import OpenIDStore
|
||||
from openid.store import nonce
|
||||
|
||||
from django.core.cache import cache
|
||||
|
||||
import logging
|
||||
import time
|
||||
|
||||
DEFAULT_ASSOCIATIONS_TIMEOUT = 60
|
||||
DEFAULT_NONCE_TIMEOUT = 600
|
||||
|
||||
ASSOCIATIONS_KEY_PREFIX = 'openid.provider.associations.'
|
||||
NONCE_KEY_PREFIX = 'openid.provider.nonce.'
|
||||
|
||||
log = logging.getLogger('DjangoOpenIDStore')
|
||||
|
||||
|
||||
def get_url_key(server_url):
|
||||
key = ASSOCIATIONS_KEY_PREFIX + server_url
|
||||
return key
|
||||
|
||||
|
||||
def get_nonce_key(server_url, timestamp, salt):
|
||||
key = '{prefix}{url}.{ts}.{salt}'.format(prefix=NONCE_KEY_PREFIX,
|
||||
url=server_url,
|
||||
ts=timestamp,
|
||||
salt=salt)
|
||||
return key
|
||||
|
||||
|
||||
class DjangoOpenIDStore(OpenIDStore):
|
||||
def __init__(self):
|
||||
log.info('DjangoStore cache:' + str(cache.__class__))
|
||||
|
||||
def storeAssociation(self, server_url, assoc):
|
||||
key = get_url_key(server_url)
|
||||
|
||||
log.info('storeAssociation {0}'.format(key))
|
||||
|
||||
associations = cache.get(key, {})
|
||||
associations[assoc.handle] = assoc
|
||||
|
||||
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
|
||||
|
||||
def getAssociation(self, server_url, handle=None):
|
||||
key = get_url_key(server_url)
|
||||
|
||||
log.info('getAssociation {0}'.format(key))
|
||||
|
||||
associations = cache.get(key, {})
|
||||
|
||||
assoc = None
|
||||
|
||||
if handle is None:
|
||||
# get best association
|
||||
valid_assocs = [a for a in associations if a.getExpiresIn() > 0]
|
||||
if valid_assocs:
|
||||
valid_assocs.sort(lambda a: a.getExpiresIn(), reverse=True)
|
||||
assoc = valid_assocs.sort[0]
|
||||
else:
|
||||
assoc = associations.get(handle)
|
||||
|
||||
# check expiration and remove if it has expired
|
||||
if assoc and assoc.getExpiresIn() <= 0:
|
||||
if handle is None:
|
||||
cache.delete(key)
|
||||
else:
|
||||
associations.pop(handle)
|
||||
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
|
||||
assoc = None
|
||||
|
||||
return assoc
|
||||
|
||||
def removeAssociation(self, server_url, handle):
|
||||
key = get_url_key(server_url)
|
||||
|
||||
log.info('removeAssociation {0}'.format(key))
|
||||
|
||||
associations = cache.get(key, {})
|
||||
|
||||
removed = False
|
||||
|
||||
if associations:
|
||||
if handle is None:
|
||||
cache.delete(key)
|
||||
removed = True
|
||||
else:
|
||||
assoc = associations.pop(handle)
|
||||
if assoc:
|
||||
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
|
||||
removed = True
|
||||
|
||||
return removed
|
||||
|
||||
def useNonce(self, server_url, timestamp, salt):
|
||||
key = get_nonce_key(server_url, timestamp, salt)
|
||||
|
||||
log.info('useNonce {0}'.format(key))
|
||||
|
||||
if abs(timestamp - time.time()) > nonce.SKEW:
|
||||
return False
|
||||
|
||||
anonce = cache.get(key)
|
||||
|
||||
found = False
|
||||
|
||||
if anonce is None:
|
||||
cache.set(key, '-', DEFAULT_NONCE_TIMEOUT)
|
||||
found = False
|
||||
else:
|
||||
found = True
|
||||
|
||||
return found
|
||||
|
||||
def cleanupNonces(self):
|
||||
# not necesary, keys will timeout
|
||||
return 0
|
||||
|
||||
def cleanupAssociations(self):
|
||||
# not necesary, keys will timeout
|
||||
return 0
|
||||
@@ -7,6 +7,7 @@ import string
|
||||
import fnmatch
|
||||
|
||||
from external_auth.models import ExternalAuthMap
|
||||
from external_auth.djangostore import DjangoOpenIDStore
|
||||
|
||||
from django.conf import settings
|
||||
from django.contrib.auth import REDIRECT_FIELD_NAME, authenticate, login
|
||||
@@ -30,7 +31,6 @@ from openid.consumer.consumer import SUCCESS
|
||||
|
||||
from openid.server.server import Server
|
||||
from openid.server.trustroot import TrustRoot
|
||||
from openid.store.filestore import FileOpenIDStore
|
||||
from openid.extensions import ax, sreg
|
||||
|
||||
import student.views as student_views
|
||||
@@ -307,10 +307,7 @@ def get_xrds_url(resource, request):
|
||||
"""
|
||||
Return the XRDS url for a resource
|
||||
"""
|
||||
host = request.META['HTTP_HOST']
|
||||
|
||||
if not host.endswith('edx.org'):
|
||||
return None
|
||||
host = request.get_host()
|
||||
|
||||
location = host + '/openid/provider/' + resource + '/'
|
||||
|
||||
@@ -332,6 +329,8 @@ def add_openid_simple_registration(request, response, data):
|
||||
sreg_data['email'] = data['email']
|
||||
elif field == 'fullname' and 'fullname' in data:
|
||||
sreg_data['fullname'] = data['fullname']
|
||||
elif field == 'nickname' and 'nickname' in data:
|
||||
sreg_data['nickname'] = data['nickname']
|
||||
|
||||
# construct sreg response
|
||||
sreg_response = sreg.SRegResponse.extractResponse(sreg_request,
|
||||
@@ -436,7 +435,7 @@ def provider_login(request):
|
||||
return default_render_failure(request, "Invalid OpenID request")
|
||||
|
||||
# initialize store and server
|
||||
store = FileOpenIDStore('/tmp/openid_provider')
|
||||
store = DjangoOpenIDStore()
|
||||
server = Server(store, endpoint)
|
||||
|
||||
# handle OpenID request
|
||||
@@ -525,13 +524,22 @@ def provider_login(request):
|
||||
url = endpoint + urlquote(user.username)
|
||||
response = openid_request.answer(True, None, url)
|
||||
|
||||
return provider_respond(server,
|
||||
openid_request,
|
||||
response,
|
||||
{
|
||||
'fullname': profile.name,
|
||||
'email': user.email
|
||||
})
|
||||
# TODO: for CS50 we are forcibly returning the username
|
||||
# instead of fullname. In the OpenID simple registration
|
||||
# extension, we don't have to return any fields we don't
|
||||
# want to, even if they were marked as required by the
|
||||
# Consumer. The behavior of what to do when there are
|
||||
# missing fields is up to the Consumer. The proper change
|
||||
# should only return the username, however this will likely
|
||||
# break the CS50 client. Temporarily we will be returning
|
||||
# username filling in for fullname in addition to username
|
||||
# as sreg nickname.
|
||||
results = {
|
||||
'nickname': user.username,
|
||||
'email': user.email,
|
||||
'fullname': user.username
|
||||
}
|
||||
return provider_respond(server, openid_request, response, results)
|
||||
|
||||
request.session['openid_error'] = True
|
||||
msg = "Login failed - Account not active for user {0}".format(username)
|
||||
|
||||
1
common/djangoapps/status/__init__.py
Normal file
1
common/djangoapps/status/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
43
common/djangoapps/status/status.py
Normal file
43
common/djangoapps/status/status.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
A tiny app that checks for a status message.
|
||||
"""
|
||||
|
||||
from django.conf import settings
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def get_site_status_msg(course_id):
|
||||
"""
|
||||
Look for a file settings.STATUS_MESSAGE_PATH. If found, read it,
|
||||
parse as json, and do the following:
|
||||
|
||||
* if there is a key 'global', include that in the result list.
|
||||
* if course is not None, and there is a key for course.id, add that to the result list.
|
||||
* return "<br/>".join(result)
|
||||
|
||||
Otherwise, return None.
|
||||
|
||||
If something goes wrong, returns None. ("is there a status msg?" logic is
|
||||
not allowed to break the entire site).
|
||||
"""
|
||||
try:
|
||||
if os.path.isfile(settings.STATUS_MESSAGE_PATH):
|
||||
with open(settings.STATUS_MESSAGE_PATH) as f:
|
||||
content = f.read()
|
||||
else:
|
||||
return None
|
||||
|
||||
status_dict = json.loads(content)
|
||||
msg = status_dict.get('global', None)
|
||||
if course_id in status_dict:
|
||||
msg = msg + "<br>" if msg else ''
|
||||
msg += status_dict[course_id]
|
||||
|
||||
return msg
|
||||
except:
|
||||
log.exception("Error while getting a status message.")
|
||||
return None
|
||||
90
common/djangoapps/status/tests.py
Normal file
90
common/djangoapps/status/tests.py
Normal file
@@ -0,0 +1,90 @@
|
||||
from django.conf import settings
|
||||
from django.test import TestCase
|
||||
import os
|
||||
from override_settings import override_settings
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from status import get_site_status_msg
|
||||
|
||||
# Get a name where we can put test files
|
||||
TMP_FILE = NamedTemporaryFile(delete=False)
|
||||
TMP_NAME = TMP_FILE.name
|
||||
# Close it--we just want the path.
|
||||
TMP_FILE.close()
|
||||
|
||||
|
||||
@override_settings(STATUS_MESSAGE_PATH=TMP_NAME)
|
||||
class TestStatus(TestCase):
|
||||
"""Test that the get_site_status_msg function does the right thing"""
|
||||
|
||||
no_file = None
|
||||
|
||||
invalid_json = """{
|
||||
"global" : "Hello, Globe",
|
||||
}"""
|
||||
|
||||
global_only = """{
|
||||
"global" : "Hello, Globe"
|
||||
}"""
|
||||
|
||||
toy_only = """{
|
||||
"edX/toy/2012_Fall" : "A toy story"
|
||||
}"""
|
||||
|
||||
global_and_toy = """{
|
||||
"global" : "Hello, Globe",
|
||||
"edX/toy/2012_Fall" : "A toy story"
|
||||
}"""
|
||||
|
||||
|
||||
# json to use, expected results for course=None (e.g. homepage),
|
||||
# for toy course, for full course. Note that get_site_status_msg
|
||||
# is supposed to return global message even if course=None. The
|
||||
# template just happens to not display it outside the courseware
|
||||
# at the moment...
|
||||
checks = [
|
||||
(no_file, None, None, None),
|
||||
(invalid_json, None, None, None),
|
||||
(global_only, "Hello, Globe", "Hello, Globe", "Hello, Globe"),
|
||||
(toy_only, None, "A toy story", None),
|
||||
(global_and_toy, "Hello, Globe", "Hello, Globe<br>A toy story", "Hello, Globe"),
|
||||
]
|
||||
|
||||
def setUp(self):
|
||||
"""
|
||||
Fake course ids, since we don't have to have full django
|
||||
settings (common tests run without the lms settings imported)
|
||||
"""
|
||||
self.full_id = 'edX/full/2012_Fall'
|
||||
self.toy_id = 'edX/toy/2012_Fall'
|
||||
|
||||
def create_status_file(self, contents):
|
||||
"""
|
||||
Write contents to settings.STATUS_MESSAGE_PATH.
|
||||
"""
|
||||
with open(settings.STATUS_MESSAGE_PATH, 'w') as f:
|
||||
f.write(contents)
|
||||
|
||||
def remove_status_file(self):
|
||||
"""Delete the status file if it exists"""
|
||||
if os.path.exists(settings.STATUS_MESSAGE_PATH):
|
||||
os.remove(settings.STATUS_MESSAGE_PATH)
|
||||
|
||||
def tearDown(self):
|
||||
self.remove_status_file()
|
||||
|
||||
def test_get_site_status_msg(self):
|
||||
"""run the tests"""
|
||||
for (json_str, exp_none, exp_toy, exp_full) in self.checks:
|
||||
|
||||
self.remove_status_file()
|
||||
if json_str:
|
||||
self.create_status_file(json_str)
|
||||
|
||||
print "checking results for {0}".format(json_str)
|
||||
print "course=None:"
|
||||
self.assertEqual(get_site_status_msg(None), exp_none)
|
||||
print "course=toy:"
|
||||
self.assertEqual(get_site_status_msg(self.toy_id), exp_toy)
|
||||
print "course=full:"
|
||||
self.assertEqual(get_site_status_msg(self.full_id), exp_full)
|
||||
37
common/djangoapps/student/management/commands/set_staff.py
Normal file
37
common/djangoapps/student/management/commands/set_staff.py
Normal file
@@ -0,0 +1,37 @@
|
||||
from django.contrib.auth.models import User
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
import re
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
args = '<user/email user/email ...>'
|
||||
help = """
|
||||
This command will set isstaff to true for one or more users.
|
||||
Lookup by username or email address, assumes usernames
|
||||
do not look like email addresses.
|
||||
"""
|
||||
|
||||
def handle(self, *args, **kwargs):
|
||||
|
||||
if len(args) < 1:
|
||||
print Command.help
|
||||
return
|
||||
|
||||
for user in args:
|
||||
|
||||
if re.match('[^@]+@[^@]+\.[^@]+', user):
|
||||
try:
|
||||
v = User.objects.get(email=user)
|
||||
except:
|
||||
raise CommandError("User {0} does not exist".format(
|
||||
user))
|
||||
else:
|
||||
try:
|
||||
v = User.objects.get(username=user)
|
||||
except:
|
||||
raise CommandError("User {0} does not exist".format(
|
||||
user))
|
||||
|
||||
v.is_staff = True
|
||||
v.save()
|
||||
@@ -262,10 +262,15 @@ def login_user(request, error=""):
|
||||
try_change_enrollment(request)
|
||||
|
||||
return HttpResponse(json.dumps({'success': True}))
|
||||
|
||||
log.warning("Login failed - Account not active for user {0}".format(username))
|
||||
|
||||
log.warning("Login failed - Account not active for user {0}, resending activation".format(username))
|
||||
|
||||
reactivation_email_for_user(user)
|
||||
not_activated_msg = "This account has not been activated. We have " + \
|
||||
"sent another activation message. Please check your " + \
|
||||
"e-mail for the activation instructions."
|
||||
return HttpResponse(json.dumps({'success': False,
|
||||
'value': 'This account has not been activated. Please check your e-mail for the activation instructions.'}))
|
||||
'value': not_activated_msg}))
|
||||
|
||||
|
||||
@ensure_csrf_cookie
|
||||
@@ -517,6 +522,17 @@ def password_reset(request):
|
||||
''' Attempts to send a password reset e-mail. '''
|
||||
if request.method != "POST":
|
||||
raise Http404
|
||||
|
||||
# By default, Django doesn't allow Users with is_active = False to reset their passwords,
|
||||
# but this bites people who signed up a long time ago, never activated, and forgot their
|
||||
# password. So for their sake, we'll auto-activate a user for whome password_reset is called.
|
||||
try:
|
||||
user = User.objects.get(email=request.POST['email'])
|
||||
user.is_active = True
|
||||
user.save()
|
||||
except:
|
||||
log.exception("Tried to auto-activate user to enable password reset, but failed.")
|
||||
|
||||
form = PasswordResetForm(request.POST)
|
||||
if form.is_valid():
|
||||
form.save(use_https = request.is_secure(),
|
||||
@@ -529,7 +545,6 @@ def password_reset(request):
|
||||
return HttpResponse(json.dumps({'success': False,
|
||||
'error': 'Invalid e-mail'}))
|
||||
|
||||
|
||||
@ensure_csrf_cookie
|
||||
def reactivation_email(request):
|
||||
''' Send an e-mail to reactivate a deactivated account, or to
|
||||
@@ -540,25 +555,22 @@ def reactivation_email(request):
|
||||
except User.DoesNotExist:
|
||||
return HttpResponse(json.dumps({'success': False,
|
||||
'error': 'No inactive user with this e-mail exists'}))
|
||||
return reactivation_email_for_user(user)
|
||||
|
||||
if user.is_active:
|
||||
return HttpResponse(json.dumps({'success': False,
|
||||
'error': 'User is already active'}))
|
||||
|
||||
def reactivation_email_for_user(user):
|
||||
reg = Registration.objects.get(user=user)
|
||||
reg.register(user)
|
||||
|
||||
d = {'name': UserProfile.get(user=user).name,
|
||||
'key': r.activation_key}
|
||||
d = {'name': user.profile.name,
|
||||
'key': reg.activation_key}
|
||||
|
||||
subject = render_to_string('reactivation_email_subject.txt', d)
|
||||
subject = render_to_string('emails/activation_email_subject.txt', d)
|
||||
subject = ''.join(subject.splitlines())
|
||||
message = render_to_string('reactivation_email.txt', d)
|
||||
message = render_to_string('emails/activation_email.txt', d)
|
||||
|
||||
res = user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
|
||||
|
||||
return HttpResponse(json.dumps({'success': True}))
|
||||
|
||||
|
||||
|
||||
@ensure_csrf_cookie
|
||||
def change_email_request(request):
|
||||
@@ -642,9 +654,12 @@ def confirm_email_change(request, key):
|
||||
meta['old_emails'].append([user.email, datetime.datetime.now().isoformat()])
|
||||
up.set_meta(meta)
|
||||
up.save()
|
||||
# Send it to the old email...
|
||||
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
|
||||
user.email = pec.new_email
|
||||
user.save()
|
||||
pec.delete()
|
||||
# And send it to the new email...
|
||||
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
|
||||
|
||||
return render_to_response("email_change_successful.html", d)
|
||||
@@ -665,9 +680,12 @@ def change_name_request(request):
|
||||
pnc.rationale = request.POST['rationale']
|
||||
if len(pnc.new_name) < 2:
|
||||
return HttpResponse(json.dumps({'success': False, 'error': 'Name required'}))
|
||||
if len(pnc.rationale) < 2:
|
||||
return HttpResponse(json.dumps({'success': False, 'error': 'Rationale required'}))
|
||||
pnc.save()
|
||||
|
||||
# The following automatically accepts name change requests. Remove this to
|
||||
# go back to the old system where it gets queued up for admin approval.
|
||||
accept_name_change_by_id(pnc.id)
|
||||
|
||||
return HttpResponse(json.dumps({'success': True}))
|
||||
|
||||
|
||||
@@ -702,14 +720,9 @@ def reject_name_change(request):
|
||||
return HttpResponse(json.dumps({'success': True}))
|
||||
|
||||
|
||||
@ensure_csrf_cookie
|
||||
def accept_name_change(request):
|
||||
''' JSON: Name change process. Course staff clicks 'accept' on a given name change '''
|
||||
if not request.user.is_staff:
|
||||
raise Http404
|
||||
|
||||
def accept_name_change_by_id(id):
|
||||
try:
|
||||
pnc = PendingNameChange.objects.get(id=int(request.POST['id']))
|
||||
pnc = PendingNameChange.objects.get(id=id)
|
||||
except PendingNameChange.DoesNotExist:
|
||||
return HttpResponse(json.dumps({'success': False, 'error': 'Invalid ID'}))
|
||||
|
||||
@@ -728,3 +741,17 @@ def accept_name_change(request):
|
||||
pnc.delete()
|
||||
|
||||
return HttpResponse(json.dumps({'success': True}))
|
||||
|
||||
|
||||
@ensure_csrf_cookie
|
||||
def accept_name_change(request):
|
||||
''' JSON: Name change process. Course staff clicks 'accept' on a given name change
|
||||
|
||||
We used this during the prototype but now we simply record name changes instead
|
||||
of manually approving them. Still keeping this around in case we want to go
|
||||
back to this approval method.
|
||||
'''
|
||||
if not request.user.is_staff:
|
||||
raise Http404
|
||||
|
||||
return accept_name_change_by_id(int(request.POST['id']))
|
||||
|
||||
@@ -48,7 +48,7 @@ general_whitespace = re.compile('[^\w]+')
|
||||
|
||||
|
||||
def check_variables(string, variables):
|
||||
''' Confirm the only variables in string are defined.
|
||||
'''Confirm the only variables in string are defined.
|
||||
|
||||
Pyparsing uses a left-to-right parser, which makes the more
|
||||
elegant approach pretty hopeless.
|
||||
@@ -56,7 +56,8 @@ def check_variables(string, variables):
|
||||
achar = reduce(lambda a,b:a|b ,map(Literal,alphas)) # Any alphabetic character
|
||||
undefined_variable = achar + Word(alphanums)
|
||||
undefined_variable.setParseAction(lambda x:UndefinedVariable("".join(x)).raiseself())
|
||||
varnames = varnames | undefined_variable'''
|
||||
varnames = varnames | undefined_variable
|
||||
'''
|
||||
possible_variables = re.split(general_whitespace, string) # List of all alnums in string
|
||||
bad_variables = list()
|
||||
for v in possible_variables:
|
||||
@@ -71,7 +72,8 @@ def check_variables(string, variables):
|
||||
|
||||
|
||||
def evaluator(variables, functions, string, cs=False):
|
||||
''' Evaluate an expression. Variables are passed as a dictionary
|
||||
'''
|
||||
Evaluate an expression. Variables are passed as a dictionary
|
||||
from string to value. Unary functions are passed as a dictionary
|
||||
from string to function. Variables must be floats.
|
||||
cs: Case sensitive
|
||||
@@ -108,6 +110,7 @@ def evaluator(variables, functions, string, cs=False):
|
||||
|
||||
if string.strip() == "":
|
||||
return float('nan')
|
||||
|
||||
ops = {"^": operator.pow,
|
||||
"*": operator.mul,
|
||||
"/": operator.truediv,
|
||||
@@ -169,14 +172,19 @@ def evaluator(variables, functions, string, cs=False):
|
||||
def func_parse_action(x):
|
||||
return [all_functions[x[0]](x[1])]
|
||||
|
||||
number_suffix = reduce(lambda a, b: a | b, map(Literal, suffixes.keys()), NoMatch()) # SI suffixes and percent
|
||||
# SI suffixes and percent
|
||||
number_suffix = reduce(lambda a, b: a | b, map(Literal, suffixes.keys()), NoMatch())
|
||||
(dot, minus, plus, times, div, lpar, rpar, exp) = map(Literal, ".-+*/()^")
|
||||
|
||||
number_part = Word(nums)
|
||||
inner_number = (number_part + Optional("." + number_part)) | ("." + number_part) # 0.33 or 7 or .34
|
||||
number = Optional(minus | plus) + inner_number + \
|
||||
Optional(CaselessLiteral("E") + Optional("-") + number_part) + \
|
||||
Optional(number_suffix) # 0.33k or -17
|
||||
|
||||
# 0.33 or 7 or .34
|
||||
inner_number = (number_part + Optional("." + number_part)) | ("." + number_part)
|
||||
|
||||
# 0.33k or -17
|
||||
number = (Optional(minus | plus) + inner_number
|
||||
+ Optional(CaselessLiteral("E") + Optional("-") + number_part)
|
||||
+ Optional(number_suffix))
|
||||
number = number.setParseAction(number_parse_action) # Convert to number
|
||||
|
||||
# Predefine recursive variables
|
||||
@@ -201,9 +209,11 @@ def evaluator(variables, functions, string, cs=False):
|
||||
varnames.setParseAction(lambda x: map(lambda y: all_variables[y], x))
|
||||
else:
|
||||
varnames = NoMatch()
|
||||
|
||||
# Same thing for functions.
|
||||
if len(all_functions) > 0:
|
||||
funcnames = sreduce(lambda x, y: x | y, map(lambda x: CasedLiteral(x), all_functions.keys()))
|
||||
funcnames = sreduce(lambda x, y: x | y,
|
||||
map(lambda x: CasedLiteral(x), all_functions.keys()))
|
||||
function = funcnames + lpar.suppress() + expr + rpar.suppress()
|
||||
function.setParseAction(func_parse_action)
|
||||
else:
|
||||
|
||||
@@ -3,8 +3,9 @@
|
||||
#
|
||||
# Nomenclature:
|
||||
#
|
||||
# A capa Problem is a collection of text and capa Response questions. Each Response may have one or more
|
||||
# Input entry fields. The capa Problem may include a solution.
|
||||
# A capa Problem is a collection of text and capa Response questions.
|
||||
# Each Response may have one or more Input entry fields.
|
||||
# The capa problem may include a solution.
|
||||
#
|
||||
'''
|
||||
Main module which shows problems (of "capa" type).
|
||||
@@ -29,6 +30,8 @@ import sys
|
||||
from lxml import etree
|
||||
from xml.sax.saxutils import unescape
|
||||
|
||||
import chem
|
||||
import chem.chemcalc
|
||||
import calc
|
||||
from correctmap import CorrectMap
|
||||
import eia
|
||||
@@ -42,9 +45,25 @@ import responsetypes
|
||||
# dict of tagname, Response Class -- this should come from auto-registering
|
||||
response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
|
||||
|
||||
entry_types = ['textline', 'schematic', 'textbox', 'imageinput', 'optioninput', 'choicegroup', 'radiogroup', 'checkboxgroup', 'filesubmission', 'javascriptinput']
|
||||
solution_types = ['solution'] # extra things displayed after "show answers" is pressed
|
||||
response_properties = ["codeparam", "responseparam", "answer"] # these get captured as student responses
|
||||
# Different ways students can input code
|
||||
entry_types = ['textline',
|
||||
'schematic',
|
||||
'textbox',
|
||||
'imageinput',
|
||||
'optioninput',
|
||||
'choicegroup',
|
||||
'radiogroup',
|
||||
'checkboxgroup',
|
||||
'filesubmission',
|
||||
'javascriptinput',
|
||||
'crystallography',
|
||||
'chemicalequationinput',]
|
||||
|
||||
# extra things displayed after "show answers" is pressed
|
||||
solution_types = ['solution']
|
||||
|
||||
# these get captured as student responses
|
||||
response_properties = ["codeparam", "responseparam", "answer"]
|
||||
|
||||
# special problem tags which should be turned into innocuous HTML
|
||||
html_transforms = {'problem': {'tag': 'div'},
|
||||
@@ -57,7 +76,8 @@ global_context = {'random': random,
|
||||
'math': math,
|
||||
'scipy': scipy,
|
||||
'calc': calc,
|
||||
'eia': eia}
|
||||
'eia': eia,
|
||||
'chemcalc': chem.chemcalc}
|
||||
|
||||
# These should be removed from HTML output, including all subelements
|
||||
html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup"]
|
||||
@@ -83,7 +103,8 @@ class LoncapaProblem(object):
|
||||
- id (string): identifier for this problem; often a filename (no spaces)
|
||||
- state (dict): student state
|
||||
- seed (int): random number generator seed (int)
|
||||
- system (ModuleSystem): ModuleSystem instance which provides OS, rendering, and user context
|
||||
- system (ModuleSystem): ModuleSystem instance which provides OS,
|
||||
rendering, and user context
|
||||
|
||||
'''
|
||||
|
||||
@@ -107,19 +128,24 @@ class LoncapaProblem(object):
|
||||
if not self.seed:
|
||||
self.seed = struct.unpack('i', os.urandom(4))[0]
|
||||
|
||||
problem_text = re.sub("startouttext\s*/", "text", problem_text) # Convert startouttext and endouttext to proper <text></text>
|
||||
# Convert startouttext and endouttext to proper <text></text>
|
||||
problem_text = re.sub("startouttext\s*/", "text", problem_text)
|
||||
problem_text = re.sub("endouttext\s*/", "/text", problem_text)
|
||||
self.problem_text = problem_text
|
||||
|
||||
self.tree = etree.XML(problem_text) # parse problem XML file into an element tree
|
||||
self._process_includes() # handle any <include file="foo"> tags
|
||||
# parse problem XML file into an element tree
|
||||
self.tree = etree.XML(problem_text)
|
||||
|
||||
# handle any <include file="foo"> tags
|
||||
self._process_includes()
|
||||
|
||||
# construct script processor context (eg for customresponse problems)
|
||||
self.context = self._extract_context(self.tree, seed=self.seed)
|
||||
|
||||
# pre-parse the XML tree: modifies it to add ID's and perform some in-place transformations
|
||||
# this also creates the dict (self.responders) of Response instances for each question in the problem.
|
||||
# the dict has keys = xml subtree of Response, values = Response instance
|
||||
# Pre-parse the XML tree: modifies it to add ID's and perform some in-place
|
||||
# transformations. This also creates the dict (self.responders) of Response
|
||||
# instances for each question in the problem. The dict has keys = xml subtree of
|
||||
# Response, values = Response instance
|
||||
self._preprocess_problem(self.tree)
|
||||
|
||||
if not self.student_answers: # True when student_answers is an empty dict
|
||||
@@ -134,6 +160,9 @@ class LoncapaProblem(object):
|
||||
self.done = False
|
||||
|
||||
def set_initial_display(self):
|
||||
"""
|
||||
Set the student's answers to the responders' initial displays, if specified.
|
||||
"""
|
||||
initial_answers = dict()
|
||||
for responder in self.responders.values():
|
||||
if hasattr(responder, 'get_initial_display'):
|
||||
@@ -145,9 +174,11 @@ class LoncapaProblem(object):
|
||||
return u"LoncapaProblem ({0})".format(self.problem_id)
|
||||
|
||||
def get_state(self):
|
||||
''' Stored per-user session data neeeded to:
|
||||
'''
|
||||
Stored per-user session data neeeded to:
|
||||
1) Recreate the problem
|
||||
2) Populate any student answers. '''
|
||||
2) Populate any student answers.
|
||||
'''
|
||||
|
||||
return {'seed': self.seed,
|
||||
'student_answers': self.student_answers,
|
||||
@@ -156,7 +187,7 @@ class LoncapaProblem(object):
|
||||
|
||||
def get_max_score(self):
|
||||
'''
|
||||
Return maximum score for this problem.
|
||||
Return the maximum score for this problem.
|
||||
'''
|
||||
maxscore = 0
|
||||
for response, responder in self.responders.iteritems():
|
||||
@@ -164,11 +195,11 @@ class LoncapaProblem(object):
|
||||
return maxscore
|
||||
|
||||
def get_score(self):
|
||||
'''
|
||||
"""
|
||||
Compute score for this problem. The score is the number of points awarded.
|
||||
Returns a dictionary {'score': integer, from 0 to get_max_score(),
|
||||
'total': get_max_score()}.
|
||||
'''
|
||||
"""
|
||||
correct = 0
|
||||
for key in self.correct_map:
|
||||
try:
|
||||
@@ -204,22 +235,25 @@ class LoncapaProblem(object):
|
||||
def is_queued(self):
|
||||
'''
|
||||
Returns True if any part of the problem has been submitted to an external queue
|
||||
(e.g. for grading.)
|
||||
'''
|
||||
return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)
|
||||
|
||||
|
||||
def get_recentmost_queuetime(self):
|
||||
'''
|
||||
Returns a DateTime object that represents the timestamp of the most recent queueing request, or None if not queued
|
||||
Returns a DateTime object that represents the timestamp of the most recent
|
||||
queueing request, or None if not queued
|
||||
'''
|
||||
if not self.is_queued():
|
||||
return None
|
||||
|
||||
# Get a list of timestamps of all queueing requests, then convert it to a DateTime object
|
||||
queuetime_strs = [self.correct_map.get_queuetime_str(answer_id)
|
||||
for answer_id in self.correct_map
|
||||
for answer_id in self.correct_map
|
||||
if self.correct_map.is_queued(answer_id)]
|
||||
queuetimes = [datetime.strptime(qt_str, xqueue_interface.dateformat) for qt_str in queuetime_strs]
|
||||
queuetimes = [datetime.strptime(qt_str, xqueue_interface.dateformat)
|
||||
for qt_str in queuetime_strs]
|
||||
|
||||
return max(queuetimes)
|
||||
|
||||
@@ -235,14 +269,20 @@ class LoncapaProblem(object):
|
||||
Calls the Response for each question in this problem, to do the actual grading.
|
||||
'''
|
||||
|
||||
# if answers include File objects, convert them to filenames.
|
||||
self.student_answers = convert_files_to_filenames(answers)
|
||||
|
||||
oldcmap = self.correct_map # old CorrectMap
|
||||
newcmap = CorrectMap() # start new with empty CorrectMap
|
||||
# old CorrectMap
|
||||
oldcmap = self.correct_map
|
||||
|
||||
# start new with empty CorrectMap
|
||||
newcmap = CorrectMap()
|
||||
# log.debug('Responders: %s' % self.responders)
|
||||
for responder in self.responders.values(): # Call each responsetype instance to do actual grading
|
||||
if 'filesubmission' in responder.allowed_inputfields: # File objects are passed only if responsetype
|
||||
# explicitly allows for file submissions
|
||||
# Call each responsetype instance to do actual grading
|
||||
for responder in self.responders.values():
|
||||
# File objects are passed only if responsetype explicitly allows for file
|
||||
# submissions
|
||||
if 'filesubmission' in responder.allowed_inputfields:
|
||||
results = responder.evaluate_answers(answers, oldcmap)
|
||||
else:
|
||||
results = responder.evaluate_answers(convert_files_to_filenames(answers), oldcmap)
|
||||
@@ -252,28 +292,33 @@ class LoncapaProblem(object):
|
||||
return newcmap
|
||||
|
||||
def get_question_answers(self):
|
||||
"""Returns a dict of answer_ids to answer values. If we cannot generate
|
||||
"""
|
||||
Returns a dict of answer_ids to answer values. If we cannot generate
|
||||
an answer (this sometimes happens in customresponses), that answer_id is
|
||||
not included. Called by "show answers" button JSON request
|
||||
(see capa_module)
|
||||
"""
|
||||
# dict of (id, correct_answer)
|
||||
answer_map = dict()
|
||||
for response in self.responders.keys():
|
||||
results = self.responder_answers[response]
|
||||
answer_map.update(results) # dict of (id,correct_answer)
|
||||
answer_map.update(results)
|
||||
|
||||
# include solutions from <solution>...</solution> stanzas
|
||||
for entry in self.tree.xpath("//" + "|//".join(solution_types)):
|
||||
answer = etree.tostring(entry)
|
||||
if answer: answer_map[entry.get('id')] = contextualize_text(answer, self.context)
|
||||
if answer:
|
||||
answer_map[entry.get('id')] = contextualize_text(answer, self.context)
|
||||
|
||||
log.debug('answer_map = %s' % answer_map)
|
||||
return answer_map
|
||||
|
||||
def get_answer_ids(self):
|
||||
"""Return the IDs of all the responses -- these are the keys used for
|
||||
"""
|
||||
Return the IDs of all the responses -- these are the keys used for
|
||||
the dicts returned by grade_answers and get_question_answers. (Though
|
||||
get_question_answers may only return a subset of these."""
|
||||
get_question_answers may only return a subset of these.
|
||||
"""
|
||||
answer_ids = []
|
||||
for response in self.responders.keys():
|
||||
results = self.responder_answers[response]
|
||||
@@ -298,7 +343,8 @@ class LoncapaProblem(object):
|
||||
file = inc.get('file')
|
||||
if file is not None:
|
||||
try:
|
||||
ifp = self.system.filestore.open(file) # open using ModuleSystem OSFS filestore
|
||||
# open using ModuleSystem OSFS filestore
|
||||
ifp = self.system.filestore.open(file)
|
||||
except Exception as err:
|
||||
log.warning('Error %s in problem xml include: %s' % (
|
||||
err, etree.tostring(inc, pretty_print=True)))
|
||||
@@ -311,7 +357,8 @@ class LoncapaProblem(object):
|
||||
else:
|
||||
continue
|
||||
try:
|
||||
incxml = etree.XML(ifp.read()) # read in and convert to XML
|
||||
# read in and convert to XML
|
||||
incxml = etree.XML(ifp.read())
|
||||
except Exception as err:
|
||||
log.warning('Error %s in problem xml include: %s' % (
|
||||
err, etree.tostring(inc, pretty_print=True)))
|
||||
@@ -322,6 +369,7 @@ class LoncapaProblem(object):
|
||||
raise
|
||||
else:
|
||||
continue
|
||||
|
||||
# insert new XML into tree in place of inlcude
|
||||
parent = inc.getparent()
|
||||
parent.insert(parent.index(inc), incxml)
|
||||
@@ -329,11 +377,13 @@ class LoncapaProblem(object):
|
||||
log.debug('Included %s into %s' % (file, self.problem_id))
|
||||
|
||||
def _extract_system_path(self, script):
|
||||
'''
|
||||
"""
|
||||
Extracts and normalizes additional paths for code execution.
|
||||
For now, there's a default path of data/course/code; this may be removed
|
||||
at some point.
|
||||
'''
|
||||
|
||||
script : ?? (TODO)
|
||||
"""
|
||||
|
||||
DEFAULT_PATH = ['code']
|
||||
|
||||
@@ -351,7 +401,6 @@ class LoncapaProblem(object):
|
||||
# path is an absolute path or a path relative to the data dir
|
||||
dir = os.path.join(self.system.filestore.root_path, dir)
|
||||
abs_dir = os.path.normpath(dir)
|
||||
#log.debug("appending to path: %s" % abs_dir)
|
||||
path.append(abs_dir)
|
||||
|
||||
return path
|
||||
@@ -362,13 +411,20 @@ class LoncapaProblem(object):
|
||||
context of this problem. Provides ability to randomize problems, and also set
|
||||
variables for problem answer checking.
|
||||
|
||||
Problem XML goes to Python execution context. Runs everything in script tags
|
||||
Problem XML goes to Python execution context. Runs everything in script tags.
|
||||
'''
|
||||
random.seed(self.seed)
|
||||
context = {'global_context': global_context} # save global context in here also
|
||||
context.update(global_context) # initialize context to have stuff in global_context
|
||||
context['__builtins__'] = globals()['__builtins__'] # put globals there also
|
||||
context['the_lcp'] = self # pass instance of LoncapaProblem in
|
||||
# save global context in here also
|
||||
context = {'global_context': global_context}
|
||||
|
||||
# initialize context to have stuff in global_context
|
||||
context.update(global_context)
|
||||
|
||||
# put globals there also
|
||||
context['__builtins__'] = globals()['__builtins__']
|
||||
|
||||
# pass instance of LoncapaProblem in
|
||||
context['the_lcp'] = self
|
||||
context['script_code'] = ''
|
||||
|
||||
self._execute_scripts(tree.findall('.//script'), context)
|
||||
@@ -385,7 +441,7 @@ class LoncapaProblem(object):
|
||||
sys.path = original_path + self._extract_system_path(script)
|
||||
|
||||
stype = script.get('type')
|
||||
|
||||
|
||||
if stype:
|
||||
if 'javascript' in stype:
|
||||
continue # skip javascript
|
||||
@@ -395,12 +451,14 @@ class LoncapaProblem(object):
|
||||
code = script.text
|
||||
XMLESC = {"'": "'", """: '"'}
|
||||
code = unescape(code, XMLESC)
|
||||
context['script_code'] += code # store code source in context
|
||||
# store code source in context
|
||||
context['script_code'] += code
|
||||
try:
|
||||
exec code in context, context # use "context" for global context; thus defs in code are global within code
|
||||
# use "context" for global context; thus defs in code are global within code
|
||||
exec code in context, context
|
||||
except Exception as err:
|
||||
log.exception("Error while execing script code: " + code)
|
||||
msg = "Error while executing script code: %s" % str(err).replace('<','<')
|
||||
msg = "Error while executing script code: %s" % str(err).replace('<','<')
|
||||
raise responsetypes.LoncapaProblemError(msg)
|
||||
finally:
|
||||
sys.path = original_path
|
||||
@@ -415,7 +473,8 @@ class LoncapaProblem(object):
|
||||
|
||||
Used by get_html.
|
||||
'''
|
||||
if problemtree.tag == 'script' and problemtree.get('type') and 'javascript' in problemtree.get('type'):
|
||||
if (problemtree.tag == 'script' and problemtree.get('type')
|
||||
and 'javascript' in problemtree.get('type')):
|
||||
# leave javascript intact.
|
||||
return problemtree
|
||||
|
||||
@@ -424,8 +483,8 @@ class LoncapaProblem(object):
|
||||
|
||||
problemid = problemtree.get('id') # my ID
|
||||
|
||||
if problemtree.tag in inputtypes.get_input_xml_tags():
|
||||
|
||||
if problemtree.tag in inputtypes.registered_input_tags():
|
||||
# If this is an inputtype subtree, let it render itself.
|
||||
status = "unsubmitted"
|
||||
msg = ''
|
||||
hint = ''
|
||||
@@ -442,32 +501,34 @@ class LoncapaProblem(object):
|
||||
value = self.student_answers[problemid]
|
||||
|
||||
# do the rendering
|
||||
render_object = inputtypes.SimpleInput(system=self.system,
|
||||
xml=problemtree,
|
||||
state={'value': value,
|
||||
'status': status,
|
||||
'id': problemtree.get('id'),
|
||||
'feedback': {'message': msg,
|
||||
'hint': hint,
|
||||
'hintmode': hintmode,
|
||||
}
|
||||
},
|
||||
use='capa_input')
|
||||
return render_object.get_html() # function(problemtree, value, status, msg) # render the special response (textline, schematic,...)
|
||||
|
||||
if problemtree in self.responders: # let each Response render itself
|
||||
state = {'value': value,
|
||||
'status': status,
|
||||
'id': problemtree.get('id'),
|
||||
'feedback': {'message': msg,
|
||||
'hint': hint,
|
||||
'hintmode': hintmode,}}
|
||||
|
||||
input_type_cls = inputtypes.get_class_for_tag(problemtree.tag)
|
||||
the_input = input_type_cls(self.system, problemtree, state)
|
||||
return the_input.get_html()
|
||||
|
||||
# let each Response render itself
|
||||
if problemtree in self.responders:
|
||||
return self.responders[problemtree].render_html(self._extract_html)
|
||||
|
||||
tree = etree.Element(problemtree.tag)
|
||||
for item in problemtree:
|
||||
item_xhtml = self._extract_html(item) # nothing special: recurse
|
||||
# render child recursively
|
||||
item_xhtml = self._extract_html(item)
|
||||
if item_xhtml is not None:
|
||||
tree.append(item_xhtml)
|
||||
|
||||
if tree.tag in html_transforms:
|
||||
tree.tag = html_transforms[problemtree.tag]['tag']
|
||||
else:
|
||||
for (key, value) in problemtree.items(): # copy attributes over if not innocufying
|
||||
# copy attributes over if not innocufying
|
||||
for (key, value) in problemtree.items():
|
||||
tree.set(key, value)
|
||||
|
||||
tree.text = problemtree.text
|
||||
@@ -490,31 +551,41 @@ class LoncapaProblem(object):
|
||||
self.responders = {}
|
||||
for response in tree.xpath('//' + "|//".join(response_tag_dict)):
|
||||
response_id_str = self.problem_id + "_" + str(response_id)
|
||||
response.set('id', response_id_str) # create and save ID for this response
|
||||
# create and save ID for this response
|
||||
response.set('id', response_id_str)
|
||||
response_id += 1
|
||||
|
||||
answer_id = 1
|
||||
inputfields = tree.xpath("|".join(['//' + response.tag + '[@id=$id]//' + x for x in (entry_types + solution_types)]),
|
||||
inputfields = tree.xpath("|".join(['//' + response.tag + '[@id=$id]//' + x
|
||||
for x in (entry_types + solution_types)]),
|
||||
id=response_id_str)
|
||||
for entry in inputfields: # assign one answer_id for each entry_type or solution_type
|
||||
|
||||
# assign one answer_id for each entry_type or solution_type
|
||||
for entry in inputfields:
|
||||
entry.attrib['response_id'] = str(response_id)
|
||||
entry.attrib['answer_id'] = str(answer_id)
|
||||
entry.attrib['id'] = "%s_%i_%i" % (self.problem_id, response_id, answer_id)
|
||||
answer_id = answer_id + 1
|
||||
|
||||
responder = response_tag_dict[response.tag](response, inputfields, self.context, self.system) # instantiate capa Response
|
||||
self.responders[response] = responder # save in list in self
|
||||
# instantiate capa Response
|
||||
responder = response_tag_dict[response.tag](response, inputfields,
|
||||
self.context, self.system)
|
||||
# save in list in self
|
||||
self.responders[response] = responder
|
||||
|
||||
# get responder answers (do this only once, since there may be a performance cost, eg with externalresponse)
|
||||
# get responder answers (do this only once, since there may be a performance cost,
|
||||
# eg with externalresponse)
|
||||
self.responder_answers = {}
|
||||
for response in self.responders.keys():
|
||||
try:
|
||||
self.responder_answers[response] = self.responders[response].get_answers()
|
||||
except:
|
||||
log.debug('responder %s failed to properly return get_answers()' % self.responders[response]) # FIXME
|
||||
log.debug('responder %s failed to properly return get_answers()',
|
||||
self.responders[response]) # FIXME
|
||||
raise
|
||||
|
||||
# <solution>...</solution> may not be associated with any specific response; give IDs for those separately
|
||||
# <solution>...</solution> may not be associated with any specific response; give
|
||||
# IDs for those separately
|
||||
# TODO: We should make the namespaces consistent and unique (e.g. %s_problem_%i).
|
||||
solution_id = 1
|
||||
for solution in tree.findall('.//solution'):
|
||||
|
||||
1
common/lib/capa/capa/chem/__init__.py
Normal file
1
common/lib/capa/capa/chem/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
433
common/lib/capa/capa/chem/chemcalc.py
Normal file
433
common/lib/capa/capa/chem/chemcalc.py
Normal file
@@ -0,0 +1,433 @@
|
||||
from __future__ import division
|
||||
import copy
|
||||
from fractions import Fraction
|
||||
import logging
|
||||
import math
|
||||
import operator
|
||||
import re
|
||||
import numpy
|
||||
import numbers
|
||||
import scipy.constants
|
||||
|
||||
from pyparsing import (Literal, Keyword, Word, nums, StringEnd, Optional,
|
||||
Forward, OneOrMore, ParseException)
|
||||
import nltk
|
||||
from nltk.tree import Tree
|
||||
|
||||
ARROWS = ('<->', '->')
|
||||
|
||||
## Defines a simple pyparsing tokenizer for chemical equations
|
||||
elements = ['Ac','Ag','Al','Am','Ar','As','At','Au','B','Ba','Be',
|
||||
'Bh','Bi','Bk','Br','C','Ca','Cd','Ce','Cf','Cl','Cm',
|
||||
'Cn','Co','Cr','Cs','Cu','Db','Ds','Dy','Er','Es','Eu',
|
||||
'F','Fe','Fl','Fm','Fr','Ga','Gd','Ge','H','He','Hf',
|
||||
'Hg','Ho','Hs','I','In','Ir','K','Kr','La','Li','Lr',
|
||||
'Lu','Lv','Md','Mg','Mn','Mo','Mt','N','Na','Nb','Nd',
|
||||
'Ne','Ni','No','Np','O','Os','P','Pa','Pb','Pd','Pm',
|
||||
'Po','Pr','Pt','Pu','Ra','Rb','Re','Rf','Rg','Rh','Rn',
|
||||
'Ru','S','Sb','Sc','Se','Sg','Si','Sm','Sn','Sr','Ta',
|
||||
'Tb','Tc','Te','Th','Ti','Tl','Tm','U','Uuo','Uup',
|
||||
'Uus','Uut','V','W','Xe','Y','Yb','Zn','Zr']
|
||||
digits = map(str, range(10))
|
||||
symbols = list("[](){}^+-/")
|
||||
phases = ["(s)", "(l)", "(g)", "(aq)"]
|
||||
tokens = reduce(lambda a, b: a ^ b, map(Literal, elements + digits + symbols + phases))
|
||||
tokenizer = OneOrMore(tokens) + StringEnd()
|
||||
|
||||
|
||||
def _orjoin(l):
|
||||
return "'" + "' | '".join(l) + "'"
|
||||
|
||||
## Defines an NLTK parser for tokenized expressions
|
||||
grammar = """
|
||||
S -> multimolecule | multimolecule '+' S
|
||||
multimolecule -> count molecule | molecule
|
||||
count -> number | number '/' number
|
||||
molecule -> unphased | unphased phase
|
||||
unphased -> group | paren_group_round | paren_group_square
|
||||
element -> """ + _orjoin(elements) + """
|
||||
digit -> """ + _orjoin(digits) + """
|
||||
phase -> """ + _orjoin(phases) + """
|
||||
number -> digit | digit number
|
||||
group -> suffixed | suffixed group
|
||||
paren_group_round -> '(' group ')'
|
||||
paren_group_square -> '[' group ']'
|
||||
plus_minus -> '+' | '-'
|
||||
number_suffix -> number
|
||||
ion_suffix -> '^' number plus_minus | '^' plus_minus
|
||||
suffix -> number_suffix | number_suffix ion_suffix | ion_suffix
|
||||
unsuffixed -> element | paren_group_round | paren_group_square
|
||||
|
||||
suffixed -> unsuffixed | unsuffixed suffix
|
||||
"""
|
||||
parser = nltk.ChartParser(nltk.parse_cfg(grammar))
|
||||
|
||||
|
||||
def _clean_parse_tree(tree):
|
||||
''' The parse tree contains a lot of redundant
|
||||
nodes. E.g. paren_groups have groups as children, etc. This will
|
||||
clean up the tree.
|
||||
'''
|
||||
def unparse_number(n):
|
||||
''' Go from a number parse tree to a number '''
|
||||
if len(n) == 1:
|
||||
rv = n[0][0]
|
||||
else:
|
||||
rv = n[0][0] + unparse_number(n[1])
|
||||
return rv
|
||||
|
||||
def null_tag(n):
|
||||
''' Remove a tag '''
|
||||
return n[0]
|
||||
|
||||
def ion_suffix(n):
|
||||
'''1. "if" part handles special case
|
||||
2. "else" part is general behaviour '''
|
||||
|
||||
if n[1:][0].node == 'number' and n[1:][0][0][0] == '1':
|
||||
# if suffix is explicitly 1, like ^1-
|
||||
# strip 1, leave only sign: ^-
|
||||
return nltk.tree.Tree(n.node, n[2:])
|
||||
else:
|
||||
return nltk.tree.Tree(n.node, n[1:])
|
||||
|
||||
dispatch = {'number': lambda x: nltk.tree.Tree("number", [unparse_number(x)]),
|
||||
'unphased': null_tag,
|
||||
'unsuffixed': null_tag,
|
||||
'number_suffix': lambda x: nltk.tree.Tree('number_suffix', [unparse_number(x[0])]),
|
||||
'suffixed': lambda x: len(x) > 1 and x or x[0],
|
||||
'ion_suffix': ion_suffix,
|
||||
'paren_group_square': lambda x: nltk.tree.Tree(x.node, x[1]),
|
||||
'paren_group_round': lambda x: nltk.tree.Tree(x.node, x[1])}
|
||||
|
||||
if type(tree) == str:
|
||||
return tree
|
||||
|
||||
old_node = None
|
||||
## This loop means that if a node is processed, and returns a child,
|
||||
## the child will be processed.
|
||||
while tree.node in dispatch and tree.node != old_node:
|
||||
old_node = tree.node
|
||||
tree = dispatch[tree.node](tree)
|
||||
|
||||
children = []
|
||||
for child in tree:
|
||||
child = _clean_parse_tree(child)
|
||||
children.append(child)
|
||||
|
||||
tree = nltk.tree.Tree(tree.node, children)
|
||||
|
||||
return tree
|
||||
|
||||
|
||||
def _merge_children(tree, tags):
    ''' Flatten right-recursive groups produced by the parser.

    nltk, by documentation, cannot do arbitrary length groups, so instead of
    (group 1 2 3 4) the parser emits (group 1 (group 2 (group 3 (group 4)))).
    This converts the latter into the former for every node whose label is
    in `tags`.
    '''
    if tree is None:
        # There was a problem--shouldn't have empty trees (NOTE: see this with input e.g. 'H2O(', or 'Xe+').
        # Haven't grokked the code to tell if this is indeed the right thing to do.
        raise ParseException("Shouldn't have empty trees")

    if type(tree) == str:
        return tree

    # Keep splicing same-labelled children into their parent until a full
    # pass makes no change (a spliced-in child may itself need splicing).
    changed = True
    while changed:
        changed = False
        flattened = []
        for node in tree:
            if type(node) == nltk.tree.Tree and node.node == tree.node and tree.node in tags:
                flattened.extend(list(node))
                changed = True
            else:
                flattened.append(node)
        tree = nltk.tree.Tree(tree.node, flattened)

    # Recurse into the (now flat) children.
    return nltk.tree.Tree(tree.node, [_merge_children(node, tags) for node in tree])
|
||||
|
||||
|
||||
def _render_to_html(tree):
|
||||
''' Renders a cleaned tree to HTML '''
|
||||
|
||||
def molecule_count(tree, children):
|
||||
# If an integer, return that integer
|
||||
if len(tree) == 1:
|
||||
return tree[0][0]
|
||||
# If a fraction, return the fraction
|
||||
if len(tree) == 3:
|
||||
return " <sup>{num}</sup>⁄<sub>{den}</sub> ".format(num=tree[0][0], den=tree[2][0])
|
||||
return "Error"
|
||||
|
||||
def subscript(tree, children):
|
||||
return "<sub>{sub}</sub>".format(sub=children)
|
||||
|
||||
def superscript(tree, children):
|
||||
return "<sup>{sup}</sup>".format(sup=children)
|
||||
|
||||
def round_brackets(tree, children):
|
||||
return "({insider})".format(insider=children)
|
||||
|
||||
def square_brackets(tree, children):
|
||||
return "[{insider}]".format(insider=children)
|
||||
|
||||
dispatch = {'count': molecule_count,
|
||||
'number_suffix': subscript,
|
||||
'ion_suffix': superscript,
|
||||
'paren_group_round': round_brackets,
|
||||
'paren_group_square': square_brackets}
|
||||
|
||||
if type(tree) == str:
|
||||
return tree
|
||||
else:
|
||||
children = "".join(map(_render_to_html, tree))
|
||||
if tree.node in dispatch:
|
||||
return dispatch[tree.node](tree, children)
|
||||
else:
|
||||
return children.replace(' ', '')
|
||||
|
||||
|
||||
|
||||
def render_to_html(eq):
    '''
    Render a chemical equation string to html.

    Renders each molecule separately, and returns invalid input wrapped in a <span>.
    '''
    # Pretty glyphs for the supported text arrows; any other arrow value
    # passes through unchanged (future-proofing against new arrow types).
    arrow_glyphs = {'->': u'\u2192', '<->': u'\u2194'}

    def render_expression(ex):
        """Render one arrow-free side; invalid input becomes an error span."""
        try:
            return _render_to_html(_get_final_tree(ex))
        except ParseException:
            return '<span class="inline-error inline">{0}</span>'.format(ex)

    left, arrow, right = split_on_arrow(eq)
    if arrow == '':
        # No arrow: the whole string is a single expression.
        body = render_expression(left)
    else:
        body = render_expression(left) + arrow_glyphs.get(arrow, arrow) + render_expression(right)

    return u'<span class="math">{0}</span>'.format(body)
|
||||
|
||||
|
||||
def _get_final_tree(s):
    '''
    Return final tree after merge and clean.

    Raises pyparsing.ParseException if s is invalid.
    '''
    # tokenize -> parse -> flatten recursive groups -> normalize node shapes
    parsed = parser.parse(tokenizer.parseString(s))
    return _clean_parse_tree(_merge_children(parsed, {'S', 'group'}))
|
||||
|
||||
|
||||
def _check_equality(tuple1, tuple2):
|
||||
''' return True if tuples of multimolecules are equal '''
|
||||
list1 = list(tuple1)
|
||||
list2 = list(tuple2)
|
||||
|
||||
# Hypo: trees where are levels count+molecule vs just molecule
|
||||
# cannot be sorted properly (tested on test_complex_additivity)
|
||||
# But without factors and phases sorting seems to work.
|
||||
|
||||
# Also for lists of multimolecules without factors and phases
|
||||
# sorting seems to work fine.
|
||||
list1.sort()
|
||||
list2.sort()
|
||||
return list1 == list2
|
||||
|
||||
|
||||
def compare_chemical_expression(s1, s2, ignore_state=False):
    ''' Check whether two chemical expressions are equivalent.

    Delegates to divide_chemical_expression and treats the two expressions
    as equal exactly when the division ratio is 1.
    '''
    ratio = divide_chemical_expression(s1, s2, ignore_state)
    return ratio == 1
|
||||
|
||||
|
||||
def divide_chemical_expression(s1, s2, ignore_state=False):
    '''Compare two chemical expressions for equivalence up to a multiplicative factor:

    - If they are not the same chemicals, returns False.
    - If they are the same, "divide" s1 by s2 to returns a factor x such that s1 / s2 == x as a Fraction object.
    - if ignore_state is True, ignores phases when doing the comparison.

    Examples:
        divide_chemical_expression("H2O", "3H2O") -> Fraction(1,3)
        divide_chemical_expression("3H2O", "H2O") -> 3  # actually Fraction(3, 1), but compares == to 3.
        divide_chemical_expression("2H2O(s) + 2CO2", "H2O(s)+CO2") -> 2
        divide_chemical_expression("H2O(s) + CO2", "3H2O(s)+2CO2") -> False

    Implementation sketch:
        - extract factors and phases to standalone lists,
        - compare expressions without factors and phases,
        - divide lists of factors for each other and check
          for equality of every element in list,
        - return result of factor division
    '''

    # parsed final trees, keyed '1' and '2'; derived per-expression lists
    # are stored in the same dict under keys like '1 factors'.
    treedic = {}
    treedic['1'] = _get_final_tree(s1)
    treedic['2'] = _get_final_tree(s2)

    # strip phases and factors
    # collect factors in list
    for i in ('1', '2'):
        treedic[i + ' cleaned_mm_list'] = []
        treedic[i + ' factors'] = []
        treedic[i + ' phases'] = []
        # Walk each multimolecule and pull out its count, group and phase
        # subtrees (any of which may be absent).
        for el in treedic[i].subtrees(filter=lambda t: t.node == 'multimolecule'):
            count_subtree = [t for t in el.subtrees() if t.node == 'count']
            group_subtree = [t for t in el.subtrees() if t.node == 'group']
            phase_subtree = [t for t in el.subtrees() if t.node == 'phase']
            if count_subtree:
                # A count with >1 children is a fraction: num '/' den.
                # NOTE(review): int/int relies on Python 2 division semantics
                # for fractional counts like "7/2" -- under Python 3 this is
                # true division; confirm intent before porting.
                if len(count_subtree[0]) > 1:
                    treedic[i + ' factors'].append(
                        int(count_subtree[0][0][0]) /
                        int(count_subtree[0][2][0]))
                else:
                    treedic[i + ' factors'].append(int(count_subtree[0][0][0]))
            else:
                # No explicit count means an implicit factor of 1.
                treedic[i + ' factors'].append(1.0)
            if phase_subtree:
                treedic[i + ' phases'].append(phase_subtree[0][0])
            else:
                # Placeholder so phase lists stay aligned with factor lists.
                treedic[i + ' phases'].append(' ')
            # Keep only the bare molecule (no factor, no phase) for the
            # structural comparison below.
            treedic[i + ' cleaned_mm_list'].append(
                Tree('multimolecule', [Tree('molecule', group_subtree)]))

    # order of factors and phases must mirror the order of multimolecules,
    # use 'decorate, sort, undecorate' pattern
    treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases'] = zip(
        *sorted(zip(treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases'])))

    treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases'] = zip(
        *sorted(zip(treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases'])))

    # check if expressions are correct without factors
    if not _check_equality(treedic['1 cleaned_mm_list'], treedic['2 cleaned_mm_list']):
        return False

    # phases are ruled by ingore_state flag
    if not ignore_state:  # phases matters
        if treedic['1 phases'] != treedic['2 phases']:
            return False

    # Every per-molecule factor ratio must equal the ratio of the first pair;
    # a nonzero difference anywhere means the factors are not proportional.
    if any(map(lambda x, y: x / y - treedic['1 factors'][0] / treedic['2 factors'][0],
               treedic['1 factors'], treedic['2 factors'])):
        # factors are not proportional
        return False
    else:
        # return ratio
        return Fraction(treedic['1 factors'][0] / treedic['2 factors'][0])
|
||||
|
||||
|
||||
def split_on_arrow(eq):
    """
    Split a string on an arrow. Returns left, arrow, right. If there is no arrow, returns the
    entire eq in left, and '' in arrow and right.

    Return left, arrow, right.
    """
    # ARROWS is ordered so that '<->' is tried before '->' -- order matters.
    for candidate in ARROWS:
        before, found, after = eq.partition(candidate)
        if found:
            return before, found, after

    # No arrow anywhere in the string.
    return eq, '', ''
|
||||
|
||||
|
||||
def chemical_equations_equal(eq1, eq2, exact=False):
    """
    Check whether two chemical equations are the same. (equations have arrows)

    If exact is False, then they are considered equal if they differ by a
    constant factor.

    arrows matter: -> and <-> are different.

    e.g.
    chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 -> H2O2') -> True
    chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + 2H2 -> H2O2') -> False

    chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 <-> H2O2') -> False

    chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2') -> True
    chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2', exact=True) -> False

    If there's a syntax error, we return False.
    """
    left1, arrow1, right1 = split_on_arrow(eq1)
    left2, arrow2, right2 = split_on_arrow(eq2)

    # Both inputs must actually be equations, with the same arrow type.
    # TODO: may want to be able to give student helpful feedback about why things didn't work.
    if arrow1 == '' or arrow2 == '' or arrow1 != arrow2:
        return False

    try:
        # Don't want external users to have to deal with parsing exceptions;
        # any syntax error simply means "not equal".
        factor_left = divide_chemical_expression(left1, left2)
        factor_right = divide_chemical_expression(right1, right2)
    except ParseException:
        return False

    # Either side failed to match structurally.
    if not factor_left or not factor_right:
        return False

    # Both sides must be scaled by the same constant.
    if factor_left != factor_right:
        return False

    # exact=True additionally forbids any scaling at all.
    if exact and factor_left != 1:
        return False

    return True
|
||||
336
common/lib/capa/capa/chem/tests.py
Normal file
336
common/lib/capa/capa/chem/tests.py
Normal file
@@ -0,0 +1,336 @@
|
||||
import codecs
|
||||
from fractions import Fraction
|
||||
from pyparsing import ParseException
|
||||
import unittest
|
||||
|
||||
from chemcalc import (compare_chemical_expression, divide_chemical_expression,
|
||||
render_to_html, chemical_equations_equal)
|
||||
|
||||
local_debug = None
|
||||
|
||||
def log(s, output_type=None):
    """Debug logger: echo *s* to stdout when module-global local_debug is
    truthy; if output_type is 'html', also append it to the html dump file.

    NOTE(review): 'f' is only bound when this module runs as a script
    (opened in the __main__ block) -- calling log(..., 'html') from an
    import context would raise NameError. Confirm that is acceptable.
    """
    if local_debug:
        # Parenthesized print is valid under both Python 2 and Python 3;
        # the old 'print s' statement form was Python-2-only.
        print(s)
        if output_type == 'html':
            f.write(s + '\n<br>\n')
|
||||
|
||||
|
||||
class Test_Compare_Equations(unittest.TestCase):
    """Tests for chemical_equations_equal (full equations with arrows)."""

    def test_simple_equation(self):
        # Reordering reagents on one side is still equal.
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 'O2 + H2 -> H2O2'))
        # left sides don't match
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + 2H2 -> H2O2'))
        # right sides don't match
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + H2 -> H2O'))

        # factors don't match
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + H2 -> 2H2O2'))

    def test_different_factor(self):
        # Scaling the whole equation by a constant is allowed by default...
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 '2O2 + 2H2 -> 2H2O2'))

        # ...but scaling only part of one side is not.
        self.assertFalse(chemical_equations_equal('2H2 + O2 -> H2O2',
                                                  '2O2 + 2H2 -> 2H2O2'))

    def test_different_arrows(self):
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 '2O2 + 2H2 -> 2H2O2'))

        # -> and <-> are distinct arrow types and never compare equal.
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + H2 <-> 2H2O2'))

    def test_exact_match(self):
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 '2O2 + 2H2 -> 2H2O2'))

        # exact=True forbids a constant-factor difference.
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  '2O2 + 2H2 -> 2H2O2', exact=True))

        # order still doesn't matter
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 'O2 + H2 -> H2O2', exact=True))

    def test_syntax_errors(self):
        # Any parse failure on either input yields False, never an exception.
        self.assertFalse(chemical_equations_equal('H2 + O2 a-> H2O2',
                                                  '2O2 + 2H2 -> 2H2O2'))

        self.assertFalse(chemical_equations_equal('H2O( -> H2O2',
                                                  'H2O -> H2O2'))

        self.assertFalse(chemical_equations_equal('H2 + O2 ==> H2O2',  # strange arrow
                                                  '2O2 + 2H2 -> 2H2O2'))
|
||||
|
||||
|
||||
class Test_Compare_Expressions(unittest.TestCase):
    """Tests for compare_chemical_expression (arrow-free expressions)."""

    def test_compare_incorrect_order_of_atoms_in_molecule(self):
        self.assertFalse(compare_chemical_expression("H2O + CO2", "O2C + OH2"))

    def test_compare_same_order_no_phases_no_factors_no_ions(self):
        self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2+H2O"))

    def test_compare_different_order_no_phases_no_factors_no_ions(self):
        self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2 + H2O"))

    def test_compare_different_order_three_multimolecule(self):
        self.assertTrue(compare_chemical_expression("H2O + Fe(OH)3 + CO2", "CO2 + H2O + Fe(OH)3"))

    def test_compare_same_factors(self):
        self.assertTrue(compare_chemical_expression("3H2O + 2CO2", "2CO2 + 3H2O "))

    def test_compare_different_factors(self):
        self.assertFalse(compare_chemical_expression("2H2O + 3CO2", "2CO2 + 3H2O "))

    def test_compare_correct_ions(self):
        self.assertTrue(compare_chemical_expression("H^+ + OH^-", " OH^- + H^+ "))

    def test_compare_wrong_ions(self):
        self.assertFalse(compare_chemical_expression("H^+ + OH^-", " OH^- + H^- "))

    def test_compare_parent_groups_ions(self):
        self.assertTrue(compare_chemical_expression("Fe(OH)^2- + (OH)^-", " (OH)^- + Fe(OH)^2- "))

    def test_compare_correct_factors_ions_and_one(self):
        self.assertTrue(compare_chemical_expression("3H^+ + 2OH^-", " 2OH^- + 3H^+ "))

    def test_compare_wrong_factors_ions(self):
        self.assertFalse(compare_chemical_expression("2H^+ + 3OH^-", " 2OH^- + 3H^+ "))

    def test_compare_float_factors(self):
        self.assertTrue(compare_chemical_expression("7/2H^+ + 3/5OH^-", " 3/5OH^- + 7/2H^+ "))

    # Phases tests
    def test_compare_phases_ignored(self):
        self.assertTrue(compare_chemical_expression(
            "H2O(s) + CO2", "H2O+CO2", ignore_state=True))

    def test_compare_different_phases_not_ignored_explicitly(self):
        # BUG FIX: this method previously shared its name with
        # test_compare_phases_not_ignored_explicitly below, so Python
        # silently discarded this definition and the case never ran.
        self.assertFalse(compare_chemical_expression(
            "H2O(s) + CO2", "H2O+CO2", ignore_state=False))

    def test_compare_phases_not_ignored(self):  # same as previous
        self.assertFalse(compare_chemical_expression(
            "H2O(s) + CO2", "H2O+CO2"))

    def test_compare_phases_not_ignored_explicitly(self):
        self.assertTrue(compare_chemical_expression(
            "H2O(s) + CO2", "H2O(s)+CO2", ignore_state=False))

    # all in one cases
    def test_complex_additivity(self):
        self.assertTrue(compare_chemical_expression(
            "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O",
            "7/2HCl + 2H20 + H2O + 5(H1H212)^70010-"))

    def test_complex_additivity_wrong(self):
        self.assertFalse(compare_chemical_expression(
            "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O",
            "2H20 + 7/2HCl + H2O + 5(H1H212)^70011-"))

    def test_complex_all_grammar(self):
        self.assertTrue(compare_chemical_expression(
            "5[Ni(NH3)4]^2+ + 5/2SO4^2-",
            "5/2SO4^2- + 5[Ni(NH3)4]^2+"))

    # special cases

    def test_compare_one_superscript_explicitly_set(self):
        self.assertTrue(compare_chemical_expression("H^+ + OH^1-", " OH^- + H^+ "))

    def test_compare_equal_factors_differently_set(self):
        self.assertTrue(compare_chemical_expression("6/2H^+ + OH^-", " OH^- + 3H^+ "))

    def test_compare_one_subscript_explicitly_set(self):
        self.assertFalse(compare_chemical_expression("H2 + CO2", "H2 + C102"))
|
||||
|
||||
|
||||
class Test_Divide_Expressions(unittest.TestCase):
    ''' as compare_ use divide_,
    tests here must consider different
    division (not equality) cases '''

    def test_divide_by_zero(self):
        self.assertFalse(divide_chemical_expression(
            "0H2O", "H2O"))

    def test_divide_wrong_factors(self):
        self.assertFalse(divide_chemical_expression(
            "5(H1H212)^70010- + 10H2O", "5H2O + 10(H1H212)^70010-"))

    def test_divide_right(self):
        self.assertEqual(divide_chemical_expression(
            "5(H1H212)^70010- + 10H2O", "10H2O + 5(H1H212)^70010-"), 1)

    def test_divide_wrong_reagents(self):
        self.assertFalse(divide_chemical_expression(
            "H2O + CO2", "CO2"))

    def test_divide_right_simple(self):
        self.assertEqual(divide_chemical_expression(
            "H2O + CO2", "H2O+CO2"), 1)

    def test_divide_right_phases(self):
        self.assertEqual(divide_chemical_expression(
            "H2O(s) + CO2", "2H2O(s)+2CO2"), Fraction(1, 2))

    def test_divide_right_phases_other_order(self):
        self.assertEqual(divide_chemical_expression(
            "2H2O(s) + 2CO2", "H2O(s)+CO2"), 2)

    def test_divide_wrong_phases(self):
        self.assertFalse(divide_chemical_expression(
            "H2O(s) + CO2", "2H2O+2CO2(s)"))

    def test_divide_wrong_phases_but_phases_ignored(self):
        self.assertEqual(divide_chemical_expression(
            "H2O(s) + CO2", "2H2O+2CO2(s)", ignore_state=True), Fraction(1, 2))

    def test_divide_order(self):
        self.assertEqual(divide_chemical_expression(
            "2CO2 + H2O", "2H2O+4CO2"), Fraction(1, 2))

    def test_divide_fract_to_int(self):
        self.assertEqual(divide_chemical_expression(
            "3/2CO2 + H2O", "2H2O+3CO2"), Fraction(1, 2))

    def test_divide_fract_to_frac(self):
        self.assertEqual(divide_chemical_expression(
            "3/4CO2 + H2O", "2H2O+9/6CO2"), Fraction(1, 2))

    def test_divide_fract_to_frac_wrong(self):
        # BUG FIX: was assertFalse(..., 2) -- the stray 2 was silently taken
        # as the assertion *message* argument, never compared. Also fixed the
        # 'wrog' typo in the method name.
        self.assertFalse(divide_chemical_expression(
            "6/2CO2 + H2O", "2H2O+9/6CO2"))
|
||||
|
||||
|
||||
class Test_Render_Equations(unittest.TestCase):
    """Rendering tests: each case renders one source string to HTML and
    compares against the expected markup (also logged for visual review)."""

    def _assert_renders(self, s, correct):
        # Shared body of every test: render, log both strings, compare.
        out = render_to_html(s)
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render1(self):
        self._assert_renders(
            "H2O + CO2",
            u'<span class="math">H<sub>2</sub>O+CO<sub>2</sub></span>')

    def test_render_uncorrect_reaction(self):
        self._assert_renders(
            "O2C + OH2",
            u'<span class="math">O<sub>2</sub>C+OH<sub>2</sub></span>')

    def test_render2(self):
        self._assert_renders(
            "CO2 + H2O + Fe(OH)3",
            u'<span class="math">CO<sub>2</sub>+H<sub>2</sub>O+Fe(OH)<sub>3</sub></span>')

    def test_render3(self):
        self._assert_renders(
            "3H2O + 2CO2",
            u'<span class="math">3H<sub>2</sub>O+2CO<sub>2</sub></span>')

    def test_render4(self):
        self._assert_renders(
            "H^+ + OH^-",
            u'<span class="math">H<sup>+</sup>+OH<sup>-</sup></span>')

    def test_render5(self):
        self._assert_renders(
            "Fe(OH)^2- + (OH)^-",
            u'<span class="math">Fe(OH)<sup>2-</sup>+(OH)<sup>-</sup></span>')

    def test_render6(self):
        self._assert_renders(
            "7/2H^+ + 3/5OH^-",
            u'<span class="math"><sup>7</sup>⁄<sub>2</sub>H<sup>+</sup>+<sup>3</sup>⁄<sub>5</sub>OH<sup>-</sup></span>')

    def test_render7(self):
        self._assert_renders(
            "5(H1H212)^70010- + 2H2O + 7/2HCl + H2O",
            u'<span class="math">5(H<sub>1</sub>H<sub>212</sub>)<sup>70010-</sup>+2H<sub>2</sub>O+<sup>7</sup>⁄<sub>2</sub>HCl+H<sub>2</sub>O</span>')

    def test_render8(self):
        self._assert_renders(
            "H2O(s) + CO2",
            u'<span class="math">H<sub>2</sub>O(s)+CO<sub>2</sub></span>')

    def test_render9(self):
        self._assert_renders(
            "5[Ni(NH3)4]^2+ + 5/2SO4^2-",
            u'<span class="math">5[Ni(NH<sub>3</sub>)<sub>4</sub>]<sup>2+</sup>+<sup>5</sup>⁄<sub>2</sub>SO<sub>4</sub><sup>2-</sup></span>')

    def test_render_error(self):
        # Invalid input is wrapped in an inline-error span inside the math span.
        self._assert_renders(
            "5.2H20",
            u'<span class="math"><span class="inline-error inline">5.2H20</span></span>')

    def test_render_simple_brackets(self):
        self._assert_renders(
            "(Ar)",
            u'<span class="math">(Ar)</span>')

    def test_render_eq1(self):
        self._assert_renders(
            "H^+ + OH^- -> H2O",
            u'<span class="math">H<sup>+</sup>+OH<sup>-</sup>\u2192H<sub>2</sub>O</span>')

    def test_render_eq2(self):
        self._assert_renders(
            "H^+ + OH^- <-> H2O",
            u'<span class="math">H<sup>+</sup>+OH<sup>-</sup>\u2194H<sub>2</sub>O</span>')

    def test_render_eq3(self):
        # Unsupported arrow: the whole string fails to parse as one expression.
        self._assert_renders(
            "H^+ + OH^- <= H2O",
            u'<span class="math"><span class="inline-error inline">H^+ + OH^- <= H2O</span></span>')
|
||||
|
||||
|
||||
|
||||
def suite():
    """Build a TestSuite covering every TestCase class in this module."""
    # BUG FIX: Test_Compare_Equations was missing from this list, so its
    # tests were defined but never ran when using suite().
    testcases = [Test_Compare_Equations, Test_Compare_Expressions,
                 Test_Divide_Expressions, Test_Render_Equations]
    loader = unittest.TestLoader()
    suites = []
    for testcase in testcases:
        suites.append(loader.loadTestsFromTestCase(testcase))
    return unittest.TestSuite(suites)
|
||||
|
||||
if __name__ == "__main__":
    # Script mode: enable verbose logging and capture every rendered
    # equation into render.html for visual inspection.
    local_debug = True
    # 'f' is the module-global html dump file used by log(..., 'html').
    with codecs.open('render.html', 'w', encoding='utf-8') as f:
        unittest.TextTestRunner(verbosity=2).run(suite())
    # open render.html to look at rendered equations
|
||||
@@ -5,23 +5,26 @@
|
||||
|
||||
|
||||
class CorrectMap(object):
|
||||
'''
|
||||
"""
|
||||
Stores map between answer_id and response evaluation result for each question
|
||||
in a capa problem. The response evaluation result for each answer_id includes
|
||||
(correctness, npoints, msg, hint, hintmode).
|
||||
|
||||
- correctness : either 'correct' or 'incorrect'
|
||||
- npoints : None, or integer specifying number of points awarded for this answer_id
|
||||
- msg : string (may have HTML) giving extra message response (displayed below textline or textbox)
|
||||
- hint : string (may have HTML) giving optional hint (displayed below textline or textbox, above msg)
|
||||
- msg : string (may have HTML) giving extra message response
|
||||
(displayed below textline or textbox)
|
||||
- hint : string (may have HTML) giving optional hint
|
||||
(displayed below textline or textbox, above msg)
|
||||
- hintmode : one of (None,'on_request','always') criteria for displaying hint
|
||||
- queuestate : Dict {key:'', time:''} where key is a secret string, and time is a string dump
|
||||
of a DateTime object in the format '%Y%m%d%H%M%S'. Is None when not queued
|
||||
|
||||
Behaves as a dict.
|
||||
'''
|
||||
"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.cmap = dict() # start with empty dict
|
||||
# start with empty dict
|
||||
self.cmap = dict()
|
||||
self.items = self.cmap.items
|
||||
self.keys = self.cmap.keys
|
||||
self.set(*args, **kwargs)
|
||||
@@ -33,7 +36,15 @@ class CorrectMap(object):
|
||||
return self.cmap.__iter__()
|
||||
|
||||
# See the documentation for 'set_dict' for the use of kwargs
|
||||
def set(self, answer_id=None, correctness=None, npoints=None, msg='', hint='', hintmode=None, queuestate=None, **kwargs):
|
||||
def set(self,
|
||||
answer_id=None,
|
||||
correctness=None,
|
||||
npoints=None,
|
||||
msg='',
|
||||
hint='',
|
||||
hintmode=None,
|
||||
queuestate=None, **kwargs):
|
||||
|
||||
if answer_id is not None:
|
||||
self.cmap[answer_id] = {'correctness': correctness,
|
||||
'npoints': npoints,
|
||||
@@ -56,12 +67,13 @@ class CorrectMap(object):
|
||||
'''
|
||||
Set internal dict of CorrectMap to provided correct_map dict
|
||||
|
||||
correct_map is saved by LMS as a plaintext JSON dump of the correctmap dict. This means that
|
||||
when the definition of CorrectMap (e.g. its properties) are altered, existing correct_map dict
|
||||
not coincide with the newest CorrectMap format as defined by self.set.
|
||||
correct_map is saved by LMS as a plaintext JSON dump of the correctmap dict. This
|
||||
means that when the definition of CorrectMap (e.g. its properties) are altered,
|
||||
an existing correct_map dict not coincide with the newest CorrectMap format as
|
||||
defined by self.set.
|
||||
|
||||
For graceful migration, feed the contents of each correct map to self.set, rather than
|
||||
making a direct copy of the given correct_map dict. This way, the common keys between
|
||||
making a direct copy of the given correct_map dict. This way, the common keys between
|
||||
the incoming correct_map dict and the new CorrectMap instance will be written, while
|
||||
mismatched keys will be gracefully ignored.
|
||||
|
||||
@@ -69,14 +81,20 @@ class CorrectMap(object):
|
||||
If correct_map is a one-level dict, then convert it to the new dict of dicts format.
|
||||
'''
|
||||
if correct_map and not (type(correct_map[correct_map.keys()[0]]) == dict):
|
||||
self.__init__() # empty current dict
|
||||
for k in correct_map: self.set(k, correct_map[k]) # create new dict entries
|
||||
# empty current dict
|
||||
self.__init__()
|
||||
|
||||
# create new dict entries
|
||||
for k in correct_map:
|
||||
self.set(k, correct_map[k])
|
||||
else:
|
||||
self.__init__()
|
||||
for k in correct_map: self.set(k, **correct_map[k])
|
||||
for k in correct_map:
|
||||
self.set(k, **correct_map[k])
|
||||
|
||||
def is_correct(self, answer_id):
|
||||
if answer_id in self.cmap: return self.cmap[answer_id]['correctness'] == 'correct'
|
||||
if answer_id in self.cmap:
|
||||
return self.cmap[answer_id]['correctness'] == 'correct'
|
||||
return None
|
||||
|
||||
def is_queued(self, answer_id):
|
||||
@@ -94,14 +112,18 @@ class CorrectMap(object):
|
||||
return npoints
|
||||
elif self.is_correct(answer_id):
|
||||
return 1
|
||||
return 0 # if not correct and no points have been assigned, return 0
|
||||
# if not correct and no points have been assigned, return 0
|
||||
return 0
|
||||
|
||||
def set_property(self, answer_id, property, value):
|
||||
if answer_id in self.cmap: self.cmap[answer_id][property] = value
|
||||
else: self.cmap[answer_id] = {property: value}
|
||||
if answer_id in self.cmap:
|
||||
self.cmap[answer_id][property] = value
|
||||
else:
|
||||
self.cmap[answer_id] = {property: value}
|
||||
|
||||
def get_property(self, answer_id, property, default=None):
|
||||
if answer_id in self.cmap: return self.cmap[answer_id].get(property, default)
|
||||
if answer_id in self.cmap:
|
||||
return self.cmap[answer_id].get(property, default)
|
||||
return default
|
||||
|
||||
def get_correctness(self, answer_id):
|
||||
|
||||
@@ -1,9 +1,15 @@
|
||||
"""
Standard resistor codes.

http://en.wikipedia.org/wiki/Electronic_color_code
"""
|
||||
E6 = [10, 15, 22, 33, 47, 68]
|
||||
|
||||
E12 = [10, 12, 15, 18, 22, 27, 33, 39, 47, 56, 68, 82]
|
||||
|
||||
E24 = [10, 12, 15, 18, 22, 27, 33, 39, 47, 56, 68, 82, 11, 13, 16, 20, 24, 30, 36, 43, 51, 62, 75, 91]
|
||||
|
||||
E48 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 105, 127, 154, 187, 226, 274, 332, 402, 487, 590, 715, 866, 110, 133, 162, 196, 237, 287, 348, 422, 511, 619, 750, 909, 115, 140, 169, 205, 249, 301, 365, 442, 536, 649, 787, 953]
|
||||
|
||||
E96 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 102, 124, 150, 182, 221, 267, 324, 392, 475, 576, 698, 845, 105, 127, 154, 187, 226, 274, 332, 402, 487, 590, 715, 866, 107, 130, 158, 191, 232, 280, 340, 412, 499, 604, 732, 887, 110, 133, 162, 196, 237, 287, 348, 422, 511, 619, 750, 909, 113, 137, 165, 200, 243, 294, 357, 432, 523, 634, 768, 931, 115, 140, 169, 205, 249, 301, 365, 442, 536, 649, 787, 953, 118, 143, 174, 210, 255, 309, 374, 453, 549, 665, 806, 976]
|
||||
|
||||
E192 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 101, 123, 149, 180, 218, 264, 320, 388, 470, 569, 690, 835, 102, 124, 150, 182, 221, 267, 324, 392, 475, 576, 698, 845, 104, 126, 152, 184, 223, 271, 328, 397, 481, 583, 706, 856, 105, 127, 154, 187, 226, 274, 332, 402, 487, 590, 715, 866, 106, 129, 156, 189, 229, 277, 336, 407, 493, 597, 723, 876, 107, 130, 158, 191, 232, 280, 340, 412, 499, 604, 732, 887, 109, 132, 160, 193, 234, 284, 344, 417, 505, 612, 741, 898, 110, 133, 162, 196, 237, 287, 348, 422, 511, 619, 750, 909, 111, 135, 164, 198, 240, 291, 352, 427, 517, 626, 759, 920, 113, 137, 165, 200, 243, 294, 357, 432, 523, 634, 768, 931, 114, 138, 167, 203, 246, 298, 361, 437, 530, 642, 777, 942, 115, 140, 169, 205, 249, 301, 365, 442, 536, 649, 787, 953, 117, 142, 172, 208, 252, 305, 370, 448, 542, 657, 796, 965, 118, 143, 174, 210, 255, 309, 374, 453, 549, 665, 806, 976, 120, 145, 176, 213, 258, 312, 379, 459, 556, 673, 816, 988]
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# File: courseware/capa/inputtypes.py
|
||||
#
|
||||
|
||||
'''
|
||||
"""
|
||||
Module containing the problem elements which render into input objects
|
||||
|
||||
- textline
|
||||
@@ -16,15 +16,16 @@ Module containing the problem elements which render into input objects
|
||||
- optioninput (for option list)
|
||||
- filesubmission (upload a file)
|
||||
|
||||
These are matched by *.html files templates/*.html which are mako templates with the actual html.
|
||||
These are matched by *.html files templates/*.html which are mako templates with the
|
||||
actual html.
|
||||
|
||||
Each input type takes the xml tree as 'element', the previous answer as 'value', and the graded status as 'status'
|
||||
Each input type takes the xml tree as 'element', the previous answer as 'value', and the
|
||||
graded status as'status'
|
||||
"""
|
||||
|
||||
'''
|
||||
|
||||
# TODO: rename "state" to "status" for all below
|
||||
# status is currently the answer for the problem ID for the input element,
|
||||
# but it will turn into a dict containing both the answer and any associated message for the problem ID for the input element.
|
||||
# TODO: rename "state" to "status" for all below. status is currently the answer for the
|
||||
# problem ID for the input element, but it will turn into a dict containing both the
|
||||
# answer and any associated message for the problem ID for the input element.
|
||||
|
||||
import logging
|
||||
import re
|
||||
@@ -36,113 +37,196 @@ import xml.sax.saxutils as saxutils
|
||||
|
||||
log = logging.getLogger('mitx.' + __name__)
|
||||
|
||||
#########################################################################
|
||||
|
||||
def get_input_xml_tags():
|
||||
''' Eventually, this will be for all registered input types '''
|
||||
return SimpleInput.get_xml_tags()
|
||||
_TAGS_TO_CLASSES = {}
|
||||
|
||||
def register_input_class(cls):
|
||||
"""
|
||||
Register cls as a supported input type. It is expected to have the same constructor as
|
||||
InputTypeBase, and to define cls.tags as a list of tags that it implements.
|
||||
|
||||
If an already-registered input type has claimed one of those tags, will raise ValueError.
|
||||
|
||||
If there are no tags in cls.tags, will also raise ValueError.
|
||||
"""
|
||||
|
||||
# Do all checks and complain before changing any state.
|
||||
if len(cls.tags) == 0:
|
||||
raise ValueError("No supported tags for class {0}".format(cls.__name__))
|
||||
|
||||
for t in cls.tags:
|
||||
if t in _TAGS_TO_CLASSES:
|
||||
other_cls = _TAGS_TO_CLASSES[t]
|
||||
if cls == other_cls:
|
||||
# registering the same class multiple times seems silly, but ok
|
||||
continue
|
||||
raise ValueError("Tag {0} already registered by class {1}. Can't register for class {2}"
|
||||
.format(t, other_cls.__name__, cls.__name__))
|
||||
|
||||
# Ok, should be good to change state now.
|
||||
for t in cls.tags:
|
||||
_TAGS_TO_CLASSES[t] = cls
|
||||
|
||||
def registered_input_tags():
|
||||
"""
|
||||
Get a list of all the xml tags that map to known input types.
|
||||
"""
|
||||
return _TAGS_TO_CLASSES.keys()
|
||||
|
||||
|
||||
class SimpleInput():# XModule
|
||||
'''
|
||||
Type for simple inputs -- plain HTML with a form element
|
||||
'''
|
||||
def get_class_for_tag(tag):
|
||||
"""
|
||||
For any tag in registered_input_tags(), return the corresponding class. Otherwise, will raise KeyError.
|
||||
"""
|
||||
return _TAGS_TO_CLASSES[tag]
|
||||
|
||||
xml_tags = {} # # Maps tags to functions
|
||||
|
||||
def __init__(self, system, xml, item_id=None, track_url=None, state=None, use='capa_input'):
|
||||
'''
|
||||
Instantiate a SimpleInput class. Arguments:
|
||||
class InputTypeBase(object):
|
||||
"""
|
||||
Abstract base class for input types.
|
||||
"""
|
||||
|
||||
- system : ModuleSystem instance which provides OS, rendering, and user context
|
||||
template = None
|
||||
|
||||
def __init__(self, system, xml, state):
|
||||
"""
|
||||
Instantiate an InputType class. Arguments:
|
||||
|
||||
- system : ModuleSystem instance which provides OS, rendering, and user context. Specifically, must
|
||||
have a render_template function.
|
||||
- xml : Element tree of this Input element
|
||||
- item_id : id for this input element (assigned by capa_problem.LoncapProblem) - string
|
||||
- track_url : URL used for tracking - string
|
||||
- state : a dictionary with optional keys:
|
||||
* Value
|
||||
* ID
|
||||
* Status (answered, unanswered, unsubmitted)
|
||||
* Feedback (dictionary containing keys for hints, errors, or other
|
||||
feedback from previous attempt)
|
||||
- use :
|
||||
'''
|
||||
* 'value'
|
||||
* 'id'
|
||||
* 'status' (answered, unanswered, unsubmitted)
|
||||
* 'feedback' (dictionary containing keys for hints, errors, or other
|
||||
feedback from previous attempt. Specifically 'message', 'hint', 'hintmode'. If 'hintmode'
|
||||
is 'always', the hint is always displayed.)
|
||||
"""
|
||||
|
||||
self.xml = xml
|
||||
self.tag = xml.tag
|
||||
self.system = system
|
||||
if not state: state = {}
|
||||
|
||||
## ID should only come from one place.
|
||||
## If it comes from multiple, we use state first, XML second, and parameter
|
||||
## third. Since we don't make this guarantee, we can swap this around in
|
||||
## the future if there's a more logical order.
|
||||
if item_id: self.id = item_id
|
||||
if xml.get('id'): self.id = xml.get('id')
|
||||
if 'id' in state: self.id = state['id']
|
||||
## NOTE: ID should only come from one place. If it comes from multiple,
|
||||
## we use state first, XML second (in case the xml changed, but we have
|
||||
## existing state with an old id). Since we don't make this guarantee,
|
||||
## we can swap this around in the future if there's a more logical
|
||||
## order.
|
||||
|
||||
self.value = ''
|
||||
if 'value' in state:
|
||||
self.value = state['value']
|
||||
self.id = state.get('id', xml.get('id'))
|
||||
if self.id is None:
|
||||
raise ValueError("input id state is None. xml is {0}".format(etree.tostring(xml)))
|
||||
|
||||
self.msg = ''
|
||||
feedback = state.get('feedback')
|
||||
if feedback is not None:
|
||||
self.msg = feedback.get('message', '')
|
||||
self.hint = feedback.get('hint', '')
|
||||
self.hintmode = feedback.get('hintmode', None)
|
||||
self.value = state.get('value', '')
|
||||
|
||||
# put hint above msg if to be displayed
|
||||
if self.hintmode == 'always':
|
||||
self.msg = self.hint + ('<br/.>' if self.msg else '') + self.msg
|
||||
feedback = state.get('feedback', {})
|
||||
self.msg = feedback.get('message', '')
|
||||
self.hint = feedback.get('hint', '')
|
||||
self.hintmode = feedback.get('hintmode', None)
|
||||
|
||||
self.status = 'unanswered'
|
||||
if 'status' in state:
|
||||
self.status = state['status']
|
||||
# put hint above msg if it should be displayed
|
||||
if self.hintmode == 'always':
|
||||
self.msg = self.hint + ('<br/>' if self.msg else '') + self.msg
|
||||
|
||||
@classmethod
|
||||
def get_xml_tags(c):
|
||||
return c.xml_tags.keys()
|
||||
self.status = state.get('status', 'unanswered')
|
||||
|
||||
@classmethod
|
||||
def get_uses(c):
|
||||
return ['capa_input', 'capa_transform']
|
||||
def _get_render_context(self):
|
||||
"""
|
||||
Abstract method. Subclasses should implement to return the dictionary
|
||||
of keys needed to render their template.
|
||||
|
||||
def get_html(self):
|
||||
return self.xml_tags[self.tag](self.xml, self.value, self.status, self.system.render_template, self.msg)
|
||||
|
||||
|
||||
def register_render_function(fn, names=None, cls=SimpleInput):
|
||||
if names is None:
|
||||
SimpleInput.xml_tags[fn.__name__] = fn
|
||||
else:
|
||||
(Separate from get_html to faciliate testing of logic separately from the rendering)
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def wrapped():
|
||||
return fn
|
||||
return wrapped
|
||||
def get_html(self):
|
||||
"""
|
||||
Return the html for this input, as an etree element.
|
||||
"""
|
||||
if self.template is None:
|
||||
raise NotImplementedError("no rendering template specified for class {0}".format(self.__class__))
|
||||
|
||||
html = self.system.render_template(self.template, self._get_render_context())
|
||||
return etree.XML(html)
|
||||
|
||||
|
||||
## TODO: Remove once refactor is complete
|
||||
def make_class_for_render_function(fn):
|
||||
"""
|
||||
Take an old-style render function, return a new-style input class.
|
||||
"""
|
||||
|
||||
class Impl(InputTypeBase):
|
||||
"""
|
||||
Inherit all the constructor logic from InputTypeBase...
|
||||
"""
|
||||
tags = [fn.__name__]
|
||||
def get_html(self):
|
||||
"""...delegate to the render function to do the work"""
|
||||
return fn(self.xml, self.value, self.status, self.system.render_template, self.msg)
|
||||
|
||||
# don't want all the classes to be called Impl (confuses register_input_class).
|
||||
Impl.__name__ = fn.__name__.capitalize()
|
||||
return Impl
|
||||
|
||||
|
||||
def _reg(fn):
|
||||
"""
|
||||
Register an old-style inputtype render function as a new-style subclass of InputTypeBase.
|
||||
This will go away once converting all input types to the new format is complete. (TODO)
|
||||
"""
|
||||
register_input_class(make_class_for_render_function(fn))
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
@register_render_function
|
||||
class OptionInput(InputTypeBase):
|
||||
"""
|
||||
Input type for selecting and Select option input type.
|
||||
|
||||
Example:
|
||||
|
||||
<optioninput options="('Up','Down')" correct="Up"/><text>The location of the sky</text>
|
||||
"""
|
||||
|
||||
template = "optioninput.html"
|
||||
tags = ['optioninput']
|
||||
|
||||
def _get_render_context(self):
|
||||
return _optioninput(self.xml, self.value, self.status, self.system.render_template, self.msg)
|
||||
|
||||
|
||||
def optioninput(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
context = _optioninput(element, value, status, render_template, msg)
|
||||
html = render_template("optioninput.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
def _optioninput(element, value, status, render_template, msg=''):
|
||||
"""
|
||||
Select option input type.
|
||||
|
||||
Example:
|
||||
|
||||
<optioninput options="('Up','Down')" correct="Up"/><text>The location of the sky</text>
|
||||
'''
|
||||
"""
|
||||
eid = element.get('id')
|
||||
options = element.get('options')
|
||||
if not options:
|
||||
raise Exception("[courseware.capa.inputtypes.optioninput] Missing options specification in " + etree.tostring(element))
|
||||
raise Exception(
|
||||
"[courseware.capa.inputtypes.optioninput] Missing options specification in "
|
||||
+ etree.tostring(element))
|
||||
|
||||
# parse the set of possible options
|
||||
oset = shlex.shlex(options[1:-1])
|
||||
oset.quotes = "'"
|
||||
oset.whitespace = ","
|
||||
oset = [x[1:-1] for x in list(oset)]
|
||||
|
||||
# osetdict = dict([('option_%s_%s' % (eid,x),oset[x]) for x in range(len(oset)) ]) # make dict with IDs
|
||||
osetdict = [(oset[x], oset[x]) for x in range(len(oset))] # make ordered list with (key,value) same
|
||||
# make ordered list with (key, value) same
|
||||
osetdict = [(oset[x], oset[x]) for x in range(len(oset))]
|
||||
# TODO: allow ordering to be randomized
|
||||
|
||||
context = {'id': eid,
|
||||
@@ -152,43 +236,53 @@ def optioninput(element, value, status, render_template, msg=''):
|
||||
'options': osetdict,
|
||||
'inline': element.get('inline',''),
|
||||
}
|
||||
return context
|
||||
|
||||
html = render_template("optioninput.html", context)
|
||||
return etree.XML(html)
|
||||
register_input_class(OptionInput)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
# TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of
|
||||
# desired semantics.
|
||||
@register_render_function
|
||||
# @register_render_function
|
||||
def choicegroup(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
Radio button inputs: multiple choice or true/false
|
||||
|
||||
TODO: allow order of choices to be randomized, following lon-capa spec. Use "location" attribute,
|
||||
ie random, top, bottom.
|
||||
TODO: allow order of choices to be randomized, following lon-capa spec. Use
|
||||
"location" attribute, ie random, top, bottom.
|
||||
'''
|
||||
eid = element.get('id')
|
||||
if element.get('type') == "MultipleChoice":
|
||||
type = "radio"
|
||||
element_type = "radio"
|
||||
elif element.get('type') == "TrueFalse":
|
||||
type = "checkbox"
|
||||
element_type = "checkbox"
|
||||
else:
|
||||
type = "radio"
|
||||
element_type = "radio"
|
||||
choices = []
|
||||
for choice in element:
|
||||
if not choice.tag == 'choice':
|
||||
raise Exception("[courseware.capa.inputtypes.choicegroup] Error only <choice> tags should be immediate children of a <choicegroup>, found %s instead" % choice.tag)
|
||||
raise Exception("[courseware.capa.inputtypes.choicegroup] "
|
||||
"Error: only <choice> tags should be immediate children "
|
||||
"of a <choicegroup>, found %s instead" % choice.tag)
|
||||
ctext = ""
|
||||
ctext += ''.join([etree.tostring(x) for x in choice]) # TODO: what if choice[0] has math tags in it?
|
||||
# TODO: what if choice[0] has math tags in it?
|
||||
ctext += ''.join([etree.tostring(x) for x in choice])
|
||||
if choice.text is not None:
|
||||
ctext += choice.text # TODO: fix order?
|
||||
# TODO: fix order?
|
||||
ctext += choice.text
|
||||
choices.append((choice.get("name"), ctext))
|
||||
context = {'id': eid, 'value': value, 'state': status, 'input_type': type, 'choices': choices, 'name_array_suffix': ''}
|
||||
context = {'id': eid,
|
||||
'value': value,
|
||||
'state': status,
|
||||
'input_type': element_type,
|
||||
'choices': choices,
|
||||
'name_array_suffix': ''}
|
||||
html = render_template("choicegroup.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
_reg(choicegroup)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
def extract_choices(element):
|
||||
@@ -196,8 +290,8 @@ def extract_choices(element):
|
||||
Extracts choices for a few input types, such as radiogroup and
|
||||
checkboxgroup.
|
||||
|
||||
TODO: allow order of choices to be randomized, following lon-capa spec. Use "location" attribute,
|
||||
ie random, top, bottom.
|
||||
TODO: allow order of choices to be randomized, following lon-capa spec. Use
|
||||
"location" attribute, ie random, top, bottom.
|
||||
'''
|
||||
|
||||
choices = []
|
||||
@@ -216,7 +310,6 @@ def extract_choices(element):
|
||||
|
||||
# TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of
|
||||
# desired semantics.
|
||||
@register_render_function
|
||||
def radiogroup(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
Radio button inputs: (multiple choice)
|
||||
@@ -226,15 +319,21 @@ def radiogroup(element, value, status, render_template, msg=''):
|
||||
|
||||
choices = extract_choices(element)
|
||||
|
||||
context = {'id': eid, 'value': value, 'state': status, 'input_type': 'radio', 'choices': choices, 'name_array_suffix': '[]'}
|
||||
context = {'id': eid,
|
||||
'value': value,
|
||||
'state': status,
|
||||
'input_type': 'radio',
|
||||
'choices': choices,
|
||||
'name_array_suffix': '[]'}
|
||||
|
||||
html = render_template("choicegroup.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
|
||||
_reg(radiogroup)
|
||||
|
||||
# TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of
|
||||
# desired semantics.
|
||||
@register_render_function
|
||||
def checkboxgroup(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
Checkbox inputs: (select one or more choices)
|
||||
@@ -244,12 +343,18 @@ def checkboxgroup(element, value, status, render_template, msg=''):
|
||||
|
||||
choices = extract_choices(element)
|
||||
|
||||
context = {'id': eid, 'value': value, 'state': status, 'input_type': 'checkbox', 'choices': choices, 'name_array_suffix': '[]'}
|
||||
context = {'id': eid,
|
||||
'value': value,
|
||||
'state': status,
|
||||
'input_type': 'checkbox',
|
||||
'choices': choices,
|
||||
'name_array_suffix': '[]'}
|
||||
|
||||
html = render_template("choicegroup.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
@register_render_function
|
||||
_reg(checkboxgroup)
|
||||
|
||||
def javascriptinput(element, value, status, render_template, msg='null'):
|
||||
'''
|
||||
Hidden field for javascript to communicate via; also loads the required
|
||||
@@ -260,60 +365,80 @@ def javascriptinput(element, value, status, render_template, msg='null'):
|
||||
problem_state = element.get('problem_state')
|
||||
display_class = element.get('display_class')
|
||||
display_file = element.get('display_file')
|
||||
|
||||
|
||||
# Need to provide a value that JSON can parse if there is no
|
||||
# student-supplied value yet.
|
||||
if value == "":
|
||||
value = 'null'
|
||||
|
||||
|
||||
escapedict = {'"': '"'}
|
||||
value = saxutils.escape(value, escapedict)
|
||||
msg = saxutils.escape(msg, escapedict)
|
||||
context = {'id': eid, 'params': params, 'display_file': display_file,
|
||||
'display_class': display_class, 'problem_state': problem_state,
|
||||
'value': value, 'evaluation': msg,
|
||||
context = {'id': eid,
|
||||
'params': params,
|
||||
'display_file': display_file,
|
||||
'display_class': display_class,
|
||||
'problem_state': problem_state,
|
||||
'value': value,
|
||||
'evaluation': msg,
|
||||
}
|
||||
html = render_template("javascriptinput.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
_reg(javascriptinput)
|
||||
|
||||
|
||||
@register_render_function
|
||||
def textline(element, value, status, render_template, msg=""):
|
||||
'''
|
||||
Simple text line input, with optional size specification.
|
||||
'''
|
||||
if element.get('math') or element.get('dojs'): # 'dojs' flag is temporary, for backwards compatibility with 8.02x
|
||||
return SimpleInput.xml_tags['textline_dynamath'](element, value, status, render_template, msg)
|
||||
# TODO: 'dojs' flag is temporary, for backwards compatibility with 8.02x
|
||||
if element.get('math') or element.get('dojs'):
|
||||
return textline_dynamath(element, value, status, render_template, msg)
|
||||
eid = element.get('id')
|
||||
if eid is None:
|
||||
msg = 'textline has no id: it probably appears outside of a known response type'
|
||||
msg += "\nSee problem XML source line %s" % getattr(element, 'sourceline', '<unavailable>')
|
||||
raise Exception(msg)
|
||||
|
||||
count = int(eid.split('_')[-2]) - 1 # HACK
|
||||
size = element.get('size')
|
||||
hidden = element.get('hidden', '') # if specified, then textline is hidden and id is stored in div of name given by hidden
|
||||
# if specified, then textline is hidden and id is stored in div of name given by hidden
|
||||
hidden = element.get('hidden', '')
|
||||
|
||||
# Escape answers with quotes, so they don't crash the system!
|
||||
escapedict = {'"': '"'}
|
||||
value = saxutils.escape(value, escapedict) # otherwise, answers with quotes in them crashes the system!
|
||||
context = {'id': eid, 'value': value, 'state': status, 'count': count, 'size': size, 'msg': msg, 'hidden': hidden,
|
||||
value = saxutils.escape(value, escapedict)
|
||||
|
||||
context = {'id': eid,
|
||||
'value': value,
|
||||
'state': status,
|
||||
'count': count,
|
||||
'size': size,
|
||||
'msg': msg,
|
||||
'hidden': hidden,
|
||||
'inline': element.get('inline',''),
|
||||
}
|
||||
|
||||
html = render_template("textinput.html", context)
|
||||
try:
|
||||
xhtml = etree.XML(html)
|
||||
except Exception as err:
|
||||
if True: # TODO needs to be self.system.DEBUG - but can't access system
|
||||
# TODO: needs to be self.system.DEBUG - but can't access system
|
||||
if True:
|
||||
log.debug('[inputtypes.textline] failed to parse XML for:\n%s' % html)
|
||||
raise
|
||||
return xhtml
|
||||
|
||||
_reg(textline)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
@register_render_function
|
||||
def textline_dynamath(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
Text line input with dynamic math display (equation rendered on client in real time during input).
|
||||
Text line input with dynamic math display (equation rendered on client in real time
|
||||
during input).
|
||||
'''
|
||||
# TODO: Make a wrapper for <formulainput>
|
||||
# TODO: Make an AJAX loop to confirm equation is okay in real-time as user types
|
||||
@@ -325,7 +450,8 @@ def textline_dynamath(element, value, status, render_template, msg=''):
|
||||
eid = element.get('id')
|
||||
count = int(eid.split('_')[-2]) - 1 # HACK
|
||||
size = element.get('size')
|
||||
hidden = element.get('hidden', '') # if specified, then textline is hidden and id is stored in div of name given by hidden
|
||||
# if specified, then textline is hidden and id is stored in div of name given by hidden
|
||||
hidden = element.get('hidden', '')
|
||||
|
||||
# Preprocessor to insert between raw input and Mathjax
|
||||
preprocessor = {'class_name': element.get('preprocessorClassName',''),
|
||||
@@ -337,16 +463,19 @@ def textline_dynamath(element, value, status, render_template, msg=''):
|
||||
escapedict = {'"': '"'}
|
||||
value = saxutils.escape(value, escapedict)
|
||||
|
||||
context = {'id': eid, 'value': value, 'state': status, 'count': count, 'size': size,
|
||||
'msg': msg, 'hidden': hidden,
|
||||
'preprocessor': preprocessor,
|
||||
}
|
||||
context = {'id': eid,
|
||||
'value': value,
|
||||
'state': status,
|
||||
'count': count,
|
||||
'size': size,
|
||||
'msg': msg,
|
||||
'hidden': hidden,
|
||||
'preprocessor': preprocessor,}
|
||||
html = render_template("textinput_dynamath.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
@register_render_function
|
||||
def filesubmission(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
Upload a single file (e.g. for programming assignments)
|
||||
@@ -360,22 +489,27 @@ def filesubmission(element, value, status, render_template, msg=''):
|
||||
|
||||
# Check if problem has been queued
|
||||
queue_len = 0
|
||||
if status == 'incomplete': # Flag indicating that the problem has been queued, 'msg' is length of queue
|
||||
# Flag indicating that the problem has been queued, 'msg' is length of queue
|
||||
if status == 'incomplete':
|
||||
status = 'queued'
|
||||
queue_len = msg
|
||||
msg = 'Submitted to grader.'
|
||||
|
||||
context = { 'id': eid, 'state': status, 'msg': msg, 'value': value,
|
||||
'queue_len': queue_len, 'allowed_files': allowed_files,
|
||||
'required_files': required_files
|
||||
}
|
||||
context = { 'id': eid,
|
||||
'state': status,
|
||||
'msg': msg,
|
||||
'value': value,
|
||||
'queue_len': queue_len,
|
||||
'allowed_files': allowed_files,
|
||||
'required_files': required_files,}
|
||||
html = render_template("filesubmission.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
_reg(filesubmission)
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
## TODO: Make a wrapper for <codeinput>
|
||||
@register_render_function
|
||||
def textbox(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
The textbox is used for code input. The message is the return HTML string from
|
||||
@@ -387,13 +521,17 @@ def textbox(element, value, status, render_template, msg=''):
|
||||
size = element.get('size')
|
||||
rows = element.get('rows') or '30'
|
||||
cols = element.get('cols') or '80'
|
||||
hidden = element.get('hidden', '') # if specified, then textline is hidden and id is stored in div of name given by hidden
|
||||
# if specified, then textline is hidden and id is stored in div of name given by hidden
|
||||
hidden = element.get('hidden', '')
|
||||
|
||||
if not value: value = element.text # if no student input yet, then use the default input given by the problem
|
||||
# if no student input yet, then use the default input given by the problem
|
||||
if not value:
|
||||
value = element.text
|
||||
|
||||
# Check if problem has been queued
|
||||
queue_len = 0
|
||||
if status == 'incomplete': # Flag indicating that the problem has been queued, 'msg' is length of queue
|
||||
# Flag indicating that the problem has been queued, 'msg' is length of queue
|
||||
if status == 'incomplete':
|
||||
status = 'queued'
|
||||
queue_len = msg
|
||||
msg = 'Submitted to grader.'
|
||||
@@ -404,10 +542,18 @@ def textbox(element, value, status, render_template, msg=''):
|
||||
tabsize = element.get('tabsize','4')
|
||||
tabsize = int(tabsize)
|
||||
|
||||
context = {'id': eid, 'value': value, 'state': status, 'count': count, 'size': size, 'msg': msg,
|
||||
'mode': mode, 'linenumbers': linenumbers,
|
||||
'rows': rows, 'cols': cols,
|
||||
'hidden': hidden, 'tabsize': tabsize,
|
||||
context = {'id': eid,
|
||||
'value': value,
|
||||
'state': status,
|
||||
'count': count,
|
||||
'size': size,
|
||||
'msg': msg,
|
||||
'mode': mode,
|
||||
'linenumbers': linenumbers,
|
||||
'rows': rows,
|
||||
'cols': cols,
|
||||
'hidden': hidden,
|
||||
'tabsize': tabsize,
|
||||
'queue_len': queue_len,
|
||||
}
|
||||
html = render_template("textbox.html", context)
|
||||
@@ -422,8 +568,9 @@ def textbox(element, value, status, render_template, msg=''):
|
||||
return xhtml
|
||||
|
||||
|
||||
_reg(textbox)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
@register_render_function
|
||||
def schematic(element, value, status, render_template, msg=''):
|
||||
eid = element.get('id')
|
||||
height = element.get('height')
|
||||
@@ -446,10 +593,10 @@ def schematic(element, value, status, render_template, msg=''):
|
||||
html = render_template("schematicinput.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
_reg(schematic)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
### TODO: Move out of inputtypes
|
||||
@register_render_function
|
||||
def math(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
This is not really an input type. It is a convention from Lon-CAPA, used for
|
||||
@@ -475,7 +622,8 @@ def math(element, value, status, render_template, msg=''):
|
||||
# mathstr = mathstr.replace('\\displaystyle','')
|
||||
#else:
|
||||
# isinline = True
|
||||
# html = render_template("mathstring.html",{'mathstr':mathstr,'isinline':isinline,'tail':element.tail})
|
||||
# html = render_template("mathstring.html", {'mathstr':mathstr,
|
||||
# 'isinline':isinline,'tail':element.tail})
|
||||
|
||||
html = '<html><html>%s</html><html>%s</html></html>' % (mathstr, saxutils.escape(element.tail))
|
||||
try:
|
||||
@@ -483,25 +631,27 @@ def math(element, value, status, render_template, msg=''):
|
||||
except Exception as err:
|
||||
if False: # TODO needs to be self.system.DEBUG - but can't access system
|
||||
msg = '<html><div class="inline-error"><p>Error %s</p>' % str(err).replace('<', '<')
|
||||
msg += '<p>Failed to construct math expression from <pre>%s</pre></p>' % html.replace('<', '<')
|
||||
msg += ('<p>Failed to construct math expression from <pre>%s</pre></p>' %
|
||||
html.replace('<', '<'))
|
||||
msg += "</div></html>"
|
||||
log.error(msg)
|
||||
return etree.XML(msg)
|
||||
else:
|
||||
raise
|
||||
# xhtml.tail = element.tail # don't forget to include the tail!
|
||||
# xhtml.tail = element.tail # don't forget to include the tail!
|
||||
return xhtml
|
||||
|
||||
_reg(math)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
@register_render_function
|
||||
def solution(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
This is not really an input type. It is just a <span>...</span> which is given an ID,
|
||||
that is used for displaying an extended answer (a problem "solution") after "show answers"
|
||||
is pressed. Note that the solution content is NOT sent with the HTML. It is obtained
|
||||
by a JSON call.
|
||||
by an ajax call.
|
||||
'''
|
||||
eid = element.get('id')
|
||||
size = element.get('size')
|
||||
@@ -514,17 +664,20 @@ def solution(element, value, status, render_template, msg=''):
|
||||
html = render_template("solutionspan.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
_reg(solution)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
@register_render_function
|
||||
def imageinput(element, value, status, render_template, msg=''):
|
||||
'''
|
||||
Clickable image as an input field. Element should specify the image source, height, and width, eg
|
||||
<imageinput src="/static/Physics801/Figures/Skier-conservation of energy.jpg" width="388" height="560" />
|
||||
Clickable image as an input field. Element should specify the image source, height,
|
||||
and width, e.g.
|
||||
|
||||
TODO: showanswer for imageimput does not work yet - need javascript to put rectangle over acceptable area of image.
|
||||
<imageinput src="/static/Figures/Skier-conservation-of-energy.jpg" width="388" height="560" />
|
||||
|
||||
TODO: showanswer for imageimput does not work yet - need javascript to put rectangle
|
||||
over acceptable area of image.
|
||||
'''
|
||||
eid = element.get('id')
|
||||
src = element.get('src')
|
||||
@@ -551,3 +704,80 @@ def imageinput(element, value, status, render_template, msg=''):
|
||||
}
|
||||
html = render_template("imageinput.html", context)
|
||||
return etree.XML(html)
|
||||
|
||||
_reg(imageinput)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
def crystallography(element, value, status, render_template, msg=''):
|
||||
eid = element.get('id')
|
||||
if eid is None:
|
||||
msg = 'cryst has no id: it probably appears outside of a known response type'
|
||||
msg += "\nSee problem XML source line %s" % getattr(element, 'sourceline', '<unavailable>')
|
||||
raise Exception(msg)
|
||||
height = element.get('height')
|
||||
width = element.get('width')
|
||||
display_file = element.get('display_file')
|
||||
|
||||
count = int(eid.split('_')[-2]) - 1 # HACK
|
||||
size = element.get('size')
|
||||
# if specified, then textline is hidden and id is stored in div of name given by hidden
|
||||
hidden = element.get('hidden', '')
|
||||
# Escape answers with quotes, so they don't crash the system!
|
||||
escapedict = {'"': '"'}
|
||||
value = saxutils.escape(value, escapedict)
|
||||
|
||||
context = {'id': eid,
|
||||
'value': value,
|
||||
'state': status,
|
||||
'count': count,
|
||||
'size': size,
|
||||
'msg': msg,
|
||||
'hidden': hidden,
|
||||
'inline': element.get('inline', ''),
|
||||
'width': width,
|
||||
'height': height,
|
||||
'display_file': display_file,
|
||||
}
|
||||
|
||||
html = render_template("crystallography.html", context)
|
||||
try:
|
||||
xhtml = etree.XML(html)
|
||||
except Exception as err:
|
||||
# TODO: needs to be self.system.DEBUG - but can't access system
|
||||
if True:
|
||||
log.debug('[inputtypes.textline] failed to parse XML for:\n%s' % html)
|
||||
raise
|
||||
return xhtml
|
||||
|
||||
_reg(crystallography)
|
||||
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ChemicalEquationInput(InputTypeBase):
|
||||
"""
|
||||
An input type for entering chemical equations. Supports live preview.
|
||||
|
||||
Example:
|
||||
|
||||
<chemicalequationinput size="50"/>
|
||||
|
||||
options: size -- width of the textbox.
|
||||
"""
|
||||
|
||||
template = "chemicalequationinput.html"
|
||||
tags = ['chemicalequationinput']
|
||||
|
||||
def _get_render_context(self):
|
||||
size = self.xml.get('size', '20')
|
||||
context = {
|
||||
'id': self.id,
|
||||
'value': self.value,
|
||||
'status': self.status,
|
||||
'size': size,
|
||||
'previewer': '/static/js/capa/chemical_equation_preview.js',
|
||||
}
|
||||
return context
|
||||
|
||||
register_input_class(ChemicalEquationInput)
|
||||
|
||||
@@ -77,18 +77,27 @@ class LoncapaResponse(object):
|
||||
|
||||
In addition, these methods are optional:
|
||||
|
||||
- setup_response : find and note the answer input field IDs for the response; called by __init__
|
||||
- check_hint_condition : check to see if the student's answers satisfy a particular condition for a hint to be displayed
|
||||
- render_html : render this Response as HTML (must return XHTML compliant string)
|
||||
- setup_response : find and note the answer input field IDs for the response; called
|
||||
by __init__
|
||||
|
||||
- check_hint_condition : check to see if the student's answers satisfy a particular
|
||||
condition for a hint to be displayed
|
||||
|
||||
- render_html : render this Response as HTML (must return XHTML-compliant string)
|
||||
- __unicode__ : unicode representation of this Response
|
||||
|
||||
Each response type may also specify the following attributes:
|
||||
|
||||
- max_inputfields : (int) maximum number of answer input fields (checked in __init__ if not None)
|
||||
- allowed_inputfields : list of allowed input fields (each a string) for this Response
|
||||
- required_attributes : list of required attributes (each a string) on the main response XML stanza
|
||||
- hint_tag : xhtml tag identifying hint associated with this response inside hintgroup
|
||||
- max_inputfields : (int) maximum number of answer input fields (checked in __init__
|
||||
if not None)
|
||||
|
||||
- allowed_inputfields : list of allowed input fields (each a string) for this Response
|
||||
|
||||
- required_attributes : list of required attributes (each a string) on the main
|
||||
response XML stanza
|
||||
|
||||
- hint_tag : xhtml tag identifying hint associated with this response inside
|
||||
hintgroup
|
||||
"""
|
||||
__metaclass__ = abc.ABCMeta # abc = Abstract Base Class
|
||||
|
||||
@@ -121,26 +130,32 @@ class LoncapaResponse(object):
|
||||
raise LoncapaProblemError(msg)
|
||||
|
||||
if self.max_inputfields and len(inputfields) > self.max_inputfields:
|
||||
msg = "%s: cannot have more than %s input fields" % (unicode(self), self.max_inputfields)
|
||||
msg = "%s: cannot have more than %s input fields" % (
|
||||
unicode(self), self.max_inputfields)
|
||||
msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '<unavailable>')
|
||||
raise LoncapaProblemError(msg)
|
||||
|
||||
for prop in self.required_attributes:
|
||||
if not xml.get(prop):
|
||||
msg = "Error in problem specification: %s missing required attribute %s" % (unicode(self), prop)
|
||||
msg = "Error in problem specification: %s missing required attribute %s" % (
|
||||
unicode(self), prop)
|
||||
msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '<unavailable>')
|
||||
raise LoncapaProblemError(msg)
|
||||
|
||||
self.answer_ids = [x.get('id') for x in self.inputfields] # ordered list of answer_id values for this response
|
||||
# ordered list of answer_id values for this response
|
||||
self.answer_ids = [x.get('id') for x in self.inputfields]
|
||||
if self.max_inputfields == 1:
|
||||
self.answer_id = self.answer_ids[0] # for convenience
|
||||
# for convenience
|
||||
self.answer_id = self.answer_ids[0]
|
||||
|
||||
self.maxpoints = dict()
|
||||
for inputfield in self.inputfields:
|
||||
maxpoints = inputfield.get('points','1') # By default, each answerfield is worth 1 point
|
||||
# By default, each answerfield is worth 1 point
|
||||
maxpoints = inputfield.get('points', '1')
|
||||
self.maxpoints.update({inputfield.get('id'): int(maxpoints)})
|
||||
|
||||
self.default_answer_map = {} # dict for default answer map (provided in input elements)
|
||||
# dict for default answer map (provided in input elements)
|
||||
self.default_answer_map = {}
|
||||
for entry in self.inputfields:
|
||||
answer = entry.get('correct_answer')
|
||||
if answer:
|
||||
@@ -163,12 +178,18 @@ class LoncapaResponse(object):
|
||||
|
||||
- renderer : procedure which produces HTML given an ElementTree
|
||||
'''
|
||||
tree = etree.Element('span') # render ourself as a <span> + our content
|
||||
if self.xml.get('inline',''): # problem author can make this span display:inline
|
||||
# render ourself as a <span> + our content
|
||||
tree = etree.Element('span')
|
||||
|
||||
# problem author can make this span display:inline
|
||||
if self.xml.get('inline',''):
|
||||
tree.set('class','inline')
|
||||
|
||||
for item in self.xml:
|
||||
item_xhtml = renderer(item) # call provided procedure to do the rendering
|
||||
if item_xhtml is not None: tree.append(item_xhtml)
|
||||
# call provided procedure to do the rendering
|
||||
item_xhtml = renderer(item)
|
||||
if item_xhtml is not None:
|
||||
tree.append(item_xhtml)
|
||||
tree.tail = self.xml.tail
|
||||
return tree
|
||||
|
||||
@@ -194,21 +215,21 @@ class LoncapaResponse(object):
|
||||
Modifies new_cmap, by adding hints to answer_id entries as appropriate.
|
||||
'''
|
||||
hintgroup = self.xml.find('hintgroup')
|
||||
if hintgroup is None: return
|
||||
if hintgroup is None:
|
||||
return
|
||||
|
||||
# hint specified by function?
|
||||
hintfn = hintgroup.get('hintfn')
|
||||
if hintfn:
|
||||
'''
|
||||
Hint is determined by a function defined in the <script> context; evaluate that function to obtain
|
||||
list of hint, hintmode for each answer_id.
|
||||
Hint is determined by a function defined in the <script> context; evaluate
|
||||
that function to obtain list of hint, hintmode for each answer_id.
|
||||
|
||||
The function should take arguments (answer_ids, student_answers, new_cmap, old_cmap)
|
||||
and it should modify new_cmap as appropriate.
|
||||
|
||||
We may extend this in the future to add another argument which provides a callback procedure
|
||||
to a social hint generation system.
|
||||
|
||||
We may extend this in the future to add another argument which provides a
|
||||
callback procedure to a social hint generation system.
|
||||
'''
|
||||
if not hintfn in self.context:
|
||||
msg = 'missing specified hint function %s in script context' % hintfn
|
||||
@@ -239,14 +260,20 @@ class LoncapaResponse(object):
|
||||
# </hintgroup>
|
||||
# </formularesponse>
|
||||
|
||||
if self.hint_tag is not None and hintgroup.find(self.hint_tag) is not None and hasattr(self, 'check_hint_condition'):
|
||||
if (self.hint_tag is not None
|
||||
and hintgroup.find(self.hint_tag) is not None
|
||||
and hasattr(self, 'check_hint_condition')):
|
||||
|
||||
rephints = hintgroup.findall(self.hint_tag)
|
||||
hints_to_show = self.check_hint_condition(rephints, student_answers)
|
||||
hintmode = hintgroup.get('mode', 'always') # can be 'on_request' or 'always' (default)
|
||||
|
||||
# can be 'on_request' or 'always' (default)
|
||||
hintmode = hintgroup.get('mode', 'always')
|
||||
for hintpart in hintgroup.findall('hintpart'):
|
||||
if hintpart.get('on') in hints_to_show:
|
||||
hint_text = hintpart.find('text').text
|
||||
aid = self.answer_ids[-1] # make the hint appear after the last answer box in this response
|
||||
# make the hint appear after the last answer box in this response
|
||||
aid = self.answer_ids[-1]
|
||||
new_cmap.set_hint_and_mode(aid, hint_text, hintmode)
|
||||
log.debug('after hint: new_cmap = %s' % new_cmap)
|
||||
|
||||
@@ -257,10 +284,10 @@ class LoncapaResponse(object):
|
||||
(correctness, npoints, msg) for each answer_id.
|
||||
|
||||
Arguments:
|
||||
|
||||
- student_answers : dict of (answer_id,answer) where answer = student input (string)
|
||||
- old_cmap : previous CorrectMap (may be empty); useful for analyzing or recording history of responses
|
||||
|
||||
- old_cmap : previous CorrectMap (may be empty); useful for analyzing or
|
||||
recording history of responses
|
||||
'''
|
||||
pass
|
||||
|
||||
@@ -275,10 +302,13 @@ class LoncapaResponse(object):
|
||||
'''
|
||||
Return a list of hints to show.
|
||||
|
||||
- hxml_set : list of Element trees, each specifying a condition to be satisfied for a named hint condition
|
||||
- hxml_set : list of Element trees, each specifying a condition to be
|
||||
satisfied for a named hint condition
|
||||
|
||||
- student_answers : dict of student answers
|
||||
|
||||
Returns a list of names of hint conditions which were satisfied. Those are used to determine which hints are displayed.
|
||||
Returns a list of names of hint conditions which were satisfied. Those are used
|
||||
to determine which hints are displayed.
|
||||
'''
|
||||
pass
|
||||
|
||||
@@ -292,10 +322,10 @@ class LoncapaResponse(object):
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class JavascriptResponse(LoncapaResponse):
|
||||
'''
|
||||
"""
|
||||
This response type is used when the student's answer is graded via
|
||||
Javascript using Node.js.
|
||||
'''
|
||||
"""
|
||||
|
||||
response_tag = 'javascriptresponse'
|
||||
max_inputfields = 1
|
||||
@@ -314,11 +344,11 @@ class JavascriptResponse(LoncapaResponse):
|
||||
self.problem_state = self.generate_problem_state()
|
||||
else:
|
||||
self.problem_state = None
|
||||
|
||||
|
||||
self.solution = None
|
||||
|
||||
self.prepare_inputfield()
|
||||
|
||||
|
||||
def compile_display_javascript(self):
|
||||
|
||||
# TODO FIXME
|
||||
@@ -357,10 +387,10 @@ class JavascriptResponse(LoncapaResponse):
|
||||
self.generator_xml = self.xml.xpath('//*[@id=$id]//generator',
|
||||
id=self.xml.get('id'))[0]
|
||||
|
||||
self.grader_xml = self.xml.xpath('//*[@id=$id]//grader',
|
||||
self.grader_xml = self.xml.xpath('//*[@id=$id]//grader',
|
||||
id=self.xml.get('id'))[0]
|
||||
|
||||
self.display_xml = self.xml.xpath('//*[@id=$id]//display',
|
||||
self.display_xml = self.xml.xpath('//*[@id=$id]//display',
|
||||
id=self.xml.get('id'))[0]
|
||||
|
||||
self.xml.remove(self.generator_xml)
|
||||
@@ -387,7 +417,7 @@ class JavascriptResponse(LoncapaResponse):
|
||||
self.display_dependencies = []
|
||||
|
||||
self.display_class = self.display_xml.get("class")
|
||||
|
||||
|
||||
def get_node_env(self):
|
||||
|
||||
js_dir = os.path.join(self.system.filestore.root_path, 'js')
|
||||
@@ -395,7 +425,7 @@ class JavascriptResponse(LoncapaResponse):
|
||||
node_path = self.system.node_path + ":" + os.path.normpath(js_dir)
|
||||
tmp_env["NODE_PATH"] = node_path
|
||||
return tmp_env
|
||||
|
||||
|
||||
def call_node(self, args):
|
||||
|
||||
subprocess_args = ["node"]
|
||||
@@ -408,7 +438,7 @@ class JavascriptResponse(LoncapaResponse):
|
||||
|
||||
generator_file = os.path.dirname(os.path.normpath(__file__)) + '/javascript_problem_generator.js'
|
||||
output = self.call_node([generator_file,
|
||||
self.generator,
|
||||
self.generator,
|
||||
json.dumps(self.generator_dependencies),
|
||||
json.dumps(str(self.context['the_lcp'].seed)),
|
||||
json.dumps(self.params)]).strip()
|
||||
@@ -418,18 +448,18 @@ class JavascriptResponse(LoncapaResponse):
|
||||
def extract_params(self):
|
||||
|
||||
params = {}
|
||||
|
||||
for param in self.xml.xpath('//*[@id=$id]//responseparam',
|
||||
|
||||
for param in self.xml.xpath('//*[@id=$id]//responseparam',
|
||||
id=self.xml.get('id')):
|
||||
|
||||
raw_param = param.get("value")
|
||||
params[param.get("name")] = json.loads(contextualize_text(raw_param, self.context))
|
||||
|
||||
|
||||
return params
|
||||
|
||||
def prepare_inputfield(self):
|
||||
|
||||
for inputfield in self.xml.xpath('//*[@id=$id]//javascriptinput',
|
||||
for inputfield in self.xml.xpath('//*[@id=$id]//javascriptinput',
|
||||
id=self.xml.get('id')):
|
||||
|
||||
escapedict = {'"': '"'}
|
||||
@@ -456,36 +486,36 @@ class JavascriptResponse(LoncapaResponse):
|
||||
else:
|
||||
points = 0
|
||||
return CorrectMap(self.answer_id, correctness, npoints=points, msg=evaluation)
|
||||
|
||||
|
||||
def run_grader(self, submission):
|
||||
if submission is None or submission == '':
|
||||
submission = json.dumps(None)
|
||||
|
||||
grader_file = os.path.dirname(os.path.normpath(__file__)) + '/javascript_problem_grader.js'
|
||||
outputs = self.call_node([grader_file,
|
||||
self.grader,
|
||||
outputs = self.call_node([grader_file,
|
||||
self.grader,
|
||||
json.dumps(self.grader_dependencies),
|
||||
submission,
|
||||
json.dumps(self.problem_state),
|
||||
submission,
|
||||
json.dumps(self.problem_state),
|
||||
json.dumps(self.params)]).split('\n')
|
||||
|
||||
all_correct = json.loads(outputs[0].strip())
|
||||
evaluation = outputs[1].strip()
|
||||
solution = outputs[2].strip()
|
||||
return (all_correct, evaluation, solution)
|
||||
|
||||
|
||||
def get_answers(self):
|
||||
if self.solution is None:
|
||||
(_, _, self.solution) = self.run_grader(None)
|
||||
|
||||
return {self.answer_id: self.solution}
|
||||
|
||||
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
class ChoiceResponse(LoncapaResponse):
|
||||
'''
|
||||
"""
|
||||
This response type is used when the student chooses from a discrete set of
|
||||
choices. Currently, to be marked correct, all "correct" choices must be
|
||||
supplied by the student, and no extraneous choices may be included.
|
||||
@@ -530,7 +560,7 @@ class ChoiceResponse(LoncapaResponse):
|
||||
choices must be given names. This behavior seems like a leaky abstraction,
|
||||
and it'd be nice to change this at some point.
|
||||
|
||||
'''
|
||||
"""
|
||||
|
||||
response_tag = 'choiceresponse'
|
||||
max_inputfields = 1
|
||||
@@ -596,7 +626,8 @@ class MultipleChoiceResponse(LoncapaResponse):
|
||||
allowed_inputfields = ['choicegroup']
|
||||
|
||||
def setup_response(self):
|
||||
self.mc_setup_response() # call secondary setup for MultipleChoice questions, to set name attributes
|
||||
# call secondary setup for MultipleChoice questions, to set name attributes
|
||||
self.mc_setup_response()
|
||||
|
||||
# define correct choices (after calling secondary setup)
|
||||
xml = self.xml
|
||||
@@ -611,7 +642,8 @@ class MultipleChoiceResponse(LoncapaResponse):
|
||||
for response in self.xml.xpath("choicegroup"):
|
||||
rtype = response.get('type')
|
||||
if rtype not in ["MultipleChoice"]:
|
||||
response.set("type", "MultipleChoice") # force choicegroup to be MultipleChoice if not valid
|
||||
# force choicegroup to be MultipleChoice if not valid
|
||||
response.set("type", "MultipleChoice")
|
||||
for choice in list(response):
|
||||
if choice.get("name") is None:
|
||||
choice.set("name", "choice_" + str(i))
|
||||
@@ -623,8 +655,10 @@ class MultipleChoiceResponse(LoncapaResponse):
|
||||
'''
|
||||
grade student response.
|
||||
'''
|
||||
# log.debug('%s: student_answers=%s, correct_choices=%s' % (unicode(self),student_answers,self.correct_choices))
|
||||
if self.answer_id in student_answers and student_answers[self.answer_id] in self.correct_choices:
|
||||
# log.debug('%s: student_answers=%s, correct_choices=%s' % (
|
||||
# unicode(self), student_answers, self.correct_choices))
|
||||
if (self.answer_id in student_answers
|
||||
and student_answers[self.answer_id] in self.correct_choices):
|
||||
return CorrectMap(self.answer_id, 'correct')
|
||||
else:
|
||||
return CorrectMap(self.answer_id, 'incorrect')
|
||||
@@ -664,10 +698,14 @@ class OptionResponse(LoncapaResponse):
|
||||
'''
|
||||
TODO: handle direction and randomize
|
||||
'''
|
||||
snippets = [{'snippet': '''<optionresponse direction="vertical" randomize="yes">
|
||||
<optioninput options="('Up','Down')" correct="Up"><text>The location of the sky</text></optioninput>
|
||||
<optioninput options="('Up','Down')" correct="Down"><text>The location of the earth</text></optioninput>
|
||||
</optionresponse>'''}]
|
||||
snippets = [{'snippet': """<optionresponse direction="vertical" randomize="yes">
|
||||
<optioninput options="('Up','Down')" correct="Up">
|
||||
<text>The location of the sky</text>
|
||||
</optioninput>
|
||||
<optioninput options="('Up','Down')" correct="Down">
|
||||
<text>The location of the earth</text>
|
||||
</optioninput>
|
||||
</optionresponse>"""}]
|
||||
|
||||
response_tag = 'optionresponse'
|
||||
hint_tag = 'optionhint'
|
||||
@@ -722,21 +760,33 @@ class NumericalResponse(LoncapaResponse):
|
||||
def get_score(self, student_answers):
|
||||
'''Grade a numeric response '''
|
||||
student_answer = student_answers[self.answer_id]
|
||||
|
||||
try:
|
||||
correct = compare_with_tolerance(evaluator(dict(), dict(), student_answer), complex(self.correct_answer), self.tolerance)
|
||||
correct_ans = complex(self.correct_answer)
|
||||
except ValueError:
|
||||
log.debug("Content error--answer '{0}' is not a valid complex number".format(self.correct_answer))
|
||||
raise StudentInputError("There was a problem with the staff answer to this problem")
|
||||
|
||||
try:
|
||||
correct = compare_with_tolerance(evaluator(dict(), dict(), student_answer),
|
||||
correct_ans, self.tolerance)
|
||||
# We should catch this explicitly.
|
||||
# I think this is just pyparsing.ParseException, calc.UndefinedVariable:
|
||||
# But we'd need to confirm
|
||||
except:
|
||||
raise StudentInputError("Invalid input: could not interpret '%s' as a number" %\
|
||||
cgi.escape(student_answer))
|
||||
# Use the traceback-preserving version of re-raising with a different type
|
||||
import sys
|
||||
type, value, traceback = sys.exc_info()
|
||||
|
||||
raise StudentInputError, ("Invalid input: could not interpret '%s' as a number" %
|
||||
cgi.escape(student_answer)), traceback
|
||||
|
||||
if correct:
|
||||
return CorrectMap(self.answer_id, 'correct')
|
||||
else:
|
||||
return CorrectMap(self.answer_id, 'incorrect')
|
||||
|
||||
# TODO: add check_hint_condition(self,hxml_set,student_answers)
|
||||
# TODO: add check_hint_condition(self, hxml_set, student_answers)
|
||||
|
||||
def get_answers(self):
|
||||
return {self.answer_id: self.correct_answer}
|
||||
@@ -786,7 +836,7 @@ class CustomResponse(LoncapaResponse):
|
||||
Custom response. The python code to be run should be in <answer>...</answer>
|
||||
or in a <script>...</script>
|
||||
'''
|
||||
snippets = [{'snippet': '''<customresponse>
|
||||
snippets = [{'snippet': """<customresponse>
|
||||
<text>
|
||||
<br/>
|
||||
Suppose that \(I(t)\) rises from \(0\) to \(I_S\) at a time \(t_0 \neq 0\)
|
||||
@@ -804,8 +854,8 @@ class CustomResponse(LoncapaResponse):
|
||||
if not(r=="IS*u(t-t0)"):
|
||||
correct[0] ='incorrect'
|
||||
</answer>
|
||||
</customresponse>'''},
|
||||
{'snippet': '''<script type="loncapa/python"><![CDATA[
|
||||
</customresponse>"""},
|
||||
{'snippet': """<script type="loncapa/python"><![CDATA[
|
||||
|
||||
def sympy_check2():
|
||||
messages[0] = '%s:%s' % (submission[0],fromjs[0].replace('<','<'))
|
||||
@@ -818,10 +868,10 @@ def sympy_check2():
|
||||
<customresponse cfn="sympy_check2" type="cs" expect="2.27E-39" dojs="math" size="30" answer="2.27E-39">
|
||||
<textline size="40" dojs="math" />
|
||||
<responseparam description="Numerical Tolerance" type="tolerance" default="0.00001" name="tol"/>
|
||||
</customresponse>'''}]
|
||||
</customresponse>"""}]
|
||||
|
||||
response_tag = 'customresponse'
|
||||
allowed_inputfields = ['textline', 'textbox']
|
||||
allowed_inputfields = ['textline', 'textbox', 'crystallography', 'chemicalequationinput']
|
||||
|
||||
def setup_response(self):
|
||||
xml = self.xml
|
||||
@@ -832,7 +882,8 @@ def sympy_check2():
|
||||
|
||||
log.debug('answer_ids=%s' % self.answer_ids)
|
||||
|
||||
# the <answer>...</answer> stanza should be local to the current <customresponse>. So try looking there first.
|
||||
# the <answer>...</answer> stanza should be local to the current <customresponse>.
|
||||
# So try looking there first.
|
||||
self.code = None
|
||||
answer = None
|
||||
try:
|
||||
@@ -840,8 +891,9 @@ def sympy_check2():
|
||||
except IndexError:
|
||||
# print "xml = ",etree.tostring(xml,pretty_print=True)
|
||||
|
||||
# if we have a "cfn" attribute then look for the function specified by cfn, in the problem context
|
||||
# ie the comparison function is defined in the <script>...</script> stanza instead
|
||||
# if we have a "cfn" attribute then look for the function specified by cfn, in
|
||||
# the problem context ie the comparison function is defined in the
|
||||
# <script>...</script> stanza instead
|
||||
cfn = xml.get('cfn')
|
||||
if cfn:
|
||||
log.debug("cfn = %s" % cfn)
|
||||
@@ -849,13 +901,14 @@ def sympy_check2():
|
||||
self.code = self.context[cfn]
|
||||
else:
|
||||
msg = "%s: can't find cfn %s in context" % (unicode(self), cfn)
|
||||
msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline', '<unavailable>')
|
||||
msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline',
|
||||
'<unavailable>')
|
||||
raise LoncapaProblemError(msg)
|
||||
|
||||
if not self.code:
|
||||
if answer is None:
|
||||
# raise Exception,"[courseware.capa.responsetypes.customresponse] missing code checking script! id=%s" % self.myid
|
||||
log.error("[courseware.capa.responsetypes.customresponse] missing code checking script! id=%s" % self.myid)
|
||||
log.error("[courseware.capa.responsetypes.customresponse] missing"
|
||||
" code checking script! id=%s" % self.myid)
|
||||
self.code = ''
|
||||
else:
|
||||
answer_src = answer.get('src')
|
||||
@@ -872,43 +925,70 @@ def sympy_check2():
|
||||
|
||||
log.debug('%s: student_answers=%s' % (unicode(self), student_answers))
|
||||
|
||||
idset = sorted(self.answer_ids) # ordered list of answer id's
|
||||
# ordered list of answer id's
|
||||
idset = sorted(self.answer_ids)
|
||||
try:
|
||||
submission = [student_answers[k] for k in idset] # ordered list of answers
|
||||
# ordered list of answers
|
||||
submission = [student_answers[k] for k in idset]
|
||||
except Exception as err:
|
||||
msg = '[courseware.capa.responsetypes.customresponse] error getting student answer from %s' % student_answers
|
||||
msg = ('[courseware.capa.responsetypes.customresponse] error getting'
|
||||
' student answer from %s' % student_answers)
|
||||
msg += '\n idset = %s, error = %s' % (idset, err)
|
||||
log.error(msg)
|
||||
raise Exception(msg)
|
||||
|
||||
# global variable in context which holds the Presentation MathML from dynamic math input
|
||||
dynamath = [student_answers.get(k + '_dynamath', None) for k in idset] # ordered list of dynamath responses
|
||||
# ordered list of dynamath responses
|
||||
dynamath = [student_answers.get(k + '_dynamath', None) for k in idset]
|
||||
|
||||
# if there is only one box, and it's empty, then don't evaluate
|
||||
if len(idset) == 1 and not submission[0]:
|
||||
# default to no error message on empty answer (to be consistent with other responsetypes)
|
||||
# but allow author to still have the old behavior by setting empty_answer_err attribute
|
||||
msg = '<span class="inline-error">No answer entered!</span>' if self.xml.get('empty_answer_err') else ''
|
||||
# default to no error message on empty answer (to be consistent with other
|
||||
# responsetypes) but allow author to still have the old behavior by setting
|
||||
# empty_answer_err attribute
|
||||
msg = ('<span class="inline-error">No answer entered!</span>'
|
||||
if self.xml.get('empty_answer_err') else '')
|
||||
return CorrectMap(idset[0], 'incorrect', msg=msg)
|
||||
|
||||
# NOTE: correct = 'unknown' could be dangerous. Inputtypes such as textline are not expecting 'unknown's
|
||||
# NOTE: correct = 'unknown' could be dangerous. Inputtypes such as textline are
|
||||
# not expecting 'unknown's
|
||||
correct = ['unknown'] * len(idset)
|
||||
messages = [''] * len(idset)
|
||||
|
||||
# put these in the context of the check function evaluator
|
||||
# note that this doesn't help the "cfn" version - only the exec version
|
||||
self.context.update({'xml': self.xml, # our subtree
|
||||
'response_id': self.myid, # my ID
|
||||
'expect': self.expect, # expected answer (if given as attribute)
|
||||
'submission': submission, # ordered list of student answers from entry boxes in our subtree
|
||||
'idset': idset, # ordered list of ID's of all entry boxes in our subtree
|
||||
'dynamath': dynamath, # ordered list of all javascript inputs in our subtree
|
||||
'answers': student_answers, # dict of student's responses, with keys being entry box IDs
|
||||
'correct': correct, # the list to be filled in by the check function
|
||||
'messages': messages, # the list of messages to be filled in by the check function
|
||||
'options': self.xml.get('options'), # any options to be passed to the cfn
|
||||
'testdat': 'hello world',
|
||||
})
|
||||
self.context.update({
|
||||
# our subtree
|
||||
'xml': self.xml,
|
||||
|
||||
# my ID
|
||||
'response_id': self.myid,
|
||||
|
||||
# expected answer (if given as attribute)
|
||||
'expect': self.expect,
|
||||
|
||||
# ordered list of student answers from entry boxes in our subtree
|
||||
'submission': submission,
|
||||
|
||||
# ordered list of ID's of all entry boxes in our subtree
|
||||
'idset': idset,
|
||||
|
||||
# ordered list of all javascript inputs in our subtree
|
||||
'dynamath': dynamath,
|
||||
|
||||
# dict of student's responses, with keys being entry box IDs
|
||||
'answers': student_answers,
|
||||
|
||||
# the list to be filled in by the check function
|
||||
'correct': correct,
|
||||
|
||||
# the list of messages to be filled in by the check function
|
||||
'messages': messages,
|
||||
|
||||
# any options to be passed to the cfn
|
||||
'options': self.xml.get('options'),
|
||||
'testdat': 'hello world',
|
||||
})
|
||||
|
||||
# pass self.system.debug to cfn
|
||||
self.context['debug'] = self.system.DEBUG
|
||||
@@ -923,8 +1003,10 @@ def sympy_check2():
|
||||
print "oops in customresponse (code) error %s" % err
|
||||
print "context = ", self.context
|
||||
print traceback.format_exc()
|
||||
raise StudentInputError("Error: Problem could not be evaluated with your input") # Notify student
|
||||
else: # self.code is not a string; assume its a function
|
||||
# Notify student
|
||||
raise StudentInputError("Error: Problem could not be evaluated with your input")
|
||||
else:
|
||||
# self.code is not a string; assume its a function
|
||||
|
||||
# this is an interface to the Tutor2 check functions
|
||||
fn = self.code
|
||||
@@ -960,7 +1042,8 @@ def sympy_check2():
|
||||
msg = '<html>' + msg + '</html>'
|
||||
msg = msg.replace('<', '<')
|
||||
#msg = msg.replace('<','<')
|
||||
msg = etree.tostring(fromstring_bs(msg, convertEntities=None), pretty_print=True)
|
||||
msg = etree.tostring(fromstring_bs(msg, convertEntities=None),
|
||||
pretty_print=True)
|
||||
#msg = etree.tostring(fromstring_bs(msg),pretty_print=True)
|
||||
msg = msg.replace(' ', '')
|
||||
#msg = re.sub('<html>(.*)</html>','\\1',msg,flags=re.M|re.DOTALL) # python 2.7
|
||||
@@ -1024,18 +1107,19 @@ class SymbolicResponse(CustomResponse):
|
||||
|
||||
|
||||
class CodeResponse(LoncapaResponse):
|
||||
'''
|
||||
"""
|
||||
Grade student code using an external queueing server, called 'xqueue'
|
||||
|
||||
Expects 'xqueue' dict in ModuleSystem with the following keys that are needed by CodeResponse:
|
||||
system.xqueue = { 'interface': XqueueInterface object,
|
||||
'callback_url': Per-StudentModule callback URL where results are posted (string),
|
||||
'callback_url': Per-StudentModule callback URL
|
||||
where results are posted (string),
|
||||
'default_queuename': Default queuename to submit request (string)
|
||||
}
|
||||
|
||||
External requests are only submitted for student submission grading
|
||||
External requests are only submitted for student submission grading
|
||||
(i.e. and not for getting reference answers)
|
||||
'''
|
||||
"""
|
||||
|
||||
response_tag = 'coderesponse'
|
||||
allowed_inputfields = ['textbox', 'filesubmission']
|
||||
@@ -1048,7 +1132,8 @@ class CodeResponse(LoncapaResponse):
|
||||
TODO: Determines whether in synchronous or asynchronous (queued) mode
|
||||
'''
|
||||
xml = self.xml
|
||||
self.url = xml.get('url', None) # TODO: XML can override external resource (grader/queue) URL
|
||||
# TODO: XML can override external resource (grader/queue) URL
|
||||
self.url = xml.get('url', None)
|
||||
self.queue_name = xml.get('queuename', self.system.xqueue['default_queuename'])
|
||||
|
||||
# VS[compat]:
|
||||
@@ -1109,7 +1194,8 @@ class CodeResponse(LoncapaResponse):
|
||||
|
||||
# Extract 'answer' and 'initial_display' from XML. Note that the code to be exec'ed here is:
|
||||
# (1) Internal edX code, i.e. NOT student submissions, and
|
||||
# (2) The code should only define the strings 'initial_display', 'answer', 'preamble', 'test_program'
|
||||
# (2) The code should only define the strings 'initial_display', 'answer',
|
||||
# 'preamble', 'test_program'
|
||||
# following the ExternalResponse XML format
|
||||
penv = {}
|
||||
penv['__builtins__'] = globals()['__builtins__']
|
||||
@@ -1122,10 +1208,12 @@ class CodeResponse(LoncapaResponse):
|
||||
self.answer = penv['answer']
|
||||
self.initial_display = penv['initial_display']
|
||||
except Exception as err:
|
||||
log.error("Error in CodeResponse %s: Problem reference code does not define 'answer' and/or 'initial_display' in <answer>...</answer>" % err)
|
||||
log.error("Error in CodeResponse %s: Problem reference code does not define"
|
||||
" 'answer' and/or 'initial_display' in <answer>...</answer>" % err)
|
||||
raise Exception(err)
|
||||
|
||||
# Finally, make the ExternalResponse input XML format conform to the generic exteral grader interface
|
||||
# Finally, make the ExternalResponse input XML format conform to the generic
|
||||
# exteral grader interface
|
||||
# The XML tagging of grader_payload is pyxserver-specific
|
||||
grader_payload = '<pyxserver>'
|
||||
grader_payload += '<tests>' + tests + '</tests>\n'
|
||||
@@ -1135,14 +1223,16 @@ class CodeResponse(LoncapaResponse):
|
||||
|
||||
def get_score(self, student_answers):
|
||||
try:
|
||||
submission = student_answers[self.answer_id] # Note that submission can be a file
|
||||
# Note that submission can be a file
|
||||
submission = student_answers[self.answer_id]
|
||||
except Exception as err:
|
||||
log.error('Error in CodeResponse %s: cannot get student answer for %s; student_answers=%s' %
|
||||
log.error('Error in CodeResponse %s: cannot get student answer for %s;'
|
||||
' student_answers=%s' %
|
||||
(err, self.answer_id, convert_files_to_filenames(student_answers)))
|
||||
raise Exception(err)
|
||||
|
||||
# Prepare xqueue request
|
||||
#------------------------------------------------------------
|
||||
#------------------------------------------------------------
|
||||
|
||||
qinterface = self.system.xqueue['interface']
|
||||
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
|
||||
@@ -1151,19 +1241,20 @@ class CodeResponse(LoncapaResponse):
|
||||
|
||||
# Generate header
|
||||
queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
|
||||
anonymous_student_id +
|
||||
anonymous_student_id +
|
||||
self.answer_id)
|
||||
xheader = xqueue_interface.make_xheader(lms_callback_url=self.system.xqueue['callback_url'],
|
||||
lms_key=queuekey,
|
||||
queue_name=self.queue_name)
|
||||
|
||||
|
||||
# Generate body
|
||||
if is_list_of_files(submission):
|
||||
self.context.update({'submission': ''}) # TODO: Get S3 pointer from the Queue
|
||||
# TODO: Get S3 pointer from the Queue
|
||||
self.context.update({'submission': ''})
|
||||
else:
|
||||
self.context.update({'submission': submission})
|
||||
|
||||
contents = self.payload.copy()
|
||||
contents = self.payload.copy()
|
||||
|
||||
# Metadata related to the student submission revealed to the external grader
|
||||
student_info = {'anonymous_student_id': anonymous_student_id,
|
||||
@@ -1173,7 +1264,8 @@ class CodeResponse(LoncapaResponse):
|
||||
|
||||
# Submit request. When successful, 'msg' is the prior length of the queue
|
||||
if is_list_of_files(submission):
|
||||
contents.update({'student_response': ''}) # TODO: Is there any information we want to send here?
|
||||
# TODO: Is there any information we want to send here?
|
||||
contents.update({'student_response': ''})
|
||||
(error, msg) = qinterface.send_to_queue(header=xheader,
|
||||
body=json.dumps(contents),
|
||||
files_to_upload=submission)
|
||||
@@ -1184,44 +1276,51 @@ class CodeResponse(LoncapaResponse):
|
||||
|
||||
# State associated with the queueing request
|
||||
queuestate = {'key': queuekey,
|
||||
'time': qtime,
|
||||
}
|
||||
'time': qtime,}
|
||||
|
||||
cmap = CorrectMap()
|
||||
cmap = CorrectMap()
|
||||
if error:
|
||||
cmap.set(self.answer_id, queuestate=None,
|
||||
msg='Unable to deliver your submission to grader. (Reason: %s.) Please try again later.' % msg)
|
||||
msg='Unable to deliver your submission to grader. (Reason: %s.)'
|
||||
' Please try again later.' % msg)
|
||||
else:
|
||||
# Queueing mechanism flags:
|
||||
# 1) Backend: Non-null CorrectMap['queuestate'] indicates that the problem has been queued
|
||||
# 2) Frontend: correctness='incomplete' eventually trickles down through inputtypes.textbox
|
||||
# and .filesubmission to inform the browser to poll the LMS
|
||||
# 1) Backend: Non-null CorrectMap['queuestate'] indicates that
|
||||
# the problem has been queued
|
||||
# 2) Frontend: correctness='incomplete' eventually trickles down
|
||||
# through inputtypes.textbox and .filesubmission to inform the
|
||||
# browser to poll the LMS
|
||||
cmap.set(self.answer_id, queuestate=queuestate, correctness='incomplete', msg=msg)
|
||||
|
||||
return cmap
|
||||
|
||||
def update_score(self, score_msg, oldcmap, queuekey):
|
||||
|
||||
(valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)
|
||||
(valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)
|
||||
if not valid_score_msg:
|
||||
oldcmap.set(self.answer_id, msg='Invalid grader reply. Please contact the course staff.')
|
||||
oldcmap.set(self.answer_id,
|
||||
msg='Invalid grader reply. Please contact the course staff.')
|
||||
return oldcmap
|
||||
|
||||
|
||||
correctness = 'correct' if correct else 'incorrect'
|
||||
|
||||
self.context['correct'] = correctness # TODO: Find out how this is used elsewhere, if any
|
||||
# TODO: Find out how this is used elsewhere, if any
|
||||
self.context['correct'] = correctness
|
||||
|
||||
# Replace 'oldcmap' with new grading results if queuekey matches.
|
||||
# If queuekey does not match, we keep waiting for the score_msg whose key actually matches
|
||||
# Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
|
||||
# does not match, we keep waiting for the score_msg whose key actually matches
|
||||
if oldcmap.is_right_queuekey(self.answer_id, queuekey):
|
||||
# Sanity check on returned points
|
||||
# Sanity check on returned points
|
||||
if points < 0:
|
||||
points = 0
|
||||
elif points > self.maxpoints[self.answer_id]:
|
||||
points = self.maxpoints[self.answer_id]
|
||||
oldcmap.set(self.answer_id, npoints=points, correctness=correctness, msg=msg.replace(' ', ' '), queuestate=None) # Queuestate is consumed
|
||||
# Queuestate is consumed
|
||||
oldcmap.set(self.answer_id, npoints=points, correctness=correctness,
|
||||
msg=msg.replace(' ', ' '), queuestate=None)
|
||||
else:
|
||||
log.debug('CodeResponse: queuekey %s does not match for answer_id=%s.' % (queuekey, self.answer_id))
|
||||
log.debug('CodeResponse: queuekey %s does not match for answer_id=%s.' %
|
||||
(queuekey, self.answer_id))
|
||||
|
||||
return oldcmap
|
||||
|
||||
@@ -1233,7 +1332,7 @@ class CodeResponse(LoncapaResponse):
|
||||
return {self.answer_id: self.initial_display}
|
||||
|
||||
def _parse_score_msg(self, score_msg):
|
||||
'''
|
||||
"""
|
||||
Grader reply is a JSON-dump of the following dict
|
||||
{ 'correct': True/False,
|
||||
'score': Numeric value (floating point is okay) to assign to answer
|
||||
@@ -1244,22 +1343,25 @@ class CodeResponse(LoncapaResponse):
|
||||
correct: Correctness of submission (Boolean)
|
||||
score: Points to be assigned (numeric, can be float)
|
||||
msg: Message from grader to display to student (string)
|
||||
'''
|
||||
"""
|
||||
fail = (False, False, 0, '')
|
||||
try:
|
||||
score_result = json.loads(score_msg)
|
||||
except (TypeError, ValueError):
|
||||
log.error("External grader message should be a JSON-serialized dict. Received score_msg = %s" % score_msg)
|
||||
log.error("External grader message should be a JSON-serialized dict."
|
||||
" Received score_msg = %s" % score_msg)
|
||||
return fail
|
||||
if not isinstance(score_result, dict):
|
||||
log.error("External grader message should be a JSON-serialized dict. Received score_result = %s" % score_result)
|
||||
log.error("External grader message should be a JSON-serialized dict."
|
||||
" Received score_result = %s" % score_result)
|
||||
return fail
|
||||
for tag in ['correct', 'score', 'msg']:
|
||||
if tag not in score_result:
|
||||
log.error("External grader message is missing one or more required tags: 'correct', 'score', 'msg'")
|
||||
log.error("External grader message is missing one or more required"
|
||||
" tags: 'correct', 'score', 'msg'")
|
||||
return fail
|
||||
|
||||
# Next, we need to check that the contents of the external grader message
|
||||
# Next, we need to check that the contents of the external grader message
|
||||
# is safe for the LMS.
|
||||
# 1) Make sure that the message is valid XML (proper opening/closing tags)
|
||||
# 2) TODO: Is the message actually HTML?
|
||||
@@ -1267,11 +1369,12 @@ class CodeResponse(LoncapaResponse):
|
||||
try:
|
||||
etree.fromstring(msg)
|
||||
except etree.XMLSyntaxError as err:
|
||||
log.error("Unable to parse external grader message as valid XML: score_msg['msg']=%s" % msg)
|
||||
log.error("Unable to parse external grader message as valid"
|
||||
" XML: score_msg['msg']=%s" % msg)
|
||||
return fail
|
||||
|
||||
|
||||
return (True, score_result['correct'], score_result['score'], msg)
|
||||
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
@@ -1327,9 +1430,9 @@ main()
|
||||
|
||||
def setup_response(self):
|
||||
xml = self.xml
|
||||
self.url = xml.get('url') or "http://qisx.mit.edu:8889/pyloncapa" # FIXME - hardcoded URL
|
||||
# FIXME - hardcoded URL
|
||||
self.url = xml.get('url') or "http://qisx.mit.edu:8889/pyloncapa"
|
||||
|
||||
# answer = xml.xpath('//*[@id=$id]//answer',id=xml.get('id'))[0] # FIXME - catch errors
|
||||
answer = xml.find('answer')
|
||||
if answer is not None:
|
||||
answer_src = answer.get('src')
|
||||
@@ -1337,7 +1440,8 @@ main()
|
||||
self.code = self.system.filesystem.open('src/' + answer_src).read()
|
||||
else:
|
||||
self.code = answer.text
|
||||
else: # no <answer> stanza; get code from <script>
|
||||
else:
|
||||
# no <answer> stanza; get code from <script>
|
||||
self.code = self.context['script_code']
|
||||
if not self.code:
|
||||
msg = '%s: Missing answer script code for externalresponse' % unicode(self)
|
||||
@@ -1364,19 +1468,22 @@ main()
|
||||
payload.update(extra_payload)
|
||||
|
||||
try:
|
||||
r = requests.post(self.url, data=payload) # call external server
|
||||
# call external server. TODO: synchronous call, can block for a long time
|
||||
r = requests.post(self.url, data=payload)
|
||||
except Exception as err:
|
||||
msg = 'Error %s - cannot connect to external server url=%s' % (err, self.url)
|
||||
log.error(msg)
|
||||
raise Exception(msg)
|
||||
|
||||
if self.system.DEBUG: log.info('response = %s' % r.text)
|
||||
if self.system.DEBUG:
|
||||
log.info('response = %s' % r.text)
|
||||
|
||||
if (not r.text) or (not r.text.strip()):
|
||||
raise Exception('Error: no response from external server url=%s' % self.url)
|
||||
|
||||
try:
|
||||
rxml = etree.fromstring(r.text) # response is XML; prase it
|
||||
# response is XML; parse it
|
||||
rxml = etree.fromstring(r.text)
|
||||
except Exception as err:
|
||||
msg = 'Error %s - cannot parse response from external server r.text=%s' % (err, r.text)
|
||||
log.error(msg)
|
||||
@@ -1390,7 +1497,8 @@ main()
|
||||
try:
|
||||
submission = [student_answers[k] for k in idset]
|
||||
except Exception as err:
|
||||
log.error('Error %s: cannot get student answer for %s; student_answers=%s' % (err, self.answer_ids, student_answers))
|
||||
log.error('Error %s: cannot get student answer for %s; student_answers=%s' %
|
||||
(err, self.answer_ids, student_answers))
|
||||
raise Exception(err)
|
||||
|
||||
self.context.update({'submission': submission})
|
||||
@@ -1403,7 +1511,9 @@ main()
|
||||
log.error('Error %s' % err)
|
||||
if self.system.DEBUG:
|
||||
cmap.set_dict(dict(zip(sorted(self.answer_ids), ['incorrect'] * len(idset))))
|
||||
cmap.set_property(self.answer_ids[0], 'msg', '<span class="inline-error">%s</span>' % str(err).replace('<', '<'))
|
||||
cmap.set_property(
|
||||
self.answer_ids[0], 'msg',
|
||||
'<span class="inline-error">%s</span>' % str(err).replace('<', '<'))
|
||||
return cmap
|
||||
|
||||
ad = rxml.find('awarddetail').text
|
||||
@@ -1437,7 +1547,8 @@ main()
|
||||
exans[0] = msg
|
||||
|
||||
if not (len(exans) == len(self.answer_ids)):
|
||||
log.error('Expected %d answers from external server, only got %d!' % (len(self.answer_ids), len(exans)))
|
||||
log.error('Expected %d answers from external server, only got %d!' %
|
||||
(len(self.answer_ids), len(exans)))
|
||||
raise Exception('Short response from external server')
|
||||
return dict(zip(self.answer_ids, exans))
|
||||
|
||||
@@ -1489,11 +1600,14 @@ class FormulaResponse(LoncapaResponse):
|
||||
typeslist = []
|
||||
else:
|
||||
typeslist = ts.split(',')
|
||||
if 'ci' in typeslist: # Case insensitive
|
||||
if 'ci' in typeslist:
|
||||
# Case insensitive
|
||||
self.case_sensitive = False
|
||||
elif 'cs' in typeslist: # Case sensitive
|
||||
elif 'cs' in typeslist:
|
||||
# Case sensitive
|
||||
self.case_sensitive = True
|
||||
else: # Default
|
||||
else:
|
||||
# Default
|
||||
self.case_sensitive = False
|
||||
|
||||
def get_score(self, student_answers):
|
||||
@@ -1511,12 +1625,14 @@ class FormulaResponse(LoncapaResponse):
|
||||
for i in range(numsamples):
|
||||
instructor_variables = self.strip_dict(dict(self.context))
|
||||
student_variables = dict()
|
||||
for var in ranges: # ranges give numerical ranges for testing
|
||||
# ranges give numerical ranges for testing
|
||||
for var in ranges:
|
||||
value = random.uniform(*ranges[var])
|
||||
instructor_variables[str(var)] = value
|
||||
student_variables[str(var)] = value
|
||||
#log.debug('formula: instructor_vars=%s, expected=%s' % (instructor_variables,expected))
|
||||
instructor_result = evaluator(instructor_variables, dict(), expected, cs=self.case_sensitive)
|
||||
instructor_result = evaluator(instructor_variables, dict(),
|
||||
expected, cs=self.case_sensitive)
|
||||
try:
|
||||
#log.debug('formula: student_vars=%s, given=%s' % (student_variables,given))
|
||||
student_result = evaluator(student_variables,
|
||||
@@ -1542,9 +1658,9 @@ class FormulaResponse(LoncapaResponse):
|
||||
keys and all non-numeric values stripped out. All values also
|
||||
converted to float. Used so we can safely use Python contexts.
|
||||
'''
|
||||
d = dict([(k, numpy.complex(d[k])) for k in d if type(k) == str and \
|
||||
k.isalnum() and \
|
||||
isinstance(d[k], numbers.Number)])
|
||||
d = dict([(k, numpy.complex(d[k])) for k in d if type(k) == str and
|
||||
k.isalnum() and
|
||||
isinstance(d[k], numbers.Number)])
|
||||
return d
|
||||
|
||||
def check_hint_condition(self, hxml_set, student_answers):
|
||||
@@ -1579,7 +1695,8 @@ class SchematicResponse(LoncapaResponse):
|
||||
answer = xml.xpath('//*[@id=$id]//answer', id=xml.get('id'))[0]
|
||||
answer_src = answer.get('src')
|
||||
if answer_src is not None:
|
||||
self.code = self.system.filestore.open('src/' + answer_src).read() # Untested; never used
|
||||
# Untested; never used
|
||||
self.code = self.system.filestore.open('src/' + answer_src).read()
|
||||
else:
|
||||
self.code = answer.text
|
||||
|
||||
@@ -1635,17 +1752,19 @@ class ImageResponse(LoncapaResponse):
|
||||
|
||||
# parse expected answer
|
||||
# TODO: Compile regexp on file load
|
||||
m = re.match('[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]', expectedset[aid].strip().replace(' ', ''))
|
||||
m = re.match('[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]',
|
||||
expectedset[aid].strip().replace(' ', ''))
|
||||
if not m:
|
||||
msg = 'Error in problem specification! cannot parse rectangle in %s' % (etree.tostring(self.ielements[aid],
|
||||
pretty_print=True))
|
||||
msg = 'Error in problem specification! cannot parse rectangle in %s' % (
|
||||
etree.tostring(self.ielements[aid], pretty_print=True))
|
||||
raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg)
|
||||
(llx, lly, urx, ury) = [int(x) for x in m.groups()]
|
||||
|
||||
# parse given answer
|
||||
m = re.match('\[([0-9]+),([0-9]+)]', given.strip().replace(' ', ''))
|
||||
if not m:
|
||||
raise Exception('[capamodule.capa.responsetypes.imageinput] error grading %s (input=%s)' % (aid, given))
|
||||
raise Exception('[capamodule.capa.responsetypes.imageinput] '
|
||||
'error grading %s (input=%s)' % (aid, given))
|
||||
(gx, gy) = [int(x) for x in m.groups()]
|
||||
|
||||
# answer is correct if (x,y) is within the specified rectangle
|
||||
@@ -1662,4 +1781,17 @@ class ImageResponse(LoncapaResponse):
|
||||
# TEMPORARY: List of all response subclasses
|
||||
# FIXME: To be replaced by auto-registration
|
||||
|
||||
__all__ = [CodeResponse, NumericalResponse, FormulaResponse, CustomResponse, SchematicResponse, ExternalResponse, ImageResponse, OptionResponse, SymbolicResponse, StringResponse, ChoiceResponse, MultipleChoiceResponse, TrueFalseResponse, JavascriptResponse]
|
||||
__all__ = [CodeResponse,
|
||||
NumericalResponse,
|
||||
FormulaResponse,
|
||||
CustomResponse,
|
||||
SchematicResponse,
|
||||
ExternalResponse,
|
||||
ImageResponse,
|
||||
OptionResponse,
|
||||
SymbolicResponse,
|
||||
StringResponse,
|
||||
ChoiceResponse,
|
||||
MultipleChoiceResponse,
|
||||
TrueFalseResponse,
|
||||
JavascriptResponse]
|
||||
|
||||
40
common/lib/capa/capa/templates/chemicalequationinput.html
Normal file
40
common/lib/capa/capa/templates/chemicalequationinput.html
Normal file
@@ -0,0 +1,40 @@
|
||||
<section id="chemicalequationinput_${id}" class="chemicalequationinput">
|
||||
<div class="script_placeholder" data-src="${previewer}"/>
|
||||
|
||||
% if status == 'unsubmitted':
|
||||
<div class="unanswered" id="status_${id}">
|
||||
% elif status == 'correct':
|
||||
<div class="correct" id="status_${id}">
|
||||
% elif status == 'incorrect':
|
||||
<div class="incorrect" id="status_${id}">
|
||||
% elif status == 'incomplete':
|
||||
<div class="incorrect" id="status_${id}">
|
||||
% endif
|
||||
|
||||
<input type="text" name="input_${id}" id="input_${id}" value="${value|h}"
|
||||
% if size:
|
||||
size="${size}"
|
||||
% endif
|
||||
/>
|
||||
|
||||
<p class="status">
|
||||
% if status == 'unsubmitted':
|
||||
unanswered
|
||||
% elif status == 'correct':
|
||||
correct
|
||||
% elif status == 'incorrect':
|
||||
incorrect
|
||||
% elif status == 'incomplete':
|
||||
incomplete
|
||||
% endif
|
||||
</p>
|
||||
|
||||
<div id="input_${id}_preview" class="equation">
|
||||
</div>
|
||||
|
||||
<p id="answer_${id}" class="answer"></p>
|
||||
|
||||
% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']:
|
||||
</div>
|
||||
% endif
|
||||
</section>
|
||||
51
common/lib/capa/capa/templates/crystallography.html
Normal file
51
common/lib/capa/capa/templates/crystallography.html
Normal file
@@ -0,0 +1,51 @@
|
||||
<% doinline = "inline" if inline else "" %>
|
||||
|
||||
<section id="textinput_${id}" class="textinput ${doinline}" >
|
||||
<div id="holder" style="width:${width};height:${height}"></div>
|
||||
<div class="script_placeholder" data-src="/static/js/raphael.js"></div><div class="script_placeholder" data-src="/static/js/sylvester.js"></div><div class="script_placeholder" data-src="/static/js/underscore-min.js"></div>
|
||||
<div class="script_placeholder" data-src="/static/js/crystallography.js"></div>
|
||||
|
||||
|
||||
% if state == 'unsubmitted':
|
||||
<div class="unanswered ${doinline}" id="status_${id}">
|
||||
% elif state == 'correct':
|
||||
<div class="correct ${doinline}" id="status_${id}">
|
||||
% elif state == 'incorrect':
|
||||
<div class="incorrect ${doinline}" id="status_${id}">
|
||||
% elif state == 'incomplete':
|
||||
<div class="incorrect ${doinline}" id="status_${id}">
|
||||
% endif
|
||||
% if hidden:
|
||||
<div style="display:none;" name="${hidden}" inputid="input_${id}" />
|
||||
% endif
|
||||
|
||||
<input type="text" name="input_${id}" id="input_${id}" value="${value}"
|
||||
% if size:
|
||||
size="${size}"
|
||||
% endif
|
||||
% if hidden:
|
||||
style="display:none;"
|
||||
% endif
|
||||
/>
|
||||
|
||||
<p class="status">
|
||||
% if state == 'unsubmitted':
|
||||
unanswered
|
||||
% elif state == 'correct':
|
||||
correct
|
||||
% elif state == 'incorrect':
|
||||
incorrect
|
||||
% elif state == 'incomplete':
|
||||
incomplete
|
||||
% endif
|
||||
</p>
|
||||
|
||||
<p id="answer_${id}" class="answer"></p>
|
||||
|
||||
% if msg:
|
||||
<span class="message">${msg|n}</span>
|
||||
% endif
|
||||
% if state in ['unsubmitted', 'correct', 'incorrect', 'incomplete'] or hidden:
|
||||
</div>
|
||||
% endif
|
||||
</section>
|
||||
@@ -5,8 +5,6 @@
|
||||
% endif
|
||||
>${value|h}</textarea>
|
||||
|
||||
<span id="answer_${id}"></span>
|
||||
|
||||
<div class="grader-status">
|
||||
% if state == 'unsubmitted':
|
||||
<span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
|
||||
@@ -26,6 +24,8 @@
|
||||
<p class="debug">${state}</p>
|
||||
</div>
|
||||
|
||||
<span id="answer_${id}"></span>
|
||||
|
||||
<div class="external-grader-message">
|
||||
${msg|n}
|
||||
</div>
|
||||
@@ -42,7 +42,12 @@
|
||||
lineWrapping: true,
|
||||
indentUnit: "${tabsize}",
|
||||
tabSize: "${tabsize}",
|
||||
indentWithTabs: true,
|
||||
indentWithTabs: false,
|
||||
extraKeys: {
|
||||
"Tab": function(cm) {
|
||||
cm.replaceSelection("${' '*tabsize}", "end");
|
||||
}
|
||||
},
|
||||
smartIndent: false
|
||||
});
|
||||
});
|
||||
|
||||
21
common/lib/capa/capa/tests/__init__.py
Normal file
21
common/lib/capa/capa/tests/__init__.py
Normal file
@@ -0,0 +1,21 @@
|
||||
import fs
|
||||
import fs.osfs
|
||||
import os
|
||||
|
||||
from mock import Mock
|
||||
|
||||
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
test_system = Mock(
|
||||
ajax_url='courses/course_id/modx/a_location',
|
||||
track_function=Mock(),
|
||||
get_module=Mock(),
|
||||
render_template=Mock(),
|
||||
replace_urls=Mock(),
|
||||
user=Mock(),
|
||||
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
|
||||
debug=True,
|
||||
xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10},
|
||||
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
|
||||
anonymous_student_id = 'student'
|
||||
)
|
||||
68
common/lib/capa/capa/tests/test_inputtypes.py
Normal file
68
common/lib/capa/capa/tests/test_inputtypes.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""
|
||||
Tests of input types (and actually responsetypes too)
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
import json
|
||||
from mock import Mock
|
||||
from nose.plugins.skip import SkipTest
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from . import test_system
|
||||
from capa import inputtypes
|
||||
|
||||
from lxml import etree
|
||||
|
||||
def tst_render_template(template, context):
|
||||
"""
|
||||
A test version of render to template. Renders to the repr of the context, completely ignoring the template name.
|
||||
"""
|
||||
return repr(context)
|
||||
|
||||
|
||||
system = Mock(render_template=tst_render_template)
|
||||
|
||||
class OptionInputTest(unittest.TestCase):
|
||||
'''
|
||||
Make sure option inputs work
|
||||
'''
|
||||
def test_rendering_new(self):
|
||||
xml = """<optioninput options="('Up','Down')" id="sky_input" correct="Up"/>"""
|
||||
element = etree.fromstring(xml)
|
||||
|
||||
value = 'Down'
|
||||
status = 'answered'
|
||||
context = inputtypes._optioninput(element, value, status, test_system.render_template)
|
||||
print 'context: ', context
|
||||
|
||||
expected = {'value': 'Down',
|
||||
'options': [('Up', 'Up'), ('Down', 'Down')],
|
||||
'state': 'answered',
|
||||
'msg': '',
|
||||
'inline': '',
|
||||
'id': 'sky_input'}
|
||||
|
||||
self.assertEqual(context, expected)
|
||||
|
||||
|
||||
def test_rendering(self):
|
||||
xml_str = """<optioninput options="('Up','Down')" id="sky_input" correct="Up"/>"""
|
||||
element = etree.fromstring(xml_str)
|
||||
|
||||
state = {'value': 'Down',
|
||||
'id': 'sky_input',
|
||||
'status': 'answered'}
|
||||
option_input = inputtypes.OptionInput(system, element, state)
|
||||
|
||||
context = option_input._get_render_context()
|
||||
|
||||
expected = {'value': 'Down',
|
||||
'options': [('Up', 'Up'), ('Down', 'Down')],
|
||||
'state': 'answered',
|
||||
'msg': '',
|
||||
'inline': '',
|
||||
'id': 'sky_input'}
|
||||
|
||||
self.assertEqual(context, expected)
|
||||
|
||||
384
common/lib/capa/capa/tests/test_responsetypes.py
Normal file
384
common/lib/capa/capa/tests/test_responsetypes.py
Normal file
@@ -0,0 +1,384 @@
|
||||
"""
|
||||
Tests of responsetypes
|
||||
"""
|
||||
|
||||
|
||||
from datetime import datetime
|
||||
import json
|
||||
from nose.plugins.skip import SkipTest
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from . import test_system
|
||||
|
||||
import capa.capa_problem as lcp
|
||||
from capa.correctmap import CorrectMap
|
||||
from capa.util import convert_files_to_filenames
|
||||
from capa.xqueue_interface import dateformat
|
||||
|
||||
class MultiChoiceTest(unittest.TestCase):
|
||||
def test_MC_grade(self):
|
||||
multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'choice_foil3'}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
false_answers = {'1_2_1': 'choice_foil2'}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
|
||||
def test_MC_bare_grades(self):
|
||||
multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'choice_2'}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
false_answers = {'1_2_1': 'choice_1'}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
|
||||
def test_TF_grade(self):
|
||||
truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
false_answers = {'1_2_1': ['choice_foil1']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
false_answers = {'1_2_1': ['choice_foil3']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
|
||||
|
||||
class ImageResponseTest(unittest.TestCase):
|
||||
def test_ir_grade(self):
|
||||
imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': '(490,11)-(556,98)',
|
||||
'1_2_2': '(242,202)-(296,276)'}
|
||||
test_answers = {'1_2_1': '[500,20]',
|
||||
'1_2_2': '[250,300]',
|
||||
}
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
|
||||
|
||||
|
||||
class SymbolicResponseTest(unittest.TestCase):
|
||||
def test_sr_grade(self):
|
||||
raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test
|
||||
symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
|
||||
'1_2_1_dynamath': '''
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mrow>
|
||||
<mi>cos</mi>
|
||||
<mrow>
|
||||
<mo>(</mo>
|
||||
<mi>θ</mi>
|
||||
<mo>)</mo>
|
||||
</mrow>
|
||||
</mrow>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mo>[</mo>
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
<mo>]</mo>
|
||||
</mrow>
|
||||
<mo>+</mo>
|
||||
<mi>i</mi>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mi>sin</mi>
|
||||
<mrow>
|
||||
<mo>(</mo>
|
||||
<mi>θ</mi>
|
||||
<mo>)</mo>
|
||||
</mrow>
|
||||
</mrow>
|
||||
<mo>⋅</mo>
|
||||
<mrow>
|
||||
<mo>[</mo>
|
||||
<mtable>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
<mtr>
|
||||
<mtd>
|
||||
<mn>1</mn>
|
||||
</mtd>
|
||||
<mtd>
|
||||
<mn>0</mn>
|
||||
</mtd>
|
||||
</mtr>
|
||||
</mtable>
|
||||
<mo>]</mo>
|
||||
</mrow>
|
||||
</mstyle>
|
||||
</math>
|
||||
''',
|
||||
}
|
||||
wrong_answers = {'1_2_1': '2',
|
||||
'1_2_1_dynamath': '''
|
||||
<math xmlns="http://www.w3.org/1998/Math/MathML">
|
||||
<mstyle displaystyle="true">
|
||||
<mn>2</mn>
|
||||
</mstyle>
|
||||
</math>''',
|
||||
}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
|
||||
|
||||
class OptionResponseTest(unittest.TestCase):
|
||||
'''
|
||||
Run this with
|
||||
|
||||
python manage.py test courseware.OptionResponseTest
|
||||
'''
|
||||
def test_or_grade(self):
|
||||
optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'True',
|
||||
'1_2_2': 'False'}
|
||||
test_answers = {'1_2_1': 'True',
|
||||
'1_2_2': 'True',
|
||||
}
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
|
||||
|
||||
|
||||
class FormulaResponseWithHintTest(unittest.TestCase):
|
||||
'''
|
||||
Test Formula response problem with a hint
|
||||
This problem also uses calc.
|
||||
'''
|
||||
def test_or_grade(self):
|
||||
problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': '2.5*x-5.0'}
|
||||
test_answers = {'1_2_1': '0.4*x-5.0'}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
cmap = test_lcp.grade_answers(test_answers)
|
||||
self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect')
|
||||
self.assertTrue('You have inverted' in cmap.get_hint('1_2_1'))
|
||||
|
||||
|
||||
class StringResponseWithHintTest(unittest.TestCase):
|
||||
'''
|
||||
Test String response problem with a hint
|
||||
'''
|
||||
def test_or_grade(self):
|
||||
problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'Michigan'}
|
||||
test_answers = {'1_2_1': 'Minnesota'}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
cmap = test_lcp.grade_answers(test_answers)
|
||||
self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect')
|
||||
self.assertTrue('St. Paul' in cmap.get_hint('1_2_1'))
|
||||
|
||||
|
||||
class CodeResponseTest(unittest.TestCase):
|
||||
'''
|
||||
Test CodeResponse
|
||||
TODO: Add tests for external grader messages
|
||||
'''
|
||||
@staticmethod
|
||||
def make_queuestate(key, time):
|
||||
timestr = datetime.strftime(time, dateformat)
|
||||
return {'key': key, 'time': timestr}
|
||||
|
||||
def test_is_queued(self):
|
||||
"""
|
||||
Simple test of whether LoncapaProblem knows when it's been queued
|
||||
"""
|
||||
problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
|
||||
with open(problem_file) as input_file:
|
||||
test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system)
|
||||
|
||||
answer_ids = sorted(test_lcp.get_question_answers())
|
||||
|
||||
# CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
|
||||
cmap = CorrectMap()
|
||||
for answer_id in answer_ids:
|
||||
cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
|
||||
test_lcp.correct_map.update(cmap)
|
||||
|
||||
self.assertEquals(test_lcp.is_queued(), False)
|
||||
|
||||
# Now we queue the LCP
|
||||
cmap = CorrectMap()
|
||||
for i, answer_id in enumerate(answer_ids):
|
||||
queuestate = CodeResponseTest.make_queuestate(i, datetime.now())
|
||||
cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
|
||||
test_lcp.correct_map.update(cmap)
|
||||
|
||||
self.assertEquals(test_lcp.is_queued(), True)
|
||||
|
||||
|
||||
def test_update_score(self):
|
||||
'''
|
||||
Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
|
||||
'''
|
||||
problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
|
||||
with open(problem_file) as input_file:
|
||||
test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system)
|
||||
|
||||
answer_ids = sorted(test_lcp.get_question_answers())
|
||||
|
||||
# CodeResponse requires internal CorrectMap state. Build it now in the queued state
|
||||
old_cmap = CorrectMap()
|
||||
for i, answer_id in enumerate(answer_ids):
|
||||
queuekey = 1000 + i
|
||||
queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now())
|
||||
old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
|
||||
|
||||
# Message format common to external graders
|
||||
grader_msg = '<span>MESSAGE</span>' # Must be valid XML
|
||||
correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg})
|
||||
incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg})
|
||||
|
||||
xserver_msgs = {'correct': correct_score_msg,
|
||||
'incorrect': incorrect_score_msg,}
|
||||
|
||||
# Incorrect queuekey, state should not be updated
|
||||
for correctness in ['correct', 'incorrect']:
|
||||
test_lcp.correct_map = CorrectMap()
|
||||
test_lcp.correct_map.update(old_cmap) # Deep copy
|
||||
|
||||
test_lcp.update_score(xserver_msgs[correctness], queuekey=0)
|
||||
self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison
|
||||
|
||||
for answer_id in answer_ids:
|
||||
self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered
|
||||
|
||||
# Correct queuekey, state should be updated
|
||||
for correctness in ['correct', 'incorrect']:
|
||||
for i, answer_id in enumerate(answer_ids):
|
||||
test_lcp.correct_map = CorrectMap()
|
||||
test_lcp.correct_map.update(old_cmap)
|
||||
|
||||
new_cmap = CorrectMap()
|
||||
new_cmap.update(old_cmap)
|
||||
npoints = 1 if correctness=='correct' else 0
|
||||
new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None)
|
||||
|
||||
test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i)
|
||||
self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict())
|
||||
|
||||
for j, test_id in enumerate(answer_ids):
|
||||
if j == i:
|
||||
self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered
|
||||
else:
|
||||
self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered
|
||||
|
||||
|
||||
def test_recentmost_queuetime(self):
|
||||
'''
|
||||
Test whether the LoncapaProblem knows about the time of queue requests
|
||||
'''
|
||||
problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
|
||||
with open(problem_file) as input_file:
|
||||
test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system)
|
||||
|
||||
answer_ids = sorted(test_lcp.get_question_answers())
|
||||
|
||||
# CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
|
||||
cmap = CorrectMap()
|
||||
for answer_id in answer_ids:
|
||||
cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
|
||||
test_lcp.correct_map.update(cmap)
|
||||
|
||||
self.assertEquals(test_lcp.get_recentmost_queuetime(), None)
|
||||
|
||||
# CodeResponse requires internal CorrectMap state. Build it now in the queued state
|
||||
cmap = CorrectMap()
|
||||
for i, answer_id in enumerate(answer_ids):
|
||||
queuekey = 1000 + i
|
||||
latest_timestamp = datetime.now()
|
||||
queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp)
|
||||
cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
|
||||
test_lcp.correct_map.update(cmap)
|
||||
|
||||
# Queue state only tracks up to second
|
||||
latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat)
|
||||
|
||||
self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp)
|
||||
|
||||
def test_convert_files_to_filenames(self):
|
||||
'''
|
||||
Test whether file objects are converted to filenames without altering other structures
|
||||
'''
|
||||
problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
|
||||
with open(problem_file) as fp:
|
||||
answers_with_file = {'1_2_1': 'String-based answer',
|
||||
'1_3_1': ['answer1', 'answer2', 'answer3'],
|
||||
'1_4_1': [fp, fp]}
|
||||
answers_converted = convert_files_to_filenames(answers_with_file)
|
||||
self.assertEquals(answers_converted['1_2_1'], 'String-based answer')
|
||||
self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
|
||||
self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])
|
||||
|
||||
|
||||
class ChoiceResponseTest(unittest.TestCase):
|
||||
|
||||
def test_cr_rb_grade(self):
|
||||
problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'choice_2',
|
||||
'1_3_1': ['choice_2', 'choice_3']}
|
||||
test_answers = {'1_2_1': 'choice_2',
|
||||
'1_3_1': 'choice_2',
|
||||
}
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')
|
||||
|
||||
def test_cr_cb_grade(self):
|
||||
problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': 'choice_2',
|
||||
'1_3_1': ['choice_2', 'choice_3'],
|
||||
'1_4_1': ['choice_2', 'choice_3']}
|
||||
test_answers = {'1_2_1': 'choice_2',
|
||||
'1_3_1': 'choice_2',
|
||||
'1_4_1': ['choice_2', 'choice_3'],
|
||||
}
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')
|
||||
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct')
|
||||
|
||||
class JavascriptResponseTest(unittest.TestCase):
|
||||
|
||||
def test_jr_grade(self):
|
||||
problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml"
|
||||
coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
|
||||
os.system("coffee -c %s" % (coffee_file_path))
|
||||
test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
|
||||
correct_answers = {'1_2_1': json.dumps({0: 4})}
|
||||
incorrect_answers = {'1_2_1': json.dumps({0: 5})}
|
||||
|
||||
self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
|
||||
@@ -11,7 +11,7 @@ def compare_with_tolerance(v1, v2, tol):
|
||||
|
||||
- v1 : student result (number)
|
||||
- v2 : instructor result (number)
|
||||
- tol : tolerance (string or number)
|
||||
- tol : tolerance (string representing a number)
|
||||
|
||||
'''
|
||||
relative = tol.endswith('%')
|
||||
@@ -26,9 +26,20 @@ def compare_with_tolerance(v1, v2, tol):
|
||||
def contextualize_text(text, context): # private
|
||||
''' Takes a string with variables. E.g. $a+$b.
|
||||
Does a substitution of those variables from the context '''
|
||||
if not text: return text
|
||||
if not text:
|
||||
return text
|
||||
for key in sorted(context, lambda x, y: cmp(len(y), len(x))):
|
||||
text = text.replace('$' + key, str(context[key]))
|
||||
# TODO (vshnayder): This whole replacement thing is a big hack
|
||||
# right now--context contains not just the vars defined in the
|
||||
# program, but also e.g. a reference to the numpy module.
|
||||
# Should be a separate dict of variables that should be
|
||||
# replaced.
|
||||
if '$' + key in text:
|
||||
try:
|
||||
s = str(context[key])
|
||||
except UnicodeEncodeError:
|
||||
s = context[key].encode('utf8', errors='ignore')
|
||||
text = text.replace('$' + key, s)
|
||||
return text
|
||||
|
||||
|
||||
@@ -53,8 +64,4 @@ def is_file(file_to_test):
|
||||
'''
|
||||
Duck typing to check if 'file_to_test' is a File object
|
||||
'''
|
||||
is_file = True
|
||||
for method in ['read', 'name']:
|
||||
if not hasattr(file_to_test, method):
|
||||
is_file = False
|
||||
return is_file
|
||||
return all(hasattr(file_to_test, method) for method in ['read', 'name'])
|
||||
|
||||
@@ -12,7 +12,7 @@ dateformat = '%Y%m%d%H%M%S'
|
||||
|
||||
def make_hashkey(seed):
|
||||
'''
|
||||
Generate a string key by hashing
|
||||
Generate a string key by hashing
|
||||
'''
|
||||
h = hashlib.md5()
|
||||
h.update(str(seed))
|
||||
@@ -20,27 +20,27 @@ def make_hashkey(seed):
|
||||
|
||||
|
||||
def make_xheader(lms_callback_url, lms_key, queue_name):
|
||||
'''
|
||||
"""
|
||||
Generate header for delivery and reply of queue request.
|
||||
|
||||
Xqueue header is a JSON-serialized dict:
|
||||
{ 'lms_callback_url': url to which xqueue will return the request (string),
|
||||
'lms_key': secret key used by LMS to protect its state (string),
|
||||
'lms_key': secret key used by LMS to protect its state (string),
|
||||
'queue_name': designate a specific queue within xqueue server, e.g. 'MITx-6.00x' (string)
|
||||
}
|
||||
'''
|
||||
"""
|
||||
return json.dumps({ 'lms_callback_url': lms_callback_url,
|
||||
'lms_key': lms_key,
|
||||
'queue_name': queue_name })
|
||||
|
||||
|
||||
def parse_xreply(xreply):
|
||||
'''
|
||||
"""
|
||||
Parse the reply from xqueue. Messages are JSON-serialized dict:
|
||||
{ 'return_code': 0 (success), 1 (fail)
|
||||
'content': Message from xqueue (string)
|
||||
}
|
||||
'''
|
||||
"""
|
||||
try:
|
||||
xreply = json.loads(xreply)
|
||||
except ValueError, err:
|
||||
@@ -61,11 +61,11 @@ class XQueueInterface(object):
|
||||
self.url = url
|
||||
self.auth = django_auth
|
||||
self.session = requests.session(auth=requests_auth)
|
||||
|
||||
|
||||
def send_to_queue(self, header, body, files_to_upload=None):
|
||||
'''
|
||||
"""
|
||||
Submit a request to xqueue.
|
||||
|
||||
|
||||
header: JSON-serialized dict in the format described in 'xqueue_interface.make_xheader'
|
||||
|
||||
body: Serialized data for the receipient behind the queueing service. The operation of
|
||||
@@ -74,14 +74,16 @@ class XQueueInterface(object):
|
||||
files_to_upload: List of file objects to be uploaded to xqueue along with queue request
|
||||
|
||||
Returns (error_code, msg) where error_code != 0 indicates an error
|
||||
'''
|
||||
"""
|
||||
# Attempt to send to queue
|
||||
(error, msg) = self._send_to_queue(header, body, files_to_upload)
|
||||
|
||||
if error and (msg == 'login_required'): # Log in, then try again
|
||||
# Log in, then try again
|
||||
if error and (msg == 'login_required'):
|
||||
self._login()
|
||||
if files_to_upload is not None:
|
||||
for f in files_to_upload: # Need to rewind file pointers
|
||||
# Need to rewind file pointers
|
||||
for f in files_to_upload:
|
||||
f.seek(0)
|
||||
(error, msg) = self._send_to_queue(header, body, files_to_upload)
|
||||
|
||||
@@ -91,18 +93,18 @@ class XQueueInterface(object):
|
||||
def _login(self):
|
||||
payload = { 'username': self.auth['username'],
|
||||
'password': self.auth['password'] }
|
||||
return self._http_post(self.url+'/xqueue/login/', payload)
|
||||
return self._http_post(self.url + '/xqueue/login/', payload)
|
||||
|
||||
|
||||
def _send_to_queue(self, header, body, files_to_upload):
|
||||
payload = {'xqueue_header': header,
|
||||
'xqueue_body' : body}
|
||||
files = {}
|
||||
files = {}
|
||||
if files_to_upload is not None:
|
||||
for f in files_to_upload:
|
||||
files.update({ f.name: f })
|
||||
|
||||
return self._http_post(self.url+'/xqueue/submit/', payload, files=files)
|
||||
return self._http_post(self.url + '/xqueue/submit/', payload, files=files)
|
||||
|
||||
|
||||
def _http_post(self, url, data, files=None):
|
||||
@@ -111,7 +113,7 @@ class XQueueInterface(object):
|
||||
except requests.exceptions.ConnectionError, err:
|
||||
log.error(err)
|
||||
return (1, 'cannot connect to server')
|
||||
|
||||
|
||||
if r.status_code not in [200]:
|
||||
return (1, 'unexpected HTTP status code [%d]' % r.status_code)
|
||||
|
||||
|
||||
@@ -76,9 +76,13 @@ class CapaModule(XModule):
|
||||
'''
|
||||
icon_class = 'problem'
|
||||
|
||||
js = {'coffee': [resource_string(__name__, 'js/src/capa/display.coffee')],
|
||||
js = {'coffee': [resource_string(__name__, 'js/src/capa/display.coffee'),
|
||||
resource_string(__name__, 'js/src/collapsible.coffee'),
|
||||
resource_string(__name__, 'js/src/javascript_loader.coffee'),
|
||||
],
|
||||
'js': [resource_string(__name__, 'js/src/capa/imageinput.js'),
|
||||
resource_string(__name__, 'js/src/capa/schematic.js')]}
|
||||
|
||||
js_module_name = "Problem"
|
||||
css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
|
||||
|
||||
@@ -129,6 +133,11 @@ class CapaModule(XModule):
|
||||
if self.rerandomize == 'never':
|
||||
self.seed = 1
|
||||
elif self.rerandomize == "per_student" and hasattr(self.system, 'id'):
|
||||
# TODO: This line is badly broken:
|
||||
# (1) We're passing student ID to xmodule.
|
||||
# (2) There aren't bins of students. -- we only want 10 or 20 randomizations, and want to assign students
|
||||
# to these bins, and may not want cohorts. So e.g. hash(your-id, problem_id) % num_bins.
|
||||
# - analytics really needs small number of bins.
|
||||
self.seed = system.id
|
||||
else:
|
||||
self.seed = None
|
||||
|
||||
@@ -44,8 +44,8 @@ section.problem {
|
||||
}
|
||||
}
|
||||
|
||||
min-width:100px;
|
||||
width: auto !important;
|
||||
min-width:100px;
|
||||
width: auto !important;
|
||||
width: 100px;
|
||||
|
||||
.indicator_container {
|
||||
@@ -299,7 +299,7 @@ section.problem {
|
||||
form.option-input {
|
||||
margin: -10px 0 20px;
|
||||
padding-bottom: 20px;
|
||||
|
||||
|
||||
select {
|
||||
margin-right: flex-gutter();
|
||||
}
|
||||
@@ -421,7 +421,7 @@ section.problem {
|
||||
background-position: right;
|
||||
background-repeat: no-repeat;
|
||||
}
|
||||
|
||||
|
||||
pre {
|
||||
@include border-radius(0);
|
||||
border-radius: 0;
|
||||
@@ -572,7 +572,7 @@ section.problem {
|
||||
}
|
||||
}
|
||||
|
||||
section {
|
||||
> section {
|
||||
padding: 9px;
|
||||
}
|
||||
}
|
||||
@@ -622,4 +622,70 @@ section.problem {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.external-grader-message {
|
||||
section {
|
||||
padding-left: 20px;
|
||||
background-color: #FAFAFA;
|
||||
color: #2C2C2C;
|
||||
font-family: monospace;
|
||||
font-size: 1em;
|
||||
|
||||
.shortform {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.longform {
|
||||
padding: 0px;
|
||||
margin: 0px;
|
||||
|
||||
.result-errors {
|
||||
margin: 5px;
|
||||
padding: 10px 10px 10px 40px;
|
||||
background: url('../images/incorrect-icon.png') center left no-repeat;
|
||||
li {
|
||||
color: #B00;
|
||||
}
|
||||
}
|
||||
|
||||
.result-output {
|
||||
margin: 5px;
|
||||
padding: 20px 0px 15px 50px;
|
||||
border-top: 1px solid #DDD;
|
||||
border-left: 20px solid #FAFAFA;
|
||||
|
||||
h4 {
|
||||
font-family: monospace;
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
dl {
|
||||
margin: 0px;
|
||||
}
|
||||
|
||||
dt {
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
dd {
|
||||
margin-left: 24pt;
|
||||
}
|
||||
}
|
||||
|
||||
.result-correct {
|
||||
background: url('../images/correct-icon.png') left 20px no-repeat;
|
||||
.result-actual-output {
|
||||
color: #090;
|
||||
}
|
||||
}
|
||||
|
||||
.result-incorrect {
|
||||
background: url('../images/incorrect-icon.png') left 20px no-repeat;
|
||||
.result-actual-output {
|
||||
color: #B00;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import abc
|
||||
import inspect
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
@@ -109,6 +110,15 @@ def aggregate_scores(scores, section_name="summary"):
|
||||
return all_total, graded_total
|
||||
|
||||
|
||||
def invalid_args(func, argdict):
|
||||
"""
|
||||
Given a function and a dictionary of arguments, returns a set of arguments
|
||||
from argdict that aren't accepted by func
|
||||
"""
|
||||
args, varargs, keywords, defaults = inspect.getargspec(func)
|
||||
if keywords: return set() # All accepted
|
||||
return set(argdict) - set(args)
|
||||
|
||||
def grader_from_conf(conf):
|
||||
"""
|
||||
This creates a CourseGrader from a configuration (such as in course_settings.py).
|
||||
@@ -128,14 +138,21 @@ def grader_from_conf(conf):
|
||||
try:
|
||||
if 'min_count' in subgraderconf:
|
||||
#This is an AssignmentFormatGrader
|
||||
subgrader = AssignmentFormatGrader(**subgraderconf)
|
||||
subgraders.append((subgrader, subgrader.category, weight))
|
||||
subgrader_class = AssignmentFormatGrader
|
||||
elif 'name' in subgraderconf:
|
||||
#This is an SingleSectionGrader
|
||||
subgrader = SingleSectionGrader(**subgraderconf)
|
||||
subgraders.append((subgrader, subgrader.category, weight))
|
||||
subgrader_class = SingleSectionGrader
|
||||
else:
|
||||
raise ValueError("Configuration has no appropriate grader class.")
|
||||
|
||||
bad_args = invalid_args(subgrader_class.__init__, subgraderconf)
|
||||
if len(bad_args) > 0:
|
||||
log.warning("Invalid arguments for a subgrader: %s", bad_args)
|
||||
for key in bad_args:
|
||||
del subgraderconf[key]
|
||||
|
||||
subgrader = subgrader_class(**subgraderconf)
|
||||
subgraders.append((subgrader, subgrader.category, weight))
|
||||
|
||||
except (TypeError, ValueError) as error:
|
||||
# Add info and re-raise
|
||||
@@ -300,9 +317,12 @@ class AssignmentFormatGrader(CourseGrader):
|
||||
|
||||
short_label is similar to section_type, but shorter. For example, for Homework it would be
|
||||
"HW".
|
||||
|
||||
starting_index is the first number that will appear. For example, starting_index=3 and
|
||||
min_count = 2 would produce the labels "Assignment 3", "Assignment 4"
|
||||
|
||||
"""
|
||||
def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False):
|
||||
def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False, starting_index=1):
|
||||
self.type = type
|
||||
self.min_count = min_count
|
||||
self.drop_count = drop_count
|
||||
@@ -310,6 +330,7 @@ class AssignmentFormatGrader(CourseGrader):
|
||||
self.section_type = section_type or self.type
|
||||
self.short_label = short_label or self.type
|
||||
self.show_only_average = show_only_average
|
||||
self.starting_index = starting_index
|
||||
|
||||
def grade(self, grade_sheet, generate_random_scores=False):
|
||||
def totalWithDrops(breakdown, drop_count):
|
||||
@@ -345,7 +366,7 @@ class AssignmentFormatGrader(CourseGrader):
|
||||
section_name = scores[i].section
|
||||
|
||||
percentage = earned / float(possible)
|
||||
summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index=i + 1,
|
||||
summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index=i + self.starting_index,
|
||||
section_type=self.section_type,
|
||||
name=section_name,
|
||||
percent=percentage,
|
||||
@@ -353,9 +374,9 @@ class AssignmentFormatGrader(CourseGrader):
|
||||
possible=float(possible))
|
||||
else:
|
||||
percentage = 0
|
||||
summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index=i + 1, section_type=self.section_type)
|
||||
summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index=i + self.starting_index, section_type=self.section_type)
|
||||
|
||||
short_label = "{short_label} {index:02d}".format(index=i + 1, short_label=self.short_label)
|
||||
short_label = "{short_label} {index:02d}".format(index=i + self.starting_index, short_label=self.short_label)
|
||||
|
||||
breakdown.append({'percent': percentage, 'label': short_label, 'detail': summary, 'category': self.category})
|
||||
|
||||
|
||||
@@ -21,7 +21,11 @@ log = logging.getLogger("mitx.courseware")
|
||||
|
||||
|
||||
class HtmlModule(XModule):
|
||||
js = {'coffee': [resource_string(__name__, 'js/src/html/display.coffee')]}
|
||||
js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'),
|
||||
resource_string(__name__, 'js/src/collapsible.coffee'),
|
||||
resource_string(__name__, 'js/src/html/display.coffee')
|
||||
]
|
||||
}
|
||||
js_module_name = "HTMLModule"
|
||||
|
||||
def get_html(self):
|
||||
|
||||
@@ -27,11 +27,7 @@ class @Problem
|
||||
@$('section.action input.save').click @save
|
||||
|
||||
# Collapsibles
|
||||
@$('.longform').hide();
|
||||
@$('.shortform').append('<a href="#" class="full">See full output</a>');
|
||||
@$('.collapsible section').hide();
|
||||
@$('.full').click @toggleFull
|
||||
@$('.collapsible header a').click @toggleHint
|
||||
Collapsible.setCollapsibles(@el)
|
||||
|
||||
# Dynamath
|
||||
@$('input.math').keyup(@refreshMath)
|
||||
@@ -67,7 +63,7 @@ class @Problem
|
||||
@new_queued_items = $(response.html).find(".xqueue")
|
||||
if @new_queued_items.length isnt @num_queued_items
|
||||
@el.html(response.html)
|
||||
@executeProblemScripts () =>
|
||||
JavascriptLoader.executeModuleScripts @el, () =>
|
||||
@setupInputTypes()
|
||||
@bind()
|
||||
|
||||
@@ -81,18 +77,19 @@ class @Problem
|
||||
render: (content) ->
|
||||
if content
|
||||
@el.html(content)
|
||||
@executeProblemScripts () =>
|
||||
JavascriptLoader.executeModuleScripts @el, () =>
|
||||
@setupInputTypes()
|
||||
@bind()
|
||||
@queueing()
|
||||
else
|
||||
$.postWithPrefix "#{@url}/problem_get", (response) =>
|
||||
@el.html(response.html)
|
||||
@executeProblemScripts () =>
|
||||
JavascriptLoader.executeModuleScripts @el, () =>
|
||||
@setupInputTypes()
|
||||
@bind()
|
||||
@queueing()
|
||||
|
||||
|
||||
# TODO add hooks for problem types here by inspecting response.html and doing
|
||||
# stuff if a div w a class is found
|
||||
|
||||
@@ -106,50 +103,6 @@ class @Problem
|
||||
if setupMethod?
|
||||
@inputtypeDisplays[id] = setupMethod(inputtype)
|
||||
|
||||
executeProblemScripts: (callback=null) ->
|
||||
|
||||
placeholders = @el.find(".script_placeholder")
|
||||
|
||||
if placeholders.length == 0
|
||||
callback()
|
||||
return
|
||||
|
||||
completed = (false for i in [1..placeholders.length])
|
||||
callbackCalled = false
|
||||
|
||||
# This is required for IE8 support.
|
||||
completionHandlerGeneratorIE = (index) =>
|
||||
return () ->
|
||||
if (this.readyState == 'complete' || this.readyState == 'loaded')
|
||||
#completionHandlerGenerator.call(self, index)()
|
||||
completionHandlerGenerator(index)()
|
||||
|
||||
completionHandlerGenerator = (index) =>
|
||||
return () =>
|
||||
allComplete = true
|
||||
completed[index] = true
|
||||
for flag in completed
|
||||
if not flag
|
||||
allComplete = false
|
||||
break
|
||||
if allComplete and not callbackCalled
|
||||
callbackCalled = true
|
||||
callback() if callback?
|
||||
|
||||
placeholders.each (index, placeholder) ->
|
||||
s = document.createElement('script')
|
||||
s.setAttribute('src', $(placeholder).attr("data-src"))
|
||||
s.setAttribute('type', "text/javascript")
|
||||
|
||||
s.onload = completionHandlerGenerator(index)
|
||||
|
||||
# s.onload does not fire in IE8; this does.
|
||||
s.onreadystatechange = completionHandlerGeneratorIE(index)
|
||||
|
||||
# Need to use the DOM elements directly or the scripts won't execute
|
||||
# properly.
|
||||
$('head')[0].appendChild(s)
|
||||
$(placeholder).remove()
|
||||
|
||||
###
|
||||
# 'check_fd' uses FormData to allow file submissions in the 'problem_check' dispatch,
|
||||
@@ -340,17 +293,6 @@ class @Problem
|
||||
element.CodeMirror.save() if element.CodeMirror.save
|
||||
@answers = @inputs.serialize()
|
||||
|
||||
toggleFull: (event) =>
|
||||
$(event.target).parent().siblings().slideToggle()
|
||||
$(event.target).parent().parent().toggleClass('open')
|
||||
text = $(event.target).text() == 'See full output' ? 'Hide output' : 'See full output'
|
||||
$(this).text(text)
|
||||
|
||||
toggleHint: (event) =>
|
||||
event.preventDefault()
|
||||
$(event.target).parent().siblings().slideToggle()
|
||||
$(event.target).parent().parent().toggleClass('open')
|
||||
|
||||
inputtypeSetupMethods:
|
||||
|
||||
'text-input-dynamath': (element) =>
|
||||
@@ -392,10 +334,13 @@ class @Problem
|
||||
inputtypeShowAnswerMethods:
|
||||
choicegroup: (element, display, answers) =>
|
||||
element = $(element)
|
||||
for key, value of answers
|
||||
element.find('input').attr('disabled', 'disabled')
|
||||
for choice in value
|
||||
element.find("label[for='input_#{key}_#{choice}']").addClass 'choicegroup_correct'
|
||||
|
||||
element.find('input').attr('disabled', 'disabled')
|
||||
|
||||
input_id = element.attr('id').replace(/inputtype_/,'')
|
||||
answer = answers[input_id]
|
||||
for choice in answer
|
||||
element.find("label[for='input_#{input_id}_#{choice}']").addClass 'choicegroup_correct'
|
||||
|
||||
javascriptinput: (element, display, answers) =>
|
||||
answer_id = $(element).attr('id').split("_")[1...].join("_")
|
||||
|
||||
31
common/lib/xmodule/xmodule/js/src/collapsible.coffee
Normal file
31
common/lib/xmodule/xmodule/js/src/collapsible.coffee
Normal file
@@ -0,0 +1,31 @@
|
||||
class @Collapsible
|
||||
|
||||
# Set of library functions that provide a simple way to add collapsible
|
||||
# functionality to elements.
|
||||
|
||||
# setCollapsibles:
|
||||
# Scan element's content for generic collapsible containers
|
||||
@setCollapsibles: (el) =>
|
||||
###
|
||||
el: container
|
||||
###
|
||||
el.find('.longform').hide()
|
||||
el.find('.shortform').append('<a href="#" class="full">See full output</a>')
|
||||
el.find('.collapsible header + section').hide()
|
||||
el.find('.full').click @toggleFull
|
||||
el.find('.collapsible header a').click @toggleHint
|
||||
|
||||
@toggleFull: (event) =>
|
||||
event.preventDefault()
|
||||
$(event.target).parent().siblings().slideToggle()
|
||||
$(event.target).parent().parent().toggleClass('open')
|
||||
if $(event.target).text() == 'See full output'
|
||||
new_text = 'Hide output'
|
||||
else
|
||||
new_text = 'See full ouput'
|
||||
$(event.target).text(new_text)
|
||||
|
||||
@toggleHint: (event) =>
|
||||
event.preventDefault()
|
||||
$(event.target).parent().siblings().slideToggle()
|
||||
$(event.target).parent().parent().toggleClass('open')
|
||||
@@ -2,26 +2,9 @@ class @HTMLModule
|
||||
|
||||
constructor: (@element) ->
|
||||
@el = $(@element)
|
||||
@setCollapsibles()
|
||||
JavascriptLoader.executeModuleScripts(@el)
|
||||
Collapsible.setCollapsibles(@el)
|
||||
MathJax.Hub.Queue ["Typeset", MathJax.Hub, @el[0]]
|
||||
|
||||
$: (selector) ->
|
||||
$(selector, @el)
|
||||
|
||||
setCollapsibles: =>
|
||||
$('.longform').hide();
|
||||
$('.shortform').append('<a href="#" class="full">See full output</a>');
|
||||
$('.collapsible section').hide();
|
||||
$('.full').click @toggleFull
|
||||
$('.collapsible header a').click @toggleHint
|
||||
|
||||
toggleFull: (event) =>
|
||||
$(event.target).parent().siblings().slideToggle()
|
||||
$(event.target).parent().parent().toggleClass('open')
|
||||
text = $(event.target).text() == 'See full output' ? 'Hide output' : 'See full output'
|
||||
$(this).text(text)
|
||||
|
||||
toggleHint: (event) =>
|
||||
event.preventDefault()
|
||||
$(event.target).parent().siblings().slideToggle()
|
||||
$(event.target).parent().parent().toggleClass('open')
|
||||
70
common/lib/xmodule/xmodule/js/src/javascript_loader.coffee
Normal file
70
common/lib/xmodule/xmodule/js/src/javascript_loader.coffee
Normal file
@@ -0,0 +1,70 @@
|
||||
class @JavascriptLoader
|
||||
|
||||
# Set of library functions that provide common interface for javascript loading
|
||||
# for all module types. All functionality provided by JavascriptLoader should take
|
||||
# place at module scope, i.e. don't run jQuery over entire page
|
||||
|
||||
# executeModuleScripts:
|
||||
# Scan the module ('el') for "script_placeholder"s, then:
|
||||
# 1) Fetch each script from server
|
||||
# 2) Explicitly attach the script to the <head> of document
|
||||
# 3) Explicitly wait for each script to be loaded
|
||||
# 4) Return to callback function when all scripts loaded
|
||||
@executeModuleScripts: (el, callback=null) ->
|
||||
|
||||
placeholders = el.find(".script_placeholder")
|
||||
|
||||
if placeholders.length == 0
|
||||
callback() if callback?
|
||||
return
|
||||
|
||||
# TODO: Verify the execution order of multiple placeholders
|
||||
completed = (false for i in [1..placeholders.length])
|
||||
callbackCalled = false
|
||||
|
||||
# This is required for IE8 support.
|
||||
completionHandlerGeneratorIE = (index) =>
|
||||
return () ->
|
||||
if (this.readyState == 'complete' || this.readyState == 'loaded')
|
||||
#completionHandlerGenerator.call(self, index)()
|
||||
completionHandlerGenerator(index)()
|
||||
|
||||
completionHandlerGenerator = (index) =>
|
||||
return () =>
|
||||
allComplete = true
|
||||
completed[index] = true
|
||||
for flag in completed
|
||||
if not flag
|
||||
allComplete = false
|
||||
break
|
||||
if allComplete and not callbackCalled
|
||||
callbackCalled = true
|
||||
callback() if callback?
|
||||
|
||||
# Keep a map of what sources we're loaded from, and don't do it twice.
|
||||
loaded = {}
|
||||
placeholders.each (index, placeholder) ->
|
||||
# TODO: Check if the script already exists in DOM. If so, (1) copy it
|
||||
# into memory; (2) delete the DOM script element; (3) reappend it.
|
||||
# This would prevent memory bloat and save a network request.
|
||||
src = $(placeholder).attr("data-src")
|
||||
if src not of loaded
|
||||
loaded[src] = true
|
||||
s = document.createElement('script')
|
||||
s.setAttribute('src', src)
|
||||
s.setAttribute('type', "text/javascript")
|
||||
|
||||
s.onload = completionHandlerGenerator(index)
|
||||
|
||||
# s.onload does not fire in IE8; this does.
|
||||
s.onreadystatechange = completionHandlerGeneratorIE(index)
|
||||
|
||||
# Need to use the DOM elements directly or the scripts won't execute
|
||||
# properly.
|
||||
$('head')[0].appendChild(s)
|
||||
else
|
||||
# just call the completion callback directly, without reloading the file
|
||||
completionHandlerGenerator(index)()
|
||||
$(placeholder).remove()
|
||||
|
||||
|
||||
@@ -29,7 +29,9 @@ def import_static_content(modules, data_dir, static_content_store):
|
||||
|
||||
|
||||
# now import all static assets
|
||||
static_dir = '{0}/{1}/static/'.format(data_dir, course_data_dir)
|
||||
static_dir = '{0}/static/'.format(course_data_dir)
|
||||
|
||||
logging.debug("Importing static assets in {0}".format(static_dir))
|
||||
|
||||
for dirname, dirnames, filenames in os.walk(static_dir):
|
||||
for filename in filenames:
|
||||
|
||||
@@ -1,31 +1,22 @@
|
||||
#
|
||||
# unittests for xmodule (and capa)
|
||||
#
|
||||
# Note: run this using a like like this:
|
||||
#
|
||||
# django-admin.py test --settings=lms.envs.test_ike --pythonpath=. common/lib/xmodule
|
||||
"""
|
||||
unittests for xmodule
|
||||
|
||||
Run like this:
|
||||
|
||||
rake test_common/lib/xmodule
|
||||
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import os
|
||||
import fs
|
||||
import fs.osfs
|
||||
import json
|
||||
|
||||
import json
|
||||
import numpy
|
||||
|
||||
import xmodule
|
||||
import capa.calc as calc
|
||||
import capa.capa_problem as lcp
|
||||
from capa.correctmap import CorrectMap
|
||||
from capa.util import convert_files_to_filenames
|
||||
from capa.xqueue_interface import dateformat
|
||||
from datetime import datetime
|
||||
from xmodule import graders, x_module
|
||||
import xmodule
|
||||
from xmodule.x_module import ModuleSystem
|
||||
from xmodule.graders import Score, aggregate_scores
|
||||
from xmodule.progress import Progress
|
||||
from nose.plugins.skip import SkipTest
|
||||
from mock import Mock
|
||||
|
||||
i4xs = ModuleSystem(
|
||||
@@ -35,7 +26,7 @@ i4xs = ModuleSystem(
|
||||
render_template=Mock(),
|
||||
replace_urls=Mock(),
|
||||
user=Mock(),
|
||||
filestore=fs.osfs.OSFS(os.path.dirname(os.path.realpath(__file__))+"/test_files"),
|
||||
filestore=Mock(),
|
||||
debug=True,
|
||||
xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10},
|
||||
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
|
||||
@@ -94,719 +85,3 @@ class ModelsTest(unittest.TestCase):
|
||||
exception_happened = True
|
||||
self.assertTrue(exception_happened)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# tests of capa_problem inputtypes
|
||||
|
||||
|
||||
class MultiChoiceTest(unittest.TestCase):
|
||||
def test_MC_grade(self):
|
||||
multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs)
|
||||
correct_answers = {'1_2_1': 'choice_foil3'}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
false_answers = {'1_2_1': 'choice_foil2'}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
|
||||
def test_MC_bare_grades(self):
|
||||
multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs)
|
||||
correct_answers = {'1_2_1': 'choice_2'}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
false_answers = {'1_2_1': 'choice_1'}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
|
||||
def test_TF_grade(self):
|
||||
truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml"
|
||||
test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=i4xs)
|
||||
correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']}
|
||||
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
|
||||
false_answers = {'1_2_1': ['choice_foil1']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
false_answers = {'1_2_1': ['choice_foil3']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']}
|
||||
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
|
||||
|
||||
|
||||
class ImageResponseTest(unittest.TestCase):
    """Tests for <imageresponse>: clicked pixel coordinates graded against rectangles."""

    def test_ir_grade(self):
        imageresponse_file = os.path.join(os.path.dirname(__file__), "test_files/imageresponse.xml")
        # 'with' guarantees the file handle is closed; the original open(...).read() leaked it.
        with open(imageresponse_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        # Correct rectangles, for reference (kept as a comment; the original bound them
        # to an unused local): '1_2_1': '(490,11)-(556,98)', '1_2_2': '(242,202)-(296,276)'
        test_answers = {
            '1_2_1': '[500,20]',   # inside the first rectangle -> correct
            '1_2_2': '[250,300]',  # outside the second rectangle -> incorrect
        }
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
||||
class SymbolicResponseTest(unittest.TestCase):
    """Tests for <symbolicresponse> (symbolic math checked via snuggletex)."""

    def test_sr_grade(self):
        # NOTE(review): everything below the raise is currently dead code; it is kept
        # so the test can be re-enabled once the grader dependency is available.
        raise SkipTest()  # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test
        symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml"
        test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=i4xs)
        # The '_dynamath' key carries the MathML rendering of the plain-text answer,
        # as the front-end math editor would submit it.
        correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
                           '1_2_1_dynamath': '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mrow>
<mi>cos</mi>
<mrow>
<mo>(</mo>
<mi>&#x3B8;</mi>
<mo>)</mo>
</mrow>
</mrow>
<mo>&#x22C5;</mo>
<mrow>
<mo>[</mo>
<mtable>
<mtr>
<mtd>
<mn>1</mn>
</mtd>
<mtd>
<mn>0</mn>
</mtd>
</mtr>
<mtr>
<mtd>
<mn>0</mn>
</mtd>
<mtd>
<mn>1</mn>
</mtd>
</mtr>
</mtable>
<mo>]</mo>
</mrow>
<mo>+</mo>
<mi>i</mi>
<mo>&#x22C5;</mo>
<mrow>
<mi>sin</mi>
<mrow>
<mo>(</mo>
<mi>&#x3B8;</mi>
<mo>)</mo>
</mrow>
</mrow>
<mo>&#x22C5;</mo>
<mrow>
<mo>[</mo>
<mtable>
<mtr>
<mtd>
<mn>0</mn>
</mtd>
<mtd>
<mn>1</mn>
</mtd>
</mtr>
<mtr>
<mtd>
<mn>1</mn>
</mtd>
<mtd>
<mn>0</mn>
</mtd>
</mtr>
</mtable>
<mo>]</mo>
</mrow>
</mstyle>
</math>
''',
                           }
        wrong_answers = {'1_2_1': '2',
                         '1_2_1_dynamath': '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mn>2</mn>
</mstyle>
</math>''',
                         }
        self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
        self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
||||
class OptionResponseTest(unittest.TestCase):
    '''
    Test <optionresponse> grading.

    Run this with

    python manage.py test courseware.OptionResponseTest
    '''
    def test_or_grade(self):
        optionresponse_file = os.path.join(os.path.dirname(__file__), "test_files/optionresponse.xml")
        # 'with' guarantees the file handle is closed; the original open(...).read() leaked it.
        with open(optionresponse_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        # Expected answers, for reference (the original bound them to an unused
        # local): '1_2_1' -> 'True', '1_2_2' -> 'False'.
        test_answers = {
            '1_2_1': 'True',   # matches the expected answer -> correct
            '1_2_2': 'True',   # expected 'False' -> incorrect
        }
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
||||
class FormulaResponseWithHintTest(unittest.TestCase):
    '''
    Test Formula response problem with a hint

    This problem also uses calc.
    '''
    def test_or_grade(self):
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/formularesponse_with_hint.xml")
        # 'with' guarantees the file handle is closed; the original open(...).read() leaked it.
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        correct_answers = {'1_2_1': '2.5*x-5.0'}
        test_answers = {'1_2_1': '0.4*x-5.0'}
        self.assertEqual(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')

        # The wrong answer must be graded incorrect AND trigger the authored hint.
        cmap = test_lcp.grade_answers(test_answers)
        self.assertEqual(cmap.get_correctness('1_2_1'), 'incorrect')
        self.assertTrue('You have inverted' in cmap.get_hint('1_2_1'))
||||
class StringResponseWithHintTest(unittest.TestCase):
    '''
    Test String response problem with a hint
    '''
    def test_or_grade(self):
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/stringresponse_with_hint.xml")
        # 'with' guarantees the file handle is closed; the original open(...).read() leaked it.
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        correct_answers = {'1_2_1': 'Michigan'}
        test_answers = {'1_2_1': 'Minnesota'}
        self.assertEqual(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')

        # The wrong answer must be graded incorrect AND trigger the authored hint.
        cmap = test_lcp.grade_answers(test_answers)
        self.assertEqual(cmap.get_correctness('1_2_1'), 'incorrect')
        self.assertTrue('St. Paul' in cmap.get_hint('1_2_1'))
||||
class CodeResponseTest(unittest.TestCase):
    '''
    Test CodeResponse

    TODO: Add tests for external grader messages
    '''
    @staticmethod
    def make_queuestate(key, time):
        # Build the queuestate dict stored in a CorrectMap entry: the queue key
        # plus the submission time serialized with the module-level 'dateformat'.
        timestr = datetime.strftime(time, dateformat)
        return {'key': key, 'time': timestr}

    def test_is_queued(self):
        '''
        Simple test of whether LoncapaProblem knows when it's been queued
        '''
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        answer_ids = sorted(test_lcp.get_question_answers())

        # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
        cmap = CorrectMap()
        for answer_id in answer_ids:
            cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
        test_lcp.correct_map.update(cmap)

        self.assertEquals(test_lcp.is_queued(), False)

        # Now we queue the LCP: every answer id gets a queuestate, so the whole
        # problem must report itself as queued.
        cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuestate = CodeResponseTest.make_queuestate(i, datetime.now())
            cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
        test_lcp.correct_map.update(cmap)

        self.assertEquals(test_lcp.is_queued(), True)

    def test_update_score(self):
        '''
        Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
        '''
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        answer_ids = sorted(test_lcp.get_question_answers())

        # CodeResponse requires internal CorrectMap state. Build it now in the queued state
        # Queue keys are 1000, 1001, ... so they cannot collide with the bogus key 0 below.
        old_cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuekey = 1000 + i  # NOTE(review): unused local; 1000+i is recomputed on the next line
            queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now())
            old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))

        # Message format common to external graders
        grader_msg = '<span>MESSAGE</span>'  # Must be valid XML
        correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg})
        incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg})

        xserver_msgs = {'correct': correct_score_msg,
                        'incorrect': incorrect_score_msg,}

        # Incorrect queuekey, state should not be updated
        for correctness in ['correct', 'incorrect']:
            test_lcp.correct_map = CorrectMap()
            test_lcp.correct_map.update(old_cmap)  # Deep copy

            # Key 0 matches no queued entry, so the delivery must be a no-op.
            test_lcp.update_score(xserver_msgs[correctness], queuekey=0)
            self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict())  # Deep comparison

            for answer_id in answer_ids:
                self.assertTrue(test_lcp.correct_map.is_queued(answer_id))  # Should be still queued, since message undelivered

        # Correct queuekey, state should be updated
        for correctness in ['correct', 'incorrect']:
            for i, answer_id in enumerate(answer_ids):
                # Reset to the fully-queued baseline before each delivery.
                test_lcp.correct_map = CorrectMap()
                test_lcp.correct_map.update(old_cmap)

                # Expected result: identical to the baseline except that this one
                # answer id is scored and dequeued (queuestate=None).
                new_cmap = CorrectMap()
                new_cmap.update(old_cmap)
                npoints = 1 if correctness=='correct' else 0
                new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None)

                test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i)
                self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict())

                for j, test_id in enumerate(answer_ids):
                    if j == i:
                        self.assertFalse(test_lcp.correct_map.is_queued(test_id))  # Should be dequeued, message delivered
                    else:
                        self.assertTrue(test_lcp.correct_map.is_queued(test_id))  # Should be queued, message undelivered

    def test_recentmost_queuetime(self):
        '''
        Test whether the LoncapaProblem knows about the time of queue requests
        '''
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        answer_ids = sorted(test_lcp.get_question_answers())

        # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
        cmap = CorrectMap()
        for answer_id in answer_ids:
            cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
        test_lcp.correct_map.update(cmap)

        # Nothing queued -> no recentmost queue time.
        self.assertEquals(test_lcp.get_recentmost_queuetime(), None)

        # CodeResponse requires internal CorrectMap state. Build it now in the queued state
        cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuekey = 1000 + i  # NOTE(review): unused local; 1000+i is recomputed below
            # The last iteration's timestamp is the most recent one by construction.
            latest_timestamp = datetime.now()
            queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp)
            cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
        test_lcp.correct_map.update(cmap)

        # Queue state only tracks up to second
        latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat)

        self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp)

    def test_convert_files_to_filenames(self):
        '''
        Test whether file objects are converted to filenames without altering other structures
        '''
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
        with open(problem_file) as fp:
            answers_with_file = {'1_2_1': 'String-based answer',
                                 '1_3_1': ['answer1', 'answer2', 'answer3'],
                                 '1_4_1': [fp, fp]}
            answers_converted = convert_files_to_filenames(answers_with_file)
            # Non-file values pass through untouched; file objects become their .name.
            self.assertEquals(answers_converted['1_2_1'], 'String-based answer')
            self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
            self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])
||||
class ChoiceResponseTest(unittest.TestCase):
    """Tests for <choiceresponse> in both radio and checkbox flavors."""

    def test_cr_rb_grade(self):
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/choiceresponse_radio.xml")
        # 'with' guarantees the file handle is closed; the original open(...).read() leaked it.
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        # Expected answers, for reference (the original bound them to an unused
        # local): '1_2_1' -> 'choice_2', '1_3_1' -> ['choice_2', 'choice_3'].
        test_answers = {
            '1_2_1': 'choice_2',  # matches -> correct
            '1_3_1': 'choice_2',  # only half of the expected pair -> incorrect
        }
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')

    def test_cr_cb_grade(self):
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/choiceresponse_checkbox.xml")
        # 'with' guarantees the file handle is closed; the original open(...).read() leaked it.
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        # Expected answers, for reference (the original bound them to an unused local):
        # '1_2_1' -> 'choice_2', '1_3_1' -> ['choice_2', 'choice_3'], '1_4_1' -> ['choice_2', 'choice_3'].
        test_answers = {
            '1_2_1': 'choice_2',                  # matches -> correct
            '1_3_1': 'choice_2',                  # incomplete selection -> incorrect
            '1_4_1': ['choice_2', 'choice_3'],    # full expected set -> correct
        }
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')
        self.assertEqual(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct')
||||
class JavascriptResponseTest(unittest.TestCase):
    """Tests for <javascriptresponse>: answers graded by compiled CoffeeScript."""

    def test_jr_grade(self):
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/javascriptresponse.xml")
        coffee_file_path = os.path.join(os.path.dirname(__file__), "test_files/js/*.coffee")
        # Compile the CoffeeScript graders before loading the problem.
        # NOTE(review): shells out via os.system with an interpolated path; fine for
        # a repo-local glob, but subprocess with an argument list would be safer.
        os.system("coffee -c %s" % (coffee_file_path))
        # 'with' guarantees the file handle is closed; the original open(...).read() leaked it.
        with open(problem_file) as input_file:
            test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)

        correct_answers = {'1_2_1': json.dumps({0: 4})}
        incorrect_answers = {'1_2_1': json.dumps({0: 5})}

        self.assertEqual(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect')
        self.assertEqual(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
||||
#-----------------------------------------------------------------------------
|
||||
# Grading tests
|
||||
|
||||
|
||||
class GradesheetTest(unittest.TestCase):
    """Tests for aggregate_scores(): totals over all scores vs. graded-only scores."""

    def test_weighted_grading(self):
        scores = []
        # assertAlmostEqual subtracts its arguments, so give Score a numeric
        # __sub__ combining the earned and possible deltas for the comparison.
        Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)

        # Renamed 'all' -> 'all_scores': the original shadowed the builtin all().
        all_scores, graded = aggregate_scores(scores)
        self.assertEqual(all_scores, Score(earned=0, possible=0, graded=False, section="summary"))
        self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))

        # An ungraded score counts toward the overall total only.
        scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
        all_scores, graded = aggregate_scores(scores)
        self.assertEqual(all_scores, Score(earned=0, possible=5, graded=False, section="summary"))
        self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))

        # Graded scores count toward both totals.
        scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
        all_scores, graded = aggregate_scores(scores)
        self.assertAlmostEqual(all_scores, Score(earned=3, possible=10, graded=False, section="summary"))
        self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary"))

        scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
        all_scores, graded = aggregate_scores(scores)
        self.assertAlmostEqual(all_scores, Score(earned=5, possible=15, graded=False, section="summary"))
        self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary"))
||||
class GraderTest(unittest.TestCase):
    """Tests for the individual grader classes and for grader_from_conf()."""

    # No categories at all.
    empty_gradesheet = {
    }

    # Categories exist but hold no scores yet.
    incomplete_gradesheet = {
        'Homework': [],
        'Lab': [],
        'Midterm': [],
    }

    # A populated gradesheet shared by all the tests below.
    test_gradesheet = {
        'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
                     Score(earned=16, possible=16.0, graded=True, section='hw2')],
        #The dropped scores should be from the assignments that don't exist yet

        'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'),  # Dropped
                Score(earned=1, possible=1.0, graded=True, section='lab2'),
                Score(earned=1, possible=1.0, graded=True, section='lab3'),
                Score(earned=5, possible=25.0, graded=True, section='lab4'),  # Dropped
                Score(earned=3, possible=4.0, graded=True, section='lab5'),  # Dropped
                Score(earned=6, possible=7.0, graded=True, section='lab6'),
                Score(earned=5, possible=6.0, graded=True, section='lab7')],

        'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ],
    }

    def test_SingleSectionGrader(self):
        # Graders for an existing section, an existing lab, and a missing lab.
        midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
        lab4Grader = graders.SingleSectionGrader("Lab", "lab4")
        badLabGrader = graders.SingleSectionGrader("Lab", "lab42")

        # Missing data must still yield one breakdown entry and a zero percent.
        for graded in [midtermGrader.grade(self.empty_gradesheet),
                       midtermGrader.grade(self.incomplete_gradesheet),
                       badLabGrader.grade(self.test_gradesheet)]:
            self.assertEqual(len(graded['section_breakdown']), 1)
            self.assertEqual(graded['percent'], 0.0)

        graded = midtermGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.505)  # 50.5 / 100
        self.assertEqual(len(graded['section_breakdown']), 1)

        graded = lab4Grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2)  # 5 / 25
        self.assertEqual(len(graded['section_breakdown']), 1)

    def test_AssignmentFormatGrader(self):
        homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
        noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0)
        #Even though the minimum number is 3, this should grade correctly when 7 assignments are found
        overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2)
        labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)

        #Test the grading of an empty gradesheet
        for graded in [homeworkGrader.grade(self.empty_gradesheet),
                       noDropGrader.grade(self.empty_gradesheet),
                       homeworkGrader.grade(self.incomplete_gradesheet),
                       noDropGrader.grade(self.incomplete_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            #Make sure the breakdown includes 12 sections, plus one summary
            self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = homeworkGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)  # 100% + 10% / 10 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = noDropGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0916666666666666)  # 100% + 10% / 12 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = overflowGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.8880952380952382)  # 100% + 10% / 5 assignments
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)

        graded = labGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)

    def test_WeightedSubsectionsGrader(self):
        #First, a few sub graders
        homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
        labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
        midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")

        # Weights sum to 1.0: the normal configuration.
        weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25), (labGrader, labGrader.category, 0.25),
                                                            (midtermGrader, midtermGrader.category, 0.5)])

        # Weights sum to more than 1.0; the grader does not normalize.
        overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5), (labGrader, labGrader.category, 0.5),
                                                                  (midtermGrader, midtermGrader.category, 0.5)])

        #The midterm should have all weight on this one
        zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
                                                               (midtermGrader, midtermGrader.category, 0.5)])

        #This should always have a final percent of zero
        allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
                                                                  (midtermGrader, midtermGrader.category, 0.0)])

        emptyGrader = graders.WeightedSubsectionsGrader([])

        graded = weightedGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = overOneWeightsGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = zeroWeightsGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2525)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = allZeroWeightsGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        # Even with no data, the full breakdown structure must be produced.
        for graded in [weightedGrader.grade(self.empty_gradesheet),
                       weightedGrader.grade(self.incomplete_gradesheet),
                       zeroWeightsGrader.grade(self.empty_gradesheet),
                       allZeroWeightsGrader.grade(self.empty_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
            self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = emptyGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)

    def test_graderFromConf(self):

        #Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
        #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.

        weightedGrader = graders.grader_from_conf([
            {
                'type': "Homework",
                'min_count': 12,
                'drop_count': 2,
                'short_label': "HW",
                'weight': 0.25,
            },
            {
                'type': "Lab",
                'min_count': 7,
                'drop_count': 3,
                'category': "Labs",
                'weight': 0.25
            },
            {
                'type': "Midterm",
                'name': "Midterm Exam",
                'short_label': "Midterm",
                'weight': 0.5,
            },
        ])

        emptyGrader = graders.grader_from_conf([])

        graded = weightedGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = emptyGrader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)

        #Test that graders can also be used instead of lists of dictionaries
        homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
        homeworkGrader2 = graders.grader_from_conf(homeworkGrader)

        graded = homeworkGrader2.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        #TODO: How do we test failure cases? The parser only logs an error when it can't parse something. Maybe it should throw exceptions?
||||
# --------------------------------------------------------------------------
|
||||
# Module progress tests
|
||||
|
||||
|
||||
class ProgressTest(unittest.TestCase):
    ''' Test that basic Progress objects work. A Progress represents a
    fraction between 0 and 1.
    '''
    # Shared fixtures covering the three states: none / in progress / done.
    not_started = Progress(0, 17)
    part_done = Progress(2, 6)
    half_done = Progress(3, 6)
    also_half_done = Progress(1, 2)
    done = Progress(7, 7)

    def test_create_object(self):
        # These should work (each line only verifies the constructor does not raise):
        p = Progress(0, 2)
        p = Progress(1, 2)
        p = Progress(2, 2)

        p = Progress(2.5, 5.0)
        p = Progress(3.7, 12.3333)

        # These shouldn't: zero or negative denominators are invalid.
        self.assertRaises(ValueError, Progress, 0, 0)
        self.assertRaises(ValueError, Progress, 2, 0)
        self.assertRaises(ValueError, Progress, 1, -2)

        self.assertRaises(TypeError, Progress, 0, "all")
        # check complex numbers just for the heck of it :)
        self.assertRaises(TypeError, Progress, 2j, 3)

    def test_clamp(self):
        # Out-of-range numerators are clamped into [0, total].
        self.assertEqual((2, 2), Progress(3, 2).frac())
        self.assertEqual((0, 2), Progress(-2, 2).frac())

    def test_frac(self):
        p = Progress(1, 2)
        (a, b) = p.frac()
        self.assertEqual(a, 1)
        self.assertEqual(b, 2)

    def test_percent(self):
        self.assertEqual(self.not_started.percent(), 0)
        self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333)
        self.assertEqual(self.half_done.percent(), 50)
        self.assertEqual(self.done.percent(), 100)

        # Equivalent fractions report the same percentage.
        self.assertEqual(self.half_done.percent(), self.also_half_done.percent())

    def test_started(self):
        self.assertFalse(self.not_started.started())

        self.assertTrue(self.part_done.started())
        self.assertTrue(self.half_done.started())
        self.assertTrue(self.done.started())

    def test_inprogress(self):
        # only true if working on it
        self.assertFalse(self.done.inprogress())
        self.assertFalse(self.not_started.inprogress())

        self.assertTrue(self.part_done.inprogress())
        self.assertTrue(self.half_done.inprogress())

    def test_done(self):
        self.assertTrue(self.done.done())
        self.assertFalse(self.half_done.done())
        self.assertFalse(self.not_started.done())

    def test_str(self):
        self.assertEqual(str(self.not_started), "0/17")
        self.assertEqual(str(self.part_done), "2/6")
        self.assertEqual(str(self.done), "7/7")

    def test_ternary_str(self):
        self.assertEqual(self.not_started.ternary_str(), "none")
        self.assertEqual(self.half_done.ternary_str(), "in_progress")
        self.assertEqual(self.done.ternary_str(), "done")

    def test_to_js_status(self):
        '''Test the Progress.to_js_status_str() method'''

        self.assertEqual(Progress.to_js_status_str(self.not_started), "none")
        self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress")
        self.assertEqual(Progress.to_js_status_str(self.done), "done")
        # None (no progress object) is encoded as "NA".
        self.assertEqual(Progress.to_js_status_str(None), "NA")

    def test_to_js_detail_str(self):
        '''Test the Progress.to_js_detail_str() method'''
        f = Progress.to_js_detail_str
        for p in (self.not_started, self.half_done, self.done):
            self.assertEqual(f(p), str(p))
        # But None should be encoded as NA
        self.assertEqual(f(None), "NA")

    def test_add(self):
        '''Test the Progress.add_counts() method'''
        p = Progress(0, 2)
        p2 = Progress(1, 3)
        p3 = Progress(2, 5)
        pNone = None

        # Local helper instead of the original assigned lambda (PEP 8 E731).
        def add(a, b):
            return Progress.add_counts(a, b).frac()

        self.assertEqual(add(p, p), (0, 4))
        self.assertEqual(add(p, p2), (1, 5))
        self.assertEqual(add(p2, p3), (3, 8))

        # None behaves as the additive identity.
        self.assertEqual(add(p2, pNone), p2.frac())
        self.assertEqual(add(pNone, p2), p2.frac())

    def test_equality(self):
        '''Test that comparing Progress objects for equality
        works correctly.'''
        p = Progress(1, 2)
        p2 = Progress(2, 4)  # equivalent fraction, but not equal
        p3 = Progress(1, 2)
        self.assertTrue(p == p3)
        self.assertFalse(p == p2)

        # Check != while we're at it
        self.assertTrue(p != p2)
        self.assertFalse(p != p3)
||||
class ModuleProgressTest(unittest.TestCase):
|
||||
''' Test that get_progress() does the right thing for the different modules
|
||||
'''
|
||||
def test_xmodule_default(self):
    '''Make sure default get_progress exists, returns None'''
    # Construct a bare XModule with the shared test system and a dummy
    # location string; definition and state are empty placeholders.
    xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {})
    p = xm.get_progress()
    self.assertEqual(p, None)
220
common/lib/xmodule/xmodule/tests/test_graders.py
Normal file
220
common/lib/xmodule/xmodule/tests/test_graders.py
Normal file
@@ -0,0 +1,220 @@
|
||||
"""Grading tests"""
|
||||
import unittest
|
||||
|
||||
from xmodule import graders
|
||||
from xmodule.graders import Score, aggregate_scores
|
||||
|
||||
class GradesheetTest(unittest.TestCase):
    """Tests for aggregate_scores(): totals over all scores vs. graded-only scores."""

    def test_weighted_grading(self):
        scores = []
        # assertAlmostEqual subtracts its arguments, so give Score a numeric
        # __sub__ combining the earned and possible deltas for the comparison.
        Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)

        # Renamed 'all' -> 'all_scores': the original shadowed the builtin all().
        all_scores, graded = aggregate_scores(scores)
        self.assertEqual(all_scores, Score(earned=0, possible=0, graded=False, section="summary"))
        self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))

        # An ungraded score counts toward the overall total only.
        scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
        all_scores, graded = aggregate_scores(scores)
        self.assertEqual(all_scores, Score(earned=0, possible=5, graded=False, section="summary"))
        self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))

        # Graded scores count toward both totals.
        scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
        all_scores, graded = aggregate_scores(scores)
        self.assertAlmostEqual(all_scores, Score(earned=3, possible=10, graded=False, section="summary"))
        self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary"))

        scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
        all_scores, graded = aggregate_scores(scores)
        self.assertAlmostEqual(all_scores, Score(earned=5, possible=15, graded=False, section="summary"))
        self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary"))
class GraderTest(unittest.TestCase):
|
||||
|
||||
empty_gradesheet = {
|
||||
}
|
||||
|
||||
incomplete_gradesheet = {
|
||||
'Homework': [],
|
||||
'Lab': [],
|
||||
'Midterm': [],
|
||||
}
|
||||
|
||||
test_gradesheet = {
|
||||
'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
|
||||
Score(earned=16, possible=16.0, graded=True, section='hw2')],
|
||||
#The dropped scores should be from the assignments that don't exist yet
|
||||
|
||||
'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped
|
||||
Score(earned=1, possible=1.0, graded=True, section='lab2'),
|
||||
Score(earned=1, possible=1.0, graded=True, section='lab3'),
|
||||
Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped
|
||||
Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped
|
||||
Score(earned=6, possible=7.0, graded=True, section='lab6'),
|
||||
Score(earned=5, possible=6.0, graded=True, section='lab7')],
|
||||
|
||||
'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ],
|
||||
}
|
||||
|
||||
def test_SingleSectionGrader(self):
|
||||
midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
|
||||
lab4Grader = graders.SingleSectionGrader("Lab", "lab4")
|
||||
badLabGrader = graders.SingleSectionGrader("Lab", "lab42")
|
||||
|
||||
for graded in [midtermGrader.grade(self.empty_gradesheet),
|
||||
midtermGrader.grade(self.incomplete_gradesheet),
|
||||
badLabGrader.grade(self.test_gradesheet)]:
|
||||
self.assertEqual(len(graded['section_breakdown']), 1)
|
||||
self.assertEqual(graded['percent'], 0.0)
|
||||
|
||||
graded = midtermGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.505)
|
||||
self.assertEqual(len(graded['section_breakdown']), 1)
|
||||
|
||||
graded = lab4Grader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.2)
|
||||
self.assertEqual(len(graded['section_breakdown']), 1)
|
||||
|
||||
def test_AssignmentFormatGrader(self):
|
||||
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
|
||||
noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0)
|
||||
#Even though the minimum number is 3, this should grade correctly when 7 assignments are found
|
||||
overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2)
|
||||
labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
|
||||
|
||||
#Test the grading of an empty gradesheet
|
||||
for graded in [homeworkGrader.grade(self.empty_gradesheet),
|
||||
noDropGrader.grade(self.empty_gradesheet),
|
||||
homeworkGrader.grade(self.incomplete_gradesheet),
|
||||
noDropGrader.grade(self.incomplete_gradesheet)]:
|
||||
self.assertAlmostEqual(graded['percent'], 0.0)
|
||||
#Make sure the breakdown includes 12 sections, plus one summary
|
||||
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
|
||||
|
||||
graded = homeworkGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments
|
||||
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
|
||||
|
||||
graded = noDropGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments
|
||||
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
|
||||
|
||||
graded = overflowGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments
|
||||
self.assertEqual(len(graded['section_breakdown']), 7 + 1)
|
||||
|
||||
graded = labGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
|
||||
self.assertEqual(len(graded['section_breakdown']), 7 + 1)
|
||||
|
||||
def test_WeightedSubsectionsGrader(self):
|
||||
#First, a few sub graders
|
||||
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
|
||||
labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
|
||||
midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
|
||||
|
||||
weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25),
|
||||
(labGrader, labGrader.category, 0.25),
|
||||
(midtermGrader, midtermGrader.category, 0.5)])
|
||||
|
||||
overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5),
|
||||
(labGrader, labGrader.category, 0.5),
|
||||
(midtermGrader, midtermGrader.category, 0.5)])
|
||||
|
||||
#The midterm should have all weight on this one
|
||||
zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0),
|
||||
(labGrader, labGrader.category, 0.0),
|
||||
(midtermGrader, midtermGrader.category, 0.5)])
|
||||
|
||||
#This should always have a final percent of zero
|
||||
allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0),
|
||||
(labGrader, labGrader.category, 0.0),
|
||||
(midtermGrader, midtermGrader.category, 0.0)])
|
||||
|
||||
emptyGrader = graders.WeightedSubsectionsGrader([])
|
||||
|
||||
graded = weightedGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
|
||||
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 3)
|
||||
|
||||
graded = overOneWeightsGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
|
||||
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 3)
|
||||
|
||||
graded = zeroWeightsGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.2525)
|
||||
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 3)
|
||||
|
||||
graded = allZeroWeightsGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.0)
|
||||
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 3)
|
||||
|
||||
for graded in [weightedGrader.grade(self.empty_gradesheet),
|
||||
weightedGrader.grade(self.incomplete_gradesheet),
|
||||
zeroWeightsGrader.grade(self.empty_gradesheet),
|
||||
allZeroWeightsGrader.grade(self.empty_gradesheet)]:
|
||||
self.assertAlmostEqual(graded['percent'], 0.0)
|
||||
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 3)
|
||||
|
||||
graded = emptyGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.0)
|
||||
self.assertEqual(len(graded['section_breakdown']), 0)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 0)
|
||||
|
||||
def test_graderFromConf(self):
|
||||
|
||||
#Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
|
||||
#in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.
|
||||
|
||||
weightedGrader = graders.grader_from_conf([
|
||||
{
|
||||
'type': "Homework",
|
||||
'min_count': 12,
|
||||
'drop_count': 2,
|
||||
'short_label': "HW",
|
||||
'weight': 0.25,
|
||||
},
|
||||
{
|
||||
'type': "Lab",
|
||||
'min_count': 7,
|
||||
'drop_count': 3,
|
||||
'category': "Labs",
|
||||
'weight': 0.25
|
||||
},
|
||||
{
|
||||
'type': "Midterm",
|
||||
'name': "Midterm Exam",
|
||||
'short_label': "Midterm",
|
||||
'weight': 0.5,
|
||||
},
|
||||
])
|
||||
|
||||
emptyGrader = graders.grader_from_conf([])
|
||||
|
||||
graded = weightedGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
|
||||
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 3)
|
||||
|
||||
graded = emptyGrader.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.0)
|
||||
self.assertEqual(len(graded['section_breakdown']), 0)
|
||||
self.assertEqual(len(graded['grade_breakdown']), 0)
|
||||
|
||||
#Test that graders can also be used instead of lists of dictionaries
|
||||
homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
|
||||
homeworkGrader2 = graders.grader_from_conf(homeworkGrader)
|
||||
|
||||
graded = homeworkGrader2.grade(self.test_gradesheet)
|
||||
self.assertAlmostEqual(graded['percent'], 0.11)
|
||||
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
|
||||
|
||||
#TODO: How do we test failure cases? The parser only logs an error when
|
||||
#it can't parse something. Maybe it should throw exceptions?
|
||||
|
||||
138
common/lib/xmodule/xmodule/tests/test_progress.py
Normal file
138
common/lib/xmodule/xmodule/tests/test_progress.py
Normal file
@@ -0,0 +1,138 @@
|
||||
"""Module progress tests"""
|
||||
|
||||
import unittest
|
||||
|
||||
from xmodule.progress import Progress
|
||||
from xmodule import x_module
|
||||
|
||||
from . import i4xs
|
||||
|
||||
class ProgressTest(unittest.TestCase):
|
||||
''' Test that basic Progress objects work. A Progress represents a
|
||||
fraction between 0 and 1.
|
||||
'''
|
||||
not_started = Progress(0, 17)
|
||||
part_done = Progress(2, 6)
|
||||
half_done = Progress(3, 6)
|
||||
also_half_done = Progress(1, 2)
|
||||
done = Progress(7, 7)
|
||||
|
||||
def test_create_object(self):
|
||||
# These should work:
|
||||
p = Progress(0, 2)
|
||||
p = Progress(1, 2)
|
||||
p = Progress(2, 2)
|
||||
|
||||
p = Progress(2.5, 5.0)
|
||||
p = Progress(3.7, 12.3333)
|
||||
|
||||
# These shouldn't
|
||||
self.assertRaises(ValueError, Progress, 0, 0)
|
||||
self.assertRaises(ValueError, Progress, 2, 0)
|
||||
self.assertRaises(ValueError, Progress, 1, -2)
|
||||
|
||||
self.assertRaises(TypeError, Progress, 0, "all")
|
||||
# check complex numbers just for the heck of it :)
|
||||
self.assertRaises(TypeError, Progress, 2j, 3)
|
||||
|
||||
def test_clamp(self):
|
||||
self.assertEqual((2, 2), Progress(3, 2).frac())
|
||||
self.assertEqual((0, 2), Progress(-2, 2).frac())
|
||||
|
||||
def test_frac(self):
|
||||
p = Progress(1, 2)
|
||||
(a, b) = p.frac()
|
||||
self.assertEqual(a, 1)
|
||||
self.assertEqual(b, 2)
|
||||
|
||||
def test_percent(self):
|
||||
self.assertEqual(self.not_started.percent(), 0)
|
||||
self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333)
|
||||
self.assertEqual(self.half_done.percent(), 50)
|
||||
self.assertEqual(self.done.percent(), 100)
|
||||
|
||||
self.assertEqual(self.half_done.percent(), self.also_half_done.percent())
|
||||
|
||||
def test_started(self):
|
||||
self.assertFalse(self.not_started.started())
|
||||
|
||||
self.assertTrue(self.part_done.started())
|
||||
self.assertTrue(self.half_done.started())
|
||||
self.assertTrue(self.done.started())
|
||||
|
||||
def test_inprogress(self):
|
||||
# only true if working on it
|
||||
self.assertFalse(self.done.inprogress())
|
||||
self.assertFalse(self.not_started.inprogress())
|
||||
|
||||
self.assertTrue(self.part_done.inprogress())
|
||||
self.assertTrue(self.half_done.inprogress())
|
||||
|
||||
def test_done(self):
|
||||
self.assertTrue(self.done.done())
|
||||
self.assertFalse(self.half_done.done())
|
||||
self.assertFalse(self.not_started.done())
|
||||
|
||||
def test_str(self):
|
||||
self.assertEqual(str(self.not_started), "0/17")
|
||||
self.assertEqual(str(self.part_done), "2/6")
|
||||
self.assertEqual(str(self.done), "7/7")
|
||||
|
||||
def test_ternary_str(self):
|
||||
self.assertEqual(self.not_started.ternary_str(), "none")
|
||||
self.assertEqual(self.half_done.ternary_str(), "in_progress")
|
||||
self.assertEqual(self.done.ternary_str(), "done")
|
||||
|
||||
def test_to_js_status(self):
|
||||
'''Test the Progress.to_js_status_str() method'''
|
||||
|
||||
self.assertEqual(Progress.to_js_status_str(self.not_started), "none")
|
||||
self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress")
|
||||
self.assertEqual(Progress.to_js_status_str(self.done), "done")
|
||||
self.assertEqual(Progress.to_js_status_str(None), "NA")
|
||||
|
||||
def test_to_js_detail_str(self):
|
||||
'''Test the Progress.to_js_detail_str() method'''
|
||||
f = Progress.to_js_detail_str
|
||||
for p in (self.not_started, self.half_done, self.done):
|
||||
self.assertEqual(f(p), str(p))
|
||||
# But None should be encoded as NA
|
||||
self.assertEqual(f(None), "NA")
|
||||
|
||||
def test_add(self):
|
||||
'''Test the Progress.add_counts() method'''
|
||||
p = Progress(0, 2)
|
||||
p2 = Progress(1, 3)
|
||||
p3 = Progress(2, 5)
|
||||
pNone = None
|
||||
add = lambda a, b: Progress.add_counts(a, b).frac()
|
||||
|
||||
self.assertEqual(add(p, p), (0, 4))
|
||||
self.assertEqual(add(p, p2), (1, 5))
|
||||
self.assertEqual(add(p2, p3), (3, 8))
|
||||
|
||||
self.assertEqual(add(p2, pNone), p2.frac())
|
||||
self.assertEqual(add(pNone, p2), p2.frac())
|
||||
|
||||
def test_equality(self):
|
||||
'''Test that comparing Progress objects for equality
|
||||
works correctly.'''
|
||||
p = Progress(1, 2)
|
||||
p2 = Progress(2, 4)
|
||||
p3 = Progress(1, 2)
|
||||
self.assertTrue(p == p3)
|
||||
self.assertFalse(p == p2)
|
||||
|
||||
# Check != while we're at it
|
||||
self.assertTrue(p != p2)
|
||||
self.assertFalse(p != p3)
|
||||
|
||||
|
||||
class ModuleProgressTest(unittest.TestCase):
|
||||
''' Test that get_progress() does the right thing for the different modules
|
||||
'''
|
||||
def test_xmodule_default(self):
|
||||
'''Make sure default get_progress exists, returns None'''
|
||||
xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {})
|
||||
p = xm.get_progress()
|
||||
self.assertEqual(p, None)
|
||||
@@ -31,12 +31,23 @@ class VideoModule(XModule):
|
||||
self.youtube = xmltree.get('youtube')
|
||||
self.position = 0
|
||||
self.show_captions = xmltree.get('show_captions', 'true')
|
||||
self.source = self._get_source(xmltree)
|
||||
|
||||
if instance_state is not None:
|
||||
state = json.loads(instance_state)
|
||||
if 'position' in state:
|
||||
self.position = int(float(state['position']))
|
||||
|
||||
def _get_source(self, xmltree):
|
||||
# find the first valid source
|
||||
source = None
|
||||
for element in xmltree.findall('source'):
|
||||
src = element.get('src')
|
||||
if src:
|
||||
source = src
|
||||
break
|
||||
return source
|
||||
|
||||
def handle_ajax(self, dispatch, get):
|
||||
'''
|
||||
Handle ajax calls to this video.
|
||||
@@ -73,6 +84,7 @@ class VideoModule(XModule):
|
||||
'streams': self.video_list(),
|
||||
'id': self.location.html_id(),
|
||||
'position': self.position,
|
||||
'source': self.source,
|
||||
'display_name': self.display_name,
|
||||
# TODO (cpennington): This won't work when we move to data that isn't on the filesystem
|
||||
'data_dir': self.metadata['data_dir'],
|
||||
@@ -82,6 +94,5 @@ class VideoModule(XModule):
|
||||
|
||||
class VideoDescriptor(RawDescriptor):
|
||||
module_class = VideoModule
|
||||
|
||||
stores_state = True
|
||||
template_dir_name = "video"
|
||||
|
||||
@@ -25,6 +25,10 @@ class @DiscussionUtil
|
||||
staff = _.union(@roleIds['Staff'], @roleIds['Moderator'], @roleIds['Administrator'])
|
||||
_.include(staff, parseInt(user_id))
|
||||
|
||||
@isTA: (user_id) ->
|
||||
ta = _.union(@roleIds['Community TA'])
|
||||
_.include(ta, parseInt(user_id))
|
||||
|
||||
@bulkUpdateContentInfo: (infos) ->
|
||||
for id, info of infos
|
||||
Content.getContent(id).updateInfo(info)
|
||||
@@ -157,7 +161,7 @@ class @DiscussionUtil
|
||||
@makeWmdEditor: ($content, $local, cls_identifier) ->
|
||||
elem = $local(".#{cls_identifier}")
|
||||
placeholder = elem.data('placeholder')
|
||||
id = elem.data("id")
|
||||
id = elem.attr("data-id") # use attr instead of data because we want to avoid type coercion
|
||||
appended_id = "-#{cls_identifier}-#{id}"
|
||||
imageUploadUrl = @urlFor('upload')
|
||||
_processor = (_this) ->
|
||||
@@ -170,12 +174,12 @@ class @DiscussionUtil
|
||||
|
||||
@getWmdEditor: ($content, $local, cls_identifier) ->
|
||||
elem = $local(".#{cls_identifier}")
|
||||
id = elem.data("id")
|
||||
id = elem.attr("data-id") # use attr instead of data because we want to avoid type coercion
|
||||
@wmdEditors["#{cls_identifier}-#{id}"]
|
||||
|
||||
@getWmdInput: ($content, $local, cls_identifier) ->
|
||||
elem = $local(".#{cls_identifier}")
|
||||
id = elem.data("id")
|
||||
id = elem.attr("data-id") # use attr instead of data because we want to avoid type coercion
|
||||
$local("#wmd-input-#{cls_identifier}-#{id}")
|
||||
|
||||
@getWmdContent: ($content, $local, cls_identifier) ->
|
||||
|
||||
@@ -156,7 +156,11 @@ if Backbone?
|
||||
@$(".post-list").append(view.el)
|
||||
|
||||
threadSelected: (e) =>
|
||||
thread_id = $(e.target).closest("a").data("id")
|
||||
# Use .attr('data-id') rather than .data('id') because .data does type
|
||||
# coercion. Usually, this is fine, but when Mongo gives an object id with
|
||||
# no letters, it casts it to a Number.
|
||||
|
||||
thread_id = $(e.target).closest("a").attr("data-id")
|
||||
@setActiveThread(thread_id)
|
||||
@trigger("thread:selected", thread_id) # This triggers a callback in the DiscussionRouter which calls the line above...
|
||||
false
|
||||
|
||||
@@ -32,3 +32,5 @@ if Backbone?
|
||||
markAsStaff: ->
|
||||
if DiscussionUtil.isStaff(@model.get("user_id"))
|
||||
@$el.find("a.profile-link").after('<span class="staff-label">staff</span>')
|
||||
else if DiscussionUtil.isTA(@model.get("user_id"))
|
||||
@$el.find("a.profile-link").after('<span class="community-ta-label">Community TA</span>')
|
||||
|
||||
@@ -37,6 +37,9 @@ if Backbone?
|
||||
if DiscussionUtil.isStaff(@model.get("user_id"))
|
||||
@$el.addClass("staff")
|
||||
@$el.prepend('<div class="staff-banner">staff</div>')
|
||||
else if DiscussionUtil.isTA(@model.get("user_id"))
|
||||
@$el.addClass("community-ta")
|
||||
@$el.prepend('<div class="community-ta-banner">Community TA</div>')
|
||||
|
||||
toggleVote: (event) ->
|
||||
event.preventDefault()
|
||||
|
||||
1
common/static/js/capa/README
Normal file
1
common/static/js/capa/README
Normal file
@@ -0,0 +1 @@
|
||||
These files really should be in the capa module, but we don't have a way to load js from there at the moment. (TODO)
|
||||
24
common/static/js/capa/chemical_equation_preview.js
Normal file
24
common/static/js/capa/chemical_equation_preview.js
Normal file
@@ -0,0 +1,24 @@
|
||||
(function () {
|
||||
update = function() {
|
||||
function create_handler(saved_div) {
|
||||
return (function(response) {
|
||||
if (response.error) {
|
||||
saved_div.html("<span class='error'>" + response.error + "</span>");
|
||||
} else {
|
||||
saved_div.html(response.preview);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
prev_id = "#" + this.id + "_preview";
|
||||
preview_div = $(prev_id)
|
||||
|
||||
$.get("/preview/chemcalc/", {"formula" : this.value}, create_handler(preview_div));
|
||||
}
|
||||
|
||||
inputs = $('.chemicalequationinput input');
|
||||
// update on load
|
||||
inputs.each(update);
|
||||
// and on every change
|
||||
inputs.bind("input", update);
|
||||
}).call(this);
|
||||
@@ -11,7 +11,7 @@
|
||||
-moz-box-shadow: 0 5px 10px rgba(0,0,0,0.1);
|
||||
box-shadow: 0 5px 10px rgba(0,0,0,0.1);
|
||||
outline: none;
|
||||
z-index: 10001;
|
||||
z-index: 100001;
|
||||
font-size: 12px;
|
||||
}
|
||||
|
||||
|
||||
@@ -13,13 +13,13 @@ ouch() {
|
||||
printf '\E[31m'
|
||||
|
||||
cat<<EOL
|
||||
|
||||
|
||||
!! ERROR !!
|
||||
|
||||
The last command did not complete successfully,
|
||||
The last command did not complete successfully,
|
||||
For more details or trying running the
|
||||
script again with the -v flag.
|
||||
|
||||
script again with the -v flag.
|
||||
|
||||
Output of the script is recorded in $LOG
|
||||
|
||||
EOL
|
||||
@@ -27,18 +27,18 @@ EOL
|
||||
|
||||
}
|
||||
error() {
|
||||
printf '\E[31m'; echo "$@"; printf '\E[0m'
|
||||
printf '\E[31m'; echo "$@"; printf '\E[0m'
|
||||
}
|
||||
output() {
|
||||
printf '\E[36m'; echo "$@"; printf '\E[0m'
|
||||
printf '\E[36m'; echo "$@"; printf '\E[0m'
|
||||
}
|
||||
usage() {
|
||||
cat<<EO
|
||||
|
||||
Usage: $PROG [-c] [-v] [-h]
|
||||
|
||||
|
||||
-c compile scipy and numpy
|
||||
-s give access to global site-packages for virtualenv
|
||||
-s give access to global site-packages for virtualenv
|
||||
-v set -x + spew
|
||||
-h this
|
||||
|
||||
@@ -49,7 +49,7 @@ EO
|
||||
info() {
|
||||
|
||||
cat<<EO
|
||||
MITx base dir : $BASE
|
||||
MITx base dir : $BASE
|
||||
Python dir : $PYTHON_DIR
|
||||
Ruby dir : $RUBY_DIR
|
||||
Ruby ver : $RUBY_VER
|
||||
@@ -59,11 +59,11 @@ EO
|
||||
|
||||
clone_repos() {
|
||||
cd "$BASE"
|
||||
|
||||
|
||||
if [[ -d "$BASE/mitx/.git" ]]; then
|
||||
output "Pulling mitx"
|
||||
cd "$BASE/mitx"
|
||||
git pull
|
||||
git pull
|
||||
else
|
||||
output "Cloning mitx"
|
||||
if [[ -d "$BASE/mitx" ]]; then
|
||||
@@ -71,13 +71,13 @@ clone_repos() {
|
||||
fi
|
||||
git clone git@github.com:MITx/mitx.git
|
||||
fi
|
||||
|
||||
|
||||
if [[ ! -d "$BASE/mitx/askbot/.git" ]]; then
|
||||
output "Cloning askbot as a submodule of mitx"
|
||||
cd "$BASE/mitx"
|
||||
git submodule update --init
|
||||
fi
|
||||
|
||||
|
||||
# By default, dev environments start with a copy of 6.002x
|
||||
cd "$BASE"
|
||||
mkdir -p "$BASE/data"
|
||||
@@ -85,14 +85,14 @@ clone_repos() {
|
||||
if [[ -d "$BASE/data/$REPO/.git" ]]; then
|
||||
output "Pulling $REPO"
|
||||
cd "$BASE/data/$REPO"
|
||||
git pull
|
||||
git pull
|
||||
else
|
||||
output "Cloning $REPO"
|
||||
if [[ -d "$BASE/data/$REPO" ]]; then
|
||||
mv "$BASE/data/$REPO" "${BASE}/data/$REPO.bak.$$"
|
||||
fi
|
||||
cd "$BASE/data"
|
||||
git clone git@github.com:MITx/$REPO
|
||||
git clone git@github.com:MITx/$REPO
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -118,8 +118,8 @@ if [[ $? != 0 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
eval set -- "$ARGS"
|
||||
while true; do
|
||||
case $1 in
|
||||
while true; do
|
||||
case $1 in
|
||||
-c)
|
||||
compile=true
|
||||
shift
|
||||
@@ -159,16 +159,16 @@ cat<<EO
|
||||
To compile scipy and numpy from source use the -c option
|
||||
|
||||
!!! Do not run this script from an existing virtualenv !!!
|
||||
|
||||
|
||||
If you are in a ruby/python virtualenv please start a new
|
||||
shell.
|
||||
shell.
|
||||
|
||||
EO
|
||||
info
|
||||
output "Press return to begin or control-C to abort"
|
||||
read dummy
|
||||
|
||||
# log all stdout and stderr
|
||||
# log all stdout and stderr
|
||||
exec > >(tee $LOG)
|
||||
exec 2>&1
|
||||
|
||||
@@ -193,7 +193,7 @@ case `uname -s` in
|
||||
maya|lisa|natty|oneiric|precise)
|
||||
output "Installing ubuntu requirements"
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install $APT_PKGS
|
||||
sudo apt-get -y install $APT_PKGS
|
||||
clone_repos
|
||||
;;
|
||||
*)
|
||||
@@ -203,11 +203,11 @@ case `uname -s` in
|
||||
esac
|
||||
;;
|
||||
Darwin)
|
||||
|
||||
|
||||
if [[ ! -w /usr/local ]]; then
|
||||
cat<<EO
|
||||
|
||||
You need to be able to write to /usr/local for
|
||||
|
||||
You need to be able to write to /usr/local for
|
||||
the installation of brew and brew packages.
|
||||
|
||||
Either make sure the group you are in (most likely 'staff')
|
||||
@@ -221,13 +221,13 @@ EO
|
||||
|
||||
fi
|
||||
|
||||
command -v brew &>/dev/null || {
|
||||
command -v brew &>/dev/null || {
|
||||
output "Installing brew"
|
||||
/usr/bin/ruby <(curl -fsSkL raw.github.com/mxcl/homebrew/go)
|
||||
}
|
||||
}
|
||||
command -v git &>/dev/null || {
|
||||
output "Installing git"
|
||||
brew install git
|
||||
brew install git
|
||||
}
|
||||
|
||||
clone_repos
|
||||
@@ -241,17 +241,21 @@ EO
|
||||
for pkg in $(cat $BREW_FILE); do
|
||||
grep $pkg <(brew list) &>/dev/null || {
|
||||
output "Installing $pkg"
|
||||
brew install $pkg
|
||||
brew install $pkg
|
||||
}
|
||||
done
|
||||
|
||||
# paths where brew likes to install python scripts
|
||||
PATH=/usr/local/share/python:/usr/local/bin:$PATH
|
||||
|
||||
command -v pip &>/dev/null || {
|
||||
output "Installing pip"
|
||||
sudo easy_install pip
|
||||
easy_install pip
|
||||
}
|
||||
|
||||
if ! grep -Eq ^1.7 <(virtualenv --version 2>/dev/null); then
|
||||
output "Installing virtualenv >1.7"
|
||||
sudo pip install 'virtualenv>1.7' virtualenvwrapper
|
||||
pip install 'virtualenv>1.7' virtualenvwrapper
|
||||
fi
|
||||
|
||||
command -v coffee &>/dev/null || {
|
||||
@@ -267,18 +271,10 @@ EO
|
||||
esac
|
||||
|
||||
output "Installing rvm and ruby"
|
||||
curl -sL get.rvm.io | bash -s stable
|
||||
curl -sL get.rvm.io | bash -s -- --version 1.15.7
|
||||
source $RUBY_DIR/scripts/rvm
|
||||
# skip the intro
|
||||
# skip the intro
|
||||
LESS="-E" rvm install $RUBY_VER
|
||||
if [[ $systempkgs ]]; then
|
||||
virtualenv --system-site-packages "$PYTHON_DIR"
|
||||
else
|
||||
# default behavior for virtualenv>1.7 is
|
||||
# --no-site-packages
|
||||
virtualenv "$PYTHON_DIR"
|
||||
fi
|
||||
source $PYTHON_DIR/bin/activate
|
||||
output "Installing gem bundler"
|
||||
gem install bundler
|
||||
output "Installing ruby packages"
|
||||
@@ -287,6 +283,16 @@ cd $BASE/mitx || true
|
||||
bundle install
|
||||
|
||||
cd $BASE
|
||||
if [[ $systempkgs ]]; then
|
||||
virtualenv --system-site-packages "$PYTHON_DIR"
|
||||
else
|
||||
# default behavior for virtualenv>1.7 is
|
||||
# --no-site-packages
|
||||
virtualenv "$PYTHON_DIR"
|
||||
fi
|
||||
|
||||
# change to mitx python virtualenv
|
||||
source $PYTHON_DIR/bin/activate
|
||||
|
||||
if [[ -n $compile ]]; then
|
||||
output "Downloading numpy and scipy"
|
||||
@@ -297,39 +303,54 @@ if [[ -n $compile ]]; then
|
||||
rm -f numpy.tar.gz scipy.tar.gz
|
||||
output "Compiling numpy"
|
||||
cd "$BASE/numpy-${NUMPY_VER}"
|
||||
python setup.py install
|
||||
python setup.py install
|
||||
output "Compiling scipy"
|
||||
cd "$BASE/scipy-${SCIPY_VER}"
|
||||
python setup.py install
|
||||
python setup.py install
|
||||
cd "$BASE"
|
||||
rm -rf numpy-${NUMPY_VER} scipy-${SCIPY_VER}
|
||||
fi
|
||||
|
||||
case `uname -s` in
|
||||
Darwin)
|
||||
# on mac os x get the latest distribute and pip
|
||||
curl http://python-distribute.org/distribute_setup.py | python
|
||||
pip install -U pip
|
||||
# need latest pytz before compiling numpy and scipy
|
||||
pip install -U pytz
|
||||
pip install numpy
|
||||
# fixes problem with scipy on 10.8
|
||||
pip install -e git+https://github.com/scipy/scipy#egg=scipy-dev
|
||||
;;
|
||||
esac
|
||||
|
||||
output "Installing MITx pre-requirements"
|
||||
pip install -r mitx/pre-requirements.txt
|
||||
pip install -r mitx/pre-requirements.txt
|
||||
# Need to be in the mitx dir to get the paths to local modules right
|
||||
output "Installing MITx requirements"
|
||||
cd mitx
|
||||
pip install -r requirements.txt
|
||||
pip install -r requirements.txt
|
||||
output "Installing askbot requirements"
|
||||
pip install -r askbot/askbot_requirements.txt
|
||||
pip install -r askbot/askbot_requirements_dev.txt
|
||||
|
||||
pip install -r askbot/askbot_requirements.txt
|
||||
pip install -r askbot/askbot_requirements_dev.txt
|
||||
|
||||
mkdir "$BASE/log" || true
|
||||
mkdir "$BASE/db" || true
|
||||
|
||||
output "Fixing your git default settings"
|
||||
git config --global push.default current
|
||||
|
||||
cat<<END
|
||||
Success!!
|
||||
|
||||
To start using Django you will need to activate the local Python
|
||||
To start using Django you will need to activate the local Python
|
||||
and Ruby environment (at this time rvm only supports bash) :
|
||||
|
||||
$ source $RUBY_DIR/scripts/rvm
|
||||
$ source $PYTHON_DIR/bin/activate
|
||||
|
||||
|
||||
To initialize Django
|
||||
|
||||
|
||||
$ cd $BASE/mitx
|
||||
$ rake django-admin[syncdb]
|
||||
$ rake django-admin[migrate]
|
||||
@@ -337,21 +358,20 @@ cat<<END
|
||||
To start the Django on port 8000
|
||||
|
||||
$ rake lms
|
||||
|
||||
|
||||
Or to start Django on a different <port#>
|
||||
|
||||
$ rake django-admin[runserver,lms,dev,<port#>]
|
||||
$ rake django-admin[runserver,lms,dev,<port#>]
|
||||
|
||||
If the Django development server starts properly you
|
||||
If the Django development server starts properly you
|
||||
should see:
|
||||
|
||||
Development server is running at http://127.0.0.1:<port#>/
|
||||
Quit the server with CONTROL-C.
|
||||
|
||||
Connect your browser to http://127.0.0.1:<port#> to
|
||||
Connect your browser to http://127.0.0.1:<port#> to
|
||||
view the Django site.
|
||||
|
||||
|
||||
END
|
||||
exit 0
|
||||
|
||||
|
||||
@@ -107,7 +107,7 @@ def _has_access_course_desc(user, course, action):
|
||||
NOTE: this is not checking whether user is actually enrolled in the course.
|
||||
"""
|
||||
# delegate to generic descriptor check to check start dates
|
||||
return _has_access_descriptor(user, course, action)
|
||||
return _has_access_descriptor(user, course, 'load')
|
||||
|
||||
def can_enroll():
|
||||
"""
|
||||
|
||||
@@ -329,9 +329,15 @@ def progress_summary(student, request, course, student_module_cache):
|
||||
def get_score(course_id, user, problem_descriptor, module_creator, student_module_cache):
|
||||
"""
|
||||
Return the score for a user on a problem, as a tuple (correct, total).
|
||||
e.g. (5,7) if you got 5 out of 7 points.
|
||||
|
||||
If this problem doesn't have a score, or we couldn't load it, returns (None,
|
||||
None).
|
||||
|
||||
user: a Student object
|
||||
problem: an XModule
|
||||
problem_descriptor: an XModuleDescriptor
|
||||
module_creator: a function that takes a descriptor, and returns the corresponding XModule for this user.
|
||||
Can return None if user doesn't have access, or if something else went wrong.
|
||||
cache: A StudentModuleCache
|
||||
"""
|
||||
if not (problem_descriptor.stores_state and problem_descriptor.has_score):
|
||||
@@ -339,14 +345,16 @@ def get_score(course_id, user, problem_descriptor, module_creator, student_modul
|
||||
return (None, None)
|
||||
|
||||
correct = 0.0
|
||||
|
||||
|
||||
instance_module = student_module_cache.lookup(
|
||||
course_id, problem_descriptor.category, problem_descriptor.location.url())
|
||||
|
||||
|
||||
if not instance_module:
|
||||
# If the problem was not in the cache, we need to instantiate the problem.
|
||||
# Otherwise, the max score (cached in instance_module) won't be available
|
||||
# Otherwise, the max score (cached in instance_module) won't be available
|
||||
problem = module_creator(problem_descriptor)
|
||||
if problem is None:
|
||||
return (None, None)
|
||||
instance_module = get_instance_module(course_id, user, problem, student_module_cache)
|
||||
|
||||
# If this problem is ungraded/ungradable, bail
|
||||
@@ -361,7 +369,7 @@ def get_score(course_id, user, problem_descriptor, module_creator, student_modul
|
||||
weight = getattr(problem_descriptor, 'weight', None)
|
||||
if weight is not None:
|
||||
if total == 0:
|
||||
log.exception("Cannot reweight a problem with zero weight. Problem: " + str(instance_module))
|
||||
log.exception("Cannot reweight a problem with zero total points. Problem: " + str(instance_module))
|
||||
return (correct, total)
|
||||
correct = correct * weight / total
|
||||
total = weight
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import pyparsing
|
||||
import sys
|
||||
|
||||
from django.conf import settings
|
||||
@@ -13,6 +14,7 @@ from django.views.decorators.csrf import csrf_exempt
|
||||
from requests.auth import HTTPBasicAuth
|
||||
|
||||
from capa.xqueue_interface import XQueueInterface
|
||||
from capa.chem import chemcalc
|
||||
from courseware.access import has_access
|
||||
from mitxmako.shortcuts import render_to_string
|
||||
from models import StudentModule, StudentModuleCache
|
||||
@@ -471,3 +473,42 @@ def modx_dispatch(request, dispatch, location, course_id):
|
||||
|
||||
# Return whatever the module wanted to return to the client/caller
|
||||
return HttpResponse(ajax_return)
|
||||
|
||||
def preview_chemcalc(request):
|
||||
"""
|
||||
Render an html preview of a chemical formula or equation. The fact that
|
||||
this is here is a bit of hack. See the note in lms/urls.py about why it's
|
||||
here. (Victor is to blame.)
|
||||
|
||||
request should be a GET, with a key 'formula' and value 'some formula string'.
|
||||
|
||||
Returns a json dictionary:
|
||||
{
|
||||
'preview' : 'the-preview-html' or ''
|
||||
'error' : 'the-error' or ''
|
||||
}
|
||||
"""
|
||||
if request.method != "GET":
|
||||
raise Http404
|
||||
|
||||
result = {'preview': '',
|
||||
'error': '' }
|
||||
formula = request.GET.get('formula')
|
||||
if formula is None:
|
||||
result['error'] = "No formula specified."
|
||||
|
||||
return HttpResponse(json.dumps(result))
|
||||
|
||||
try:
|
||||
result['preview'] = chemcalc.render_to_html(formula)
|
||||
except pyparsing.ParseException as p:
|
||||
result['error'] = "Couldn't parse formula: {0}".format(p)
|
||||
except Exception:
|
||||
# this is unexpected, so log
|
||||
log.warning("Error while previewing chemical formula", exc_info=True)
|
||||
result['error'] = "Error while rendering preview"
|
||||
|
||||
return HttpResponse(json.dumps(result))
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -15,6 +15,8 @@ import logging
|
||||
from django.conf import settings
|
||||
from django.core.urlresolvers import reverse
|
||||
|
||||
from fs.errors import ResourceNotFoundError
|
||||
|
||||
from courseware.access import has_access
|
||||
from static_replace import replace_urls
|
||||
|
||||
@@ -263,7 +265,8 @@ def get_static_tab_contents(course, tab):
|
||||
try:
|
||||
with fs.open(p) as tabfile:
|
||||
# TODO: redundant with module_render.py. Want to be helper methods in static_replace or something.
|
||||
contents = replace_urls(tabfile.read(), course.metadata['data_dir'])
|
||||
text = tabfile.read().decode('utf-8')
|
||||
contents = replace_urls(text, course.metadata['data_dir'])
|
||||
return replace_urls(contents, staticfiles_prefix='/courses/'+course.id, replace_prefix='/course/')
|
||||
except (ResourceNotFoundError) as err:
|
||||
log.exception("Couldn't load tab contents from '{0}': {1}".format(p, err))
|
||||
|
||||
@@ -362,7 +362,7 @@ def static_tab(request, course_id, tab_slug):
|
||||
tab = tabs.get_static_tab_by_slug(course, tab_slug)
|
||||
if tab is None:
|
||||
raise Http404
|
||||
|
||||
|
||||
contents = tabs.get_static_tab_contents(course, tab)
|
||||
if contents is None:
|
||||
raise Http404
|
||||
@@ -419,6 +419,16 @@ def course_about(request, course_id):
|
||||
'show_courseware_link' : show_courseware_link})
|
||||
|
||||
|
||||
@ensure_csrf_cookie
|
||||
@cache_if_anonymous
|
||||
def static_university_profile(request, org_id):
|
||||
"""
|
||||
Return the profile for the particular org_id that does not have any courses.
|
||||
"""
|
||||
template_file = "university_profile/{0}.html".format(org_id).lower()
|
||||
context = dict(courses=[], org_id=org_id)
|
||||
return render_to_response(template_file, context)
|
||||
|
||||
@ensure_csrf_cookie
|
||||
@cache_if_anonymous
|
||||
def university_profile(request, org_id):
|
||||
@@ -491,7 +501,7 @@ def progress(request, course_id, student_id=None):
|
||||
courseware_summary = grades.progress_summary(student, request, course,
|
||||
student_module_cache)
|
||||
grade_summary = grades.grade(student, request, course, student_module_cache)
|
||||
|
||||
|
||||
if courseware_summary is None:
|
||||
#This means the student didn't have access to the course (which the instructor requested)
|
||||
raise Http404
|
||||
@@ -504,4 +514,3 @@ def progress(request, course_id, student_id=None):
|
||||
context.update()
|
||||
|
||||
return render_to_response('courseware/progress.html', context)
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ class Command(BaseCommand):
|
||||
course_id = args[0]
|
||||
administrator_role = Role.objects.get_or_create(name="Administrator", course_id=course_id)[0]
|
||||
moderator_role = Role.objects.get_or_create(name="Moderator", course_id=course_id)[0]
|
||||
community_ta_role = Role.objects.get_or_create(name="Community TA", course_id=course_id)[0]
|
||||
student_role = Role.objects.get_or_create(name="Student", course_id=course_id)[0]
|
||||
|
||||
for per in ["vote", "update_thread", "follow_thread", "unfollow_thread",
|
||||
@@ -30,4 +31,7 @@ class Command(BaseCommand):
|
||||
|
||||
moderator_role.inherit_permissions(student_role)
|
||||
|
||||
# For now, Community TA == Moderator, except for the styling.
|
||||
community_ta_role.inherit_permissions(moderator_role)
|
||||
|
||||
administrator_role.inherit_permissions(moderator_role)
|
||||
|
||||
0
lms/djangoapps/instructor/management/__init__.py
Normal file
0
lms/djangoapps/instructor/management/__init__.py
Normal file
79
lms/djangoapps/instructor/management/commands/dump_grades.py
Normal file
79
lms/djangoapps/instructor/management/commands/dump_grades.py
Normal file
@@ -0,0 +1,79 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# django management command: dump grades to csv files
|
||||
# for use by batch processes
|
||||
|
||||
import os, sys, string
|
||||
import datetime
|
||||
import json
|
||||
|
||||
from instructor.views import *
|
||||
from courseware.courses import get_course_by_id
|
||||
from xmodule.modulestore.django import modulestore
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "dump grades to CSV file. Usage: dump_grades course_id_or_dir filename dump_type\n"
|
||||
help += " course_id_or_dir: either course_id or course_dir\n"
|
||||
help += " filename: where the output CSV is to be stored\n"
|
||||
# help += " start_date: end date as M/D/Y H:M (defaults to end of available data)"
|
||||
help += " dump_type: 'all' or 'raw' (see instructor dashboard)"
|
||||
|
||||
def handle(self, *args, **options):
|
||||
|
||||
# current grading logic and data schema doesn't handle dates
|
||||
# datetime.strptime("21/11/06 16:30", "%m/%d/%y %H:%M")
|
||||
|
||||
print "args = ", args
|
||||
|
||||
course_id = 'MITx/8.01rq_MW/Classical_Mechanics_Reading_Questions_Fall_2012_MW_Section'
|
||||
fn = "grades.csv"
|
||||
get_raw_scores = False
|
||||
|
||||
if len(args)>0:
|
||||
course_id = args[0]
|
||||
if len(args)>1:
|
||||
fn = args[1]
|
||||
if len(args)>2:
|
||||
get_raw_scores = args[2].lower()=='raw'
|
||||
|
||||
request = self.DummyRequest()
|
||||
try:
|
||||
course = get_course_by_id(course_id)
|
||||
except Exception as err:
|
||||
if course_id in modulestore().courses:
|
||||
course = modulestore().courses[course_id]
|
||||
else:
|
||||
print "-----------------------------------------------------------------------------"
|
||||
print "Sorry, cannot find course %s" % course_id
|
||||
print "Please provide a course ID or course data directory name, eg content-mit-801rq"
|
||||
return
|
||||
|
||||
print "-----------------------------------------------------------------------------"
|
||||
print "Dumping grades from %s to file %s (get_raw_scores=%s)" % (course.id, fn, get_raw_scores)
|
||||
datatable = get_student_grade_summary_data(request, course, course.id, get_raw_scores=get_raw_scores)
|
||||
|
||||
fp = open(fn,'w')
|
||||
|
||||
writer = csv.writer(fp, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
|
||||
writer.writerow(datatable['header'])
|
||||
for datarow in datatable['data']:
|
||||
encoded_row = [unicode(s).encode('utf-8') for s in datarow]
|
||||
writer.writerow(encoded_row)
|
||||
|
||||
fp.close()
|
||||
print "Done: %d records dumped" % len(datatable['data'])
|
||||
|
||||
class DummyRequest(object):
|
||||
META = {}
|
||||
def __init__(self):
|
||||
return
|
||||
def get_host(self):
|
||||
return 'edx.mit.edu'
|
||||
def is_secure(self):
|
||||
return False
|
||||
|
||||
|
||||
|
||||
0
lms/djangoapps/licenses/__init__.py
Normal file
0
lms/djangoapps/licenses/__init__.py
Normal file
0
lms/djangoapps/licenses/management/__init__.py
Normal file
0
lms/djangoapps/licenses/management/__init__.py
Normal file
@@ -0,0 +1,65 @@
|
||||
import os.path
|
||||
from uuid import uuid4
|
||||
from optparse import make_option
|
||||
|
||||
from django.utils.html import escape
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
|
||||
from xmodule.modulestore.django import modulestore
|
||||
|
||||
from licenses.models import CourseSoftware, UserLicense
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = """Generate random serial numbers for software used in a course.
|
||||
|
||||
Usage: generate_serial_numbers <course_id> <software_name> <count>
|
||||
|
||||
<count> is the number of numbers to generate.
|
||||
|
||||
Example:
|
||||
|
||||
import_serial_numbers MITx/6.002x/2012_Fall matlab 100
|
||||
|
||||
"""
|
||||
args = "course_id software_id count"
|
||||
|
||||
def handle(self, *args, **options):
|
||||
"""
|
||||
"""
|
||||
course_id, software_name, count = self._parse_arguments(args)
|
||||
|
||||
software, _ = CourseSoftware.objects.get_or_create(course_id=course_id,
|
||||
name=software_name)
|
||||
self._generate_serials(software, count)
|
||||
|
||||
def _parse_arguments(self, args):
|
||||
if len(args) != 3:
|
||||
raise CommandError("Incorrect number of arguments")
|
||||
|
||||
course_id = args[0]
|
||||
courses = modulestore().get_courses()
|
||||
known_course_ids = set(c.id for c in courses)
|
||||
|
||||
if course_id not in known_course_ids:
|
||||
raise CommandError("Unknown course_id")
|
||||
|
||||
software_name = escape(args[1].lower())
|
||||
|
||||
try:
|
||||
count = int(args[2])
|
||||
except ValueError:
|
||||
raise CommandError("Invalid <count> argument.")
|
||||
|
||||
return course_id, software_name, count
|
||||
|
||||
def _generate_serials(self, software, count):
|
||||
print "Generating {0} serials".format(count)
|
||||
|
||||
# add serial numbers them to the database
|
||||
for _ in xrange(count):
|
||||
serial = str(uuid4())
|
||||
license = UserLicense(software=software, serial=serial)
|
||||
license.save()
|
||||
|
||||
print "{0} new serial numbers generated.".format(count)
|
||||
@@ -0,0 +1,70 @@
|
||||
import os.path
|
||||
from optparse import make_option
|
||||
|
||||
from django.utils.html import escape
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
|
||||
from xmodule.modulestore.django import modulestore
|
||||
|
||||
from licenses.models import CourseSoftware, UserLicense
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = """Imports serial numbers for software used in a course.
|
||||
|
||||
Usage: import_serial_numbers <course_id> <software_name> <file>
|
||||
|
||||
<file> is a text file that list one available serial number per line.
|
||||
|
||||
Example:
|
||||
|
||||
import_serial_numbers MITx/6.002x/2012_Fall matlab serials.txt
|
||||
|
||||
"""
|
||||
args = "course_id software_id serial_file"
|
||||
|
||||
def handle(self, *args, **options):
|
||||
"""
|
||||
"""
|
||||
course_id, software_name, filename = self._parse_arguments(args)
|
||||
|
||||
software, _ = CourseSoftware.objects.get_or_create(course_id=course_id,
|
||||
name=software_name)
|
||||
self._import_serials(software, filename)
|
||||
|
||||
def _parse_arguments(self, args):
|
||||
if len(args) != 3:
|
||||
raise CommandError("Incorrect number of arguments")
|
||||
|
||||
course_id = args[0]
|
||||
courses = modulestore().get_courses()
|
||||
known_course_ids = set(c.id for c in courses)
|
||||
|
||||
if course_id not in known_course_ids:
|
||||
raise CommandError("Unknown course_id")
|
||||
|
||||
software_name = escape(args[1].lower())
|
||||
|
||||
filename = os.path.abspath(args[2])
|
||||
if not os.path.exists(filename):
|
||||
raise CommandError("Cannot find filename {0}".format(filename))
|
||||
|
||||
return course_id, software_name, filename
|
||||
|
||||
def _import_serials(self, software, filename):
|
||||
print "Importing serial numbers for {0}.".format(software)
|
||||
|
||||
serials = set(unicode(l.strip()) for l in open(filename))
|
||||
|
||||
# remove serial numbers we already have
|
||||
licenses = UserLicense.objects.filter(software=software)
|
||||
known_serials = set(l.serial for l in licenses)
|
||||
if known_serials:
|
||||
serials = serials.difference(known_serials)
|
||||
|
||||
# add serial numbers them to the database
|
||||
for serial in serials:
|
||||
license = UserLicense(software=software, serial=serial)
|
||||
license.save()
|
||||
|
||||
print "{0} new serial numbers imported.".format(len(serials))
|
||||
118
lms/djangoapps/licenses/migrations/0001_initial.py
Normal file
118
lms/djangoapps/licenses/migrations/0001_initial.py
Normal file
@@ -0,0 +1,118 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import datetime
|
||||
from south.db import db
|
||||
from south.v2 import SchemaMigration
|
||||
from django.db import models
|
||||
|
||||
|
||||
class Migration(SchemaMigration):
|
||||
|
||||
def forwards(self, orm):
|
||||
# Adding model 'CourseSoftware'
|
||||
db.create_table('licenses_coursesoftware', (
|
||||
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
|
||||
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
|
||||
('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
|
||||
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
|
||||
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
|
||||
))
|
||||
db.send_create_signal('licenses', ['CourseSoftware'])
|
||||
|
||||
# Adding model 'UserLicense'
|
||||
db.create_table('licenses_userlicense', (
|
||||
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
|
||||
('software', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['licenses.CourseSoftware'])),
|
||||
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
|
||||
('serial', self.gf('django.db.models.fields.CharField')(max_length=255)),
|
||||
))
|
||||
db.send_create_signal('licenses', ['UserLicense'])
|
||||
|
||||
|
||||
def backwards(self, orm):
|
||||
# Deleting model 'CourseSoftware'
|
||||
db.delete_table('licenses_coursesoftware')
|
||||
|
||||
# Deleting model 'UserLicense'
|
||||
db.delete_table('licenses_userlicense')
|
||||
|
||||
|
||||
models = {
|
||||
'auth.group': {
|
||||
'Meta': {'object_name': 'Group'},
|
||||
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
||||
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
|
||||
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
|
||||
},
|
||||
'auth.permission': {
|
||||
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
|
||||
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
|
||||
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
|
||||
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
||||
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
|
||||
},
|
||||
'auth.user': {
|
||||
'Meta': {'object_name': 'User'},
|
||||
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
|
||||
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
|
||||
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
|
||||
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
|
||||
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
|
||||
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
|
||||
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
|
||||
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
|
||||
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
|
||||
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
|
||||
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
|
||||
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
|
||||
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
|
||||
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
|
||||
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
|
||||
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
|
||||
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
||||
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
|
||||
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
|
||||
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
|
||||
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
|
||||
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
|
||||
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
|
||||
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
|
||||
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
|
||||
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
|
||||
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
|
||||
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
|
||||
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
|
||||
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
|
||||
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
|
||||
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
|
||||
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
|
||||
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
|
||||
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
|
||||
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
|
||||
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
|
||||
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
|
||||
},
|
||||
'contenttypes.contenttype': {
|
||||
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
|
||||
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
|
||||
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
||||
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
|
||||
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
|
||||
},
|
||||
'licenses.coursesoftware': {
|
||||
'Meta': {'object_name': 'CourseSoftware'},
|
||||
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
|
||||
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
|
||||
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
||||
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
|
||||
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
|
||||
},
|
||||
'licenses.userlicense': {
|
||||
'Meta': {'object_name': 'UserLicense'},
|
||||
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
||||
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
|
||||
'software': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['licenses.CourseSoftware']"}),
|
||||
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
|
||||
}
|
||||
}
|
||||
|
||||
complete_apps = ['licenses']
|
||||
0
lms/djangoapps/licenses/migrations/__init__.py
Normal file
0
lms/djangoapps/licenses/migrations/__init__.py
Normal file
81
lms/djangoapps/licenses/models.py
Normal file
81
lms/djangoapps/licenses/models.py
Normal file
@@ -0,0 +1,81 @@
|
||||
import logging
|
||||
|
||||
from django.db import models, transaction
|
||||
|
||||
from student.models import User
|
||||
|
||||
log = logging.getLogger("mitx.licenses")
|
||||
|
||||
|
||||
class CourseSoftware(models.Model):
|
||||
name = models.CharField(max_length=255)
|
||||
full_name = models.CharField(max_length=255)
|
||||
url = models.CharField(max_length=255)
|
||||
course_id = models.CharField(max_length=255)
|
||||
|
||||
def __unicode__(self):
|
||||
return u'{0} for {1}'.format(self.name, self.course_id)
|
||||
|
||||
|
||||
class UserLicense(models.Model):
|
||||
software = models.ForeignKey(CourseSoftware, db_index=True)
|
||||
user = models.ForeignKey(User, null=True)
|
||||
serial = models.CharField(max_length=255)
|
||||
|
||||
|
||||
def get_courses_licenses(user, courses):
|
||||
course_ids = set(course.id for course in courses)
|
||||
all_software = CourseSoftware.objects.filter(course_id__in=course_ids)
|
||||
|
||||
assigned_licenses = UserLicense.objects.filter(software__in=all_software,
|
||||
user=user)
|
||||
|
||||
licenses = dict.fromkeys(all_software, None)
|
||||
for license in assigned_licenses:
|
||||
licenses[license.software] = license
|
||||
|
||||
log.info(assigned_licenses)
|
||||
log.info(licenses)
|
||||
|
||||
return licenses
|
||||
|
||||
|
||||
def get_license(user, software):
|
||||
try:
|
||||
# TODO: temporary fix for when somehow a user got more that one license.
|
||||
# The proper fix should use Meta.unique_together in the UserLicense model.
|
||||
licenses = UserLicense.objects.filter(user=user, software=software)
|
||||
license = licenses[0] if licenses else None
|
||||
except UserLicense.DoesNotExist:
|
||||
license = None
|
||||
|
||||
return license
|
||||
|
||||
|
||||
def get_or_create_license(user, software):
|
||||
license = get_license(user, software)
|
||||
if license is None:
|
||||
license = _create_license(user, software)
|
||||
|
||||
return license
|
||||
|
||||
|
||||
def _create_license(user, software):
|
||||
license = None
|
||||
|
||||
try:
|
||||
# find one license that has not been assigned, locking the
|
||||
# table/rows with select_for_update to prevent race conditions
|
||||
with transaction.commit_on_success():
|
||||
selected = UserLicense.objects.select_for_update()
|
||||
license = selected.filter(user__isnull=True, software=software)[0]
|
||||
license.user = user
|
||||
license.save()
|
||||
except IndexError:
|
||||
# there are no free licenses
|
||||
log.error('No serial numbers available for {0}', software)
|
||||
license = None
|
||||
# TODO [rocha]look if someone has unenrolled from the class
|
||||
# and already has a serial number
|
||||
|
||||
return license
|
||||
85
lms/djangoapps/licenses/tests.py
Normal file
85
lms/djangoapps/licenses/tests.py
Normal file
@@ -0,0 +1,85 @@
|
||||
import logging
|
||||
from uuid import uuid4
|
||||
from random import shuffle
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from django.test import TestCase
|
||||
from django.core.management import call_command
|
||||
|
||||
from models import CourseSoftware, UserLicense
|
||||
|
||||
COURSE_1 = 'edX/toy/2012_Fall'
|
||||
|
||||
SOFTWARE_1 = 'matlab'
|
||||
SOFTWARE_2 = 'stata'
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CommandTest(TestCase):
|
||||
def test_import_serial_numbers(self):
|
||||
size = 20
|
||||
|
||||
log.debug('Adding one set of serials for {0}'.format(SOFTWARE_1))
|
||||
with generate_serials_file(size) as temp_file:
|
||||
args = [COURSE_1, SOFTWARE_1, temp_file.name]
|
||||
call_command('import_serial_numbers', *args)
|
||||
|
||||
log.debug('Adding one set of serials for {0}'.format(SOFTWARE_2))
|
||||
with generate_serials_file(size) as temp_file:
|
||||
args = [COURSE_1, SOFTWARE_2, temp_file.name]
|
||||
call_command('import_serial_numbers', *args)
|
||||
|
||||
log.debug('There should be only 2 course-software entries')
|
||||
software_count = CourseSoftware.objects.all().count()
|
||||
self.assertEqual(2, software_count)
|
||||
|
||||
log.debug('We added two sets of {0} serials'.format(size))
|
||||
licenses_count = UserLicense.objects.all().count()
|
||||
self.assertEqual(2 * size, licenses_count)
|
||||
|
||||
log.debug('Adding more serial numbers to {0}'.format(SOFTWARE_1))
|
||||
with generate_serials_file(size) as temp_file:
|
||||
args = [COURSE_1, SOFTWARE_1, temp_file.name]
|
||||
call_command('import_serial_numbers', *args)
|
||||
|
||||
log.debug('There should be still only 2 course-software entries')
|
||||
software_count = CourseSoftware.objects.all().count()
|
||||
self.assertEqual(2, software_count)
|
||||
|
||||
log.debug('Now we should have 3 sets of 20 serials'.format(size))
|
||||
licenses_count = UserLicense.objects.all().count()
|
||||
self.assertEqual(3 * size, licenses_count)
|
||||
|
||||
cs = CourseSoftware.objects.get(pk=1)
|
||||
|
||||
lics = UserLicense.objects.filter(software=cs)[:size]
|
||||
known_serials = list(l.serial for l in lics)
|
||||
known_serials.extend(generate_serials(10))
|
||||
|
||||
shuffle(known_serials)
|
||||
|
||||
log.debug('Adding some new and old serials to {0}'.format(SOFTWARE_1))
|
||||
with NamedTemporaryFile() as f:
|
||||
f.write('\n'.join(known_serials))
|
||||
f.flush()
|
||||
args = [COURSE_1, SOFTWARE_1, f.name]
|
||||
call_command('import_serial_numbers', *args)
|
||||
|
||||
log.debug('Check if we added only the new ones')
|
||||
licenses_count = UserLicense.objects.filter(software=cs).count()
|
||||
self.assertEqual((2 * size) + 10, licenses_count)
|
||||
|
||||
|
||||
def generate_serials(size=20):
|
||||
return [str(uuid4()) for _ in range(size)]
|
||||
|
||||
|
||||
def generate_serials_file(size=20):
|
||||
serials = generate_serials(size)
|
||||
|
||||
temp_file = NamedTemporaryFile()
|
||||
temp_file.write('\n'.join(serials))
|
||||
temp_file.flush()
|
||||
|
||||
return temp_file
|
||||
84
lms/djangoapps/licenses/views.py
Normal file
84
lms/djangoapps/licenses/views.py
Normal file
@@ -0,0 +1,84 @@
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
from urlparse import urlparse
|
||||
from collections import namedtuple, defaultdict
|
||||
|
||||
|
||||
from mitxmako.shortcuts import render_to_string
|
||||
|
||||
from django.contrib.auth.models import User
|
||||
from django.http import HttpResponse, Http404
|
||||
from django.views.decorators.csrf import requires_csrf_token, csrf_protect
|
||||
|
||||
from models import CourseSoftware
|
||||
from models import get_courses_licenses, get_or_create_license, get_license
|
||||
|
||||
|
||||
log = logging.getLogger("mitx.licenses")
|
||||
|
||||
|
||||
License = namedtuple('License', 'software serial')
|
||||
|
||||
|
||||
def get_licenses_by_course(user, courses):
|
||||
licenses = get_courses_licenses(user, courses)
|
||||
licenses_by_course = defaultdict(list)
|
||||
|
||||
# create missing licenses and group by course_id
|
||||
for software, license in licenses.iteritems():
|
||||
if license is None:
|
||||
licenses[software] = get_or_create_license(user, software)
|
||||
|
||||
course_id = software.course_id
|
||||
serial = license.serial if license else None
|
||||
licenses_by_course[course_id].append(License(software, serial))
|
||||
|
||||
# render elements
|
||||
data_by_course = {}
|
||||
for course_id, licenses in licenses_by_course.iteritems():
|
||||
context = {'licenses': licenses}
|
||||
template = 'licenses/serial_numbers.html'
|
||||
data_by_course[course_id] = render_to_string(template, context)
|
||||
|
||||
return data_by_course
|
||||
|
||||
|
||||
@requires_csrf_token
|
||||
def user_software_license(request):
|
||||
if request.method != 'POST' or not request.is_ajax():
|
||||
raise Http404
|
||||
|
||||
# get the course id from the referer
|
||||
url_path = urlparse(request.META.get('HTTP_REFERER', '')).path
|
||||
pattern = re.compile('^/courses/(?P<id>[^/]+/[^/]+/[^/]+)/.*/?$')
|
||||
match = re.match(pattern, url_path)
|
||||
|
||||
if not match:
|
||||
raise Http404
|
||||
course_id = match.groupdict().get('id', '')
|
||||
|
||||
user_id = request.session.get('_auth_user_id')
|
||||
software_name = request.POST.get('software')
|
||||
generate = request.POST.get('generate', False) == 'true'
|
||||
|
||||
try:
|
||||
software = CourseSoftware.objects.get(name=software_name,
|
||||
course_id=course_id)
|
||||
print software
|
||||
except CourseSoftware.DoesNotExist:
|
||||
raise Http404
|
||||
|
||||
user = User.objects.get(id=user_id)
|
||||
|
||||
if generate:
|
||||
license = get_or_create_license(user, software)
|
||||
else:
|
||||
license = get_license(user, software)
|
||||
|
||||
if license:
|
||||
response = {'serial': license.serial}
|
||||
else:
|
||||
response = {'error': 'No serial number found'}
|
||||
|
||||
return HttpResponse(json.dumps(response), mimetype='application/json')
|
||||
@@ -89,6 +89,7 @@ GENERATE_PROFILE_SCORES = False
|
||||
# Used with XQueue
|
||||
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
|
||||
|
||||
|
||||
############################# SET PATH INFORMATION #############################
|
||||
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /mitx/lms
|
||||
REPO_ROOT = PROJECT_ROOT.dirname()
|
||||
@@ -96,7 +97,6 @@ COMMON_ROOT = REPO_ROOT / "common"
|
||||
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /mitx is in
|
||||
COURSES_ROOT = ENV_ROOT / "data"
|
||||
|
||||
# FIXME: To support multiple courses, we should walk the courses dir at startup
|
||||
DATA_DIR = COURSES_ROOT
|
||||
|
||||
sys.path.append(REPO_ROOT)
|
||||
@@ -118,8 +118,11 @@ node_paths = [COMMON_ROOT / "static/js/vendor",
|
||||
NODE_PATH = ':'.join(node_paths)
|
||||
|
||||
|
||||
# Where to look for a status message
|
||||
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
|
||||
|
||||
############################ OpenID Provider ##################################
|
||||
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
|
||||
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
|
||||
|
||||
################################## MITXWEB #####################################
|
||||
# This is where we stick our compiled template files. Most of the app uses Mako
|
||||
@@ -147,7 +150,7 @@ TEMPLATE_CONTEXT_PROCESSORS = (
|
||||
#'django.core.context_processors.i18n',
|
||||
'django.contrib.auth.context_processors.auth', #this is required for admin
|
||||
'django.core.context_processors.csrf', #necessary for csrf protection
|
||||
|
||||
|
||||
# Added for django-wiki
|
||||
'django.core.context_processors.media',
|
||||
'django.core.context_processors.tz',
|
||||
@@ -315,7 +318,7 @@ WIKI_CAN_ASSIGN = lambda article, user: user.is_staff or user.is_superuser
|
||||
|
||||
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
|
||||
WIKI_LINK_LIVE_LOOKUPS = False
|
||||
WIKI_LINK_DEFAULT_LEVEL = 2
|
||||
WIKI_LINK_DEFAULT_LEVEL = 2
|
||||
|
||||
################################# Jasmine ###################################
|
||||
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
|
||||
@@ -332,10 +335,9 @@ STATICFILES_FINDERS = (
|
||||
TEMPLATE_LOADERS = (
|
||||
'mitxmako.makoloader.MakoFilesystemLoader',
|
||||
'mitxmako.makoloader.MakoAppDirectoriesLoader',
|
||||
|
||||
|
||||
# 'django.template.loaders.filesystem.Loader',
|
||||
# 'django.template.loaders.app_directories.Loader',
|
||||
|
||||
# 'django.template.loaders.eggs.Loader',
|
||||
)
|
||||
|
||||
@@ -353,7 +355,7 @@ MIDDLEWARE_CLASSES = (
|
||||
'django.contrib.messages.middleware.MessageMiddleware',
|
||||
'track.middleware.TrackMiddleware',
|
||||
'mitxmako.middleware.MakoMiddleware',
|
||||
|
||||
|
||||
'course_wiki.course_nav.Middleware',
|
||||
|
||||
'django.middleware.transaction.TransactionMiddleware',
|
||||
@@ -487,8 +489,6 @@ PIPELINE_JS_COMPRESSOR = None
|
||||
STATICFILES_IGNORE_PATTERNS = (
|
||||
"sass/*",
|
||||
"coffee/*",
|
||||
"*.py",
|
||||
"*.pyc"
|
||||
)
|
||||
|
||||
PIPELINE_YUI_BINARY = 'yui-compressor'
|
||||
@@ -526,7 +526,8 @@ INSTALLED_APPS = (
|
||||
'certificates',
|
||||
'instructor',
|
||||
'psychometrics',
|
||||
|
||||
'licenses',
|
||||
|
||||
#For the wiki
|
||||
'wiki', # The new django-wiki from benjaoming
|
||||
'django_notify',
|
||||
|
||||
@@ -27,12 +27,18 @@ SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead
|
||||
|
||||
# Nose Test Runner
|
||||
INSTALLED_APPS += ('django_nose',)
|
||||
NOSE_ARGS = ['--cover-erase', '--with-xunit', '--with-xcoverage', '--cover-html',
|
||||
# '-v', '--pdb', # When really stuck, uncomment to start debugger on error
|
||||
'--cover-inclusive', '--cover-html-dir',
|
||||
os.environ.get('NOSE_COVER_HTML_DIR', 'cover_html')]
|
||||
for app in os.listdir(PROJECT_ROOT / 'djangoapps'):
|
||||
NOSE_ARGS += ['--cover-package', app]
|
||||
NOSE_ARGS = []
|
||||
|
||||
# Turning off coverage speeds up tests dramatically... until we have better config,
|
||||
# leave it here for manual fiddling.
|
||||
_coverage = True
|
||||
if _coverage:
|
||||
NOSE_ARGS = ['--cover-erase', '--with-xunit', '--with-xcoverage', '--cover-html',
|
||||
# '-v', '--pdb', # When really stuck, uncomment to start debugger on error
|
||||
'--cover-inclusive', '--cover-html-dir',
|
||||
os.environ.get('NOSE_COVER_HTML_DIR', 'cover_html')]
|
||||
for app in os.listdir(PROJECT_ROOT / 'djangoapps'):
|
||||
NOSE_ARGS += ['--cover-package', app]
|
||||
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
||||
|
||||
# Local Directories
|
||||
@@ -40,6 +46,8 @@ TEST_ROOT = path("test_root")
|
||||
# Want static files in the same dir for running on jenkins.
|
||||
STATIC_ROOT = TEST_ROOT / "staticfiles"
|
||||
|
||||
STATUS_MESSAGE_PATH = TEST_ROOT / "status_message.json"
|
||||
|
||||
COURSES_ROOT = TEST_ROOT / "data"
|
||||
DATA_DIR = COURSES_ROOT
|
||||
|
||||
@@ -77,28 +85,25 @@ STATICFILES_DIRS += [
|
||||
if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir)
|
||||
]
|
||||
|
||||
# point tests at the test courses by default
|
||||
|
||||
MODULESTORE = {
|
||||
'default': {
|
||||
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
|
||||
'OPTIONS': {
|
||||
'data_dir': COMMON_TEST_DATA_ROOT,
|
||||
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
DATABASES = {
|
||||
'default': {
|
||||
'ENGINE': 'django.db.backends.sqlite3',
|
||||
'NAME': PROJECT_ROOT / "db" / "mitx.db",
|
||||
},
|
||||
|
||||
# The following are for testing purposes...
|
||||
'edX/toy/2012_Fall': {
|
||||
'ENGINE': 'django.db.backends.sqlite3',
|
||||
'NAME': ENV_ROOT / "db" / "course1.db",
|
||||
},
|
||||
|
||||
'edx/full/6.002_Spring_2012': {
|
||||
'ENGINE': 'django.db.backends.sqlite3',
|
||||
'NAME': ENV_ROOT / "db" / "course2.db",
|
||||
},
|
||||
|
||||
'edX/toy/TT_2012_Fall': {
|
||||
'ENGINE': 'django.db.backends.sqlite3',
|
||||
'NAME': ENV_ROOT / "db" / "course3.db",
|
||||
},
|
||||
|
||||
}
|
||||
|
||||
CACHES = {
|
||||
@@ -157,3 +162,15 @@ FILE_UPLOAD_HANDLERS = (
|
||||
'django.core.files.uploadhandler.MemoryFileUploadHandler',
|
||||
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
|
||||
)
|
||||
|
||||
################### Make tests faster
|
||||
|
||||
#http://slacy.com/blog/2012/04/make-your-tests-faster-in-django-1-4/
|
||||
PASSWORD_HASHERS = (
|
||||
# 'django.contrib.auth.hashers.PBKDF2PasswordHasher',
|
||||
# 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
|
||||
# 'django.contrib.auth.hashers.BCryptPasswordHasher',
|
||||
'django.contrib.auth.hashers.SHA1PasswordHasher',
|
||||
'django.contrib.auth.hashers.MD5PasswordHasher',
|
||||
# 'django.contrib.auth.hashers.CryptPasswordHasher',
|
||||
)
|
||||
|
||||
BIN
lms/static/images/large-white-error-icon.png
Normal file
BIN
lms/static/images/large-white-error-icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.8 KiB |
BIN
lms/static/images/press/cengage_book_327x400.jpg
Normal file
BIN
lms/static/images/press/cengage_book_327x400.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 31 KiB |
BIN
lms/static/images/press/uts-seal_109x84.jpg
Normal file
BIN
lms/static/images/press/uts-seal_109x84.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 3.1 KiB |
BIN
lms/static/images/university/ut/ut-cover_2025x550.jpg
Normal file
BIN
lms/static/images/university/ut/ut-cover_2025x550.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 200 KiB |
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user