Merge remote-tracking branch 'origin/master' into feature/vik/oe-ui

Vik Paruchuri
2013-07-19 13:35:37 -04:00
62 changed files with 3364 additions and 316 deletions

View File

@@ -81,3 +81,4 @@ Felix Sun <felixsun@mit.edu>
Adam Palay <adam@edx.org>
Ian Hoover <ihoover@edx.org>
Mukul Goyal <miki@edx.org>
Robert Marks <rmarks@edx.org>

View File

@@ -5,10 +5,13 @@ These are notable changes in edx-platform. This is a rolling list of changes,
in roughly chronological order, most recent first. Add your entries at or near
the top. Include a label indicating the component affected.
Common: Added *experimental* support for jsinput type.
Common: Added setting to specify Celery Broker vhost
Common: Utilize new XBlock bulk save API in LMS and CMS.
Studio: Add table for tracking course creator permissions (not yet used).
Update rake django-admin[syncdb] and rake django-admin[migrate] so they
run for both LMS and CMS.
@@ -21,6 +24,8 @@ Studio: Added support for uploading and managing PDF textbooks
Common: Student information is now passed to the tracking log via POST instead of GET.
Blades: Added functionality and tests for new capa input type: choicetextresponse.
Common: Add tests for documentation generation to test suite
Blades: User answer now preserved (and changeable) after clicking "show answer" in choice problems
@@ -43,7 +48,7 @@ history of background tasks for a given problem and student.
Blades: Small UX fix on capa multiple-choice problems. Make labels only
as wide as the text to reduce accidental choice selections.
Studio:
Studio:
- use xblock field defaults to initialize all new instances' fields and
only use templates as override samples.
- create new instances via in memory create_xmodule and related methods rather

View File

@@ -0,0 +1,55 @@
from django.core.management.base import BaseCommand, CommandError
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from json import dumps
from xmodule.modulestore.inheritance import own_metadata
from django.conf import settings
filter_list = ['xml_attributes', 'checklists']
class Command(BaseCommand):
help = '''Write out to stdout structural and metadata information about a course as a flat dictionary serialized
in JSON format. This can be used for analytics.'''
def handle(self, *args, **options):
if len(args) < 2 or len(args) > 3:
raise CommandError("dump_course_structure requires two or three arguments: <course_id> <outfile> |<db>|")
course_id = args[0]
outfile = args[1]
# use a user-specified database name, if present
# this is useful for doing dumps from databases restored from prod backups
if len(args) == 3:
settings.MODULESTORE['direct']['OPTIONS']['db'] = args[2]
loc = CourseDescriptor.id_to_location(course_id)
store = modulestore()
course = None
try:
course = store.get_item(loc, depth=4)
except:
print 'Could not find course at {0}'.format(course_id)
return
info = {}
def dump_into_dict(module, info):
filtered_metadata = dict((key, value) for key, value in own_metadata(module).iteritems()
if key not in filter_list)
info[module.location.url()] = {
'category': module.location.category,
'children': module.children if hasattr(module, 'children') else [],
'metadata': filtered_metadata
}
for child in module.get_children():
dump_into_dict(child, info)
dump_into_dict(course, info)
with open(outfile, 'w') as f:
f.write(dumps(info))
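# A minimal invocation sketch for the command above; the command name follows its module name,
# and the course id and output path below are hypothetical examples.
from django.core.management import call_command
call_command('dump_course_structure', 'MITx/6.002x/2012_Fall', '/tmp/course_structure.json')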

View File

@@ -46,6 +46,8 @@ class ChecklistTestCase(CourseTestCase):
# Now delete the checklists from the course and verify they get repopulated (for courses
# created before checklists were introduced).
self.course.checklists = None
# Save the changed `checklists` to the underlying KeyValueStore before updating the modulestore
self.course.save()
modulestore = get_modulestore(self.course.location)
modulestore.update_metadata(self.course.location, own_metadata(self.course))
self.assertEqual(self.get_persisted_checklists(), None)

View File

@@ -87,6 +87,8 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.user.is_active = True
# Staff has access to view all courses
self.user.is_staff = True
# Save the data that we've just changed to the db.
self.user.save()
self.client = Client()
@@ -117,6 +119,10 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
course.advanced_modules = component_types
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course.save()
store.update_metadata(course.location, own_metadata(course))
# just pick one vertical
@@ -134,7 +140,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.check_components_on_page(ADVANCED_COMPONENT_TYPES, ['Video Alpha',
'Word cloud',
'Annotation',
'Open Ended Grading',
'Open Response Assessment',
'Peer Grading Interface'])
def test_advanced_components_require_two_clicks(self):
@@ -239,6 +245,9 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.assertNotIn('graceperiod', own_metadata(html_module))
html_module.lms.graceperiod = new_graceperiod
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
html_module.save()
self.assertIn('graceperiod', own_metadata(html_module))
self.assertEqual(html_module.lms.graceperiod, new_graceperiod)
@@ -883,6 +892,9 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
# add a bool piece of unknown metadata so we can verify we don't throw an exception
metadata['new_metadata'] = True
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course.save()
module_store.update_metadata(location, metadata)
print 'Exporting to tempdir = {0}'.format(root_dir)
@@ -1299,6 +1311,7 @@ class ContentStoreTest(ModuleStoreTestCase):
# now let's define an override at the leaf node level
#
new_module.lms.graceperiod = timedelta(1)
new_module.save()
module_store.update_metadata(new_module.location, own_metadata(new_module))
# flush the cache and refetch

View File

@@ -290,6 +290,71 @@ class CourseGradingTest(CourseTestCase):
altered_grader = CourseGradingModel.update_grader_from_json(test_grader.course_location, test_grader.graders[1])
self.assertDictEqual(test_grader.graders[1], altered_grader, "drop_count[1] + 2")
def test_update_cutoffs_from_json(self):
test_grader = CourseGradingModel.fetch(self.course.location)
CourseGradingModel.update_cutoffs_from_json(test_grader.course_location, test_grader.grade_cutoffs)
# Unlike other tests, need to actually perform a db fetch for this test since update_cutoffs_from_json
# simply returns the cutoffs you send into it, rather than returning the db contents.
altered_grader = CourseGradingModel.fetch(self.course.location)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "Noop update")
test_grader.grade_cutoffs['D'] = 0.3
CourseGradingModel.update_cutoffs_from_json(test_grader.course_location, test_grader.grade_cutoffs)
altered_grader = CourseGradingModel.fetch(self.course.location)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff add D")
test_grader.grade_cutoffs['Pass'] = 0.75
CourseGradingModel.update_cutoffs_from_json(test_grader.course_location, test_grader.grade_cutoffs)
altered_grader = CourseGradingModel.fetch(self.course.location)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff change 'Pass'")
def test_delete_grace_period(self):
test_grader = CourseGradingModel.fetch(self.course.location)
CourseGradingModel.update_grace_period_from_json(test_grader.course_location, test_grader.grace_period)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.location)
self.assertEqual(test_grader.grace_period, altered_grader.grace_period, "Noop update")
test_grader.grace_period = {'hours': 15, 'minutes': 5, 'seconds': 30}
CourseGradingModel.update_grace_period_from_json(test_grader.course_location, test_grader.grace_period)
altered_grader = CourseGradingModel.fetch(self.course.location)
self.assertDictEqual(test_grader.grace_period, altered_grader.grace_period, "Adding in a grace period")
test_grader.grace_period = {'hours': 1, 'minutes': 10, 'seconds': 0}
# Now delete the grace period
CourseGradingModel.delete_grace_period(test_grader.course_location)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.location)
# Once deleted, the grace period should simply be None
self.assertEqual(None, altered_grader.grace_period, "Delete grace period")
def test_update_section_grader_type(self):
# Get the descriptor and the section_grader_type and assert they are the default values
descriptor = get_modulestore(self.course.location).get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('Not Graded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.lms.format)
self.assertEqual(False, descriptor.lms.graded)
# Change the default grader type to Homework, which should also mark the section as graded
CourseGradingModel.update_section_grader_type(self.course.location, {'graderType': 'Homework'})
descriptor = get_modulestore(self.course.location).get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('Homework', section_grader_type['graderType'])
self.assertEqual('Homework', descriptor.lms.format)
self.assertEqual(True, descriptor.lms.graded)
# Change the grader type back to Not Graded, which should also unmark the section as graded
CourseGradingModel.update_section_grader_type(self.course.location, {'graderType': 'Not Graded'})
descriptor = get_modulestore(self.course.location).get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('Not Graded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.lms.format)
self.assertEqual(False, descriptor.lms.graded)
class CourseMetadataEditingTest(CourseTestCase):
"""

View File

@@ -4,6 +4,8 @@ from django.core.urlresolvers import reverse
from xmodule.capa_module import CapaDescriptor
import json
from xmodule.modulestore.django import modulestore
import datetime
from pytz import UTC
class DeleteItem(CourseTestCase):
@@ -151,16 +153,16 @@ class TestEditItem(CourseTestCase):
reverse('create_item'),
json.dumps(
{'parent_location': chap_location,
'category': 'vertical'
'category': 'sequential'
}),
content_type="application/json"
)
vert_location = self.response_id(resp)
self.seq_location = self.response_id(resp)
# create problem w/ boilerplate
template_id = 'multiplechoice.yaml'
resp = self.client.post(
reverse('create_item'),
json.dumps({'parent_location': vert_location,
json.dumps({'parent_location': self.seq_location,
'category': 'problem',
'boilerplate': template_id
}),
@@ -210,3 +212,32 @@ class TestEditItem(CourseTestCase):
)
problem = modulestore('draft').get_item(self.problems[0])
self.assertIsNone(problem.markdown)
def test_date_fields(self):
"""
Test setting due & start dates on sequential
"""
sequential = modulestore().get_item(self.seq_location)
self.assertIsNone(sequential.lms.due)
self.client.post(
reverse('save_item'),
json.dumps({
'id': self.seq_location,
'metadata': {'due': '2010-11-22T04:00Z'}
}),
content_type="application/json"
)
sequential = modulestore().get_item(self.seq_location)
self.assertEqual(sequential.lms.due, datetime.datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
self.client.post(
reverse('save_item'),
json.dumps({
'id': self.seq_location,
'metadata': {'start': '2010-09-12T14:00Z'}
}),
content_type="application/json"
)
sequential = modulestore().get_item(self.seq_location)
self.assertEqual(sequential.lms.due, datetime.datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
self.assertEqual(sequential.lms.start, datetime.datetime(2010, 9, 12, 14, 0, tzinfo=UTC))

View File

@@ -62,6 +62,9 @@ class TextbookIndexTestCase(CourseTestCase):
}
]
self.course.pdf_textbooks = content
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
self.course.save()
store = get_modulestore(self.course.location)
store.update_metadata(self.course.location, own_metadata(self.course))
@@ -220,6 +223,9 @@ class TextbookByIdTestCase(CourseTestCase):
'tid': 2,
})
self.course.pdf_textbooks = [self.textbook1, self.textbook2]
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
self.course.save()
self.store = get_modulestore(self.course.location)
self.store.update_metadata(self.course.location, own_metadata(self.course))
self.url_nonexist = reverse('textbook_by_id', kwargs={

View File

@@ -1,10 +1,9 @@
"""
Views related to operations on course objects
"""
#pylint: disable=W0402
import json
import random
import string
import string # pylint: disable=W0402
from django.contrib.auth.decorators import login_required
from django_future.csrf import ensure_csrf_cookie
@@ -496,6 +495,9 @@ def textbook_index(request, org, course, name):
if not any(tab['type'] == 'pdf_textbooks' for tab in course_module.tabs):
course_module.tabs.append({"type": "pdf_textbooks"})
course_module.pdf_textbooks = textbooks
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course_module.save()
store.update_metadata(course_module.location, own_metadata(course_module))
return JsonResponse(course_module.pdf_textbooks)
else:
@@ -542,6 +544,9 @@ def create_textbook(request, org, course, name):
tabs = course_module.tabs
tabs.append({"type": "pdf_textbooks"})
course_module.tabs = tabs
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course_module.save()
store.update_metadata(course_module.location, own_metadata(course_module))
resp = JsonResponse(textbook, status=201)
resp["Location"] = reverse("textbook_by_id", kwargs={
@@ -585,10 +590,13 @@ def textbook_by_id(request, org, course, name, tid):
i = course_module.pdf_textbooks.index(textbook)
new_textbooks = course_module.pdf_textbooks[0:i]
new_textbooks.append(new_textbook)
new_textbooks.extend(course_module.pdf_textbooks[i+1:])
new_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = new_textbooks
else:
course_module.pdf_textbooks.append(new_textbook)
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course_module.save()
store.update_metadata(course_module.location, own_metadata(course_module))
return JsonResponse(new_textbook, status=201)
elif request.method == 'DELETE':
@@ -596,7 +604,8 @@ def textbook_by_id(request, org, course, name, tid):
return JsonResponse(status=404)
i = course_module.pdf_textbooks.index(textbook)
new_textbooks = course_module.pdf_textbooks[0:i]
new_textbooks.extend(course_module.pdf_textbooks[i+1:])
new_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = new_textbooks
course_module.save()
store.update_metadata(course_module.location, own_metadata(course_module))
return JsonResponse()

View File

@@ -59,23 +59,57 @@ def save_item(request):
# 'apply' the submitted metadata, so we don't end up deleting system metadata
existing_item = modulestore().get_item(item_location)
for metadata_key in request.POST.get('nullout', []):
setattr(existing_item, metadata_key, None)
# [dhm] see comment on _get_xblock_field
_get_xblock_field(existing_item, metadata_key).write_to(existing_item, None)
# update existing metadata with submitted metadata (which can be partial)
# IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata, that means 'remove it'. If
# the intent is to set the value to None, use the nullout field instead.
for metadata_key, value in request.POST.get('metadata', {}).items():
# [dhm] see comment on _get_xblock_field
field = _get_xblock_field(existing_item, metadata_key)
if value is None:
delattr(existing_item, metadata_key)
field.delete_from(existing_item)
else:
setattr(existing_item, metadata_key, value)
value = field.from_json(value)
field.write_to(existing_item, value)
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
existing_item.save()
# commit to datastore
store.update_metadata(item_location, own_metadata(existing_item))
return HttpResponse()
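# A sketch of the request body this view expects, to illustrate the nullout vs. metadata-null
# semantics described above; the item location and field names are hypothetical examples.
import json
payload = {
    'id': 'i4x://MITx/6.002x/sequential/Sample',    # hypothetical item location
    'nullout': ['due'],                             # explicitly store None for 'due'
    'metadata': {
        'start': '2010-09-12T14:00Z',               # update 'start'
        'graceperiod': None,                        # None here means "remove the field"
    },
}
body = json.dumps(payload)  # POSTed to save_item with content_type="application/json"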
# [DHM] A hack until we implement a permanent solution. The proposed permanent solution is to make namespace fields
# also top-level fields in xblocks rather than requiring dereference through the namespace, but we'll need to consider
# whether there are plausible use cases for distinct fields with the same name in different namespaces on the same block.
# The idea is that consumers of the xblock, and particularly the web client, shouldn't know about our internal
# representation (namespaces as a means of decorating all modules).
# Given top-level access, the calls could simply be setattr(existing_item, field, value) ...
# Really, this method should live elsewhere (e.g., xblock). We also need methods for has_value (vs. is_default)...
def _get_xblock_field(xblock, field_name):
"""
A temporary function to get the xblock field, either from the xblock itself or from one of its namespaces, by name.
:param xblock: the block whose field is being looked up
:param field_name: the name of the field to find
"""
def find_field(fields):
for field in fields:
if field.name == field_name:
return field
found = find_field(xblock.fields)
if found:
return found
for namespace in xblock.namespaces:
found = find_field(getattr(xblock, namespace).fields)
if found:
return found
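# A usage sketch for the helper above, mirroring the save_item loop; `existing_item` stands for
# any loaded descriptor and 'due' is just an example field name.
field = _get_xblock_field(existing_item, 'due')
if field is not None:
    field.write_to(existing_item, field.from_json('2010-11-22T04:00Z'))
    existing_item.save()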
@login_required
@expect_json
def create_item(request):

View File

@@ -7,7 +7,7 @@ from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from mitxmako.shortcuts import render_to_response
from xmodule_modifiers import replace_static_urls, wrap_xmodule
from xmodule_modifiers import replace_static_urls, wrap_xmodule, save_module # pylint: disable=F0401
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import exc_info_to_str
from xmodule.exceptions import NotFoundError, ProcessingError
@@ -47,6 +47,8 @@ def preview_dispatch(request, preview_id, location, dispatch=None):
# Let the module handle the AJAX
try:
ajax_return = instance.handle_ajax(dispatch, request.POST)
# Save any module data that has changed to the underlying KeyValueStore
instance.save()
except NotFoundError:
log.exception("Module indicating to user that request doesn't exist")
@@ -166,6 +168,11 @@ def load_preview_module(request, preview_id, descriptor):
course_namespace=Location([module.location.tag, module.location.org, module.location.course, None, None])
)
module.get_html = save_module(
module.get_html,
module
)
return module

View File

@@ -76,6 +76,9 @@ def reorder_static_tabs(request):
# OK, re-assemble the static tabs in the new order
course.tabs = reordered_tabs
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course.save()
modulestore('direct').update_metadata(course.location, own_metadata(course))
return HttpResponse()

View File

@@ -122,6 +122,10 @@ class CourseDetails(object):
descriptor.enrollment_end = converted
if dirty:
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor))
# NOTE: below auto writes to the db w/o verifying that any of the fields actually changed

View File

@@ -7,6 +7,9 @@ class CourseGradingModel(object):
"""
Basically a DAO and Model combo for CRUD operations pertaining to grading policy.
"""
# Within this class, allow access to protected members of client classes.
# This comes up when accessing kvs data and caches during kvs saves and modulestore writes.
# pylint: disable=W0212
def __init__(self, course_descriptor):
self.course_location = course_descriptor.location
self.graders = [CourseGradingModel.jsonize_grader(i, grader) for i, grader in enumerate(course_descriptor.raw_grader)] # weights transformed to ints [0..100]
@@ -83,13 +86,16 @@ class CourseGradingModel(object):
"""
course_location = Location(jsondict['course_location'])
descriptor = get_modulestore(course_location).get_item(course_location)
graders_parsed = [CourseGradingModel.parse_grader(jsonele) for jsonele in jsondict['graders']]
descriptor.raw_grader = graders_parsed
descriptor.grade_cutoffs = jsondict['grade_cutoffs']
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_item(course_location, descriptor.xblock_kvs._data)
CourseGradingModel.update_grace_period_from_json(course_location, jsondict['grace_period'])
return CourseGradingModel.fetch(course_location)
@@ -116,6 +122,9 @@ class CourseGradingModel(object):
else:
descriptor.raw_grader.append(grader)
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data)
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
@@ -131,6 +140,10 @@ class CourseGradingModel(object):
descriptor = get_modulestore(course_location).get_item(course_location)
descriptor.grade_cutoffs = cutoffs
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data)
return cutoffs
@@ -156,6 +169,10 @@ class CourseGradingModel(object):
descriptor = get_modulestore(course_location).get_item(course_location)
descriptor.lms.graceperiod = grace_timedelta
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata)
@staticmethod
@@ -172,23 +189,12 @@ class CourseGradingModel(object):
del descriptor.raw_grader[index]
# force propagation to definition
descriptor.raw_grader = descriptor.raw_grader
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data)
# NOTE cannot delete cutoffs. May be useful to reset
@staticmethod
def delete_cutoffs(course_location, cutoffs):
"""
Resets the cutoffs to the defaults
"""
if not isinstance(course_location, Location):
course_location = Location(course_location)
descriptor = get_modulestore(course_location).get_item(course_location)
descriptor.grade_cutoffs = descriptor.defaut_grading_policy['GRADE_CUTOFFS']
get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data)
return descriptor.grade_cutoffs
@staticmethod
def delete_grace_period(course_location):
"""
@@ -199,6 +205,10 @@ class CourseGradingModel(object):
descriptor = get_modulestore(course_location).get_item(course_location)
del descriptor.lms.graceperiod
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata)
@staticmethod
@@ -225,6 +235,9 @@ class CourseGradingModel(object):
del descriptor.lms.format
del descriptor.lms.graded
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(location).update_metadata(location, descriptor._model_data._kvs._metadata)
@staticmethod

View File

@@ -76,6 +76,9 @@ class CourseMetadata(object):
setattr(descriptor.lms, key, value)
if dirty:
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_metadata(course_location,
own_metadata(descriptor))
@@ -97,6 +100,10 @@ class CourseMetadata(object):
elif hasattr(descriptor.lms, key):
delattr(descriptor.lms, key)
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
descriptor.save()
get_modulestore(course_location).update_metadata(course_location,
own_metadata(descriptor))

View File

@@ -253,17 +253,13 @@ function syncReleaseDate(e) {
}
function getEdxTimeFromDateTimeVals(date_val, time_val) {
var edxTimeStr = null;
if (date_val != '') {
if (time_val == '') time_val = '00:00';
// Note, we are using date.js utility which has better parsing abilities than the built in JS date parsing
var date = Date.parse(date_val + " " + time_val);
edxTimeStr = date.toString('yyyy-MM-ddTHH:mm');
return new Date(date_val + " " + time_val + "Z");
}
return edxTimeStr;
else return null;
}
function getEdxTimeFromDateTimeInputs(date_id, time_id) {

View File

@@ -11,7 +11,7 @@
<section class="content content-header">
<header>
## "edX Studio" should not be translated
<h1>${_('Welcome to')}<span class="logo">edX Studio</span></h1>
<h1>${_('Welcome to')}<span class="logo">&nbsp;edX Studio</span></h1>
<p class="tagline">${_("Studio helps manage your courses online, so you can focus on teaching them")}</p>
</header>
</section>

View File

@@ -89,6 +89,21 @@ def grade_histogram(module_id):
return grades
def save_module(get_html, module):
"""
Updates the given get_html function for the given module to save the fields
after rendering.
"""
@wraps(get_html)
def _get_html():
"""Cache the rendered output, save, then return the output."""
rendered_html = get_html()
module.save()
return rendered_html
return _get_html
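# Usage sketch: wrap a module's renderer so any fields it mutates while rendering are persisted
# to the underlying KeyValueStore afterwards (this mirrors load_preview_module in the CMS change above).
module.get_html = save_module(module.get_html, module)
rendered = module.get_html()  # renders, then calls module.save() before returning the HTML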
def add_histogram(get_html, module, user):
"""
Updates the supplied module with a new get_html function that wraps

View File

@@ -460,10 +460,10 @@ class JSInput(InputTypeBase):
DO NOT USE! HAS NOT BEEN TESTED BEYOND 700X PROBLEMS, AND MAY CHANGE IN
BACKWARDS-INCOMPATIBLE WAYS.
Inputtype for general javascript inputs. Intended to be used with
customresponse.
customresponse.
Loads in a sandboxed iframe to help prevent css and js conflicts between
frame and top-level window.
frame and top-level window.
iframe sandbox whitelist:
- allow-scripts
- allow-popups
@@ -474,9 +474,9 @@ class JSInput(InputTypeBase):
window elements.
Example:
<jsinput html_file="/static/test.html"
gradefn="grade"
height="500"
<jsinput html_file="/static/test.html"
gradefn="grade"
height="500"
width="400"/>
See the documentation in the /doc/public folder for more information.
@@ -500,7 +500,7 @@ class JSInput(InputTypeBase):
Attribute('width', "400"), # iframe width
Attribute('height', "300")] # iframe height
def _extra_context(self):
context = {
@@ -510,11 +510,12 @@ class JSInput(InputTypeBase):
return context
registry.register(JSInput)
#-----------------------------------------------------------------------------
class TextLine(InputTypeBase):
"""
A text line input. Can do math preview if "math"="1" is specified.
@@ -1368,3 +1369,209 @@ class AnnotationInput(InputTypeBase):
return extra_context
registry.register(AnnotationInput)
class ChoiceTextGroup(InputTypeBase):
"""
Groups of radiobutton/checkboxes with text inputs.
Examples:
RadioButton problem
<problem>
<startouttext/>
A person rolls a standard die 100 times and records the results.
On the first roll they received a "1". Given this information,
select the correct choice and fill in numbers to make it accurate.
<endouttext/>
<choicetextresponse>
<radiotextgroup>
<choice correct="false">The lowest number rolled was:
<decoy_input/> and the highest number rolled was:
<decoy_input/> .</choice>
<choice correct="true">The lowest number rolled was <numtolerance_input answer="1"/>
and there is not enough information to determine the highest number rolled.
</choice>
<choice correct="false">There is not enough information to determine the lowest
number rolled, and the highest number rolled was:
<decoy_input/> .
</choice>
</radiotextgroup>
</choicetextresponse>
</problem>
CheckboxProblem:
<problem>
<startouttext/>
A person randomly selects 100 times, with replacement, from the list of numbers \(\sqrt{2}\), 2, 3, 4, 5, 6
and records the results. The first number they pick is \(\sqrt{2}\). Given this information,
select the correct choices and fill in numbers to make them accurate.
<endouttext/>
<choicetextresponse>
<checkboxtextgroup>
<choice correct="true">
The lowest number selected was <numtolerance_input answer="1.4142" tolerance="0.01"/>
</choice>
<choice correct="false">
The highest number selected was <decoy_input/> .
</choice>
<choice correct="true">There is not enough information given to determine the highest number
which was selected.
</choice>
<choice correct="false">There is not enough information given to determine the lowest number
selected.
</choice>
</checkboxtextgroup>
</choicetextresponse>
</problem>
In the preceding examples the <decoy_input/> is used to generate a textinput html element
in the problem's display. Since it is inside of an incorrect choice, no answer given
for it will be correct, and thus specifying an answer for it is not needed.
"""
template = "choicetext.html"
tags = ['radiotextgroup', 'checkboxtextgroup']
def setup(self):
"""
Performs setup for the initial rendering of the problem.
`self.html_input_type` determines whether this problem is displayed
with radiobuttons or checkboxes
If the initial value of `self.value` is '', change it to {} so that
the template has an empty dictionary to work with.
Sets the value of `self.choices` to the return value of
`self.extract_choices`.
"""
self.text_input_values = {}
if self.tag == 'radiotextgroup':
self.html_input_type = "radio"
elif self.tag == 'checkboxtextgroup':
self.html_input_type = "checkbox"
else:
raise Exception("ChoiceGroup: unexpected tag {0}".format(self.tag))
if self.value == '':
# Make `value` an empty dictionary, if it currently has an empty
# value. This is necessary because the template expects a
# dictionary.
self.value = {}
self.choices = self.extract_choices(self.xml)
@classmethod
def get_attributes(cls):
"""
Returns a list of `Attribute` for this problem type
"""
return [
Attribute("show_correctness", "always"),
Attribute("submitted_message", "Answer received.")
]
def _extra_context(self):
"""
Returns a dictionary of extra content necessary for rendering this InputType.
`input_type` is either 'radio' or 'checkbox' indicating whether the choices for
this problem will have radiobuttons or checkboxes.
"""
return {
'input_type': self.html_input_type,
'choices': self.choices
}
@staticmethod
def extract_choices(element):
"""
Extracts choices from the xml for this problem type.
If we have xml that is as follows (choice names will have been assigned
by now):
<radiotextgroup>
<choice correct = "true" name ="1_2_1_choiceinput_0bc">
The number
<numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0" answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false" name = "1_2_1_choiceinput_1bc>
False demonstration choice
</choice>
</radiotextgroup>
Choices are used for rendering the problem properly.
The function will set up choices as follows:
choices =[
("1_2_1_choiceinput_0bc",
[{'type': 'text', 'contents': "The number", 'tail_text': '',
'value': ''
},
{'type': 'textinput',
'contents': "1_2_1_choiceinput0_numtolerance_input_0",
'tail_text': 'Is the mean of the list',
'value': ''
}
]
),
("1_2_1_choiceinput_1bc",
[{'type': 'text', 'contents': "False demonstration choice",
'tail_text': '',
'value': ''
}
]
)
]
"""
choices = []
for choice in element:
if choice.tag != 'choice':
raise Exception(
"[capa.inputtypes.extract_choices] Expected a <choice>" +
"tag; got {0} instead".format(choice.tag)
)
components = []
choice_text = ''
if choice.text is not None:
choice_text += choice.text
# Initialize our dict for the next content
adder = {
'type': 'text',
'contents': choice_text,
'tail_text': '',
'value': ''
}
components.append(adder)
for elt in choice:
# for elements in the choice e.g. <text> <numtolerance_input>
adder = {
'type': 'text',
'contents': '',
'tail_text': '',
'value': ''
}
tag_type = elt.tag
# If the current `elt` is a <numtolerance_input> or <decoy_input>, set the
# `adder`'s 'type' to 'textinput' and its 'contents' to
# the `elt`'s name.
# Treat decoy_inputs and numtolerance_inputs the same in order
# to prevent students from reading the HTML and figuring out
# which inputs are valid.
if tag_type in ('numtolerance_input', 'decoy_input'):
# We set this to textinput, so that we get a textinput html
# element.
adder['type'] = 'textinput'
adder['contents'] = elt.get('name')
else:
adder['contents'] = elt.text
# Add any tail text ("is the mean" in the example)
adder['tail_text'] = elt.tail if elt.tail else ''
components.append(adder)
# Add the tuple for the current choice to the list of choices
choices.append((choice.get("name"), components))
return choices
registry.register(ChoiceTextGroup)

View File

@@ -2097,6 +2097,333 @@ class AnnotationResponse(LoncapaResponse):
return option_ids[0]
return None
class ChoiceTextResponse(LoncapaResponse):
"""
Allows for multiple choice responses with text inputs.
Desired semantics match those of NumericalResponse and
ChoiceResponse.
"""
response_tag = 'choicetextresponse'
max_inputfields = 1
allowed_inputfields = ['choicetextgroup',
'checkboxtextgroup',
'radiotextgroup'
]
def setup_response(self):
"""
Sets up three dictionaries for use later:
`correct_choices`: These are the correct binary choices (radio/checkbox)
`correct_inputs`: These are the numerical/string answers for required
inputs.
`answer_values`: This is a dict, keyed by the name of the binary choice
which contains the correct answers for the text inputs separated by
commas e.g. "1, 0.5"
`correct_choices` and `correct_inputs` are used for grading the problem
and `answer_values` is used for displaying correct answers.
"""
context = self.context
self.correct_choices = {}
self.assign_choice_names()
self.correct_inputs = {}
self.answer_values = {self.answer_id: []}
correct_xml = self.xml.xpath('//*[@id=$id]//choice[@correct="true"]',
id=self.xml.get('id'))
for node in correct_xml:
# For each correct choice, set the `parent_name` to the
# current choice's name
parent_name = node.get('name')
# Add the name of the correct binary choice to the
# correct choices list as a key. The value is not important.
self.correct_choices[parent_name] = {'answer': ''}
# Add the name of the parent to the list of correct answers
self.answer_values[self.answer_id].append(parent_name)
answer_list = []
# Loop over <numtolerance_input> elements inside of the correct choices
for child in node:
answer = child.get('answer', None)
if not answer:
# If the question creator does not specify an answer for a
# <numtolerance_input> inside of a correct choice, raise an error
raise LoncapaProblemError(
"Answer not provided for numtolerance_input"
)
# Contextualize the answer to allow script generated answers.
answer = contextualize_text(answer, context)
input_name = child.get('name')
# Contextualize the tolerance to value.
tolerance = contextualize_text(
child.get('tolerance', '0'),
context
)
# Add the answer and tolerance information for the current
# numtolerance_input to `correct_inputs`
self.correct_inputs[input_name] = {
'answer': answer,
'tolerance': tolerance
}
# Add the correct answer for this input to the list for show
answer_list.append(answer)
# Turn the list of numtolerance_input answers into a comma separated string.
self.answer_values[parent_name] = ', '.join(answer_list)
# Turn correct choices into a set. Allows faster grading.
self.correct_choices = set(self.correct_choices.keys())
def assign_choice_names(self):
"""
Initialize name attributes in <choice> and <numtolerance_input> tags
for this response.
Example:
Assuming for simplicity that `self.answer_id` = '1_2_1'
Before the function is called `self.xml` =
<radiotextgroup>
<choice correct = "true">
The number
<numtolerance_input answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false">
False demonstration choice
</choice>
</radiotextgroup>
After this is called the choices and numtolerance_inputs will have a name
attribute initialized and self.xml will be:
<radiotextgroup>
<choice correct = "true" name ="1_2_1_choiceinput_0bc">
The number
<numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0"
answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false" name = "1_2_1_choiceinput_1bc>
False demonstration choice
</choice>
</radiotextgroup>
"""
for index, choice in enumerate(
self.xml.xpath('//*[@id=$id]//choice', id=self.xml.get('id'))
):
# Set the name attribute for <choices>
# "bc" is appended at the end to indicate that this is a
# binary choice as opposed to a numtolerance_input; this convention
# is used when grading the problem.
choice.set(
"name",
self.answer_id + "_choiceinput_" + str(index) + "bc"
)
# Set Name attributes for <numtolerance_input> elements
# Look for all <numtolerance_inputs> inside this choice.
numtolerance_inputs = choice.findall('numtolerance_input')
# Look for all <decoy_input> inside this choice
decoys = choice.findall('decoy_input')
# <decoy_input> would only be used in choices which do not contain
# <numtolerance_input>
inputs = numtolerance_inputs if numtolerance_inputs else decoys
# Give each input inside of the choice a name combining
# The ordinality of the choice, and the ordinality of the input
# within that choice e.g. 1_2_1_choiceinput_0_numtolerance_input_1
for ind, child in enumerate(inputs):
child.set(
"name",
self.answer_id + "_choiceinput_" + str(index) +
"_numtolerance_input_" + str(ind)
)
def get_score(self, student_answers):
"""
Returns a `CorrectMap` showing whether `student_answers` are correct.
`student_answers` contains keys for binary inputs (radiobutton,
checkbox) and numerical inputs. Keys ending with 'bc' are binary
choice inputs; otherwise they are text fields.
This method first separates the two
types of answers and then grades them in separate methods.
The student is only correct if they have both the binary inputs and
numerical inputs correct.
"""
answer_dict = student_answers.get(self.answer_id, "")
binary_choices, numtolerance_inputs = self._split_answers_dict(answer_dict)
# Check the binary choices first.
choices_correct = self._check_student_choices(binary_choices)
inputs_correct = self._check_student_inputs(numtolerance_inputs)
# Only return correct if the student got both the binary choices
# and the numtolerance_inputs correct
correct = choices_correct and inputs_correct
return CorrectMap(
self.answer_id,
'correct' if correct else 'incorrect'
)
def get_answers(self):
"""
Returns a dictionary containing the names of binary choices as keys
and a string of answers to any numtolerance_inputs which they may have
e.g {choice_1bc : "answer1, answer2", choice_2bc : ""}
"""
return self.answer_values
def _split_answers_dict(self, a_dict):
"""
Returns two dicts:
`binary_choices` : dictionary {input_name: input_value} for
the binary choices which the student selected.
and
`numtolerance_choices` : a dictionary {input_name: input_value}
for the numtolerance_inputs inside of choices which were selected
Determines whether an input is inside of a binary choice by looking at
the beginning of its name.
For example, if a binary_choice was named '1_2_1_choiceinput_0bc',
all of the numtolerance_inputs in it would have an id that begins
with '1_2_1_choiceinput_0_numtolerance_input'.
Splits the name of the numtolerance_input at the occurrence of
'_numtolerance_input_' and appends 'bc' to the end to get the name
of the choice it is contained in.
Example:
`a_dict` = {
'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc',
'1_2_1_choiceinput_0_numtolerance_input_0': '1',
'1_2_1_choiceinput_0_numtolerance_input_1': '2'
'1_2_1_choiceinput_1_numtolerance_input_0': '3'
}
In this case, the binary choice is '1_2_1_choiceinput_0bc', and
the numtolerance_inputs associated with it are
'1_2_1_choiceinput_0_numtolerance_input_0', and
'1_2_1_choiceinput_0_numtolerance_input_1'.
so the two return dictionaries would be
`binary_choices` = {'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc'}
and
`numtolerance_choices` ={
'1_2_1_choiceinput_0_numtolerance_input_0': '1',
'1_2_1_choiceinput_0_numtolerance_input_1': '2'
}
The entry '1_2_1_choiceinput_1_numtolerance_input_0': '3' is discarded
because it was not inside of a selected binary choice, and no validation
should be performed on numtolerance_inputs inside of non-selected choices.
"""
# Initialize the two dictionaries that are returned
numtolerance_choices = {}
binary_choices = {}
# `selected_choices` is a list of binary choices which were "checked/selected"
# when the student submitted the problem.
# Keys in a_dict ending with 'bc' refer to binary choices.
selected_choices = [key for key in a_dict if key.endswith("bc")]
for key in selected_choices:
binary_choices[key] = a_dict[key]
# Convert the name of a numtolerance_input into the name of the binary
# choice that it is contained within, and append it to the list if
# the numtolerance_input's parent binary_choice is contained in
# `selected_choices`.
selected_numtolerance_inputs = [
key for key in a_dict if key.partition("_numtolerance_input_")[0] + "bc"
in selected_choices
]
for key in selected_numtolerance_inputs:
numtolerance_choices[key] = a_dict[key]
return (binary_choices, numtolerance_choices)
def _check_student_choices(self, choices):
"""
Compares student submitted checkbox/radiobutton answers against
the correct answers. Returns True or False.
True if all of the correct choices are selected and no incorrect
choices are selected.
"""
student_choices = set(choices)
required_selected = len(self.correct_choices - student_choices) == 0
no_extra_selected = len(student_choices - self.correct_choices) == 0
correct = required_selected and no_extra_selected
return correct
def _check_student_inputs(self, numtolerance_inputs):
"""
Compares student submitted numerical answers against the correct
answers and tolerances.
`numtolerance_inputs` is a dictionary {answer_name : answer_value}
Performs numerical validation by means of calling
`compare_with_tolerance()` on all of `numtolerance_inputs`
Performs a call to `compare_with_tolerance` even on values for
decoy_inputs. This is used to validate their numericality and
raise an error if the student entered a non numerical expression.
Returns True if and only if all student inputs are correct.
"""
inputs_correct = True
for answer_name, answer_value in numtolerance_inputs.iteritems():
# If `self.correct_inputs` does not contain an entry for
# `answer_name`, this means that answer_name is a decoy
# input's value, and validation of its numericality is the
# only thing of interest from the later call to
# `compare_with_tolerance`.
params = self.correct_inputs.get(answer_name, {'answer': 0})
correct_ans = params['answer']
# Set the tolerance to '0' if it was not specified in the xml
tolerance = params.get('tolerance', '0')
# Make sure that the staff answer is a valid number
try:
correct_ans = complex(correct_ans)
except ValueError:
log.debug(
"Content error--answer" +
"'{0}' is not a valid complex number".format(correct_ans)
)
raise StudentInputError(
"The Staff answer could not be interpreted as a number."
)
# Compare the student answer to the staff answer, or to 0
# if all that is important is verifying numericality.
try:
partial_correct = compare_with_tolerance(
evaluator(dict(), dict(), answer_value),
correct_ans,
tolerance
)
except:
# Use the traceback-preserving version of re-raising with a
# different type
_, _, trace = sys.exc_info()
raise StudentInputError(
"Could not interpret '{0}' as a number{1}".format(
cgi.escape(answer_value),
trace
)
)
# Ignore the results of the comparisons which were just for
# Numerical Validation.
if answer_name in self.correct_inputs and not partial_correct:
# If any input is not correct, set the return value to False
inputs_correct = False
return inputs_correct
#-----------------------------------------------------------------------------
# TEMPORARY: List of all response subclasses
@@ -2116,4 +2443,5 @@ __all__ = [CodeResponse,
MultipleChoiceResponse,
TrueFalseResponse,
JavascriptResponse,
AnnotationResponse]
AnnotationResponse,
ChoiceTextResponse]

View File

@@ -0,0 +1,76 @@
<% element_checked = False %>
% for choice_id, _ in choices:
<%choice_id = choice_id %>
%if choice_id in value:
<% element_checked = True %>
%endif
%endfor
<section id="choicetextinput_${id}" class="choicetextinput">
<form class="choicetextgroup capa_inputtype" id="inputtype_${id}">
<div class="script_placeholder" data-src="/static/js/capa/choicetextinput.js"/>
<div class="indicator_container">
% if input_type == 'checkbox' or not element_checked:
% if status == 'unsubmitted':
<span class="unanswered" style="display:inline-block;" id="status_${id}"></span>
% elif status == 'correct':
<span class="correct" id="status_${id}"></span>
% elif status == 'incorrect':
<span class="incorrect" id="status_${id}"></span>
% elif status == 'incomplete':
<span class="incorrect" id="status_${id}"></span>
% endif
% endif
</div>
<fieldset>
% for choice_id, choice_description in choices:
<%choice_id= choice_id %>
<section id="forinput${choice_id}"
% if input_type == 'radio' and choice_id in value :
<%
if status == 'correct':
correctness = 'correct'
elif status == 'incorrect':
correctness = 'incorrect'
else:
correctness = None
%>
% if correctness:
class="choicetextgroup_${correctness}"
% endif
% endif
>
<input class="ctinput" type="${input_type}" name="choiceinput_${id}" id="${choice_id}" value="${choice_id}"
% if choice_id in value:
checked="true"
% endif
/>
% for content_node in choice_description:
% if content_node['type'] == 'text':
<span class="mock_label">
${content_node['contents']}
</span>
% else:
<% my_id = content_node.get('contents','') %>
<% my_val = value.get(my_id,'') %>
<input class="ctinput" type="text" name="${content_node['contents']}" id="${content_node['contents']}" value="${my_val|h} "/>
%endif
<span class="mock_label">
${content_node['tail_text']}
</span>
% endfor
<p id="answer_${choice_id}" class="answer"></p>
</section>
% endfor
<span id="answer_${id}"></span>
</fieldset>
<input class= "choicetextvalue" type="hidden" name="input_${id}{}" id="input_${id}" value="${value|h}" />
% if show_correctness == "never" and (value or status not in ['unsubmitted']):
<div class="capa_alert">${submitted_message}</div>
%endif
</form>
</section>

View File

@@ -779,3 +779,109 @@ class SymbolicResponseXMLFactory(ResponseXMLFactory):
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class ChoiceTextResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <choicetextresponse> xml """
def create_response_element(self, **kwargs):
""" Create a <choicetextresponse> element """
return etree.Element("choicetextresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element.
choices can be specified in the following format:
[("true", [{"answer": "5", "tolerance": 0}]),
("false", [{"answer": "5", "tolerance": 0}])
]
This indicates that the first checkbox/radio is correct and it
contains a numtolerance_input with an answer of 5 and a tolerance of 0.
It also indicates that there is a second, incorrect radiobutton
or checkbox with a numtolerance_input.
"""
choices = kwargs.get('choices', [("true", {})])
choice_inputs = []
# Ensure that the first element of choices is an ordered
# collection. It will start as a list, a tuple, or not a Container.
if type(choices[0]) not in [list, tuple]:
choices = [choices]
for choice in choices:
correctness, answers = choice
numtolerance_inputs = []
# If the current `choice` contains any ("answer": number)
# elements, turn those into numtolerance_inputs.
if answers:
# `answers` will be a list or tuple of answers or a single
# answer, representing the answers for numtolerance_inputs
# inside of this specific choice.
# Make sure that `answers` is an ordered collection for
# convenience.
if type(answers) not in [list, tuple]:
answers = [answers]
numtolerance_inputs = [
self._create_numtolerance_input_element(answer)
for answer in answers
]
choice_inputs.append(
self._create_choice_element(
correctness=correctness,
inputs=numtolerance_inputs
)
)
# Default type is 'radiotextgroup'
input_type = kwargs.get('type', 'radiotextgroup')
input_element = etree.Element(input_type)
for ind, choice in enumerate(choice_inputs):
# Give each choice text equal to its position (0, 1, 2, ...)
choice.text = "choice_{0}".format(ind)
input_element.append(choice)
return input_element
def _create_choice_element(self, **kwargs):
"""
Creates a choice element for a choicetext problem.
Defaults to a correct choice with no numtolerance_input.
"""
text = kwargs.get('text', '')
correct = kwargs.get('correctness', "true")
inputs = kwargs.get('inputs', [])
choice_element = etree.Element("choice")
choice_element.set("correct", correct)
choice_element.text = text
for inp in inputs:
# Add all of the inputs as children of this choice
choice_element.append(inp)
return choice_element
def _create_numtolerance_input_element(self, params):
"""
Creates a <numtolerance_input/> or <decoy_input/> element with
optionally specified tolerance and answer.
"""
answer = params['answer'] if 'answer' in params else None
# If no answer is specified, create a <decoy_input/>;
# otherwise create a <numtolerance_input/> and set its tolerance
# and answer attributes.
if answer:
text_input = etree.Element("numtolerance_input")
text_input.set('answer', answer)
# If a tolerance was specified, use it; otherwise
# set the tolerance to "0".
text_input.set(
'tolerance',
params['tolerance'] if 'tolerance' in params else "0"
)
else:
text_input = etree.Element("decoy_input")
return text_input
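# A sketch of driving this factory, assuming the build_xml(**kwargs) helper that the other
# factories in this module use; the choices below describe one correct choice holding a
# numtolerance_input (answer "5") and one incorrect choice holding a decoy_input.
factory = ChoiceTextResponseXMLFactory()
problem_xml = factory.build_xml(
    type="radiotextgroup",
    choices=[
        ("true", [{"answer": "5", "tolerance": "0"}]),
        ("false", [{}]),   # no answer given, so a <decoy_input/> is generated
    ],
)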

View File

@@ -714,3 +714,170 @@ class DragAndDropTemplateTest(TemplateTestCase):
# escaping the HTML. We should be able to traverse the XML tree.
xpath = "//div[@class='drag_and_drop_problem_json']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
class ChoiceTextGroupTemplateTest(TemplateTestCase):
"""Test mako template for `<choicetextgroup>` input"""
TEMPLATE_NAME = 'choicetext.html'
VALUE_DICT = {'1_choiceinput_0bc': '1_choiceinput_0bc', '1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
EMPTY_DICT = {'1_choiceinput_0_textinput_0': '',
'1_choiceinput_1_textinput_0': ''}
BOTH_CHOICE_CHECKBOX = {'1_choiceinput_0bc': 'choiceinput_0',
'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
WRONG_CHOICE_CHECKBOX = {'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
def setUp(self):
choices = [('1_choiceinput_0bc',
[{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_0_textinput_0'}]),
('1_choiceinput_1bc', [{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_1_textinput_0'}])]
self.context = {'id': '1',
'choices': choices,
'status': 'correct',
'input_type': 'radio',
'value': self.VALUE_DICT}
super(ChoiceTextGroupTemplateTest, self).setUp()
def test_grouping_tag(self):
"""
Tests whether we are using a section or a label to wrap choice elements.
Section is used for checkbox, so inputting text does not deselect
"""
input_tags = ('radio', 'checkbox')
self.context['status'] = 'correct'
xpath = "//section[@id='forinput1_choiceinput_0bc']"
self.context['value'] = {}
for input_type in input_tags:
self.context['input_type'] = input_type
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
def test_problem_marked_correct(self):
"""Test conditions under which the entire problem
(not a particular option) is marked correct"""
self.context['status'] = 'correct'
self.context['input_type'] = 'checkbox'
self.context['value'] = self.VALUE_DICT
# Should mark the entire problem correct
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']",
self.context)
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']",
self.context)
def test_problem_marked_incorrect(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked incorrect"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': 'incorrect', 'input_type': 'radio', 'value': {}},
{'status': 'incorrect', 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': 'incorrect', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': 'incorrect', 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': 'incomplete', 'input_type': 'radio', 'value': {}},
{'status': 'incomplete', 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': 'incomplete', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': 'incomplete', 'input_type': 'checkbox', 'value': self.VALUE_DICT}]
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_problem_marked_unsubmitted(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked unanswered"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': 'unsubmitted', 'input_type': 'radio', 'value': {}},
{'status': 'unsubmitted', 'input_type': 'radio', 'value': self.EMPTY_DICT},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': {}},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.EMPTY_DICT},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': 'unsubmitted', 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}]
self.context['status'] = 'unanswered'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='unanswered']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_option_marked_correct(self):
"""Test conditions under which a particular option
(not the entire problem) is marked correct."""
conditions = [
{'input_type': 'radio', 'value': self.VALUE_DICT}]
self.context['status'] = 'correct'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_option_marked_incorrect(self):
"""Test conditions under which a particular option
(not the entire problem) is marked incorrect."""
conditions = [
{'input_type': 'radio', 'value': self.VALUE_DICT}]
self.context['status'] = 'incorrect'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)

View File

@@ -860,3 +860,94 @@ class AnnotationInputTest(unittest.TestCase):
self.maxDiff = None
self.assertDictEqual(context, expected)
class TestChoiceText(unittest.TestCase):
"""
Tests for checkboxtextgroup inputs
"""
@staticmethod
def build_choice_element(node_type, contents, tail_text, value):
"""
Builds a content node for a choice.
"""
# When the XML is parsed, numtolerance_input and decoy_input tags map to the textinput type
# in order to provide the template with correct rendering information.
if node_type in ('numtolerance_input', 'decoy_input'):
node_type = 'textinput'
choice = {'type': node_type, 'contents': contents, 'tail_text': tail_text, 'value': value}
return choice
def check_group(self, tag, choice_tag, expected_input_type):
"""
Build a radio or checkbox group, parse it and check the results against the
expected output.
`tag` should be 'checkboxtextgroup' or 'radiotextgroup'
`choice_tag` is either 'choice' for proper xml, or any other value to trigger an error.
`expected_input_type` is either 'radio' or 'checkbox'.
"""
xml_str = """
<{tag}>
<{choice_tag} correct="false" name="choiceinput_0">this is<numtolerance_input name="choiceinput_0_textinput_0"/>false</{choice_tag}>
<choice correct="true" name="choiceinput_1">Is a number<decoy_input name="choiceinput_1_textinput_0"/><text>!</text></choice>
</{tag}>
""".format(tag=tag, choice_tag=choice_tag)
element = etree.fromstring(xml_str)
state = {
'value': '{}',
'id': 'choicetext_input',
'status': 'answered'
}
first_input = self.build_choice_element('numtolerance_input', 'choiceinput_0_textinput_0', 'false', '')
second_input = self.build_choice_element('decoy_input', 'choiceinput_1_textinput_0', '', '')
first_choice_content = self.build_choice_element('text', 'this is', '', '')
second_choice_content = self.build_choice_element('text', 'Is a number', '', '')
second_choice_text = self.build_choice_element('text', "!", '', '')
choices = [
('choiceinput_0', [first_choice_content, first_input]),
('choiceinput_1', [second_choice_content, second_input, second_choice_text])
]
expected = {
'msg': '',
'input_type': expected_input_type,
'choices': choices,
'show_correctness': 'always',
'submitted_message': 'Answer received.'
}
expected.update(state)
the_input = lookup_tag(tag)(test_system(), element, state)
context = the_input._get_render_context()
self.assertEqual(context, expected)
def test_radiotextgroup(self):
"""
Test that a properly formatted radiotextgroup problem generates
expected outputs
"""
self.check_group('radiotextgroup', 'choice', 'radio')
def test_checkboxtextgroup(self):
"""
Test that a properly formatted checkboxtextgroup problem generates
expected output
"""
self.check_group('checkboxtextgroup', 'choice', 'checkbox')
def test_invalid_tag(self):
"""
Test to ensure that an unrecognized inputtype tag causes an error
"""
with self.assertRaises(Exception):
self.check_group('invalid', 'choice', 'checkbox')
def test_invalid_input_tag(self):
"""
Test to ensure having a tag other than <choice> inside of
a checkbox or radiotextgroup problem raises an error.
"""
with self.assertRaisesRegexp(Exception, "Error in xml"):
self.check_group('checkboxtextgroup', 'invalid', 'checkbox')

View File

@@ -1429,3 +1429,357 @@ class AnnotationResponseTest(ResponseTest):
msg="%s should be marked %s" % (answer_id, expected_correctness))
self.assertEqual(expected_points, actual_points,
msg="%s should have %d points" % (answer_id, expected_points))
class ChoiceTextResponseTest(ResponseTest):
"""
Class containing setup and tests for ChoiceText responsetype.
"""
from response_xml_factory import ChoiceTextResponseXMLFactory
xml_factory_class = ChoiceTextResponseXMLFactory
# `TEST_INPUTS` is a dictionary mapping from
# test_name to a representation of inputs for a test problem.
TEST_INPUTS = {
"1_choice_0_input_correct": [(True, [])],
"1_choice_0_input_incorrect": [(False, [])],
"1_choice_0_input_invalid_choice": [(False, []), (True, [])],
"1_choice_1_input_correct": [(True, ["123"])],
"1_input_script_correct": [(True, ["2"])],
"1_input_script_incorrect": [(True, ["3.25"])],
"1_choice_2_inputs_correct": [(True, ["123", "456"])],
"1_choice_2_inputs_tolerance": [(True, ["123 + .5", "456 + 9"])],
"1_choice_2_inputs_1_wrong": [(True, ["0", "456"])],
"1_choice_2_inputs_both_wrong": [(True, ["0", "0"])],
"1_choice_2_inputs_inputs_blank": [(True, ["", ""])],
"1_choice_2_inputs_empty": [(False, [])],
"1_choice_2_inputs_fail_tolerance": [(True, ["123 + 1.5", "456 + 9"])],
"1_choice_1_input_within_tolerance": [(True, ["122.5"])],
"1_choice_1_input_answer_incorrect": [(True, ["345"])],
"1_choice_1_input_choice_incorrect": [(False, ["123"])],
"2_choices_0_inputs_correct": [(False, []), (True, [])],
"2_choices_0_inputs_incorrect": [(True, []), (False, [])],
"2_choices_0_inputs_blank": [(False, []), (False, [])],
"2_choices_1_input_1_correct": [(False, []), (True, ["123"])],
"2_choices_1_input_1_incorrect": [(True, []), (False, ["123"])],
"2_choices_1_input_input_wrong": [(False, []), (True, ["321"])],
"2_choices_1_input_1_blank": [(False, []), (False, [])],
"2_choices_1_input_2_correct": [(True, []), (False, ["123"])],
"2_choices_1_input_2_incorrect": [(False, []), (True, ["123"])],
"2_choices_2_inputs_correct": [(True, ["123"]), (False, [])],
"2_choices_2_inputs_wrong_choice": [(False, ["123"]), (True, [])],
"2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])]
}
# `TEST_SCENARIOS` is a dictionary of the form
# {Test_Name: (Test_Problem_name, correctness)}
# correctness represents whether the problem should be graded as
# correct or incorrect when the test is run.
TEST_SCENARIOS = {
"1_choice_0_input_correct": ("1_choice_0_input", "correct"),
"1_choice_0_input_incorrect": ("1_choice_0_input", "incorrect"),
"1_choice_0_input_invalid_choice": ("1_choice_0_input", "incorrect"),
"1_input_script_correct": ("1_input_script", "correct"),
"1_input_script_incorrect": ("1_input_script", "incorrect"),
"1_choice_2_inputs_correct": ("1_choice_2_inputs", "correct"),
"1_choice_2_inputs_tolerance": ("1_choice_2_inputs", "correct"),
"1_choice_2_inputs_1_wrong": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_both_wrong": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_inputs_blank": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_empty": ("1_choice_2_inputs", "incorrect"),
"1_choice_2_inputs_fail_tolerance": ("1_choice_2_inputs", "incorrect"),
"1_choice_1_input_correct": ("1_choice_1_input", "correct"),
"1_choice_1_input_within_tolerance": ("1_choice_1_input", "correct"),
"1_choice_1_input_answer_incorrect": ("1_choice_1_input", "incorrect"),
"1_choice_1_input_choice_incorrect": ("1_choice_1_input", "incorrect"),
"2_choices_0_inputs_correct": ("2_choices_0_inputs", "correct"),
"2_choices_0_inputs_incorrect": ("2_choices_0_inputs", "incorrect"),
"2_choices_0_inputs_blank": ("2_choices_0_inputs", "incorrect"),
"2_choices_1_input_1_correct": ("2_choices_1_input_1", "correct"),
"2_choices_1_input_1_incorrect": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_input_wrong": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_1_blank": ("2_choices_1_input_1", "incorrect"),
"2_choices_1_input_2_correct": ("2_choices_1_input_2", "correct"),
"2_choices_1_input_2_incorrect": ("2_choices_1_input_2", "incorrect"),
"2_choices_2_inputs_correct": ("2_choices_2_inputs", "correct"),
"2_choices_2_inputs_wrong_choice": ("2_choices_2_inputs", "incorrect"),
"2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect")
}
# Dictionary that maps from problem_name to the arguments passed to
# _make_problem to create the problem.
TEST_PROBLEM_ARGS = {
"1_choice_0_input": {"choices": ("true", {}), "script": ''},
"1_choice_1_input": {
"choices": ("true", {"answer": "123", "tolerance": "1"}),
"script": ''
},
"1_input_script": {
"choices": ("true", {"answer": "$computed_response", "tolerance": "1"}),
"script": "computed_response = math.sqrt(4)"
},
"1_choice_2_inputs": {
"choices": [
(
"true", (
{"answer": "123", "tolerance": "1"},
{"answer": "456", "tolerance": "10"}
)
)
],
"script": ''
},
"2_choices_0_inputs": {
"choices": [("false", {}), ("true", {})],
"script": ''
},
"2_choices_1_input_1": {
"choices": [
("false", {}), ("true", {"answer": "123", "tolerance": "0"})
],
"script": ''
},
"2_choices_1_input_2": {
"choices": [("true", {}), ("false", {"answer": "123", "tolerance": "0"})],
"script": ''
},
"2_choices_2_inputs": {
"choices": [
("true", {"answer": "123", "tolerance": "0"}),
("false", {"answer": "999", "tolerance": "0"})
],
"script": ''
}
}
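# To trace one scenario through the dictionaries above: the test name
# "1_choice_1_input_correct" pulls inputs [(True, ["123"])] from TEST_INPUTS,
# maps to ("1_choice_1_input", "correct") in TEST_SCENARIOS, and
# TEST_PROBLEM_ARGS["1_choice_1_input"] builds a radiotextgroup whose answer
# is "123" with tolerance "1", so that submission should grade as correct.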
def _make_problem(self, choices, in_type='radiotextgroup', script=''):
"""
Convenience method to fill in default values for script and
type if needed, then call self.build_problem
"""
return self.build_problem(
choices=choices,
type=in_type,
script=script
)
def _make_answer_dict(self, choice_list):
"""
Convenience method to make generation of answers less tedious.
Pass in an iterable whose elements have the form (bool, [ans, ...]);
this will generate an answer dict for those options.
"""
answer_dict = {}
for index, choice_answers_pair in enumerate(choice_list):
# Choice is whether this choice is correct
# Answers contains a list of answers to textinputs for the choice
choice, answers = choice_answers_pair
if choice:
# Radio/Checkbox inputs in choicetext problems follow
# a naming convention that gives them names ending with "bc"
choice_id = "1_2_1_choiceinput_{index}bc".format(index=index)
choice_value = "choiceinput_{index}".format(index=index)
answer_dict[choice_id] = choice_value
# Build the names for the numtolerance_inputs and add their answers
# to `answer_dict`.
for ind, answer in enumerate(answers):
# In `answer_id` `index` represents the ordinality of the
# choice and `ind` represents the ordinality of the
# numtolerance_input inside the parent choice.
answer_id = "1_2_1_choiceinput_{index}_numtolerance_input_{ind}".format(
index=index,
ind=ind
)
answer_dict[answer_id] = answer
return answer_dict
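# For example, _make_answer_dict([(True, ["123"])]) produces:
#     {"1_2_1_choiceinput_0bc": "choiceinput_0",
#      "1_2_1_choiceinput_0_numtolerance_input_0": "123"}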
def test_invalid_xml(self):
"""
Test that build problem raises errors for invalid options
"""
with self.assertRaises(Exception):
self.build_problem(type="invalidtextgroup")
def test_valid_xml(self):
"""
Test that `build_problem` builds valid xml
"""
self.build_problem()
self.assertTrue(True)
def test_unchecked_input_not_validated(self):
"""
Test that a student can have a non-numeric answer in an unselected
choice without causing an error to be raised when the problem is
checked.
"""
two_choice_two_input = self._make_problem(
[
("true", {"answer": "123", "tolerance": "1"}),
("false", {})
],
"checkboxtextgroup"
)
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["1"]), (False, ["Platypus"])]),
"incorrect"
)
def test_interpret_error(self):
"""
Test that student answers that cannot be interpreted as numbers
cause the response type to raise an error.
"""
two_choice_two_input = self._make_problem(
[
("true", {"answer": "123", "tolerance": "1"}),
("false", {})
],
"checkboxtextgroup"
)
with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
# Test that error is raised for input in selected correct choice.
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["Platypus"])]),
"correct"
)
with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
# Test that error is raised for input in selected incorrect choice.
self.assert_grade(
two_choice_two_input,
self._make_answer_dict([(True, ["1"]), (True, ["Platypus"])]),
"correct"
)
def test_staff_answer_error(self):
broken_problem = self._make_problem(
[("true", {"answer": "Platypus", "tolerance": "0"}),
("true", {"answer": "edX", "tolerance": "0"})
],
"checkboxtextgroup"
)
with self.assertRaisesRegexp(
StudentInputError,
"The Staff answer could not be interpreted as a number."
):
self.assert_grade(
broken_problem,
self._make_answer_dict(
[(True, ["1"]), (True, ["1"])]
),
"correct"
)
def test_radio_grades(self):
"""
Test that confirms correct operation of grading when the inputtag is
radiotextgroup.
"""
for name, inputs in self.TEST_INPUTS.iteritems():
# Turn submission into the form expected when grading this problem.
submission = self._make_answer_dict(inputs)
# Lookup the problem_name, and the whether this test problem
# and inputs should be graded as correct or incorrect.
problem_name, correctness = self.TEST_SCENARIOS[name]
# Load the args needed to build the problem for this test.
problem_args = self.TEST_PROBLEM_ARGS[problem_name]
test_choices = problem_args["choices"]
test_script = problem_args["script"]
# Build the actual problem for the test.
test_problem = self._make_problem(test_choices, 'radiotextgroup', test_script)
# Make sure the actual grade matches the expected grade.
self.assert_grade(
test_problem,
submission,
correctness,
msg="{0} should be {1}".format(
name,
correctness
)
)
def test_checkbox_grades(self):
"""
Test that confirms correct operation of grading when the inputtag is
checkboxtextgroup.
"""
# Dictionary from name of test_scenario to (problem_name, correctness)
# Correctness is used to test whether the problem was graded properly
scenarios = {
"2_choices_correct": ("checkbox_two_choices", "correct"),
"2_choices_incorrect": ("checkbox_two_choices", "incorrect"),
"2_choices_2_inputs_correct": (
"checkbox_2_choices_2_inputs",
"correct"
),
"2_choices_2_inputs_missing_choice": (
"checkbox_2_choices_2_inputs",
"incorrect"
),
"2_choices_2_inputs_wrong_input": (
"checkbox_2_choices_2_inputs",
"incorrect"
)
}
# Dictionary scenario_name: test_inputs
inputs = {
"2_choices_correct": [(True, []), (True, [])],
"2_choices_incorrect": [(True, []), (False, [])],
"2_choices_2_inputs_correct": [(True, ["123"]), (True, ["456"])],
"2_choices_2_inputs_missing_choice": [
(True, ["123"]), (False, ["456"])
],
"2_choices_2_inputs_wrong_input": [
(True, ["123"]), (True, ["654"])
]
}
# Two choice zero input problem with both choices being correct.
checkbox_two_choices = self._make_problem(
[("true", {}), ("true", {})], "checkboxtextgroup"
)
# Two choice two input problem with both choices correct.
checkbox_two_choices_two_inputs = self._make_problem(
[("true", {"answer": "123", "tolerance": "0"}),
("true", {"answer": "456", "tolerance": "0"})
],
"checkboxtextgroup"
)
# Dictionary problem_name: problem
problems = {
"checkbox_two_choices": checkbox_two_choices,
"checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs
}
for name, inputs in inputs.iteritems():
submission = self._make_answer_dict(inputs)
# Load the test problem's name and desired correctness
problem_name, correctness = scenarios[name]
# Load the problem
problem = problems[problem_name]
# Make sure the actual grade matches the expected grade
self.assert_grade(
problem,
submission,
correctness,
msg="{0} should be {1}".format(name, correctness)
)

View File

@@ -776,6 +776,13 @@ class CapaModule(CapaFields, XModule):
then the output dict would contain {'1': ['test'] }
(the value is a list).
Some other inputs such as ChoiceTextInput expect a dict of values in the returned
dict. If the key ends with '{}', then we will assume that the value is a JSON-encoded
dict and deserialize it.
For example, if the `data` dict contains {'input_1{}': '{"1_2_1": 1}'}
then the output dict would contain {'1': {"1_2_1": 1} }
(the value is a dictionary)
Raises an exception if:
-A key in the `data` dictionary does not contain at least one underscore
@@ -802,11 +809,22 @@ class CapaModule(CapaFields, XModule):
# the same form input (e.g. checkbox inputs). The convention is that
# if the name ends with '[]' (which looks like an array), then the
# answer will be an array.
# if the name ends with '{}' (which looks like a dict),
# then the answer will be a dict
is_list_key = name.endswith('[]')
name = name[:-2] if is_list_key else name
is_dict_key = name.endswith('{}')
name = name[:-2] if is_list_key or is_dict_key else name
if is_list_key:
val = data.getlist(key)
elif is_dict_key:
try:
val = json.loads(data[key])
# If the submission wasn't deserializable, raise an error.
except(KeyError, ValueError):
raise ValueError(
u"Invalid submission: {val} for {key}".format(val=data[key], key=key)
)
else:
val = data[key]
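A minimal sketch of the '{}' handling described above (the helper name is illustrative and not part of the module; the real method also strips the 'input_' prefix and handles the '[]' list case via getlist):

import json

def deserialize_dict_key(name, raw_value):
    # Keys ending in '{}' carry a JSON-encoded dict as their value.
    if name.endswith('{}'):
        return name[:-2], json.loads(raw_value)
    return name, raw_value

# deserialize_dict_key('input_1{}', '{"1_2_1": 1}') -> ('input_1', {'1_2_1': 1})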

View File

@@ -13,7 +13,7 @@ import textwrap
log = logging.getLogger("mitx.courseware")
V1_SETTINGS_ATTRIBUTES = ["display_name", "attempts", "is_graded", "accept_file_upload",
V1_SETTINGS_ATTRIBUTES = ["display_name", "max_attempts", "graded", "accept_file_upload",
"skip_spelling_checks", "due", "graceperiod", "weight"]
V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state",
@@ -29,36 +29,124 @@ VERSION_TUPLES = {
DEFAULT_VERSION = 1
DEFAULT_DATA = textwrap.dedent("""\
<combinedopenended>
<combinedopenended>
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<rubric>
<category>
<description>Category 1</description>
<option>
The response does not incorporate what is needed for a one response.
</option>
<option>
The response is correct for category 1.
</option>
</category>
</rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
<prompt>
<p>Why is the sky blue?</p>
</prompt>
<task>
<selfassessment/>
</task>
<task>
<openended min_score_to_attempt="1" max_score_to_attempt="2">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
</rubric>
<task>
<selfassessment/></task>
<task>
<openended min_score_to_attempt="4" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
<task>
<openended min_score_to_attempt="9" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
""")
@@ -84,35 +172,63 @@ class CombinedOpenEndedFields(object):
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
default="Open Ended Grading",
default="Open Response Assessment",
scope=Scope.settings
)
current_task_number = Integer(help="Current task that the student is on.", default=0, scope=Scope.user_state)
task_states = List(help="List of state dictionaries of each task within this module.", scope=Scope.user_state)
state = String(help="Which step within the current task that the student is on.", default="initial",
scope=Scope.user_state)
student_attempts = Integer(help="Number of attempts taken by the student on this problem", default=0,
scope=Scope.user_state)
ready_to_reset = Boolean(
help="If the problem is ready to be reset or not.", default=False,
current_task_number = Integer(
help="Current task that the student is on.",
default=0,
scope=Scope.user_state
)
attempts = Integer(
display_name="Maximum Attempts",
help="The number of times the student can try to answer this problem.", default=1,
scope=Scope.settings, values={"min" : 1 }
task_states = List(
help="List of state dictionaries of each task within this module.",
scope=Scope.user_state
)
state = String(
help="Which step within the current task that the student is on.",
default="initial",
scope=Scope.user_state
)
graded = Boolean(
display_name="Graded",
help='Defines whether the student gets credit for grading this problem.',
default=False,
scope=Scope.settings
)
student_attempts = Integer(
help="Number of attempts taken by the student on this problem",
default=0,
scope=Scope.user_state
)
ready_to_reset = Boolean(
help="If the problem is ready to be reset or not.",
default=False,
scope=Scope.user_state
)
max_attempts = Integer(
display_name="Maximum Attempts",
help="The number of times the student can try to answer this problem.",
default=1,
scope=Scope.settings,
values={"min" : 1 }
)
is_graded = Boolean(display_name="Graded", help="Whether or not the problem is graded.", default=False, scope=Scope.settings)
accept_file_upload = Boolean(
display_name="Allow File Uploads",
help="Whether or not the student can submit files as a response.", default=False, scope=Scope.settings
help="Whether or not the student can submit files as a response.",
default=False,
scope=Scope.settings
)
skip_spelling_checks = Boolean(
display_name="Disable Quality Filter",
help="If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed.",
default=False, scope=Scope.settings
default=False,
scope=Scope.settings
)
due = Date(
help="Date that this problem is due by",
default=None,
scope=Scope.settings
)
due = Date(help="Date that this problem is due by", default=None, scope=Scope.settings)
graceperiod = String(
help="Amount of time after the due date that submissions will be accepted",
default=None,
@@ -124,22 +240,51 @@ class CombinedOpenEndedFields(object):
weight = Float(
display_name="Problem Weight",
help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
scope=Scope.settings, values={"min" : 0 , "step": ".1"}
scope=Scope.settings,
values={"min" : 0 , "step": ".1"},
default=1
)
markdown = String(
help="Markdown source of this module",
default=textwrap.dedent("""\
[rubric]
+ Category 1
- The response does not incorporate what is needed for a one response.
- The response is correct for category 1.
[rubric]
[prompt]
<p>Why is the sky blue?</p>
[prompt]
[tasks]
(Self), ({1-2}AI)
[tasks]
[prompt]
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
[prompt]
[rubric]
+ Ideas
- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
+ Content
- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
- Includes little information and few or no details. Explores only one or two facets of the topic.
- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
+ Organization
- Ideas organized illogically, transitions weak, and response difficult to follow.
- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
+ Style
- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
+ Voice
- Demonstrates language and tone that may be inappropriate to task and reader.
- Demonstrates an attempt to adjust language and tone to task and reader.
- Demonstrates effective adjustment of language and tone to task and reader.
[rubric]
[tasks]
(Self), ({4-12}AI), ({9-12}Peer)
[tasks]
"""),
scope=Scope.settings
)
@@ -194,37 +339,9 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
def __init__(self, *args, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
Definition file should have one or many task blocks, a rubric block, and a prompt block.
Sample file:
<combinedopenended attempts="10000">
<rubric>
Blah blah rubric.
</rubric>
<prompt>
Some prompt.
</prompt>
<task>
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Succcesful. Thanks for participating!
</submitmessage>
</selfassessment>
</task>
<task>
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf",
"problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
See DEFAULT_DATA for a sample.
"""
XModule.__init__(self, *args, **kwargs)
@@ -291,6 +408,7 @@ class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
has_score = True
always_recalculate_grades = True
template_dir_name = "combinedopenended"
#Specify whether or not to pass in S3 interface
needs_s3_interface = True
@@ -304,6 +422,11 @@ class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
js_module_name = "OpenEndedMarkdownEditingDescriptor"
css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/combinedopenended/edit.scss')]}
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
}
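# Note: metadata_translations maps legacy metadata names from existing course
# XML onto the renamed fields above, so content authored with is_graded /
# attempts loads into the new graded / max_attempts fields.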
def get_context(self):
_context = RawDescriptor.get_context(self)
_context.update({'markdown': self.markdown,

View File

@@ -929,4 +929,32 @@ section.problem {
}
}
}
.choicetextgroup{
input[type="text"]{
margin-bottom: 0.5em;
}
@extend .choicegroup;
label.choicetextgroup_correct, section.choicetextgroup_correct{
@extend label.choicegroup_correct;
input[type="text"] {
border-color: green;
}
}
label.choicetextgroup_incorrect, section.choicetextgroup_incorrect{
@extend label.choicegroup_incorrect;
}
label.choicetextgroup_show_correct, section.choicetextgroup_show_correct{
&:after{
content: url('../images/correct-icon.png');
margin-left:15px;
}
}
span.mock_label{
cursor : default;
}
}
}

View File

@@ -58,8 +58,7 @@ class Date(ModelType):
else:
msg = "Field {0} has bad value '{1}'".format(
self._name, field)
log.warning(msg)
return None
raise TypeError(msg)
def to_json(self, value):
"""
@@ -76,6 +75,8 @@ class Date(ModelType):
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
else:
return value.isoformat()
else:
raise TypeError("Cannot convert {} to json".format(value))
TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')

View File

@@ -223,6 +223,58 @@ describe 'Problem', ->
expect($('label[for="input_1_1_3"]')).toHaveAttr 'correct_answer', 'true'
expect($('label[for="input_1_2_1"]')).not.toHaveAttr 'correct_answer', 'true'
describe 'radio text question', ->
radio_text_xml='''
<section class="problem">
<div><p></p><span><section id="choicetextinput_1_2_1" class="choicetextinput">
<form class="choicetextgroup capa_inputtype" id="inputtype_1_2_1">
<div class="indicator_container">
<span class="unanswered" style="display:inline-block;" id="status_1_2_1"></span>
</div>
<fieldset>
<section id="forinput1_2_1_choiceinput_0bc">
<input class="ctinput" type="radio" name="choiceinput_1_2_1" id="1_2_1_choiceinput_0bc" value="choiceinput_0">
<input class="ctinput" type="text" name="choiceinput_0_textinput_0" id="1_2_1_choiceinput_0_textinput_0" value=" ">
<p id="answer_1_2_1_choiceinput_0bc" class="answer"></p>
</section>
<section id="forinput1_2_1_choiceinput_1bc">
<input class="ctinput" type="radio" name="choiceinput_1_2_1" id="1_2_1_choiceinput_1bc" value="choiceinput_1" >
<input class="ctinput" type="text" name="choiceinput_1_textinput_0" id="1_2_1_choiceinput_1_textinput_0" value=" " >
<p id="answer_1_2_1_choiceinput_1bc" class="answer"></p>
</section>
<section id="forinput1_2_1_choiceinput_2bc">
<input class="ctinput" type="radio" name="choiceinput_1_2_1" id="1_2_1_choiceinput_2bc" value="choiceinput_2" >
<input class="ctinput" type="text" name="choiceinput_2_textinput_0" id="1_2_1_choiceinput_2_textinput_0" value=" " >
<p id="answer_1_2_1_choiceinput_2bc" class="answer"></p>
</section></fieldset><input class="choicetextvalue" type="hidden" name="input_1_2_1" id="input_1_2_1"></form>
</section></span></div>
</section>
'''
beforeEach ->
# Append a radiotextresponse problem to the problem, so we can check its JavaScript functionality
@problem.el.prepend(radio_text_xml)
it 'sets the correct class on the section for the correct choice', ->
spyOn($, 'postWithPrefix').andCallFake (url, callback) ->
callback answers: "1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"
@problem.show()
expect($('#forinput1_2_1_choiceinput_0bc').attr('class')).toEqual(
'choicetextgroup_show_correct')
expect($('#answer_1_2_1_choiceinput_0bc').text()).toEqual('3')
expect($('#answer_1_2_1_choiceinput_1bc').text()).toEqual('')
expect($('#answer_1_2_1_choiceinput_2bc').text()).toEqual('')
it 'Should not disable input fields', ->
spyOn($, 'postWithPrefix').andCallFake (url, callback) ->
callback answers: "1_2_1": ["1_2_1_choiceinput_0bc"], "1_2_1_choiceinput_0bc": "3"
@problem.show()
expect($('input#1_2_1_choiceinput_0bc').attr('disabled')).not.toEqual('disabled')
expect($('input#1_2_1_choiceinput_1bc').attr('disabled')).not.toEqual('disabled')
expect($('input#1_2_1_choiceinput_2bc').attr('disabled')).not.toEqual('disabled')
expect($('input#1_2_1').attr('disabled')).not.toEqual('disabled')
describe 'when the answers are already shown', ->
beforeEach ->
@problem.el.addClass 'showed'

View File

@@ -403,6 +403,14 @@ class @Problem
answer = JSON.parse(answers[answer_id])
display.showAnswer(answer)
choicetextgroup: (element, display, answers) =>
element = $(element)
input_id = element.attr('id').replace(/inputtype_/,'')
answer = answers[input_id]
for choice in answer
element.find("section#forinput#{choice}").addClass 'choicetextgroup_show_correct'
inputtypeHideAnswerMethods:
choicegroup: (element, display) =>
element = $(element)
@@ -410,3 +418,7 @@ class @Problem
javascriptinput: (element, display) =>
display.hideAnswer()
choicetextgroup: (element, display) =>
element = $(element)
element.find("section[id^='forinput']").removeClass('choicetextgroup_show_correct')

View File

@@ -50,6 +50,10 @@ Write a persuasive essay to a newspaper reflecting your vies on censorship in li
mode: null
})
@setCurrentEditor(@markdown_editor)
selection = @markdown_editor.getSelection()
#Auto-add in the needed template if it isn't already in there.
if(@markdown_editor.getValue() == "")
@markdown_editor.setValue(OpenEndedMarkdownEditingDescriptor.promptTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.rubricTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.tasksTemplate)
# Add listeners for toolbar buttons (only present for markdown editor)
@element.on('click', '.xml-tab', @onShowXMLButton)
@element.on('click', '.format-buttons a', @onToolbarButton)

View File

@@ -105,6 +105,15 @@ class MongoKeyValueStore(KeyValueStore):
else:
raise InvalidScopeError(key.scope)
def set_many(self, update_dict):
"""set_many method. Implementations should accept an `update_dict` of
key-value pairs, and set all the `keys` to the given `value`s."""
# `set` simply updates an in-memory db, rather than calling down to a real db,
# as mongo bulk save is handled elsewhere. A future improvement would be to pull
# the mongo-specific bulk save logic into this method.
for key, value in update_dict.iteritems():
self.set(key, value)
def delete(self, key):
if key.scope == Scope.children:
self._children = []
@@ -639,6 +648,8 @@ class MongoModuleStore(ModuleStoreBase):
:param xmodule:
"""
# Save any changes to the xmodule to the MongoKeyValueStore
xmodule.save()
# split mongo's persist_dag is more general and useful.
self.collection.save({
'_id': xmodule.location.dict(),
@@ -683,6 +694,8 @@ class MongoModuleStore(ModuleStoreBase):
'url_slug': new_object.location.name
})
course.tabs = existing_tabs
# Save any changes to the course to the MongoKeyValueStore
course.save()
self.update_metadata(course.location, course.xblock_kvs._metadata)
def fire_updated_modulestore_signal(self, course_id, location):
@@ -789,6 +802,8 @@ class MongoModuleStore(ModuleStoreBase):
tab['name'] = metadata.get('display_name')
break
course.tabs = existing_tabs
# Save the updates to the course to the MongoKeyValueStore
course.save()
self.update_metadata(course.location, own_metadata(course))
self._update_single_item(location, {'metadata': metadata})
@@ -811,6 +826,8 @@ class MongoModuleStore(ModuleStoreBase):
course = self.get_course_for_item(item.location)
existing_tabs = course.tabs or []
course.tabs = [tab for tab in existing_tabs if tab.get('url_slug') != location.name]
# Save the updates to the course to the MongoKeyValueStore
course.save()
self.update_metadata(course.location, own_metadata(course))
# Must include this to avoid the django debug toolbar (which defines the deprecated "safe=False")

View File

@@ -165,34 +165,31 @@ class ModuleStoreTestCase(TestCase):
# Call superclass implementation
super(ModuleStoreTestCase, self)._post_teardown()
def assert2XX(self, status_code, msg=None):
"""
Assert that the given value is a success status (between 200 and 299)
"""
if not 200 <= status_code < 300:
msg = self._formatMessage(msg, "%s is not a success status" % safe_repr(status_code))
raise self.failureExecption(msg)
msg = self._formatMessage(msg, "%s is not a success status" % safe_repr(status_code))
self.assertTrue(status_code >= 200 and status_code < 300, msg=msg)
def assert3XX(self, status_code, msg=None):
"""
Assert that the given value is a redirection status (between 300 and 399)
"""
if not 300 <= status_code < 400:
msg = self._formatMessage(msg, "%s is not a redirection status" % safe_repr(status_code))
raise self.failureExecption(msg)
msg = self._formatMessage(msg, "%s is not a redirection status" % safe_repr(status_code))
self.assertTrue(status_code >= 300 and status_code < 400, msg=msg)
def assert4XX(self, status_code, msg=None):
"""
Assert that the given value is a client error status (between 400 and 499)
"""
if not 400 <= status_code < 500:
msg = self._formatMessage(msg, "%s is not a client error status" % safe_repr(status_code))
raise self.failureExecption(msg)
msg = self._formatMessage(msg, "%s is not a client error status" % safe_repr(status_code))
self.assertTrue(status_code >= 400 and status_code < 500, msg=msg)
def assert5XX(self, status_code, msg=None):
"""
Assert that the given value is a server error status (between 500 and 599)
"""
if not 500 <= status_code < 600:
msg = self._formatMessage(msg, "%s is not a server error status" % safe_repr(status_code))
raise self.failureExecption(msg)
msg = self._formatMessage(msg, "%s is not a server error status" % safe_repr(status_code))
self.assertTrue(status_code >= 500 and status_code < 600, msg=msg)

View File

@@ -135,7 +135,6 @@ class XModuleItemFactory(Factory):
# replace the display name with an optional parameter passed in from the caller
if display_name is not None:
metadata['display_name'] = display_name
# note that location comes from above lazy_attribute
store.create_and_save_xmodule(location, metadata=metadata, definition_data=data)
if location.category not in DETACHED_CATEGORIES:

View File

@@ -194,6 +194,10 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem):
if hasattr(descriptor, 'children'):
for child in descriptor.get_children():
parent_tracker.add_parent(child.location, descriptor.location)
# After setting up the descriptor, save any changes that we have
# made to attributes on the descriptor to the underlying KeyValueStore.
descriptor.save()
return descriptor
render_template = lambda: ''

View File

@@ -80,37 +80,7 @@ class CombinedOpenEndedV1Module():
instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
Sample file:
<combinedopenended attempts="10000">
<rubric>
Blah blah rubric.
</rubric>
<prompt>
Some prompt.
</prompt>
<task>
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Succcesful. Thanks for participating!
</submitmessage>
</selfassessment>
</task>
<task>
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf",
"problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
Definition file should have one or many task blocks, a rubric block, and a prompt block. See DEFAULT_DATA in combined_open_ended_module for a sample.
"""
@@ -133,14 +103,14 @@ class CombinedOpenEndedV1Module():
# Allow reset is true if student has failed the criteria to move to the next child task
self.ready_to_reset = instance_state.get('ready_to_reset', False)
self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS)
self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT
self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
self.max_attempts = instance_state.get('max_attempts', MAX_ATTEMPTS)
self.is_scored = instance_state.get('graded', IS_SCORED) in TRUE_DICT
self.accept_file_upload = instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
self.skip_basic_checks = instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
due_date = self.instance_state.get('due', None)
due_date = instance_state.get('due', None)
grace_period_string = self.instance_state.get('graceperiod', None)
grace_period_string = instance_state.get('graceperiod', None)
try:
self.timeinfo = TimeInfo(due_date, grace_period_string)
except Exception:
@@ -155,7 +125,7 @@ class CombinedOpenEndedV1Module():
# Static data is passed to the child modules to render
self.static_data = {
'max_score': self._max_score,
'max_attempts': self.attempts,
'max_attempts': self.max_attempts,
'prompt': definition['prompt'],
'rubric': definition['rubric'],
'display_name': self.display_name,
@@ -658,15 +628,18 @@ class CombinedOpenEndedV1Module():
if not self.ready_to_reset:
return self.out_of_sync_error(data)
if self.student_attempts > self.attempts:
if self.student_attempts >= self.max_attempts-1:
if self.student_attempts==self.max_attempts-1:
self.student_attempts +=1
return {
'success': False,
# This is a student_facing_error
'error': (
'You have attempted this question {0} times. '
'You are only allowed to attempt it {1} times.'
).format(self.student_attempts, self.attempts)
).format(self.student_attempts, self.max_attempts)
}
self.student_attempts +=1
self.state = self.INITIAL
self.ready_to_reset = False
for i in xrange(0, len(self.task_xml)):
@@ -742,7 +715,12 @@ class CombinedOpenEndedV1Module():
"""
max_score = None
score = None
if self.is_scored and self.weight is not None:
#The old default was None, so set to 1 if it is the old default weight
weight = self.weight
if weight is None:
weight = 1
if self.is_scored:
# Finds the maximum score of all student attempts and keeps it.
score_mat = []
for i in xrange(0, len(self.task_states)):
@@ -755,7 +733,7 @@ class CombinedOpenEndedV1Module():
for z in xrange(0, len(score)):
if score[z] is None:
score[z] = 0
score[z] *= float(self.weight)
score[z] *= float(weight)
score_mat.append(score)
if len(score_mat) > 0:
@@ -769,7 +747,7 @@ class CombinedOpenEndedV1Module():
if max_score is not None:
# Weight the max score if it is not None
max_score *= float(self.weight)
max_score *= float(weight)
else:
# Without a max_score, we cannot have a score!
score = None

View File

@@ -9,6 +9,7 @@ from .capa_module import ComplexEncoder
from .x_module import XModule
from xmodule.raw_module import RawDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .timeinfo import TimeInfo
from xblock.core import Dict, String, Scope, Boolean, Integer, Float
from xmodule.fields import Date
@@ -19,36 +20,37 @@ from django.utils.timezone import UTC
log = logging.getLogger(__name__)
USE_FOR_SINGLE_LOCATION = False
LINK_TO_LOCATION = ""
MAX_SCORE = 1
IS_GRADED = False
EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff."
class PeerGradingFields(object):
use_for_single_location = Boolean(
display_name="Show Single Problem",
help='When True, only the single problem specified by "Link to Problem Location" is shown. '
'When False, a panel is displayed with all problems available for peer grading.',
default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings
default=False,
scope=Scope.settings
)
link_to_location = String(
display_name="Link to Problem Location",
help='The location of the problem being graded. Only used when "Show Single Problem" is True.',
default=LINK_TO_LOCATION, scope=Scope.settings
default="",
scope=Scope.settings
)
is_graded = Boolean(
graded = Boolean(
display_name="Graded",
help='Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.',
default=IS_GRADED, scope=Scope.settings
default=False,
scope=Scope.settings
)
due_date = Date(help="Due date that should be displayed.", default=None, scope=Scope.settings)
grace_period_string = String(help="Amount of grace to give on the due date.", default=None, scope=Scope.settings)
max_grade = Integer(
help="The maximum grade that a student can receive for this problem.", default=MAX_SCORE,
scope=Scope.settings, values={"min": 0}
due = Date(
help="Due date that should be displayed.",
default=None,
scope=Scope.settings)
grace_period_string = String(
help="Amount of grace to give on the due date.",
default=None,
scope=Scope.settings
)
student_data_for_location = Dict(
help="Student data for a given peer grading problem.",
@@ -57,7 +59,8 @@ class PeerGradingFields(object):
weight = Float(
display_name="Problem Weight",
help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
scope=Scope.settings, values={"min": 0, "step": ".1"}
scope=Scope.settings, values={"min": 0, "step": ".1"},
default=1
)
display_name = String(
display_name="Display Name",
@@ -98,35 +101,31 @@ class PeerGradingModule(PeerGradingFields, XModule):
if self.use_for_single_location:
try:
self.linked_problem = modulestore().get_instance(self.system.course_id, self.link_to_location)
except:
except ItemNotFoundError:
log.error("Linked location {0} for peer grading module {1} does not exist".format(
self.link_to_location, self.location))
raise
due_date = self.linked_problem._model_data.get('peer_grading_due', None)
due_date = self.linked_problem._model_data.get('due', None)
if due_date:
self._model_data['due'] = due_date
try:
self.timeinfo = TimeInfo(self.due_date, self.grace_period_string)
except:
log.error("Error parsing due date information in location {0}".format(location))
self.timeinfo = TimeInfo(self.due, self.grace_period_string)
except Exception:
log.error("Error parsing due date information in location {0}".format(self.location))
raise
self.display_due_date = self.timeinfo.display_due_date
try:
self.student_data_for_location = json.loads(self.student_data_for_location)
except:
except Exception:
pass
self.ajax_url = self.system.ajax_url
if not self.ajax_url.endswith("/"):
self.ajax_url = self.ajax_url + "/"
# Integer could return None, so keep this check.
if not isinstance(self.max_grade, int):
raise TypeError("max_grade needs to be an integer.")
def closed(self):
return self._closed(self.timeinfo)
@@ -210,11 +209,16 @@ class PeerGradingModule(PeerGradingFields, XModule):
def get_score(self):
max_score = None
score = None
weight = self.weight
#The old default was None, so set to 1 if it is the old default weight
if weight is None:
weight = 1
score_dict = {
'score': score,
'total': max_score,
}
if not self.use_for_single_location or not self.is_graded:
if not self.use_for_single_location or not self.graded:
return score_dict
try:
@@ -234,11 +238,10 @@ class PeerGradingModule(PeerGradingFields, XModule):
# Ensures that once a student receives a final score for peer grading, that it does not change.
self.student_data_for_location = response
if self.weight is not None:
score = int(count_graded >= count_required and count_graded > 0) * float(self.weight)
total = self.max_grade * float(self.weight)
score_dict['score'] = score
score_dict['total'] = total
score = int(count_graded >= count_required and count_graded > 0) * float(weight)
total = float(weight)
score_dict['score'] = score
score_dict['total'] = total
return score_dict
@@ -249,8 +252,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
randomization, and 5/7 on another
'''
max_grade = None
if self.use_for_single_location and self.is_graded:
max_grade = self.max_grade
if self.use_for_single_location and self.graded:
max_grade = self.weight
return max_grade
def get_next_submission(self, data):
@@ -531,7 +534,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
problem_location = problem['location']
descriptor = _find_corresponding_module_for_location(problem_location)
if descriptor:
problem['due'] = descriptor._model_data.get('peer_grading_due', None)
problem['due'] = descriptor._model_data.get('due', None)
grace_period_string = descriptor._model_data.get('graceperiod', None)
try:
problem_timeinfo = TimeInfo(problem['due'], grace_period_string)
@@ -618,9 +621,14 @@ class PeerGradingDescriptor(PeerGradingFields, RawDescriptor):
#Specify whether or not to pass in open ended interface
needs_open_ended_interface = True
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
'due_data' : 'due'
}
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([PeerGradingFields.due_date, PeerGradingFields.grace_period_string,
PeerGradingFields.max_grade])
non_editable_fields.extend([PeerGradingFields.due, PeerGradingFields.grace_period_string])
return non_editable_fields

View File

@@ -20,22 +20,22 @@ data: |
<draggable id="11" label="few"/>
</drag_and_drop_input>
<answer type="loncapa/python">
correct_answer = {
'1': [[70, 150], 121],
'6': [[190, 150], 121],
'8': [[190, 150], 121],
'2': [[310, 150], 121],
'9': [[310, 150], 121],
'11': [[310, 150], 121],
'4': [[420, 150], 121],
'7': [[420, 150], 121],
'3': [[550, 150], 121],
'5': [[550, 150], 121],
'10': [[550, 150], 121]}
if draganddrop.grade(submission[0], correct_answer):
correct = ['correct']
else:
correct = ['incorrect']
correct_answer = {
'1': [[70, 150], 121],
'6': [[190, 150], 121],
'8': [[190, 150], 121],
'2': [[310, 150], 121],
'9': [[310, 150], 121],
'11': [[310, 150], 121],
'4': [[420, 150], 121],
'7': [[420, 150], 121],
'3': [[550, 150], 121],
'5': [[550, 150], 121],
'10': [[550, 150], 121]}
if draganddrop.grade(submission[0], correct_answer):
correct = ['correct']
else:
correct = ['incorrect']
</answer>
</customresponse>
</problem>

View File

@@ -48,7 +48,6 @@ metadata:
\edXabox{type="custom" cfn='test_str' expect='python' hintfn='hint_fn'}
markdown: !!null
data: |
<?xml version="1.0"?>
<problem>
<text>
<p>

File diff suppressed because one or more lines are too long

View File

@@ -217,8 +217,11 @@ class ConditionalModuleXmlTest(unittest.TestCase):
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# now change state of the capa problem to make it completed
inner_get_module(Location('i4x://HarvardX/ER22x/problem/choiceprob')).attempts = 1
# Now change state of the capa problem to make it completed
inner_module = inner_get_module(Location('i4x://HarvardX/ER22x/problem/choiceprob'))
inner_module.attempts = 1
# Save our modifications to the underlying KeyValueStore so they can be persisted
inner_module.save()
ajax = json.loads(module.handle_ajax('', ''))
print "post-attempt ajax: ", ajax

View File

@@ -44,7 +44,8 @@ class DateTest(unittest.TestCase):
def test_return_None(self):
self.assertIsNone(DateTest.date.from_json(""))
self.assertIsNone(DateTest.date.from_json(None))
self.assertIsNone(DateTest.date.from_json(['unknown value']))
with self.assertRaises(TypeError):
DateTest.date.from_json(['unknown value'])
def test_old_due_date_format(self):
current = datetime.datetime.today()
@@ -83,6 +84,8 @@ class DateTest(unittest.TestCase):
DateTest.date.to_json(
DateTest.date.from_json("2012-12-31T23:00:01-01:00")),
"2012-12-31T23:00:01-01:00")
with self.assertRaises(TypeError):
DateTest.date.to_json('2012-12-31T23:00:01-01:00')
class TimedeltaTest(unittest.TestCase):

View File

@@ -0,0 +1,75 @@
(function () {
var update = function () {
// Whenever a value changes, create a new serialized version of this
// problem's inputs and set the hidden input field's value to equal it.
var parent = $(this).closest('.problems-wrapper');
// find the closest parent problems-wrapper and use that as the problem
// grab the input id from the input
// real_input is the hidden input field
var real_input = $('input.choicetextvalue', parent);
var all_inputs = $('.choicetextinput .ctinput', parent);
var user_inputs = {};
$(all_inputs).each(function (index, elt) {
var node = $(elt);
var name = node.attr('id');
var val = node.val();
var radio_value = node.attr('value');
var type = node.attr('type');
var is_checked = node.attr('checked');
if (type === "radio" || type === "checkbox") {
if (is_checked === "checked" || is_checked === "true") {
user_inputs[name] = radio_value;
}
} else {
user_inputs[name] = val;
}
});
var val_string = JSON.stringify(user_inputs);
// This is what gets submitted as the answer; we deserialize it later
real_input.val(val_string);
};
var check_parent = function (event) {
// This looks for the containing choice of a textinput
// and sets it to be checked.
var elt = $(event.target);
var parent_container = elt.closest('section[id^="forinput"]');
var choice = parent_container.find("input[type='checkbox'], input[type='radio']");
choice.attr("checked", "checked");
choice.change();
// Need to check it, then trigger the change event
};
var imitate_label = function (event) {
// This causes a section to check and uncheck
// a radiobutton/checkbox whenever a user clicks on it
// If the button/checkbox is disabled, nothing happens
var elt = $(event.target);
var parent_container = elt.closest('section[id^="forinput"]');
var choice = parent_container.find("input[type='checkbox'], input[type='radio']");
if (choice.attr("type") === "radio") {
choice.attr("checked", "checked");
} else {
if (choice.attr('checked')) {
choice.prop("checked", false);
} else {
choice.prop("checked", true);
}
}
choice.change();
update();
};
var choices = $('.mock_label');
var inputs = $('.choicetextinput .ctinput');
var text_inputs = $('.choicetextinput .ctinput[type="text"]');
// update on load
inputs.each(update);
// This allows text inside of choices to behave as if they were part of
// a label for the choice's button/checkbox
choices.click(imitate_label);
// and on every change
inputs.bind("change", update);
text_inputs.click(check_parent);
}).call(this);

View File

@@ -0,0 +1,24 @@
<combinedopenended attempts="1" display_name = "Humanities Question -- Machine Assessed">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
</combinedopenended>

View File

@@ -1,6 +1,7 @@
<course>
<chapter url_name="Overview">
<combinedopenended url_name="SampleQuestion"/>
<combinedopenended url_name="SampleQuestion1Attempt"/>
<peergrading url_name="PeerGradingSample"/>
<peergrading url_name="PeerGradingScored"/>
</chapter>

View File

@@ -21,6 +21,8 @@ Feature: Answer problems
| formula |
| script |
| code |
| radio_text |
| checkbox_text |
Scenario: I can answer a problem incorrectly
Given External graders respond "incorrect"
@@ -40,6 +42,8 @@ Feature: Answer problems
| formula |
| script |
| code |
| radio_text |
| checkbox_text |
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
@@ -57,6 +61,8 @@ Feature: Answer problems
| numerical |
| formula |
| script |
| radio_text |
| checkbox_text |
Scenario: I can reset a problem
@@ -84,6 +90,10 @@ Feature: Answer problems
| formula | incorrect |
| script | correct |
| script | incorrect |
| radio_text | correct |
| radio_text | incorrect |
| checkbox_text | correct |
| checkbox_text | incorrect |
Scenario: I can answer a problem with one attempt correctly and not reset

View File

@@ -18,7 +18,7 @@ from capa.tests.response_xml_factory import OptionResponseXMLFactory, \
ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \
StringResponseXMLFactory, NumericalResponseXMLFactory, \
FormulaResponseXMLFactory, CustomResponseXMLFactory, \
CodeResponseXMLFactory
CodeResponseXMLFactory, ChoiceTextResponseXMLFactory
from nose.tools import assert_true
@@ -131,6 +131,32 @@ PROBLEM_DICT = {
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', },
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'radio_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'radiotextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['section.choicetextgroup_correct'],
'incorrect': ['span.incorrect', 'section.choicetextgroup_incorrect'],
'unanswered': ['span.unanswered']},
'checkbox_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'checkboxtextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']}
}
@@ -196,6 +222,19 @@ def answer_problem(problem_type, correctness):
# (configured in the problem XML above)
pass
elif problem_type == 'radio_text' or problem_type == 'checkbox_text':
input_value = "8" if correctness == 'correct' else "5"
choice = "choiceinput_0bc" if correctness == 'correct' else "choiceinput_1bc"
world.css_check(inputfield(problem_type, choice=choice))
world.css_fill(
inputfield(
problem_type,
choice="choiceinput_0_numtolerance_input_0"
),
input_value
)
def problem_has_answer(problem_type, answer_class):
if problem_type == "drop down":
@@ -244,6 +283,17 @@ def problem_has_answer(problem_type, answer_class):
expected = "x^2+2*x+y" if answer_class == 'correct' else 'x^2'
assert_textfield('formula', expected)
elif problem_type in ("radio_text", "checkbox_text"):
if answer_class == 'blank':
expected = ('', '')
assert_choicetext_values(problem_type, (), expected)
elif answer_class == 'incorrect':
expected = ('5', '')
assert_choicetext_values(problem_type, ["choiceinput_1bc"], expected)
else:
expected = ('8', '')
assert_choicetext_values(problem_type, ["choiceinput_0bc"], expected)
else:
# The other response types use random data,
# which would be difficult to check
@@ -292,6 +342,12 @@ def inputfield(problem_type, choice=None, input_num=1):
sel = ("input#input_i4x-edx-model_course-problem-%s_2_%s" %
(problem_type.replace(" ", "_"), str(input_num)))
# This is necessary due to the naming requirements for this problem type
if problem_type in ("radio_text", "checkbox_text"):
sel = "input#i4x-edx-model_course-problem-{0}_2_{1}".format(
problem_type.replace(" ", "_"), str(input_num)
)
if choice is not None:
base = "_choice_" if problem_type == "multiple choice" else "_"
sel = sel + base + str(choice)
@@ -325,3 +381,29 @@ def assert_checked(problem_type, choices):
def assert_textfield(problem_type, expected_text, input_num=1):
element_value = world.css_value(inputfield(problem_type, input_num=input_num))
assert element_value == expected_text
def assert_choicetext_values(problem_type, choices, expected_values):
"""
Asserts that only the given choices are checked, and that the given
text fields have the desired values
"""
# Names of the radio buttons or checkboxes
all_choices = ['choiceinput_0bc', 'choiceinput_1bc']
# Names of the numtolerance_inputs
all_inputs = [
"choiceinput_0_numtolerance_input_0",
"choiceinput_1_numtolerance_input_0"
]
for this_choice in all_choices:
element = world.css_find(inputfield(problem_type, choice=this_choice))
if this_choice in choices:
assert element.checked
else:
assert not element.checked
for (name, expected) in zip(all_inputs, expected_values):
element = world.css_find(inputfield(problem_type, name))
# Remove any trailing spaces that may have been added
assert element.value.strip() == expected

View File

@@ -0,0 +1,239 @@
"""A command to clean the StudentModuleHistory table.
When we added XBlock storage, each field modification wrote a new history row
to the db. Now that we have bulk saves to avoid that database hammering, we
need to clean out the unnecessary rows from the database.
This command does that.
"""
import datetime
import json
import logging
import optparse
import time
import traceback
from django.core.management.base import NoArgsCommand
from django.db import connection
class Command(NoArgsCommand):
"""The actual clean_history command to clean history rows."""
help = "Deletes unneeded rows from the StudentModuleHistory table."
option_list = NoArgsCommand.option_list + (
optparse.make_option(
'--batch',
type='int',
default=100,
help="Batch size, number of module_ids to examine in a transaction.",
),
optparse.make_option(
'--dry-run',
action='store_true',
default=False,
help="Don't change the database, just show what would be done.",
),
optparse.make_option(
'--sleep',
type='float',
default=0,
help="Seconds to sleep between batches.",
),
)
def handle_noargs(self, **options):
# We don't want to see the SQL output from the db layer.
logging.getLogger("django.db.backends").setLevel(logging.INFO)
smhc = StudentModuleHistoryCleaner(
dry_run=options["dry_run"],
)
smhc.main(batch_size=options["batch"], sleep=options["sleep"])
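A minimal sketch of driving the cleaner class below directly instead of through the management command, assuming the same option defaults as above:

from courseware.management.commands.clean_history import StudentModuleHistoryCleaner

cleaner = StudentModuleHistoryCleaner(dry_run=True)   # report what would be deleted, change nothing
cleaner.main(batch_size=500, sleep=0.25)              # 500 module ids per transaction, brief pause between batches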
class StudentModuleHistoryCleaner(object):
"""Logic to clean rows from the StudentModuleHistory table."""
DELETE_GAP_SECS = 0.5 # Rows this close can be discarded.
STATE_FILE = "clean_history.json"
BATCH_SIZE = 100
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.next_student_module_id = 0
self.last_student_module_id = 0
def main(self, batch_size=None, sleep=0):
"""Invoked from the management command to do all the work."""
batch_size = batch_size or self.BATCH_SIZE
connection.enter_transaction_management()
self.last_student_module_id = self.get_last_student_module_id()
self.load_state()
while self.next_student_module_id <= self.last_student_module_id:
for smid in self.module_ids_to_check(batch_size):
try:
self.clean_one_student_module(smid)
except Exception: # pylint: disable=W0703
trace = traceback.format_exc()
self.say("Couldn't clean student_module_id {}:\n{}".format(smid, trace))
if not self.dry_run:
self.commit()
self.save_state()
if sleep:
time.sleep(sleep)
def say(self, message):
"""
Display a message to the user.
The message will have a trailing newline added to it.
"""
print message
def commit(self):
"""
Commit the transaction.
"""
self.say("Committing")
connection.commit()
def load_state(self):
"""
Load the latest state from disk.
"""
try:
state_file = open(self.STATE_FILE)
except IOError:
self.say("No stored state")
self.next_student_module_id = 0
else:
with state_file:
state = json.load(state_file)
self.say(
"Loaded stored state: {}".format(
json.dumps(state, sort_keys=True)
)
)
self.next_student_module_id = state['next_student_module_id']
def save_state(self):
"""
Save the state to disk.
"""
state = {
'next_student_module_id': self.next_student_module_id,
}
with open(self.STATE_FILE, "w") as state_file:
json.dump(state, state_file)
self.say("Saved state: {}".format(json.dumps(state, sort_keys=True)))
def get_last_student_module_id(self):
"""
Return the id of the last student_module.
"""
cursor = connection.cursor()
cursor.execute("""
SELECT max(student_module_id) FROM courseware_studentmodulehistory
""")
last = cursor.fetchone()[0]
self.say("Last student_module_id is {}".format(last))
return last
def module_ids_to_check(self, batch_size):
"""Produce a sequence of student module ids to check.
`batch_size` is how many module ids to produce, max.
The sequence starts with `next_student_module_id`, and goes up to
and including `last_student_module_id`.
`next_student_module_id` is updated as each id is yielded.
"""
start = self.next_student_module_id
for smid in range(start, start+batch_size):
if smid > self.last_student_module_id:
break
yield smid
self.next_student_module_id = smid+1
def get_history_for_student_modules(self, student_module_id):
"""
Get the history rows for a student module.
```student_module_id```: the id of the student module we're
interested in.
Return a list: [(id, created), ...], all the rows of history.
"""
cursor = connection.cursor()
cursor.execute("""
SELECT id, created FROM courseware_studentmodulehistory
WHERE student_module_id = %s
ORDER BY created, id
""",
[student_module_id]
)
history = cursor.fetchall()
return history
def delete_history(self, ids_to_delete):
"""
Delete history rows.
```ids_to_delete```: a non-empty list (or set...) of history row ids to delete.
"""
assert ids_to_delete
cursor = connection.cursor()
cursor.execute("""
DELETE FROM courseware_studentmodulehistory
WHERE id IN ({ids})
""".format(ids=",".join(str(i) for i in ids_to_delete))
)
def clean_one_student_module(self, student_module_id):
"""Clean one StudentModule's-worth of history.
`student_module_id`: the id of the StudentModule to process.
"""
delete_gap = datetime.timedelta(seconds=self.DELETE_GAP_SECS)
history = self.get_history_for_student_modules(student_module_id)
if not history:
self.say("No history for student_module_id {}".format(student_module_id))
return
ids_to_delete = []
next_created = None
for history_id, created in reversed(history):
if next_created is not None:
# Compare this timestamp with the next one.
if (next_created - created) < delete_gap:
# This row is followed closely by another, so we can
# discard this one.
ids_to_delete.append(history_id)
next_created = created
verb = "Would have deleted" if self.dry_run else "Deleting"
self.say("{verb} {to_delete} rows of {total} for student_module_id {id}".format(
verb=verb,
to_delete=len(ids_to_delete),
total=len(history),
id=student_module_id,
))
if ids_to_delete and not self.dry_run:
self.delete_history(ids_to_delete)
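To make the gap rule concrete, a small worked example of the pruning logic above, with hypothetical timestamps; a row is dropped when a newer row for the same StudentModule follows within DELETE_GAP_SECS:

import datetime

GAP = datetime.timedelta(seconds=0.5)
history = [  # (id, created), oldest first, as returned by get_history_for_student_modules
    (4, datetime.datetime(2013, 7, 13, 16, 30, 0, 0)),
    (8, datetime.datetime(2013, 7, 13, 16, 30, 1, 100000)),
    (16, datetime.datetime(2013, 7, 13, 16, 30, 1, 300000)),
]
to_delete, next_created = [], None
for row_id, created in reversed(history):
    if next_created is not None and (next_created - created) < GAP:
        to_delete.append(row_id)    # a newer row follows too closely
    next_created = created
print to_delete   # prints: [8] -- rows 4 and 16 are kept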

View File

@@ -0,0 +1,483 @@
"""Test the clean_history management command."""
import fnmatch
from mock import Mock
import os.path
import textwrap
import dateutil.parser
import dateutil.tz
from django.test import TransactionTestCase
from django.db import connection
from courseware.management.commands.clean_history import StudentModuleHistoryCleaner
# In lots of places in this file, smhc == StudentModuleHistoryCleaner
def parse_date(sdate):
"""Parse a string date into a datetime."""
parsed = dateutil.parser.parse(sdate)
parsed = parsed.replace(tzinfo=dateutil.tz.gettz('UTC'))
return parsed
class SmhcSayStubbed(StudentModuleHistoryCleaner):
"""StudentModuleHistoryCleaner, but with .say() stubbed for testing."""
def __init__(self, **kwargs):
super(SmhcSayStubbed, self).__init__(**kwargs)
self.said_lines = []
def say(self, msg):
self.said_lines.append(msg)
class SmhcDbMocked(SmhcSayStubbed):
"""StudentModuleHistoryCleaner, but with db access mocked."""
def __init__(self, **kwargs):
super(SmhcDbMocked, self).__init__(**kwargs)
self.get_history_for_student_modules = Mock()
self.delete_history = Mock()
def set_rows(self, rows):
"""Set the mocked history rows."""
rows = [(row_id, parse_date(created)) for row_id, created in rows]
self.get_history_for_student_modules.return_value = rows
class HistoryCleanerTest(TransactionTestCase):
"""Base class for all history cleaner tests."""
maxDiff = None
def setUp(self):
super(HistoryCleanerTest, self).setUp()
self.addCleanup(self.clean_up_state_file)
def write_state_file(self, state):
"""Write the string `state` into the state file read by StudentModuleHistoryCleaner."""
with open(StudentModuleHistoryCleaner.STATE_FILE, "w") as state_file:
state_file.write(state)
def read_state_file(self):
"""Return the string contents of the state file read by StudentModuleHistoryCleaner."""
with open(StudentModuleHistoryCleaner.STATE_FILE) as state_file:
return state_file.read()
def clean_up_state_file(self):
"""Remove any state file lying around."""
if os.path.exists(StudentModuleHistoryCleaner.STATE_FILE):
os.remove(StudentModuleHistoryCleaner.STATE_FILE)
def assert_said(self, smhc, *msgs):
"""Fail if the `smhc` didn't say `msgs`.
The messages passed here are `fnmatch`-style patterns: "*" means anything.
"""
for said, pattern in zip(smhc.said_lines, msgs):
if not fnmatch.fnmatch(said, pattern):
fmt = textwrap.dedent("""\
Messages:
{msgs}
don't match patterns:
{patterns}
Failed at {said!r} and {pattern!r}
""")
msg = fmt.format(
msgs="\n".join(smhc.said_lines),
patterns="\n".join(msgs),
said=said,
pattern=pattern
)
self.fail(msg)
def parse_rows(self, rows):
"""Parse convenient rows into real data."""
rows = [
(row_id, parse_date(created), student_module_id)
for row_id, created, student_module_id in rows
]
return rows
def write_history(self, rows):
"""Write history rows to the db.
Each row should be (id, created, student_module_id).
"""
cursor = connection.cursor()
cursor.executemany("""
INSERT INTO courseware_studentmodulehistory
(id, created, student_module_id)
VALUES (%s, %s, %s)
""",
self.parse_rows(rows),
)
def read_history(self):
"""Read the history from the db, and return it as a list of tuples.
Returns [(id, created, student_module_id), ...]
"""
cursor = connection.cursor()
cursor.execute("""
SELECT id, created, student_module_id FROM courseware_studentmodulehistory
""")
return cursor.fetchall()
def assert_history(self, rows):
"""Assert that the history rows are the same as `rows`."""
self.assertEqual(self.parse_rows(rows), self.read_history())
class HistoryCleanerNoDbTest(HistoryCleanerTest):
"""Tests of StudentModuleHistoryCleaner with db access mocked."""
def test_empty(self):
smhc = SmhcDbMocked()
smhc.set_rows([])
smhc.clean_one_student_module(1)
self.assert_said(smhc, "No history for student_module_id 1")
# Nothing to delete, so delete_history wasn't called.
self.assertFalse(smhc.delete_history.called)
def test_one_row(self):
smhc = SmhcDbMocked()
smhc.set_rows([
(1, "2013-07-13 12:11:10.987"),
])
smhc.clean_one_student_module(1)
self.assert_said(smhc, "Deleting 0 rows of 1 for student_module_id 1")
# Nothing to delete, so delete_history wasn't called.
self.assertFalse(smhc.delete_history.called)
def test_one_row_dry_run(self):
smhc = SmhcDbMocked(dry_run=True)
smhc.set_rows([
(1, "2013-07-13 12:11:10.987"),
])
smhc.clean_one_student_module(1)
self.assert_said(smhc, "Would have deleted 0 rows of 1 for student_module_id 1")
# Nothing to delete, so delete_history wasn't called.
self.assertFalse(smhc.delete_history.called)
def test_two_rows_close(self):
smhc = SmhcDbMocked()
smhc.set_rows([
(7, "2013-07-13 12:34:56.789"),
(9, "2013-07-13 12:34:56.987"),
])
smhc.clean_one_student_module(1)
self.assert_said(smhc, "Deleting 1 rows of 2 for student_module_id 1")
smhc.delete_history.assert_called_once_with([7])
def test_two_rows_far(self):
smhc = SmhcDbMocked()
smhc.set_rows([
(7, "2013-07-13 12:34:56.789"),
(9, "2013-07-13 12:34:57.890"),
])
smhc.clean_one_student_module(1)
self.assert_said(smhc, "Deleting 0 rows of 2 for student_module_id 1")
self.assertFalse(smhc.delete_history.called)
def test_a_bunch_of_rows(self):
smhc = SmhcDbMocked()
smhc.set_rows([
( 4, "2013-07-13 16:30:00.000"), # keep
( 8, "2013-07-13 16:30:01.100"),
(15, "2013-07-13 16:30:01.200"),
(16, "2013-07-13 16:30:01.300"), # keep
(23, "2013-07-13 16:30:02.400"),
(42, "2013-07-13 16:30:02.500"),
(98, "2013-07-13 16:30:02.600"), # keep
(99, "2013-07-13 16:30:59.000"), # keep
])
smhc.clean_one_student_module(17)
self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 17")
smhc.delete_history.assert_called_once_with([42, 23, 15, 8])
class HistoryCleanerWithDbTest(HistoryCleanerTest):
"""Tests of StudentModuleHistoryCleaner with a real db."""
def test_no_history(self):
# Cleaning a student_module_id with no history leaves the db unchanged.
smhc = SmhcSayStubbed()
self.write_history([
( 4, "2013-07-13 16:30:00.000", 11), # keep
( 8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
smhc.clean_one_student_module(22)
self.assert_said(smhc, "No history for student_module_id 22")
self.assert_history([
( 4, "2013-07-13 16:30:00.000", 11), # keep
( 8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
def test_a_bunch_of_rows(self):
# Cleaning a student_module_id with 8 records, 4 to delete.
smhc = SmhcSayStubbed()
self.write_history([
( 4, "2013-07-13 16:30:00.000", 11), # keep
( 8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(17, "2013-07-13 16:30:01.310", 22), # other student_module_id!
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
smhc.clean_one_student_module(11)
self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 11")
self.assert_history([
( 4, "2013-07-13 16:30:00.000", 11), # keep
(16, "2013-07-13 16:30:01.300", 11), # keep
(17, "2013-07-13 16:30:01.310", 22), # other student_module_id!
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
def test_a_bunch_of_rows_dry_run(self):
# Cleaning a student_module_id with 8 records, 4 to delete,
# but don't really do it.
smhc = SmhcSayStubbed(dry_run=True)
self.write_history([
( 4, "2013-07-13 16:30:00.000", 11), # keep
( 8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
smhc.clean_one_student_module(11)
self.assert_said(smhc, "Would have deleted 4 rows of 8 for student_module_id 11")
self.assert_history([
( 4, "2013-07-13 16:30:00.000", 11), # keep
( 8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
def test_a_bunch_of_rows_in_jumbled_order(self):
# Cleaning a student_module_id with 8 records, 4 to delete.
smhc = SmhcSayStubbed()
self.write_history([
(23, "2013-07-13 16:30:01.100", 11),
(24, "2013-07-13 16:30:01.300", 11), # keep
(27, "2013-07-13 16:30:02.500", 11),
(30, "2013-07-13 16:30:01.350", 22), # other student_module_id!
(32, "2013-07-13 16:30:59.000", 11), # keep
(50, "2013-07-13 16:30:02.400", 11),
(51, "2013-07-13 16:30:02.600", 11), # keep
(56, "2013-07-13 16:30:00.000", 11), # keep
(57, "2013-07-13 16:30:01.200", 11),
])
smhc.clean_one_student_module(11)
self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 11")
self.assert_history([
(24, "2013-07-13 16:30:01.300", 11), # keep
(30, "2013-07-13 16:30:01.350", 22), # other student_module_id!
(32, "2013-07-13 16:30:59.000", 11), # keep
(51, "2013-07-13 16:30:02.600", 11), # keep
(56, "2013-07-13 16:30:00.000", 11), # keep
])
def test_a_bunch_of_rows_with_timestamp_ties(self):
# Sometimes rows are written with identical timestamps. The one with
# the greater id is the winner in that case.
smhc = SmhcSayStubbed()
self.write_history([
(21, "2013-07-13 16:30:01.100", 11),
(24, "2013-07-13 16:30:01.100", 11), # keep
(22, "2013-07-13 16:30:01.100", 11),
(23, "2013-07-13 16:30:01.100", 11),
(27, "2013-07-13 16:30:02.500", 11),
(30, "2013-07-13 16:30:01.350", 22), # other student_module_id!
(32, "2013-07-13 16:30:59.000", 11), # keep
(50, "2013-07-13 16:30:02.500", 11), # keep
])
smhc.clean_one_student_module(11)
self.assert_said(smhc, "Deleting 4 rows of 7 for student_module_id 11")
self.assert_history([
(24, "2013-07-13 16:30:01.100", 11), # keep
(30, "2013-07-13 16:30:01.350", 22), # other student_module_id!
(32, "2013-07-13 16:30:59.000", 11), # keep
(50, "2013-07-13 16:30:02.500", 11), # keep
])
def test_get_last_student_module(self):
# Can we find the last student_module_id properly?
smhc = SmhcSayStubbed()
self.write_history([
(23, "2013-07-13 16:30:01.100", 11),
(24, "2013-07-13 16:30:01.300", 44),
(27, "2013-07-13 16:30:02.500", 11),
(30, "2013-07-13 16:30:01.350", 22),
(32, "2013-07-13 16:30:59.000", 11),
(51, "2013-07-13 16:30:02.600", 33),
(56, "2013-07-13 16:30:00.000", 11),
])
last = smhc.get_last_student_module_id()
self.assertEqual(last, 44)
self.assert_said(smhc, "Last student_module_id is 44")
def test_load_state_with_no_stored_state(self):
smhc = SmhcSayStubbed()
self.assertFalse(os.path.exists(smhc.STATE_FILE))
smhc.load_state()
self.assertEqual(smhc.next_student_module_id, 0)
self.assert_said(smhc, "No stored state")
def test_load_stored_state(self):
self.write_state_file('{"next_student_module_id": 23}')
smhc = SmhcSayStubbed()
smhc.load_state()
self.assertEqual(smhc.next_student_module_id, 23)
self.assert_said(smhc, 'Loaded stored state: {"next_student_module_id": 23}')
def test_save_state(self):
smhc = SmhcSayStubbed()
smhc.next_student_module_id = 47
smhc.save_state()
state = self.read_state_file()
self.assertEqual(state, '{"next_student_module_id": 47}')
class SmhcForTestingMain(SmhcSayStubbed):
"""A StudentModuleHistoryCleaner with a few function stubbed for testing main."""
def __init__(self, *args, **kwargs):
self.exception_smids = kwargs.pop('exception_smids', ())
super(SmhcForTestingMain, self).__init__(*args, **kwargs)
def clean_one_student_module(self, smid):
self.say("(not really cleaning {})".format(smid))
if smid in self.exception_smids:
raise Exception("Something went wrong!")
def commit(self):
self.say("(not really committing)")
class HistoryCleanerMainTest(HistoryCleanerTest):
"""Tests of StudentModuleHistoryCleaner.main(), using SmhcForTestingMain."""
def test_only_one_record(self):
smhc = SmhcForTestingMain()
self.write_history([
(1, "2013-07-15 11:47:00.000", 1),
])
smhc.main()
self.assert_said(smhc,
'Last student_module_id is 1',
'No stored state',
'(not really cleaning 0)',
'(not really cleaning 1)',
'(not really committing)',
'Saved state: {"next_student_module_id": 2}',
)
def test_already_processed_some(self):
smhc = SmhcForTestingMain()
self.write_state_file('{"next_student_module_id": 25}')
self.write_history([
(1, "2013-07-15 15:04:00.000", 23),
(2, "2013-07-15 15:04:11.000", 23),
(3, "2013-07-15 15:04:01.000", 24),
(4, "2013-07-15 15:04:00.000", 25),
(5, "2013-07-15 15:04:00.000", 26),
])
smhc.main()
self.assert_said(smhc,
'Last student_module_id is 26',
'Loaded stored state: {"next_student_module_id": 25}',
'(not really cleaning 25)',
'(not really cleaning 26)',
'(not really committing)',
'Saved state: {"next_student_module_id": 27}'
)
def test_working_in_batches(self):
smhc = SmhcForTestingMain()
self.write_state_file('{"next_student_module_id": 25}')
self.write_history([
(3, "2013-07-15 15:04:01.000", 24),
(4, "2013-07-15 15:04:00.000", 25),
(5, "2013-07-15 15:04:00.000", 26),
(6, "2013-07-15 15:04:00.000", 27),
(7, "2013-07-15 15:04:00.000", 28),
(8, "2013-07-15 15:04:00.000", 29),
])
smhc.main(batch_size=3)
self.assert_said(smhc,
'Last student_module_id is 29',
'Loaded stored state: {"next_student_module_id": 25}',
'(not really cleaning 25)',
'(not really cleaning 26)',
'(not really cleaning 27)',
'(not really committing)',
'Saved state: {"next_student_module_id": 28}',
'(not really cleaning 28)',
'(not really cleaning 29)',
'(not really committing)',
'Saved state: {"next_student_module_id": 30}',
)
def test_something_failing_while_cleaning(self):
smhc = SmhcForTestingMain(exception_smids=[26])
self.write_state_file('{"next_student_module_id": 25}')
self.write_history([
(3, "2013-07-15 15:04:01.000", 24),
(4, "2013-07-15 15:04:00.000", 25),
(5, "2013-07-15 15:04:00.000", 26),
(6, "2013-07-15 15:04:00.000", 27),
(7, "2013-07-15 15:04:00.000", 28),
(8, "2013-07-15 15:04:00.000", 29),
])
smhc.main(batch_size=3)
self.assert_said(smhc,
'Last student_module_id is 29',
'Loaded stored state: {"next_student_module_id": 25}',
'(not really cleaning 25)',
'(not really cleaning 26)',
"Couldn't clean student_module_id 26:\nTraceback*Exception: Something went wrong!\n",
'(not really cleaning 27)',
'(not really committing)',
'Saved state: {"next_student_module_id": 28}',
'(not really cleaning 28)',
'(not really cleaning 29)',
'(not really committing)',
'Saved state: {"next_student_module_id": 30}',
)

View File

@@ -12,9 +12,14 @@ from .models import (
XModuleStudentPrefsField,
XModuleStudentInfoField
)
import logging
from django.db import DatabaseError
from xblock.runtime import KeyValueStore, InvalidScopeError
from xblock.core import Scope
from xblock.core import KeyValueMultiSaveError, Scope
log = logging.getLogger(__name__)
class InvalidWriteError(Exception):
@@ -242,9 +247,10 @@ class ModelDataCache(object):
course_id=self.course_id,
student=self.user,
module_state_key=key.block_scope_id.url(),
defaults={'state': json.dumps({}),
'module_type': key.block_scope_id.category,
},
defaults={
'state': json.dumps({}),
'module_type': key.block_scope_id.category,
},
)
elif key.scope == Scope.content:
field_object, _ = XModuleContentField.objects.get_or_create(
@@ -328,22 +334,57 @@ class LmsKeyValueStore(KeyValueStore):
return json.loads(field_object.value)
def set(self, key, value):
if key.field_name in self._descriptor_model_data:
raise InvalidWriteError("Not allowed to overwrite descriptor model data", key.field_name)
"""
Set a single value in the KeyValueStore
"""
self.set_many({key: value})
field_object = self._model_data_cache.find_or_create(key)
def set_many(self, kv_dict):
"""
Provide a bulk save mechanism.
if key.scope not in self._allowed_scopes:
raise InvalidScopeError(key.scope)
`kv_dict`: A dictionary of dirty fields that maps
xblock.DbModel._key : value
if key.scope == Scope.user_state:
state = json.loads(field_object.state)
state[key.field_name] = value
field_object.state = json.dumps(state)
else:
field_object.value = json.dumps(value)
"""
saved_fields = []
# field_objects maps a field_object to a list of associated fields
field_objects = dict()
for field in kv_dict:
# Check field for validity
if field.field_name in self._descriptor_model_data:
raise InvalidWriteError("Not allowed to overwrite descriptor model data", field.field_name)
field_object.save()
if field.scope not in self._allowed_scopes:
raise InvalidScopeError(field.scope)
# If the field is valid and isn't already in the dictionary, add it.
field_object = self._model_data_cache.find_or_create(field)
if field_object not in field_objects.keys():
field_objects[field_object] = []
# Update the list of associated fields
field_objects[field_object].append(field)
# Special case when scope is for the user state, because this scope saves fields in a single row
if field.scope == Scope.user_state:
state = json.loads(field_object.state)
state[field.field_name] = kv_dict[field]
field_object.state = json.dumps(state)
else:
# The remaining scopes save fields on different rows, so
# we don't have to worry about conflicts
field_object.value = json.dumps(kv_dict[field])
for field_object in field_objects:
try:
# Save the field object that we made above
field_object.save()
# If save is successful on this scope, add the saved fields to
# the list of successful saves
saved_fields.extend([field.field_name for field in field_objects[field_object]])
except DatabaseError:
log.error('Error saving fields %r', field_objects[field_object])
raise KeyValueMultiSaveError(saved_fields)
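A short sketch of how a caller might use set_many and the partial-save reporting above; the kvs object and dirty_fields dict are hypothetical:

import logging
from xblock.core import KeyValueMultiSaveError

log = logging.getLogger(__name__)

def save_dirty(kvs, dirty_fields):
    """Persist a {KeyValueStore.Key: value} dict in one call, logging partial saves."""
    try:
        kvs.set_many(dirty_fields)
    except KeyValueMultiSaveError as exc:
        # set_many reports which fields were committed before the failure,
        # so the caller can avoid redundant re-saves.
        log.error('Only saved %r before failing', exc.saved_field_names)
        raise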
def delete(self, key):
if key.field_name in self._descriptor_model_data:

View File

@@ -27,7 +27,7 @@ from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.x_module import ModuleSystem
from xmodule_modifiers import replace_course_urls, replace_static_urls, add_histogram, wrap_xmodule
from xmodule_modifiers import replace_course_urls, replace_static_urls, add_histogram, wrap_xmodule, save_module # pylint: disable=F0401
import static_replace
from psychometrics.psychoanalyze import make_psychometrics_data_update_handler
@@ -36,6 +36,8 @@ from student.models import unique_id_for_user
from courseware.access import has_access
from courseware.masquerade import setup_masquerade
from courseware.model_data import LmsKeyValueStore, LmsUsage, ModelDataCache
from xblock.runtime import KeyValueStore
from xblock.core import Scope
from courseware.models import StudentModule
from util.sandboxing import can_execute_unsafe_code
from util.json_request import JsonResponse
@@ -226,7 +228,7 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours
userid=str(user.id),
mod_id=descriptor.location.url(),
dispatch=dispatch),
)
)
return xqueue_callback_url_prefix + relative_xqueue_callback_url
# Default queuename is course-specific and is derived from the course that
@@ -234,11 +236,12 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours
# TODO: Queuename should be derived from 'course_settings.json' of each course
xqueue_default_queuename = descriptor.location.org + '-' + descriptor.location.course
xqueue = {'interface': xqueue_interface,
'construct_callback': make_xqueue_callback,
'default_queuename': xqueue_default_queuename.replace(' ', '_'),
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
}
xqueue = {
'interface': xqueue_interface,
'construct_callback': make_xqueue_callback,
'default_queuename': xqueue_default_queuename.replace(' ', '_'),
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
}
# This is a hacky way to pass settings to the combined open ended xmodule
# It needs an S3 interface to upload images to S3
@@ -286,18 +289,24 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours
)
def publish(event):
"""A function that allows XModules to publish events. This only supports grade changes right now."""
if event.get('event_name') != 'grade':
return
student_module, created = StudentModule.objects.get_or_create(
course_id=course_id,
student=user,
module_type=descriptor.location.category,
module_state_key=descriptor.location.url(),
defaults={'state': '{}'},
usage = LmsUsage(descriptor.location, descriptor.location)
# Construct the key for the module
key = KeyValueStore.Key(
scope=Scope.user_state,
student_id=user.id,
block_scope_id=usage.id,
field_name='grade'
)
student_module = model_data_cache.find_or_create(key)
# Update the grades
student_module.grade = event.get('value')
student_module.max_grade = event.get('max_value')
# Save all changes to the underlying KeyValueStore
student_module.save()
# Bin score into range and increment stats
@@ -388,9 +397,31 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours
if has_access(user, module, 'staff', course_id):
module.get_html = add_histogram(module.get_html, module, user)
# force the module to save after rendering
module.get_html = save_module(module.get_html, module)
return module
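For reference, a hedged sketch of the event shape publish() above expects; only grade events are handled, the values are hypothetical, and reaching publish() through an XModule's ModuleSystem is an assumption:

event = {
    'event_name': 'grade',   # publish() ignores every other event_name
    'value': 7,              # points earned
    'max_value': 10,         # points possible
}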
def find_target_student_module(request, user_id, course_id, mod_id):
"""
Retrieve target StudentModule
"""
user = User.objects.get(id=user_id)
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
course_id,
user,
modulestore().get_instance(course_id, mod_id),
depth=0,
select_for_update=True
)
instance = get_module(user, request, mod_id, model_data_cache, course_id, grade_bucket_type='xqueue')
if instance is None:
msg = "No module {0} for user {1}--access denied?".format(mod_id, user)
log.debug(msg)
raise Http404
return instance
@csrf_exempt
def xqueue_callback(request, course_id, userid, mod_id, dispatch):
'''
@@ -409,20 +440,7 @@ def xqueue_callback(request, course_id, userid, mod_id, dispatch):
if not isinstance(header, dict) or 'lms_key' not in header:
raise Http404
# Retrieve target StudentModule
user = User.objects.get(id=userid)
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
course_id,
user,
modulestore().get_instance(course_id, mod_id),
depth=0,
select_for_update=True
)
instance = get_module(user, request, mod_id, model_data_cache, course_id, grade_bucket_type='xqueue')
if instance is None:
msg = "No module {0} for user {1}--access denied?".format(mod_id, user)
log.debug(msg)
raise Http404
instance = find_target_student_module(request, userid, course_id, mod_id)
# Transfer 'queuekey' from xqueue response header to the data.
# This is required to use the interface defined by 'handle_ajax'
@@ -433,6 +451,8 @@ def xqueue_callback(request, course_id, userid, mod_id, dispatch):
try:
# Can ignore the return value--not used for xqueue_callback
instance.handle_ajax(dispatch, data)
# Save any state that has changed to the underlying KeyValueStore
instance.save()
except:
log.exception("error processing ajax call")
raise
@@ -504,6 +524,8 @@ def modx_dispatch(request, dispatch, location, course_id):
# Let the module handle the AJAX
try:
ajax_return = instance.handle_ajax(dispatch, data)
# Save any fields that have changed to the underlying KeyValueStore
instance.save()
# If we can't find the module, respond with a 404
except NotFoundError:

View File

@@ -1,5 +1,8 @@
"""
Test for lms courseware app, module data (runtime data storage for XBlocks)
"""
import json
from mock import Mock
from mock import Mock, patch
from functools import partial
from courseware.model_data import LmsKeyValueStore, InvalidWriteError
@@ -15,6 +18,8 @@ from courseware.tests.factories import StudentPrefsFactory, StudentInfoFactory
from xblock.core import Scope, BlockScope
from xmodule.modulestore import Location
from django.test import TestCase
from django.db import DatabaseError
from xblock.core import KeyValueMultiSaveError
def mock_field(scope, name):
@@ -66,12 +71,17 @@ class TestDescriptorFallback(TestCase):
self.assertRaises(InvalidWriteError, self.kvs.set, settings_key('field_b'), 'foo')
self.assertEquals('settings', self.desc_md['field_b'])
self.assertRaises(InvalidWriteError, self.kvs.set_many, {content_key('field_a'): 'foo'})
self.assertEquals('content', self.desc_md['field_a'])
self.assertRaises(InvalidWriteError, self.kvs.delete, content_key('field_a'))
self.assertEquals('content', self.desc_md['field_a'])
self.assertRaises(InvalidWriteError, self.kvs.delete, settings_key('field_b'))
self.assertEquals('settings', self.desc_md['field_b'])
class TestInvalidScopes(TestCase):
def setUp(self):
self.desc_md = {}
@@ -83,17 +93,20 @@ class TestInvalidScopes(TestCase):
for scope in (Scope(user=True, block=BlockScope.DEFINITION),
Scope(user=False, block=BlockScope.TYPE),
Scope(user=False, block=BlockScope.ALL)):
self.assertRaises(InvalidScopeError, self.kvs.get, LmsKeyValueStore.Key(scope, None, None, 'field'))
self.assertRaises(InvalidScopeError, self.kvs.set, LmsKeyValueStore.Key(scope, None, None, 'field'), 'value')
self.assertRaises(InvalidScopeError, self.kvs.delete, LmsKeyValueStore.Key(scope, None, None, 'field'))
self.assertRaises(InvalidScopeError, self.kvs.has, LmsKeyValueStore.Key(scope, None, None, 'field'))
key = LmsKeyValueStore.Key(scope, None, None, 'field')
self.assertRaises(InvalidScopeError, self.kvs.get, key)
self.assertRaises(InvalidScopeError, self.kvs.set, key, 'value')
self.assertRaises(InvalidScopeError, self.kvs.delete, key)
self.assertRaises(InvalidScopeError, self.kvs.has, key)
self.assertRaises(InvalidScopeError, self.kvs.set_many, {key: 'value'})
class TestStudentModuleStorage(TestCase):
def setUp(self):
self.desc_md = {}
student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value'}))
student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value', 'b_field': 'b_value'}))
self.user = student_module.student
self.mdc = ModelDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = LmsKeyValueStore(self.desc_md, self.mdc)
@@ -110,13 +123,13 @@ class TestStudentModuleStorage(TestCase):
"Test that setting an existing user_state field changes the value"
self.kvs.set(user_state_key('a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
self.assertEquals({'b_field': 'b_value', 'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_set_missing_field(self):
"Test that setting a new user_state field changes the value"
self.kvs.set(user_state_key('not_a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it from the StudentModule"
@@ -128,7 +141,7 @@ class TestStudentModuleStorage(TestCase):
"Test that deleting a missing field from an existing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, user_state_key('not_a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state))
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_has_existing_field(self):
"Test that `has` returns True for existing fields in StudentModules"
@@ -138,6 +151,35 @@ class TestStudentModuleStorage(TestCase):
"Test that `has` returns False for missing fields in StudentModule"
self.assertFalse(self.kvs.has(user_state_key('not_a_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = user_state_key('field_a')
key2 = user_state_key('field_b')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"Test setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"Test failures when setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
# because we're patching the underlying save, we need to ensure the
# fields are in the cache
for key in kv_dict:
self.kvs.set(key, 'test_value')
with patch('django.db.models.Model.save', side_effect=DatabaseError):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
self.assertEquals(len(exception_context.exception.saved_field_names), 0)
class TestMissingStudentModule(TestCase):
def setUp(self):
@@ -176,6 +218,14 @@ class TestMissingStudentModule(TestCase):
class StorageTestBase(object):
"""
A base class that gets subclassed when testing each of the scopes.
"""
# Disable pylint warnings that arise because of the way the child classes call
# this base class -- pylint's static analysis can't keep up with it.
# pylint: disable=E1101, E1102
factory = None
scope = None
key_factory = None
@@ -188,7 +238,10 @@ class StorageTestBase(object):
else:
self.user = UserFactory.create()
self.desc_md = {}
self.mdc = ModelDataCache([mock_descriptor([mock_field(self.scope, 'existing_field')])], course_id, self.user)
self.mock_descriptor = mock_descriptor([
mock_field(self.scope, 'existing_field'),
mock_field(self.scope, 'other_existing_field')])
self.mdc = ModelDataCache([self.mock_descriptor], course_id, self.user)
self.kvs = LmsKeyValueStore(self.desc_md, self.mdc)
def test_set_and_get_existing_field(self):
@@ -234,6 +287,38 @@ class StorageTestBase(object):
"Test that `has` return False for an existing Storage Field"
self.assertFalse(self.kvs.has(self.key_factory('missing_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = self.key_factory('existing_field')
key2 = self.key_factory('other_existing_field')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"""Test that setting many regular fields at the same time works"""
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"""Test that setting many regular fields with a DB error """
kv_dict = self.construct_kv_dict()
for key in kv_dict:
self.kvs.set(key, 'test value')
with patch('django.db.models.Model.save', side_effect=[None, DatabaseError]):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
exception = exception_context.exception
self.assertEquals(len(exception.saved_field_names), 1)
self.assertEquals(exception.saved_field_names[0], 'existing_field')
class TestSettingsStorage(StorageTestBase, TestCase):
factory = SettingsFactory

View File

@@ -1,4 +1,7 @@
from mock import MagicMock
"""
Test for lms courseware app, module render unit
"""
from mock import MagicMock, patch
import json
from django.http import Http404, HttpResponse
@@ -28,6 +31,20 @@ class ModuleRenderTestCase(LoginEnrollmentTestCase):
self.location = ['i4x', 'edX', 'toy', 'chapter', 'Overview']
self.course_id = 'edX/toy/2012_Fall'
self.toy_course = modulestore().get_course(self.course_id)
self.mock_user = UserFactory()
self.mock_user.id = 1
self.request_factory = RequestFactory()
# Construct a mock module for the modulestore to return
self.mock_module = MagicMock()
self.mock_module.id = 1
self.dispatch = 'score_update'
# Construct a 'standard' xqueue_callback url
self.callback_url = reverse('xqueue_callback', kwargs=dict(course_id=self.course_id,
userid=str(self.mock_user.id),
mod_id=self.mock_module.id,
dispatch=self.dispatch))
def test_get_module(self):
self.assertIsNone(render.get_module('dummyuser', None,
@@ -56,7 +73,7 @@ class ModuleRenderTestCase(LoginEnrollmentTestCase):
mock_request_3 = MagicMock()
mock_request_3.POST.copy.return_value = {'position': 1}
mock_request_3.FILES = False
mock_request_3.user = UserFactory()
mock_request_3.user = self.mock_user
inputfile_2 = Stub()
inputfile_2.size = 1
inputfile_2.name = 'name'
@@ -87,6 +104,46 @@ class ModuleRenderTestCase(LoginEnrollmentTestCase):
self.course_id
)
def test_xqueue_callback_success(self):
"""
Test for happy-path xqueue_callback
"""
fake_key = 'fake key'
xqueue_header = json.dumps({'lms_key': fake_key})
data = {
'xqueue_header': xqueue_header,
'xqueue_body': 'hello world',
}
# Patch find_target_student_module to return our mock module
with patch('courseware.module_render.find_target_student_module') as get_fake_module:
get_fake_module.return_value = self.mock_module
# call xqueue_callback with our mocked information
request = self.request_factory.post(self.callback_url, data)
render.xqueue_callback(request, self.course_id, self.mock_user.id, self.mock_module.id, self.dispatch)
# Verify that handle_ajax is called with the correct data
request.POST['queuekey'] = fake_key
self.mock_module.handle_ajax.assert_called_once_with(self.dispatch, request.POST)
def test_xqueue_callback_missing_header_info(self):
data = {
'xqueue_header': '{}',
'xqueue_body': 'hello world',
}
with patch('courseware.module_render.find_target_student_module') as get_fake_module:
get_fake_module.return_value = self.mock_module
# Test with missing xqueue data
with self.assertRaises(Http404):
request = self.request_factory.post(self.callback_url, {})
render.xqueue_callback(request, self.course_id, self.mock_user.id, self.mock_module.id, self.dispatch)
# Test with missing xqueue_header
with self.assertRaises(Http404):
request = self.request_factory.post(self.callback_url, data)
render.xqueue_callback(request, self.course_id, self.mock_user.id, self.mock_module.id, self.dispatch)
def test_get_score_bucket(self):
self.assertEquals(render.get_score_bucket(0, 10), 'incorrect')
self.assertEquals(render.get_score_bucket(1, 10), 'partial')

View File

@@ -167,6 +167,8 @@ def save_child_position(seq_module, child_name):
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def check_for_active_timelimit_module(request, course_id, course):

View File

@@ -15,6 +15,7 @@ from xmodule.course_module import CourseDescriptor
from student.models import unique_id_for_user
from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
from utils import does_location_exist
log = logging.getLogger(__name__)
@@ -240,7 +241,6 @@ def get_next(request, course_id):
return HttpResponse(_get_next(course_id, grader_id, location),
mimetype="application/json")
def get_problem_list(request, course_id):
"""
Get all the problems for the given course id
@@ -266,6 +266,20 @@ def get_problem_list(request, course_id):
_check_access(request.user, course_id)
try:
response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user))
response = json.loads(response)
problem_list = response['problem_list']
valid_problem_list = []
for i in xrange(0,len(problem_list)):
#Needed to ensure that the 'location' key can be accessed
try:
problem_list[i] = json.loads(problem_list[i])
except Exception:
pass
if does_location_exist(course_id, problem_list[i]['location']):
valid_problem_list.append(problem_list[i])
response['problem_list'] = valid_problem_list
response = json.dumps(response)
return HttpResponse(response,
mimetype="application/json")
except GradingServiceError:

View File

@@ -0,0 +1,16 @@
from xmodule.modulestore import search
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
def does_location_exist(course_id, location):
"""
Checks to see if a valid module exists at a given location (i.e., has not been deleted)
course_id - string course id
location - string location
"""
try:
search.path_to_location(modulestore(), course_id, location)
return True
except ItemNotFoundError:
# If the problem cannot be found at the location received from the grading controller server, it has been deleted by the course author.
return False
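A brief usage sketch of the helper above, mirroring how the staff grading view filters its problem list; the course id and locations are hypothetical:

problem_list = [
    {'location': 'i4x://edX/Demo/combinedopenended/SampleQuestion'},   # hypothetical
    {'location': 'i4x://edX/Demo/combinedopenended/DeletedQuestion'},  # hypothetical
]
valid_problems = [
    problem for problem in problem_list
    if does_location_exist('edX/Demo/2013_Spring', problem['location'])
]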

View File

@@ -178,6 +178,7 @@ def student_problem_list(request, course_id):
error_text = ""
problem_list = []
base_course_url = reverse('courses')
list_to_remove = []
try:
#Get list of all open ended problems that the grading server knows about
@@ -191,7 +192,6 @@ def student_problem_list(request, course_id):
problem_list = problem_list_dict['problem_list']
#A list of problems to remove (problems that can't be found in the course)
list_to_remove = []
for i in xrange(0, len(problem_list)):
try:
#Try to load each problem in the courseware to get links to them

View File

@@ -308,6 +308,8 @@ CODE_JAIL = {
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
# How large a file can jailed code write?
'FSIZE': 50000,
},
}

View File

@@ -10,10 +10,6 @@
<div class="grader-status">
% if state == 'initial':
<span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
% elif state in ['done', 'post_assessment'] and correct == 'correct':
<span class="correct" id="status_${id}"></span> <p>Correct</p>
% elif state in ['done', 'post_assessment'] and correct == 'incorrect':
<span class="incorrect" id="status_${id}"></span> <p>Incorrect. </p>
% elif state == 'assessing':
<span class="grading" id="status_${id}">Submitted for grading.
% if eta_message is not None:

View File

@@ -8,6 +8,6 @@
-e git://github.com/eventbrite/zendesk.git@d53fe0e81b623f084e91776bcf6369f8b7b63879#egg=zendesk
# Our libraries:
-e git+https://github.com/edx/XBlock.git@4d8735e883#egg=XBlock
-e git+https://github.com/edx/codejail.git@0a1b468#egg=codejail
-e git+https://github.com/edx/XBlock.git@3974e999fe853a37dfa6fadf0611289434349409#egg=XBlock
-e git+https://github.com/edx/codejail.git@c08967fb44d1bcdb259d3ec58812e3ac592539c2#egg=codejail
-e git+https://github.com/edx/diff-cover.git@v0.1.3#egg=diff_cover

View File

@@ -2,6 +2,7 @@
-e common/lib/calc
-e common/lib/capa
-e common/lib/chem
-e common/lib/sandbox-packages
-e common/lib/symmath
-e common/lib/xmodule
-e .