diff --git a/.gitignore b/.gitignore
index 2fd1ca0181..493df5a7fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,4 +27,5 @@ lms/lib/comment_client/python
nosetests.xml
cover_html/
.idea/
+.redcar/
chromedriver.log
\ No newline at end of file
diff --git a/.redcar/lucene/segments.gen b/.redcar/lucene/segments.gen
new file mode 100644
index 0000000000..568652b956
Binary files /dev/null and b/.redcar/lucene/segments.gen differ
diff --git a/.redcar/lucene_last_updated b/.redcar/lucene_last_updated
new file mode 100644
index 0000000000..3692c8e076
--- /dev/null
+++ b/.redcar/lucene_last_updated
@@ -0,0 +1 @@
+1360614836
diff --git a/.redcar/redcar.lock b/.redcar/redcar.lock
new file mode 100644
index 0000000000..0677ede437
--- /dev/null
+++ b/.redcar/redcar.lock
@@ -0,0 +1 @@
+10664: Locked by 10664 at Mon Feb 11 14:22:22 -0500 2013
diff --git a/.redcar/storage/cursor_saver.yaml b/.redcar/storage/cursor_saver.yaml
new file mode 100644
index 0000000000..ba9b833044
--- /dev/null
+++ b/.redcar/storage/cursor_saver.yaml
@@ -0,0 +1,4 @@
+---
+cursor_positions: []
+
+files_to_retain: 0
diff --git a/.redcar/tags.REMOVED.git-id b/.redcar/tags.REMOVED.git-id
new file mode 100644
index 0000000000..f8318e5f1f
--- /dev/null
+++ b/.redcar/tags.REMOVED.git-id
@@ -0,0 +1 @@
+ce76efcea5f0a5b2238364f81d54f1d393853a1a
\ No newline at end of file
diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py
index b79d86b52f..66e6551019 100644
--- a/cms/djangoapps/contentstore/tests/test_contentstore.py
+++ b/cms/djangoapps/contentstore/tests/test_contentstore.py
@@ -5,7 +5,7 @@ from django.test.utils import override_settings
from django.conf import settings
from django.core.urlresolvers import reverse
from path import path
-from tempfile import mkdtemp
+from tempdir import mkdtemp_clean
import json
from fs.osfs import OSFS
import copy
@@ -194,7 +194,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
import_from_xml(ms, 'common/test/data/', ['full'])
location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
- root_dir = path(mkdtemp())
+ root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
@@ -264,6 +264,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.assertContains(resp, '/c4x/edX/full/asset/handouts_schematic_tutorial.pdf')
+
class ContentStoreTest(ModuleStoreTestCase):
"""
Tests for the CMS ContentStore application.
@@ -421,6 +422,64 @@ class ContentStoreTest(ModuleStoreTestCase):
self.assertIn('markdown', problem.metadata, "markdown is missing from metadata")
self.assertNotIn('markdown', problem.editable_metadata_fields, "Markdown slipped into the editable metadata fields")
+ def test_import_metadata_with_attempts_empty_string(self):
+ import_from_xml(modulestore(), 'common/test/data/', ['simple'])
+ ms = modulestore('direct')
+ did_load_item = False
+ try:
+ ms.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None]))
+ did_load_item = True
+ except ItemNotFoundError:
+ pass
+
+ # make sure we found the item (e.g. it didn't error while loading)
+ self.assertTrue(did_load_item)
+
+ def test_metadata_inheritance(self):
+ import_from_xml(modulestore(), 'common/test/data/', ['full'])
+
+ ms = modulestore('direct')
+ course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
+
+ verticals = ms.get_items(['i4x', 'edX', 'full', 'vertical', None, None])
+
+ # let's assert on the metadata_inheritance on an existing vertical
+ for vertical in verticals:
+ self.assertIn('xqa_key', vertical.metadata)
+ self.assertEqual(course.metadata['xqa_key'], vertical.metadata['xqa_key'])
+
+ self.assertGreater(len(verticals), 0)
+
+ new_component_location = Location('i4x', 'edX', 'full', 'html', 'new_component')
+ source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Blank_HTML_Page')
+
+        # create a new module and add it as a child to a vertical
+ ms.clone_item(source_template_location, new_component_location)
+ parent = verticals[0]
+ ms.update_children(parent.location, parent.definition.get('children', []) + [new_component_location.url()])
+
+ # flush the cache
+ ms.get_cached_metadata_inheritance_tree(new_component_location, -1)
+ new_module = ms.get_item(new_component_location)
+
+ # check for grace period definition which should be defined at the course level
+ self.assertIn('graceperiod', new_module.metadata)
+
+ self.assertEqual(course.metadata['graceperiod'], new_module.metadata['graceperiod'])
+
+ #
+ # now let's define an override at the leaf node level
+ #
+ new_module.metadata['graceperiod'] = '1 day'
+ ms.update_metadata(new_module.location, new_module.metadata)
+
+ # flush the cache and refetch
+ ms.get_cached_metadata_inheritance_tree(new_component_location, -1)
+ new_module = ms.get_item(new_component_location)
+
+ self.assertIn('graceperiod', new_module.metadata)
+ self.assertEqual('1 day', new_module.metadata['graceperiod'])
+
class TemplateTestCase(ModuleStoreTestCase):
diff --git a/cms/djangoapps/contentstore/tests/tests.py b/cms/djangoapps/contentstore/tests/tests.py
index 166982e35f..c4a46459e2 100644
--- a/cms/djangoapps/contentstore/tests/tests.py
+++ b/cms/djangoapps/contentstore/tests/tests.py
@@ -4,7 +4,6 @@ from django.test.client import Client
from django.conf import settings
from django.core.urlresolvers import reverse
from path import path
-from tempfile import mkdtemp
import json
from fs.osfs import OSFS
import copy
diff --git a/cms/djangoapps/contentstore/tests/utils.py b/cms/djangoapps/contentstore/tests/utils.py
index be028b2836..b6b8cd5023 100644
--- a/cms/djangoapps/contentstore/tests/utils.py
+++ b/cms/djangoapps/contentstore/tests/utils.py
@@ -1,6 +1,6 @@
import json
import copy
-from time import time
+from uuid import uuid4
from django.test import TestCase
from django.conf import settings
@@ -20,13 +20,12 @@ class ModuleStoreTestCase(TestCase):
def _pre_setup(self):
super(ModuleStoreTestCase, self)._pre_setup()
- # Use the current seconds since epoch to differentiate
+ # Use a uuid to differentiate
# the mongo collections on jenkins.
- sec_since_epoch = '%s' % int(time() * 100)
self.orig_MODULESTORE = copy.deepcopy(settings.MODULESTORE)
self.test_MODULESTORE = self.orig_MODULESTORE
- self.test_MODULESTORE['default']['OPTIONS']['collection'] = 'modulestore_%s' % sec_since_epoch
- self.test_MODULESTORE['direct']['OPTIONS']['collection'] = 'modulestore_%s' % sec_since_epoch
+ self.test_MODULESTORE['default']['OPTIONS']['collection'] = 'modulestore_%s' % uuid4().hex
+ self.test_MODULESTORE['direct']['OPTIONS']['collection'] = 'modulestore_%s' % uuid4().hex
settings.MODULESTORE = self.test_MODULESTORE
# Flush and initialize the module store
diff --git a/cms/envs/common.py b/cms/envs/common.py
index 281dd97f20..50f237c374 100644
--- a/cms/envs/common.py
+++ b/cms/envs/common.py
@@ -20,7 +20,6 @@ Longer TODO:
"""
import sys
-import tempfile
import os.path
import os
import lms.envs.common
@@ -59,7 +58,8 @@ sys.path.append(COMMON_ROOT / 'lib')
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
-MAKO_MODULE_DIR = tempfile.mkdtemp('mako')
+from tempdir import mkdtemp_clean
+MAKO_MODULE_DIR = mkdtemp_clean('mako')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
diff --git a/cms/static/js/models/settings/course_details.js b/cms/static/js/models/settings/course_details.js
index 168cb960be..97d71f6c79 100644
--- a/cms/static/js/models/settings/course_details.js
+++ b/cms/static/js/models/settings/course_details.js
@@ -68,10 +68,10 @@ CMS.Models.Settings.CourseDetails = Backbone.Model.extend({
save_videosource: function(newsource) {
// newsource either is or just the "speed:key, *" string
// returns the videosource for the preview which iss the key whose speed is closest to 1
- if (_.isEmpty(newsource) && !_.isEmpty(this.get('intro_video'))) this.set({'intro_video': null});
+ if (_.isEmpty(newsource) && !_.isEmpty(this.get('intro_video'))) this.save({'intro_video': null});
// TODO remove all whitespace w/in string
else {
- if (this.get('intro_video') !== newsource) this.set('intro_video', newsource);
+ if (this.get('intro_video') !== newsource) this.save('intro_video', newsource);
}
return this.videosourceSample();
diff --git a/cms/static/sass/_courseware.scss b/cms/static/sass/_courseware.scss
index f2bd25c601..45ea111b6f 100644
--- a/cms/static/sass/_courseware.scss
+++ b/cms/static/sass/_courseware.scss
@@ -498,6 +498,7 @@ input.courseware-unit-search-input {
}
&.new-section {
+
header {
height: auto;
@include clearfix();
@@ -506,6 +507,15 @@ input.courseware-unit-search-input {
.expand-collapse-icon {
visibility: hidden;
}
+
+ .item-details {
+ padding: 25px 0 0 0;
+
+ .section-name {
+ float: none;
+ width: 100%;
+ }
+ }
}
}
diff --git a/common/djangoapps/course_groups/cohorts.py b/common/djangoapps/course_groups/cohorts.py
index 155f82e0c7..f0234ec71a 100644
--- a/common/djangoapps/course_groups/cohorts.py
+++ b/common/djangoapps/course_groups/cohorts.py
@@ -6,6 +6,7 @@ forums, and to the cohort admin views.
from django.contrib.auth.models import User
from django.http import Http404
import logging
+import random
from courseware import courses
from student.models import get_user_by_username_or_email
@@ -64,7 +65,23 @@ def is_commentable_cohorted(course_id, commentable_id):
ans))
return ans
+
+def get_cohorted_commentables(course_id):
+ """
+ Given a course_id return a list of strings representing cohorted commentables
+ """
+ course = courses.get_course_by_id(course_id)
+
+ if not course.is_cohorted:
+ # this is the easy case :)
+ ans = []
+ else:
+ ans = course.cohorted_discussions
+
+ return ans
+
+
def get_cohort(user, course_id):
"""
Given a django User and a course_id, return the user's cohort in that
@@ -96,9 +113,30 @@ def get_cohort(user, course_id):
group_type=CourseUserGroup.COHORT,
users__id=user.id)
except CourseUserGroup.DoesNotExist:
- # TODO: add auto-cohorting logic here once we know what that will be.
+ # Didn't find the group. We'll go on to create one if needed.
+ pass
+
+ if not course.auto_cohort:
return None
+ choices = course.auto_cohort_groups
+ if len(choices) == 0:
+ # Nowhere to put user
+ log.warning("Course %s is auto-cohorted, but there are no"
+ " auto_cohort_groups specified",
+ course_id)
+ return None
+
+ # Put user in a random group, creating it if needed
+ group_name = random.choice(choices)
+ group, created = CourseUserGroup.objects.get_or_create(
+ course_id=course_id,
+ group_type=CourseUserGroup.COHORT,
+ name=group_name)
+
+ user.course_groups.add(group)
+ return group
+
def get_course_cohorts(course_id):
"""
diff --git a/common/djangoapps/course_groups/tests/tests.py b/common/djangoapps/course_groups/tests/tests.py
index b3ad928b39..efed39d536 100644
--- a/common/djangoapps/course_groups/tests/tests.py
+++ b/common/djangoapps/course_groups/tests/tests.py
@@ -47,7 +47,10 @@ class TestCohorts(django.test.TestCase):
@staticmethod
def config_course_cohorts(course, discussions,
- cohorted, cohorted_discussions=None):
+ cohorted,
+ cohorted_discussions=None,
+ auto_cohort=None,
+ auto_cohort_groups=None):
"""
Given a course with no discussion set up, add the discussions and set
the cohort config appropriately.
@@ -59,6 +62,9 @@ class TestCohorts(django.test.TestCase):
cohorted: bool.
cohorted_discussions: optional list of topic names. If specified,
converts them to use the same ids as topic names.
+ auto_cohort: optional bool.
+ auto_cohort_groups: optional list of strings
+ (names of groups to put students into).
Returns:
Nothing -- modifies course in place.
@@ -76,6 +82,12 @@ class TestCohorts(django.test.TestCase):
if cohorted_discussions is not None:
d["cohorted_discussions"] = [to_id(name)
for name in cohorted_discussions]
+
+ if auto_cohort is not None:
+ d["auto_cohort"] = auto_cohort
+ if auto_cohort_groups is not None:
+ d["auto_cohort_groups"] = auto_cohort_groups
+
course.metadata["cohort_config"] = d
@@ -89,12 +101,9 @@ class TestCohorts(django.test.TestCase):
def test_get_cohort(self):
- # Need to fix this, but after we're testing on staging. (Looks like
- # problem is that when get_cohort internally tries to look up the
- # course.id, it fails, even though we loaded it through the modulestore.
-
- # Proper fix: give all tests a standard modulestore that uses the test
- # dir.
+ """
+ Make sure get_cohort() does the right thing when the course is cohorted
+ """
course = modulestore().get_course("edX/toy/2012_Fall")
self.assertEqual(course.id, "edX/toy/2012_Fall")
self.assertFalse(course.is_cohorted)
@@ -122,6 +131,54 @@ class TestCohorts(django.test.TestCase):
self.assertEquals(get_cohort(other_user, course.id), None,
"other_user shouldn't have a cohort")
+ def test_auto_cohorting(self):
+ """
+ Make sure get_cohort() does the right thing when the course is auto_cohorted
+ """
+ course = modulestore().get_course("edX/toy/2012_Fall")
+ self.assertEqual(course.id, "edX/toy/2012_Fall")
+ self.assertFalse(course.is_cohorted)
+
+ user1 = User.objects.create(username="test", email="a@b.com")
+ user2 = User.objects.create(username="test2", email="a2@b.com")
+ user3 = User.objects.create(username="test3", email="a3@b.com")
+
+ cohort = CourseUserGroup.objects.create(name="TestCohort",
+ course_id=course.id,
+ group_type=CourseUserGroup.COHORT)
+
+ # user1 manually added to a cohort
+ cohort.users.add(user1)
+
+ # Make the course auto cohorted...
+ self.config_course_cohorts(course, [], cohorted=True,
+ auto_cohort=True,
+ auto_cohort_groups=["AutoGroup"])
+
+ self.assertEquals(get_cohort(user1, course.id).id, cohort.id,
+ "user1 should stay put")
+
+ self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup",
+ "user2 should be auto-cohorted")
+
+ # Now make the group list empty
+ self.config_course_cohorts(course, [], cohorted=True,
+ auto_cohort=True,
+ auto_cohort_groups=[])
+
+ self.assertEquals(get_cohort(user3, course.id), None,
+ "No groups->no auto-cohorting")
+
+ # Now make it different
+ self.config_course_cohorts(course, [], cohorted=True,
+ auto_cohort=True,
+ auto_cohort_groups=["OtherGroup"])
+
+ self.assertEquals(get_cohort(user3, course.id).name, "OtherGroup",
+ "New list->new group")
+ self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup",
+ "user2 should still be in originally placed cohort")
+
def test_get_course_cohorts(self):
course1_id = 'a/b/c'
diff --git a/common/djangoapps/mitxmako/makoloader.py b/common/djangoapps/mitxmako/makoloader.py
index 29184299b6..d623e8bcff 100644
--- a/common/djangoapps/mitxmako/makoloader.py
+++ b/common/djangoapps/mitxmako/makoloader.py
@@ -9,6 +9,7 @@ from django.template.loaders.app_directories import Loader as AppDirectoriesLoad
from mitxmako.template import Template
import mitxmako.middleware
+import tempdir
log = logging.getLogger(__name__)
@@ -30,7 +31,7 @@ class MakoLoader(object):
if module_directory is None:
log.warning("For more caching of mako templates, set the MAKO_MODULE_DIR in settings!")
- module_directory = tempfile.mkdtemp()
+ module_directory = tempdir.mkdtemp_clean()
self.module_directory = module_directory
diff --git a/common/djangoapps/mitxmako/middleware.py b/common/djangoapps/mitxmako/middleware.py
index 64cb2e5415..3f66f8cc48 100644
--- a/common/djangoapps/mitxmako/middleware.py
+++ b/common/djangoapps/mitxmako/middleware.py
@@ -13,7 +13,7 @@
# limitations under the License.
from mako.lookup import TemplateLookup
-import tempfile
+import tempdir
from django.template import RequestContext
from django.conf import settings
@@ -29,7 +29,7 @@ class MakoMiddleware(object):
module_directory = getattr(settings, 'MAKO_MODULE_DIR', None)
if module_directory is None:
- module_directory = tempfile.mkdtemp()
+ module_directory = tempdir.mkdtemp_clean()
for location in template_locations:
lookup[location] = TemplateLookup(directories=template_locations[location],
diff --git a/common/djangoapps/student/management/commands/tests/test_pearson.py b/common/djangoapps/student/management/commands/tests/test_pearson.py
index 12969405de..65d628fba0 100644
--- a/common/djangoapps/student/management/commands/tests/test_pearson.py
+++ b/common/djangoapps/student/management/commands/tests/test_pearson.py
@@ -7,6 +7,7 @@ import logging
import os
from tempfile import mkdtemp
import cStringIO
+import shutil
import sys
from django.test import TestCase
@@ -143,23 +144,18 @@ class PearsonTestCase(TestCase):
'''
Base class for tests running Pearson-related commands
'''
- import_dir = mkdtemp(prefix="import")
- export_dir = mkdtemp(prefix="export")
def assertErrorContains(self, error_message, expected):
self.assertTrue(error_message.find(expected) >= 0, 'error message "{}" did not contain "{}"'.format(error_message, expected))
+ def setUp(self):
+ self.import_dir = mkdtemp(prefix="import")
+ self.addCleanup(shutil.rmtree, self.import_dir)
+ self.export_dir = mkdtemp(prefix="export")
+ self.addCleanup(shutil.rmtree, self.export_dir)
+
def tearDown(self):
- def delete_temp_dir(dirname):
- if os.path.exists(dirname):
- for filename in os.listdir(dirname):
- os.remove(os.path.join(dirname, filename))
- os.rmdir(dirname)
-
- # clean up after any test data was dumped to temp directory
- delete_temp_dir(self.import_dir)
- delete_temp_dir(self.export_dir)
-
+ pass
# and clean up the database:
# TestCenterUser.objects.all().delete()
# TestCenterRegistration.objects.all().delete()
diff --git a/common/lib/capa/capa/verifiers/draganddrop.py b/common/lib/capa/capa/verifiers/draganddrop.py
index eb91208923..239ff2b9a4 100644
--- a/common/lib/capa/capa/verifiers/draganddrop.py
+++ b/common/lib/capa/capa/verifiers/draganddrop.py
@@ -111,7 +111,7 @@ class DragAndDrop(object):
Returns: bool.
'''
for draggable in self.excess_draggables:
- if not self.excess_draggables[draggable]:
+ if self.excess_draggables[draggable]:
return False # user answer has more draggables than correct answer
# Number of draggables in user_groups may be differ that in
@@ -304,8 +304,13 @@ class DragAndDrop(object):
user_answer = json.loads(user_answer)
- # check if we have draggables that are not in correct answer:
- self.excess_draggables = {}
+ # This dictionary will hold a key for each draggable the user placed on
+ # the image. The value is True if that draggable is not mentioned in any
+ # correct_answer entries. If the draggable is mentioned in at least one
+ # correct_answer entry, the value is False.
+ # default to consider every user answer excess until proven otherwise.
+ self.excess_draggables = dict((users_draggable.keys()[0],True)
+ for users_draggable in user_answer['draggables'])
# create identical data structures from user answer and correct answer
for i in xrange(0, len(correct_answer)):
@@ -322,11 +327,8 @@ class DragAndDrop(object):
self.user_groups[groupname].append(draggable_name)
self.user_positions[groupname]['user'].append(
draggable_dict[draggable_name])
- self.excess_draggables[draggable_name] = True
- else:
- self.excess_draggables[draggable_name] = \
- self.excess_draggables.get(draggable_name, False)
-
+ # proved that this is not excess
+ self.excess_draggables[draggable_name] = False
def grade(user_input, correct_answer):
""" Creates DragAndDrop instance from user_input and correct_answer and
diff --git a/common/lib/capa/capa/verifiers/tests_draganddrop.py b/common/lib/capa/capa/verifiers/tests_draganddrop.py
index 9b1b15ce0c..bcd024fa89 100644
--- a/common/lib/capa/capa/verifiers/tests_draganddrop.py
+++ b/common/lib/capa/capa/verifiers/tests_draganddrop.py
@@ -46,6 +46,18 @@ class Test_DragAndDrop_Grade(unittest.TestCase):
correct_answer = {'1': 't1', 'name_with_icon': 't2'}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
+ def test_expect_no_actions_wrong(self):
+ user_input = '{"draggables": [{"1": "t1"}, \
+ {"name_with_icon": "t2"}]}'
+ correct_answer = []
+ self.assertFalse(draganddrop.grade(user_input, correct_answer))
+
+ def test_expect_no_actions_right(self):
+ user_input = '{"draggables": []}'
+ correct_answer = []
+ self.assertTrue(draganddrop.grade(user_input, correct_answer))
+
+
def test_targets_false(self):
user_input = '{"draggables": [{"1": "t1"}, \
{"name_with_icon": "t2"}]}'
diff --git a/common/lib/tempdir.py b/common/lib/tempdir.py
new file mode 100644
index 0000000000..0acd92ba33
--- /dev/null
+++ b/common/lib/tempdir.py
@@ -0,0 +1,17 @@
+"""Make temporary directories nicely."""
+
+import atexit
+import os.path
+import shutil
+import tempfile
+
+def mkdtemp_clean(suffix="", prefix="tmp", dir=None):
+ """Just like mkdtemp, but the directory will be deleted when the process ends."""
+ the_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
+ atexit.register(cleanup_tempdir, the_dir)
+ return the_dir
+
+def cleanup_tempdir(the_dir):
+ """Called on process exit to remove a temp directory."""
+ if os.path.exists(the_dir):
+ shutil.rmtree(the_dir)
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index 4635cc6871..a115a54376 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -429,6 +429,11 @@ class CapaModule(XModule):
# used by conditional module
return self.attempts > 0
+ def is_correct(self):
+ """True if full points"""
+ d = self.get_score()
+ return d['score'] == d['total']
+
def answer_available(self):
'''
Is the user allowed to see an answer?
@@ -449,6 +454,9 @@ class CapaModule(XModule):
return self.lcp.done
elif self.show_answer == 'closed':
return self.closed()
+ elif self.show_answer == 'finished':
+ return self.closed() or self.is_correct()
+
elif self.show_answer == 'past_due':
return self.is_past_due()
elif self.show_answer == 'always':
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py
index f7b9b6a026..ee69d925d0 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_module.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -108,11 +108,13 @@ class CombinedOpenEndedModule(XModule):
instance_state = {}
self.version = self.metadata.get('version', DEFAULT_VERSION)
+ version_error_string = "Version of combined open ended module {0} is not correct. Going with version {1}"
if not isinstance(self.version, basestring):
try:
self.version = str(self.version)
except:
- log.error("Version {0} is not correct. Going with version {1}".format(self.version, DEFAULT_VERSION))
+ #This is a dev_facing_error
+ log.info(version_error_string.format(self.version, DEFAULT_VERSION))
self.version = DEFAULT_VERSION
versions = [i[0] for i in VERSION_TUPLES]
@@ -122,7 +124,8 @@ class CombinedOpenEndedModule(XModule):
try:
version_index = versions.index(self.version)
except:
- log.error("Version {0} is not correct. Going with version {1}".format(self.version, DEFAULT_VERSION))
+ #This is a dev_facing_error
+ log.error(version_error_string.format(self.version, DEFAULT_VERSION))
self.version = DEFAULT_VERSION
version_index = versions.index(self.version)
@@ -205,4 +208,4 @@ class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
for child in ['task']:
add_child(child)
- return elt
\ No newline at end of file
+ return elt
diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py
index 2c69c449ba..2ed780fcae 100644
--- a/common/lib/xmodule/xmodule/course_module.py
+++ b/common/lib/xmodule/xmodule/course_module.py
@@ -352,6 +352,13 @@ class CourseDescriptor(SequenceDescriptor):
"""
return self.metadata.get('tabs')
+ @property
+ def pdf_textbooks(self):
+ """
+ Return the pdf_textbooks config, as a python object, or None if not specified.
+ """
+ return self.metadata.get('pdf_textbooks')
+
@tabs.setter
def tabs(self, value):
self.metadata['tabs'] = value
@@ -371,6 +378,28 @@ class CourseDescriptor(SequenceDescriptor):
return bool(config.get("cohorted"))
+ @property
+ def auto_cohort(self):
+ """
+ Return whether the course is auto-cohorted.
+ """
+ if not self.is_cohorted:
+ return False
+
+ return bool(self.metadata.get("cohort_config", {}).get(
+ "auto_cohort", False))
+
+ @property
+ def auto_cohort_groups(self):
+ """
+ Return the list of groups to put students into. Returns [] if not
+ specified. Returns specified list even if is_cohorted and/or auto_cohort are
+ false.
+ """
+ return self.metadata.get("cohort_config", {}).get(
+ "auto_cohort_groups", [])
+
+
@property
def top_level_discussion_topic_ids(self):
"""
@@ -707,7 +736,7 @@ class CourseDescriptor(SequenceDescriptor):
def get_test_center_exam(self, exam_series_code):
exams = [exam for exam in self.test_center_exams if exam.exam_series_code == exam_series_code]
return exams[0] if len(exams) == 1 else None
-
+
@property
def title(self):
return self.display_name
diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
index fd0391450b..c749d65b45 100644
--- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
@@ -1,6 +1,61 @@
class @Rubric
constructor: () ->
+ @initialize: (location) ->
+ $('.rubric').data("location", location)
+ $('input[class="score-selection"]').change @tracking_callback
+ # set up the hotkeys
+ $(window).unbind('keydown', @keypress_callback)
+ $(window).keydown @keypress_callback
+ # display the 'current' carat
+ @categories = $('.rubric-category')
+ @category = $(@categories.first())
+ @category.prepend('> ')
+ @category_index = 0
+
+
+ @keypress_callback: (event) =>
+ # don't try to do this when user is typing in a text input
+ if $(event.target).is('input, textarea')
+ return
+ # for when we select via top row
+ if event.which >= 48 and event.which <= 57
+ selected = event.which - 48
+ # for when we select via numpad
+ else if event.which >= 96 and event.which <= 105
+ selected = event.which - 96
+ # we don't want to do anything since we haven't pressed a number
+ else
+ return
+
+ # if we actually have a current category (not past the end)
+ if(@category_index <= @categories.length)
+ # find the valid selections for this category
+ inputs = $("input[name='score-selection-#{@category_index}']")
+ max_score = inputs.length - 1
+
+ if selected > max_score or selected < 0
+ return
+ inputs.filter("input[value=#{selected}]").click()
+
+ # move to the next category
+ old_category_text = @category.html().substring(5)
+ @category.html(old_category_text)
+ @category_index++
+ @category = $(@categories[@category_index])
+ @category.prepend('> ')
+
+ @tracking_callback: (event) ->
+ target_selection = $(event.target).val()
+ # chop off the beginning of the name so that we can get the number of the category
+ category = $(event.target).data("category")
+ location = $('.rubric').data('location')
+ # probably want the original problem location as well
+
+ data = {location: location, selection: target_selection, category: category}
+ Logger.log 'rubric_select', data
+
+
# finds the scores for each rubric category
@get_score_list: () =>
# find the number of categories:
@@ -34,6 +89,7 @@ class @CombinedOpenEnded
constructor: (element) ->
@element=element
@reinitialize(element)
+ $(window).keydown @keydown_handler
reinitialize: (element) ->
@wrapper=$(element).find('section.xmodule_CombinedOpenEndedModule')
@@ -45,6 +101,9 @@ class @CombinedOpenEnded
@task_count = @el.data('task-count')
@task_number = @el.data('task-number')
@accept_file_upload = @el.data('accept-file-upload')
+ @location = @el.data('location')
+ # set up handlers for click tracking
+ Rubric.initialize(@location)
@allow_reset = @el.data('allow_reset')
@reset_button = @$('.reset-button')
@@ -89,6 +148,8 @@ class @CombinedOpenEnded
@can_upload_files = false
@open_ended_child= @$('.open-ended-child')
+ @out_of_sync_message = 'The problem state got out of sync. Try reloading the page.'
+
if @task_number>1
@prompt_hide()
else if @task_number==1 and @child_state!='initial'
@@ -116,6 +177,9 @@ class @CombinedOpenEnded
@submit_evaluation_button = $('.submit-evaluation-button')
@submit_evaluation_button.click @message_post
Collapsible.setCollapsibles(@results_container)
+ # make sure we still have click tracking
+ $('.evaluation-response a').click @log_feedback_click
+ $('input[name="evaluation-score"]').change @log_feedback_selection
show_results: (event) =>
status_item = $(event.target).parent()
@@ -153,7 +217,6 @@ class @CombinedOpenEnded
@legend_container= $('.legend-container')
message_post: (event)=>
- Logger.log 'message_post', @answers
external_grader_message=$(event.target).parent().parent().parent()
evaluation_scoring = $(event.target).parent()
@@ -182,6 +245,7 @@ class @CombinedOpenEnded
$('section.evaluation').slideToggle()
@message_wrapper.html(response.message_html)
+
$.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings)
@@ -283,6 +347,7 @@ class @CombinedOpenEnded
if response.success
@rubric_wrapper.html(response.rubric_html)
@rubric_wrapper.show()
+ Rubric.initialize(@location)
@answer_area.html(response.student_response)
@child_state = 'assessing'
@find_assessment_elements()
@@ -293,7 +358,12 @@ class @CombinedOpenEnded
$.ajaxWithPrefix("#{@ajax_url}/save_answer",settings)
else
- @errors_area.html('Problem state got out of sync. Try reloading the page.')
+ @errors_area.html(@out_of_sync_message)
+
+ keydown_handler: (e) =>
+ # only do anything when the key pressed is the 'enter' key
+ if e.which == 13 && @child_state == 'assessing' && Rubric.check_complete()
+ @save_assessment(e)
save_assessment: (event) =>
event.preventDefault()
@@ -315,7 +385,7 @@ class @CombinedOpenEnded
else
@errors_area.html(response.error)
else
- @errors_area.html('Problem state got out of sync. Try reloading the page.')
+ @errors_area.html(@out_of_sync_message)
save_hint: (event) =>
event.preventDefault()
@@ -330,7 +400,7 @@ class @CombinedOpenEnded
else
@errors_area.html(response.error)
else
- @errors_area.html('Problem state got out of sync. Try reloading the page.')
+ @errors_area.html(@out_of_sync_message)
skip_post_assessment: =>
if @child_state == 'post_assessment'
@@ -342,7 +412,7 @@ class @CombinedOpenEnded
else
@errors_area.html(response.error)
else
- @errors_area.html('Problem state got out of sync. Try reloading the page.')
+ @errors_area.html(@out_of_sync_message)
reset: (event) =>
event.preventDefault()
@@ -362,7 +432,7 @@ class @CombinedOpenEnded
else
@errors_area.html(response.error)
else
- @errors_area.html('Problem state got out of sync. Try reloading the page.')
+ @errors_area.html(@out_of_sync_message)
next_problem: =>
if @child_state == 'done'
@@ -385,7 +455,7 @@ class @CombinedOpenEnded
else
@errors_area.html(response.error)
else
- @errors_area.html('Problem state got out of sync. Try reloading the page.')
+ @errors_area.html(@out_of_sync_message)
gentle_alert: (msg) =>
if @el.find('.open-ended-alert').length
@@ -404,7 +474,7 @@ class @CombinedOpenEnded
$.postWithPrefix "#{@ajax_url}/check_for_score", (response) =>
if response.state == "done" or response.state=="post_assessment"
delete window.queuePollerID
- location.reload()
+ @reload()
else
window.queuePollerID = window.setTimeout(@poll, 10000)
@@ -438,7 +508,9 @@ class @CombinedOpenEnded
@prompt_container.toggleClass('open')
if @question_header.text() == "(Hide)"
new_text = "(Show)"
+ Logger.log 'oe_hide_question', {location: @location}
else
+ Logger.log 'oe_show_question', {location: @location}
new_text = "(Hide)"
@question_header.text(new_text)
@@ -454,4 +526,16 @@ class @CombinedOpenEnded
@prompt_container.toggleClass('open')
@question_header.text("(Show)")
+ log_feedback_click: (event) ->
+ link_text = $(event.target).html()
+ if link_text == 'See full feedback'
+ Logger.log 'oe_show_full_feedback', {}
+ else if link_text == 'Respond to Feedback'
+ Logger.log 'oe_show_respond_to_feedback', {}
+ else
+ generated_event_type = link_text.toLowerCase().replace(" ","_")
+ Logger.log "oe_" + generated_event_type, {}
+ log_feedback_selection: (event) ->
+ target_selection = $(event.target).val()
+ Logger.log 'oe_feedback_response_selected', {value: target_selection}
diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
index 5770238649..4bdb4bdf05 100644
--- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
@@ -175,6 +175,7 @@ class @PeerGradingProblem
@prompt_container = $('.prompt-container')
@rubric_container = $('.rubric-container')
@flag_student_container = $('.flag-student-container')
+ @answer_unknown_container = $('.answer-unknown-container')
@calibration_panel = $('.calibration-panel')
@grading_panel = $('.grading-panel')
@content_panel = $('.content-panel')
@@ -208,6 +209,10 @@ class @PeerGradingProblem
@interstitial_page_button = $('.interstitial-page-button')
@calibration_interstitial_page_button = $('.calibration-interstitial-page-button')
@flag_student_checkbox = $('.flag-checkbox')
+ @answer_unknown_checkbox = $('.answer-unknown-checkbox')
+
+ $(window).keydown @keydown_handler
+
@collapse_question()
Collapsible.setCollapsibles(@content_panel)
@@ -249,9 +254,6 @@ class @PeerGradingProblem
fetch_submission_essay: () =>
@backend.post('get_next_submission', {location: @location}, @render_submission)
- gentle_alert: (msg) =>
- @grading_message.fadeIn()
- @grading_message.html("
" + msg + "
")
construct_data: () ->
data =
@@ -262,6 +264,7 @@ class @PeerGradingProblem
submission_key: @submission_key_input.val()
feedback: @feedback_area.val()
submission_flagged: @flag_student_checkbox.is(':checked')
+ answer_unknown: @answer_unknown_checkbox.is(':checked')
return data
@@ -334,6 +337,14 @@ class @PeerGradingProblem
@show_submit_button()
@grade = Rubric.get_total_score()
+ keydown_handler: (event) =>
+ if event.which == 13 && @submit_button.is(':visible')
+ if @calibration
+ @submit_calibration_essay()
+ else
+ @submit_grade()
+
+
##########
@@ -360,6 +371,8 @@ class @PeerGradingProblem
@calibration_panel.find('.grading-text').hide()
@grading_panel.find('.grading-text').hide()
@flag_student_container.hide()
+ @answer_unknown_container.hide()
+
@feedback_area.val("")
@submit_button.unbind('click')
@@ -388,6 +401,7 @@ class @PeerGradingProblem
@calibration_panel.find('.grading-text').show()
@grading_panel.find('.grading-text').show()
@flag_student_container.show()
+ @answer_unknown_container.show()
@feedback_area.val("")
@submit_button.unbind('click')
@@ -420,6 +434,7 @@ class @PeerGradingProblem
@submit_button.hide()
@action_button.hide()
@calibration_feedback_panel.hide()
+ Rubric.initialize(@location)
render_calibration_feedback: (response) =>
@@ -466,11 +481,17 @@ class @PeerGradingProblem
# And now hook up an event handler again
$("input[class='score-selection']").change @graded_callback
+ gentle_alert: (msg) =>
+ @grading_message.fadeIn()
+ @grading_message.html("" + msg + "
")
+
collapse_question: () =>
@prompt_container.slideToggle()
@prompt_container.toggleClass('open')
if @question_header.text() == "(Hide)"
+ Logger.log 'peer_grading_hide_question', {location: @location}
new_text = "(Show)"
else
+ Logger.log 'peer_grading_show_question', {location: @location}
new_text = "(Hide)"
@question_header.text(new_text)
diff --git a/common/lib/xmodule/xmodule/mako_module.py b/common/lib/xmodule/xmodule/mako_module.py
index dab5d5e85b..da96bfa212 100644
--- a/common/lib/xmodule/xmodule/mako_module.py
+++ b/common/lib/xmodule/xmodule/mako_module.py
@@ -44,5 +44,6 @@ class MakoModuleDescriptor(XModuleDescriptor):
# cdodge: encapsulate a means to expose "editable" metadata fields (i.e. not internal system metadata)
@property
def editable_metadata_fields(self):
- subset = [name for name in self.metadata.keys() if name not in self.system_metadata_fields]
+ subset = [name for name in self.metadata.keys() if name not in self.system_metadata_fields and
+ name not in self._inherited_metadata]
return subset
diff --git a/common/lib/xmodule/xmodule/modulestore/__init__.py b/common/lib/xmodule/xmodule/modulestore/__init__.py
index a9df6c3504..0ba7e36540 100644
--- a/common/lib/xmodule/xmodule/modulestore/__init__.py
+++ b/common/lib/xmodule/xmodule/modulestore/__init__.py
@@ -23,6 +23,15 @@ URL_RE = re.compile("""
(@(?P[^/]+))?
""", re.VERBOSE)
+MISSING_SLASH_URL_RE = re.compile("""
+ (?P[^:]+):/
+ (?P[^/]+)/
+ (?P[^/]+)/
+ (?P[^/]+)/
+ (?P[^@]+)
+ (@(?P[^/]+))?
+ """, re.VERBOSE)
+
# TODO (cpennington): We should decide whether we want to expand the
# list of valid characters in a location
INVALID_CHARS = re.compile(r"[^\w.-]")
@@ -164,12 +173,16 @@ class Location(_LocationBase):
if isinstance(location, basestring):
match = URL_RE.match(location)
if match is None:
- log.debug('location is instance of %s but no URL match' % basestring)
- raise InvalidLocationError(location)
- else:
- groups = match.groupdict()
- check_dict(groups)
- return _LocationBase.__new__(_cls, **groups)
+ # cdodge:
+ # check for a dropped slash near the i4x:// element of the location string. This can happen with some
+ # redirects (e.g. edx.org -> www.edx.org which I think happens in Nginx)
+ match = MISSING_SLASH_URL_RE.match(location)
+ if match is None:
+ log.debug('location is instance of %s but no URL match' % basestring)
+ raise InvalidLocationError(location)
+ groups = match.groupdict()
+ check_dict(groups)
+ return _LocationBase.__new__(_cls, **groups)
elif isinstance(location, (list, tuple)):
if len(location) not in (5, 6):
log.debug('location has wrong length')
diff --git a/common/lib/xmodule/xmodule/modulestore/mongo.py b/common/lib/xmodule/xmodule/modulestore/mongo.py
index f4db62ac31..012efb0c27 100644
--- a/common/lib/xmodule/xmodule/modulestore/mongo.py
+++ b/common/lib/xmodule/xmodule/modulestore/mongo.py
@@ -1,11 +1,13 @@
import pymongo
import sys
import logging
+import copy
from bson.son import SON
from fs.osfs import OSFS
from itertools import repeat
from path import path
+from datetime import datetime, timedelta
from importlib import import_module
from xmodule.errortracker import null_error_tracker, exc_info_to_str
@@ -27,9 +29,11 @@ class CachingDescriptorSystem(MakoDescriptorSystem):
"""
A system that has a cache of module json that it will use to load modules
from, with a backup of calling to the underlying modulestore for more data
+ TODO (cdodge) when the 'split module store' work has been completed we can remove all
+ references to metadata_inheritance_tree
"""
def __init__(self, modulestore, module_data, default_class, resources_fs,
- error_tracker, render_template):
+ error_tracker, render_template, metadata_inheritance_tree = None):
"""
modulestore: the module store that can be used to retrieve additional modules
@@ -54,6 +58,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem):
# cdodge: other Systems have a course_id attribute defined. To keep things consistent, let's
# define an attribute here as well, even though it's None
self.course_id = None
+ self.metadata_inheritance_tree = metadata_inheritance_tree
def load_item(self, location):
location = Location(location)
@@ -61,11 +66,13 @@ class CachingDescriptorSystem(MakoDescriptorSystem):
if json_data is None:
return self.modulestore.get_item(location)
else:
- # TODO (vshnayder): metadata inheritance is somewhat broken because mongo, doesn't
- # always load an entire course. We're punting on this until after launch, and then
- # will build a proper course policy framework.
+ # load the module and apply the inherited metadata
try:
- return XModuleDescriptor.load_from_json(json_data, self, self.default_class)
+ module = XModuleDescriptor.load_from_json(json_data, self, self.default_class)
+ if self.metadata_inheritance_tree is not None:
+ metadata_to_inherit = self.metadata_inheritance_tree.get('parent_metadata', {}).get(location.url(),{})
+ module.inherit_metadata(metadata_to_inherit)
+ return module
except:
return ErrorDescriptor.from_json(
json_data,
@@ -142,6 +149,82 @@ class MongoModuleStore(ModuleStoreBase):
self.fs_root = path(fs_root)
self.error_tracker = error_tracker
self.render_template = render_template
+ self.metadata_inheritance_cache = {}
+
+ def get_metadata_inheritance_tree(self, location):
+ '''
+ TODO (cdodge) This method can be deleted when the 'split module store' work has been completed
+ '''
+
+ # get all collections in the course, this query should not return any leaf nodes
+ query = { '_id.org' : location.org,
+ '_id.course' : location.course,
+ '_id.revision' : None,
+ 'definition.children':{'$ne': []}
+ }
+ # we just want the Location, children, and metadata
+ record_filter = {'_id':1,'definition.children':1,'metadata':1}
+
+ # call out to the DB
+ resultset = self.collection.find(query, record_filter)
+
+ results_by_url = {}
+ root = None
+
+ # now go through the results and order them by the location url
+ for result in resultset:
+ location = Location(result['_id'])
+ results_by_url[location.url()] = result
+ if location.category == 'course':
+ root = location.url()
+
+ # now traverse the tree and compute down the inherited metadata
+ metadata_to_inherit = {}
+ def _compute_inherited_metadata(url):
+ my_metadata = results_by_url[url]['metadata']
+ for key in my_metadata.keys():
+ if key not in XModuleDescriptor.inheritable_metadata:
+ del my_metadata[key]
+ results_by_url[url]['metadata'] = my_metadata
+
+ # go through all the children and recurse, but only if we have
+ # in the result set. Remember results will not contain leaf nodes
+ for child in results_by_url[url].get('definition',{}).get('children',[]):
+ if child in results_by_url:
+ new_child_metadata = copy.deepcopy(my_metadata)
+ new_child_metadata.update(results_by_url[child]['metadata'])
+ results_by_url[child]['metadata'] = new_child_metadata
+ metadata_to_inherit[child] = new_child_metadata
+ _compute_inherited_metadata(child)
+ else:
+ # this is likely a leaf node, so let's record what metadata we need to inherit
+ metadata_to_inherit[child] = my_metadata
+
+ if root is not None:
+ _compute_inherited_metadata(root)
+
+ cache = {'parent_metadata': metadata_to_inherit,
+ 'timestamp' : datetime.now()}
+
+ return cache
+
+ def get_cached_metadata_inheritance_tree(self, location, max_age_allowed):
+ '''
+ TODO (cdodge) This method can be deleted when the 'split module store' work has been completed
+ '''
+ cache_name = '{0}/{1}'.format(location.org, location.course)
+ cache = self.metadata_inheritance_cache.get(cache_name,{'parent_metadata': {},
+ 'timestamp': datetime.now() - timedelta(hours=1)})
+ age = (datetime.now() - cache['timestamp'])
+
+ if age.seconds >= max_age_allowed:
+ logging.debug('loading entire inheritance tree for {0}'.format(cache_name))
+ cache = self.get_metadata_inheritance_tree(location)
+ self.metadata_inheritance_cache[cache_name] = cache
+
+ return cache
+
+
def _clean_item_data(self, item):
"""
@@ -196,6 +279,8 @@ class MongoModuleStore(ModuleStoreBase):
resource_fs = OSFS(root)
+ # TODO (cdodge): When the 'split module store' work has been completed, we should remove
+ # the 'metadata_inheritance_tree' parameter
system = CachingDescriptorSystem(
self,
data_cache,
@@ -203,6 +288,7 @@ class MongoModuleStore(ModuleStoreBase):
resource_fs,
self.error_tracker,
self.render_template,
+ metadata_inheritance_tree = self.get_cached_metadata_inheritance_tree(Location(item['location']), 60)
)
return system.load_item(item['location'])
@@ -261,11 +347,11 @@ class MongoModuleStore(ModuleStoreBase):
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all descendents.
-
"""
location = Location.ensure_fully_specified(location)
item = self._find_one(location)
- return self._load_items([item], depth)[0]
+ module = self._load_items([item], depth)[0]
+ return module
def get_instance(self, course_id, location, depth=0):
"""
@@ -285,7 +371,8 @@ class MongoModuleStore(ModuleStoreBase):
sort=[('revision', pymongo.ASCENDING)],
)
- return self._load_items(list(items), depth)
+ modules = self._load_items(list(items), depth)
+ return modules
def clone_item(self, source, location):
"""
@@ -313,7 +400,7 @@ class MongoModuleStore(ModuleStoreBase):
raise DuplicateItemError(location)
- def get_course_for_item(self, location):
+ def get_course_for_item(self, location, depth=0):
'''
VS[compat]
cdodge: for a given Xmodule, return the course that it belongs to
@@ -327,7 +414,7 @@ class MongoModuleStore(ModuleStoreBase):
# know the 'name' parameter in this context, so we have
# to assume there's only one item in this query even though we are not specifying a name
course_search_location = ['i4x', location.org, location.course, 'course', None]
- courses = self.get_items(course_search_location)
+ courses = self.get_items(course_search_location, depth=depth)
# make sure we found exactly one match on this above course search
found_cnt = len(courses)
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
index 49851f7452..171441c562 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
@@ -2,8 +2,7 @@ import json
import logging
from lxml import etree
from lxml.html import rewrite_links
-
-
+from xmodule.timeinfo import TimeInfo
from xmodule.capa_module import only_one, ComplexEncoder
from xmodule.editing_module import EditingDescriptor
from xmodule.html_checker import check_html
@@ -14,16 +13,13 @@ from xmodule.xml_module import XmlDescriptor
import self_assessment_module
import open_ended_module
from combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST
-import dateutil
-import dateutil.parser
-from xmodule.timeparse import parse_timedelta
log = logging.getLogger("mitx.courseware")
# Set the default number of max attempts. Should be 1 for production
# Set higher for debugging/testing
# attempts specified in xml definition overrides this.
-MAX_ATTEMPTS = 10000
+MAX_ATTEMPTS = 1
# Set maximum available number of points.
# Overriden by max_score specified in xml.
@@ -48,6 +44,10 @@ HUMAN_TASK_TYPE = {
'openended' : "edX Assessment",
}
+#Default value that controls whether or not to skip basic spelling checks in the controller
+#Metadata overrides this
+SKIP_BASIC_CHECKS = False
+
class CombinedOpenEndedV1Module():
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -146,28 +146,17 @@ class CombinedOpenEndedV1Module():
self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS))
self.is_scored = self.metadata.get('is_graded', IS_SCORED) in TRUE_DICT
self.accept_file_upload = self.metadata.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
+ self.skip_basic_checks = self.metadata.get('skip_spelling_checks', SKIP_BASIC_CHECKS)
display_due_date_string = self.metadata.get('due', None)
- if display_due_date_string is not None:
- try:
- self.display_due_date = dateutil.parser.parse(display_due_date_string)
- except ValueError:
- log.error("Could not parse due date {0} for location {1}".format(display_due_date_string, location))
- raise
- else:
- self.display_due_date = None
-
+
grace_period_string = self.metadata.get('graceperiod', None)
- if grace_period_string is not None and self.display_due_date:
- try:
- self.grace_period = parse_timedelta(grace_period_string)
- self.close_date = self.display_due_date + self.grace_period
- except:
- log.error("Error parsing the grace period {0} for location {1}".format(grace_period_string, location))
- raise
- else:
- self.grace_period = None
- self.close_date = self.display_due_date
+ try:
+ self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
+ except:
+ log.error("Error parsing due date information in location {0}".format(location))
+ raise
+ self.display_due_date = self.timeinfo.display_due_date
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
@@ -185,8 +174,9 @@ class CombinedOpenEndedV1Module():
'rubric': definition['rubric'],
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
- 'close_date' : self.close_date,
+ 'close_date' : self.timeinfo.close_date,
's3_interface' : self.system.s3_interface,
+ 'skip_basic_checks' : self.skip_basic_checks,
}
self.task_xml = definition['task_xml']
@@ -340,6 +330,7 @@ class CombinedOpenEndedV1Module():
'status': self.get_status(False),
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
+ 'location': self.location,
'legend_list' : LEGEND_LIST,
}
@@ -656,7 +647,10 @@ class CombinedOpenEndedV1Module():
if self.attempts > self.max_attempts:
return {
'success': False,
- 'error': 'Too many attempts.'
+ #This is a student_facing_error
+ 'error': ('You have attempted this question {0} times. '
+ 'You are only allowed to attempt it {1} times.').format(
+ self.attempts, self.max_attempts)
}
self.state = self.INITIAL
self.allow_reset = False
@@ -795,7 +789,8 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor):
expected_children = ['task', 'rubric', 'prompt']
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
- raise ValueError("Combined Open Ended definition must include at least one '{0}' tag".format(child))
+ #This is a staff_facing_error
+ raise ValueError("Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
def parse_task(k):
"""Assumes that xml_object has child k"""
@@ -820,4 +815,4 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor):
for child in ['task']:
add_child(child)
- return elt
\ No newline at end of file
+ return elt
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
index 7c00c5f029..f756b2b853 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
@@ -4,7 +4,6 @@ from lxml import etree
log = logging.getLogger(__name__)
GRADER_TYPE_IMAGE_DICT = {
- '8B' : '/static/images/random_grading_icon.png',
'SA' : '/static/images/self_assessment_icon.png',
'PE' : '/static/images/peer_grading_icon.png',
'ML' : '/static/images/ml_grading_icon.png',
@@ -13,7 +12,6 @@ GRADER_TYPE_IMAGE_DICT = {
}
HUMAN_GRADER_TYPE = {
- '8B' : 'Magic-8-Ball-Assessment',
'SA' : 'Self-Assessment',
'PE' : 'Peer-Assessment',
'IN' : 'Instructor-Assessment',
@@ -71,8 +69,9 @@ class CombinedOpenEndedRubric(object):
})
success = True
except:
- error_message = "[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml)
- log.error(error_message)
+ #This is a staff_facing_error
+ error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(rubric_xml)
+ log.exception(error_message)
raise RubricParsingError(error_message)
return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores}
@@ -81,7 +80,8 @@ class CombinedOpenEndedRubric(object):
success = rubric_dict['success']
rubric_feedback = rubric_dict['html']
if not success:
- error_message = "Could not parse rubric : {0} for location {1}".format(rubric_string, location.url())
+ #This is a staff_facing_error
+ error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(rubric_string, location.url())
log.error(error_message)
raise RubricParsingError(error_message)
@@ -90,13 +90,15 @@ class CombinedOpenEndedRubric(object):
for category in rubric_categories:
total = total + len(category['options']) - 1
if len(category['options']) > (max_score_allowed + 1):
- error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}".format(
+ #This is a staff_facing_error
+ error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}. Contact the learning sciences group for assistance.".format(
len(category['options']), max_score_allowed)
log.error(error_message)
raise RubricParsingError(error_message)
if total != max_score:
- error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}".format(
+ #This is a staff_facing_error
+ error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}. Contact the learning sciences group for assistance.".format(
max_score, location, total)
log.error(error_msg)
raise RubricParsingError(error_msg)
@@ -118,7 +120,8 @@ class CombinedOpenEndedRubric(object):
categories = []
for category in element:
if category.tag != 'category':
- raise RubricParsingError("[extract_categories] Expected a tag: got {0} instead".format(category.tag))
+ #This is a staff_facing_error
+ raise RubricParsingError("[extract_categories] Expected a tag: got {0} instead. Contact the learning sciences group for assistance.".format(category.tag))
else:
categories.append(self.extract_category(category))
return categories
@@ -144,12 +147,14 @@ class CombinedOpenEndedRubric(object):
self.has_score = True
# if we are missing the score tag and we are expecting one
elif self.has_score:
- raise RubricParsingError("[extract_category] Category {0} is missing a score".format(descriptionxml.text))
+ #This is a staff_facing_error
+ raise RubricParsingError("[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(descriptionxml.text))
# parse description
if descriptionxml.tag != 'description':
- raise RubricParsingError("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag))
+ #This is a staff_facing_error
+ raise RubricParsingError("[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(descriptionxml.tag))
description = descriptionxml.text
@@ -159,7 +164,8 @@ class CombinedOpenEndedRubric(object):
# parse options
for option in optionsxml:
if option.tag != 'option':
- raise RubricParsingError("[extract_category]: expected option tag, got {0} instead".format(option.tag))
+ #This is a staff_facing_error
+ raise RubricParsingError("[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(option.tag))
else:
pointstr = option.get("points")
if pointstr:
@@ -168,7 +174,8 @@ class CombinedOpenEndedRubric(object):
try:
points = int(pointstr)
except ValueError:
- raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead".format(pointstr))
+ #This is a staff_facing_error
+ raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(pointstr))
elif autonumbering:
# use the generated one if we're in the right mode
points = cur_points
@@ -200,7 +207,6 @@ class CombinedOpenEndedRubric(object):
for grader_type in tuple[3]:
rubric_categories[i]['options'][j]['grader_types'].append(grader_type)
- log.debug(rubric_categories)
html = self.system.render_template('open_ended_combined_rubric.html',
{'categories': rubric_categories,
'has_score': True,
@@ -219,13 +225,15 @@ class CombinedOpenEndedRubric(object):
Validates a set of options. This can and should be extended to filter out other bad edge cases
'''
if len(options) == 0:
- raise RubricParsingError("[extract_category]: no options associated with this category")
+ #This is a staff_facing_error
+ raise RubricParsingError("[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
if len(options) == 1:
return
prev = options[0]['points']
for option in options[1:]:
if prev == option['points']:
- raise RubricParsingError("[extract_category]: found duplicate point values between two different options")
+ #This is a staff_facing_error
+ raise RubricParsingError("[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
else:
prev = option['points']
@@ -241,11 +249,14 @@ class CombinedOpenEndedRubric(object):
"""
success = False
if len(scores)==0:
- log.error("Score length is 0.")
+ #This is a dev_facing_error
+ log.error("Score length is 0 when trying to reformat rubric scores for rendering.")
return success, ""
if len(scores) != len(score_types) or len(feedback_types) != len(scores):
- log.error("Length mismatches.")
+ #This is a dev_facing_error
+ log.error("Length mismatches when trying to reformat rubric scores for rendering. "
+ "Scores: {0}, Score Types: {1} Feedback Types: {2}".format(scores, score_types, feedback_types))
return success, ""
score_lists = []
diff --git a/lms/djangoapps/open_ended_grading/controller_query_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py
similarity index 81%
rename from lms/djangoapps/open_ended_grading/controller_query_service.py
rename to common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py
index 1b124fc116..1dd5c57ad4 100644
--- a/lms/djangoapps/open_ended_grading/controller_query_service.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py
@@ -1,8 +1,5 @@
import logging
-from xmodule.open_ended_grading_classes.grading_service_module import GradingService
-
-from xmodule.x_module import ModuleSystem
-from mitxmako.shortcuts import render_to_string
+from grading_service_module import GradingService
log = logging.getLogger(__name__)
@@ -11,8 +8,8 @@ class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
- def __init__(self, config):
- config['system'] = ModuleSystem(None, None, None, render_to_string, None)
+ def __init__(self, config, system):
+ config['system'] = system
super(ControllerQueryService, self).__init__(config)
self.url = config['url'] + config['grading_controller']
self.login_url = self.url + '/login/'
@@ -77,3 +74,16 @@ class ControllerQueryService(GradingService):
response = self.post(self.take_action_on_flags_url, params)
return response
+
+def convert_seconds_to_human_readable(seconds):
+ if seconds < 60:
+ human_string = "{0} seconds".format(seconds)
+ elif seconds < 60 * 60:
+ human_string = "{0} minutes".format(round(seconds/60,1))
+ elif seconds < (24*60*60):
+ human_string = "{0} hours".format(round(seconds/(60*60),1))
+ else:
+ human_string = "{0} days".format(round(seconds/(60*60*24),1))
+
+ eta_string = "{0}".format(human_string)
+ return eta_string
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
index 6bd7a6fd0e..8a4caa1291 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
@@ -51,6 +51,8 @@ class GradingService(object):
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
+ #This is a dev_facing_error
+ log.error("Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data))
raise GradingServiceError, str(err), sys.exc_info()[2]
return r.text
@@ -67,6 +69,8 @@ class GradingService(object):
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
+ #This is a dev_facing_error
+ log.error("Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params))
raise GradingServiceError, str(err), sys.exc_info()[2]
return r.text
@@ -119,11 +123,13 @@ class GradingService(object):
return response_json
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError, RubricParsingError:
+ #This is a dev_facing_error
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
return {'success': False,
'error': 'Error displaying submission'}
except ValueError:
+ #This is a dev_facing_error
log.exception("Error parsing response: {0}".format(response))
return {'success': False,
'error': "Error displaying submission"}
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
index 88921c1429..edae69854f 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
@@ -251,8 +251,9 @@ def upload_to_s3(file_to_upload, keyname, s3_interface):
return True, public_url
except:
- error_message = "Could not connect to S3."
- log.exception(error_message)
+ #This is a dev_facing_error
+ error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(bucketname.lower())
+ log.error(error_message)
return False, error_message
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
index 54db1b6557..974d23965f 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
@@ -59,12 +59,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.submission_id = None
self.grader_id = None
+ error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance."
if oeparam is None:
- raise ValueError("No oeparam found in problem xml.")
+ #This is a staff_facing_error
+ raise ValueError(error_message.format('oeparam'))
if self.prompt is None:
- raise ValueError("No prompt found in problem xml.")
+ raise ValueError(error_message.format('prompt'))
if self.rubric is None:
- raise ValueError("No rubric found in problem xml.")
+ raise ValueError(error_message.format('rubric'))
self._parse(oeparam, self.prompt, self.rubric, system)
@@ -73,6 +75,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.send_to_grader(self.latest_answer(), system)
self.created = False
+
def _parse(self, oeparam, prompt, rubric, system):
'''
Parse OpenEndedResponse XML:
@@ -98,19 +101,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# __init__ adds it (easiest way to get problem location into
# response types)
except TypeError, ValueError:
- log.exception("Grader payload %r is not a json object!", grader_payload)
+ #This is a dev_facing_error
+ log.exception("Grader payload from external open ended grading server is not a json object! Object: {0}".format(grader_payload))
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
parsed_grader_payload.update({
- 'location': system.location.url(),
+ 'location': self.location_string,
'course_id': system.course_id,
'prompt': prompt_string,
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
- 'problem_id': self.display_name
+ 'problem_id': self.display_name,
+ 'skip_basic_checks': self.skip_basic_checks,
})
updated_grader_payload = json.dumps(parsed_grader_payload)
@@ -133,24 +138,27 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
"""
event_info = dict()
- event_info['problem_id'] = system.location.url()
+ event_info['problem_id'] = self.location_string
event_info['student_id'] = system.anonymous_student_id
event_info['survey_responses'] = get
survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
- return {'success': False, 'msg': "Could not find needed tag {0}".format(tag)}
+ #This is a student_facing_error
+ return {'success': False, 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(tag)}
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score'])
except:
+ #This is a dev_facing_error
error_message = ("Could not parse submission id, grader id, "
"or feedback from message_post ajax call. Here is the message data: {0}".format(
survey_responses))
log.exception(error_message)
+ #This is a student_facing_error
return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."}
qinterface = system.xqueue['interface']
@@ -187,6 +195,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.state = self.DONE
+ #This is a student_facing_message
return {'success': success, 'msg': "Successfully submitted your feedback."}
def send_to_grader(self, submission, system):
@@ -336,18 +345,22 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
if tag not in response_items:
- return format_feedback('errors', 'Error getting feedback')
+ #This is a student_facing_error
+ return format_feedback('errors', 'Error getting feedback from grader.')
feedback_items = response_items['feedback']
try:
feedback = json.loads(feedback_items)
except (TypeError, ValueError):
- log.exception("feedback_items have invalid json %r", feedback_items)
- return format_feedback('errors', 'Could not parse feedback')
+ #This is a dev_facing_error
+ log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items))
+ #This is a student_facing_error
+ return format_feedback('errors', 'Error getting feedback from grader.')
if response_items['success']:
if len(feedback) == 0:
- return format_feedback('errors', 'No feedback available')
+ #This is a student_facing_error
+ return format_feedback('errors', 'No feedback available from grader.')
for tag in do_not_render:
if tag in feedback:
@@ -356,6 +369,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
feedback_lst = sorted(feedback.items(), key=get_priority)
feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
else:
+ #This is a student_facing_error
feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
@@ -431,14 +445,16 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
- error_message = ("External grader message should be a JSON-serialized dict."
+ #This is a dev_facing_error
+ error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
log.error(error_message)
fail['feedback'] = error_message
return fail
if not isinstance(score_result, dict):
- error_message = ("External grader message should be a JSON-serialized dict."
+ #This is a dev_facing_error
+ error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
log.error(error_message)
fail['feedback'] = error_message
@@ -446,7 +462,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result:
- error_message = ("External grader message is missing required tag: {0}"
+ #This is a dev_facing_error
+ error_message = ("External open ended grader message is missing required tag: {0}"
.format(tag))
log.error(error_message)
fail['feedback'] = error_message
@@ -563,7 +580,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
}
if dispatch not in handlers:
- return 'Error'
+ #This is a dev_facing_error
+ log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
+            #This is a student_facing_error
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
before = self.get_progress()
d = handlers[dispatch](get, system)
@@ -604,15 +624,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
success, get = self.append_image_to_student_answer(get)
error_message = ""
if success:
- get['student_answer'] = OpenEndedModule.sanitize_html(get['student_answer'])
- self.new_history_entry(get['student_answer'])
- self.send_to_grader(get['student_answer'], system)
- self.change_state(self.ASSESSING)
+ success, allowed_to_submit, error_message = self.check_if_student_can_submit()
+ if allowed_to_submit:
+ get['student_answer'] = OpenEndedModule.sanitize_html(get['student_answer'])
+ self.new_history_entry(get['student_answer'])
+ self.send_to_grader(get['student_answer'], system)
+ self.change_state(self.ASSESSING)
+ else:
+ #Error message already defined
+ success = False
else:
+ #This is a student_facing_error
error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
return {
- 'success': True,
+ 'success': success,
'error': error_message,
'student_response': get['student_answer']
}
@@ -637,17 +663,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
Output: Rendered HTML
"""
#set context variables and render template
+ eta_string = None
if self.state != self.INITIAL:
latest = self.latest_answer()
previous_answer = latest if latest is not None else self.initial_display
post_assessment = self.latest_post_assessment(system)
score = self.latest_score()
correct = 'correct' if self.is_submission_correct(score) else 'incorrect'
+ if self.state == self.ASSESSING:
+ eta_string = self.get_eta()
else:
post_assessment = ""
correct = ""
previous_answer = self.initial_display
+
context = {
'prompt': self.prompt,
'previous_answer': previous_answer,
@@ -660,6 +690,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'child_type': 'openended',
'correct': correct,
'accept_file_upload': self.accept_file_upload,
+ 'eta_message' : eta_string,
}
html = system.render_template('open_ended.html', context)
return html
@@ -689,7 +720,8 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
- raise ValueError("Open Ended definition must include exactly one '{0}' tag".format(child))
+ #This is a staff_facing_error
+ raise ValueError("Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
def parse(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
index ba8a74cc31..50f9534717 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
@@ -22,6 +22,8 @@ from xmodule.stringify import stringify_children
from xmodule.xml_module import XmlDescriptor
from xmodule.modulestore import Location
from capa.util import *
+from peer_grading_service import PeerGradingService
+import controller_query_service
from datetime import datetime
@@ -99,10 +101,21 @@ class OpenEndedChild(object):
self.accept_file_upload = static_data['accept_file_upload']
self.close_date = static_data['close_date']
self.s3_interface = static_data['s3_interface']
+ self.skip_basic_checks = static_data['skip_basic_checks']
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = static_data['max_score']
+ self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
+ self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,system)
+
+ self.system = system
+
+ self.location_string = location
+ try:
+ self.location_string = self.location_string.url()
+ except:
+ pass
self.setup_response(system, location, definition, descriptor)
@@ -126,12 +139,14 @@ class OpenEndedChild(object):
if self.closed():
return True, {
'success': False,
- 'error': 'This problem is now closed.'
+ #This is a student_facing_error
+ 'error': 'The problem close date has passed, and this problem is now closed.'
}
elif self.attempts > self.max_attempts:
return True, {
'success': False,
- 'error': 'Too many attempts.'
+ #This is a student_facing_error
+ 'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(self.attempts, self.max_attempts)
}
else:
return False, {}
@@ -250,7 +265,8 @@ class OpenEndedChild(object):
try:
return Progress(self.get_score()['score'], self._max_score)
except Exception as err:
- log.exception("Got bad progress")
+ #This is a dev_facing_error
+            log.exception("Got bad progress from open ended child module. Max Score: {0}".format(self._max_score))
return None
return None
@@ -258,10 +274,12 @@ class OpenEndedChild(object):
"""
return dict out-of-sync error message, and also log.
"""
- log.warning("Assessment module state out sync. state: %r, get: %r. %s",
+ #This is a dev_facing_error
+ log.warning("Open ended child state out sync. state: %r, get: %r. %s",
self.state, get, msg)
+ #This is a student_facing_error
return {'success': False,
- 'error': 'The problem state got out-of-sync'}
+ 'error': 'The problem state got out-of-sync. Please try reloading the page.'}
def get_html(self):
"""
@@ -339,6 +357,10 @@ class OpenEndedChild(object):
if get_data['can_upload_files'] in ['true', '1']:
has_file_to_upload = True
file = get_data['student_file'][0]
+            if self.system.track_function:
+ self.system.track_function('open_ended_image_upload', {'filename': file.name})
+ else:
+ log.info("No tracking function found when uploading image.")
uploaded_to_s3, image_ok, s3_public_url = self.upload_image_to_s3(file)
if uploaded_to_s3:
image_tag = self.generate_image_tag_from_url(s3_public_url, file.name)
@@ -407,3 +429,55 @@ class OpenEndedChild(object):
success = True
return success, string
+
+ def check_if_student_can_submit(self):
+ location = self.location_string
+
+ student_id = self.system.anonymous_student_id
+ success = False
+ allowed_to_submit = True
+ response = {}
+ #This is a student_facing_error
+ error_string = ("You need to peer grade {0} more in order to make another submission. "
+ "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
+ try:
+ response = self.peer_gs.get_data_for_location(self.location_string, student_id)
+ count_graded = response['count_graded']
+ count_required = response['count_required']
+ student_sub_count = response['student_sub_count']
+ success = True
+ except:
+ #This is a dev_facing_error
+ log.error("Could not contact external open ended graders for location {0} and student {1}".format(self.location_string,student_id))
+ #This is a student_facing_error
+ error_message = "Could not contact the graders. Please notify course staff."
+ return success, allowed_to_submit, error_message
+ if count_graded>=count_required:
+ return success, allowed_to_submit, ""
+ else:
+ allowed_to_submit = False
+ #This is a student_facing_error
+ error_message = error_string.format(count_required-count_graded, count_graded, count_required, student_sub_count)
+ return success, allowed_to_submit, error_message
+
+ def get_eta(self):
+ response = self.controller_qs.check_for_eta(self.location_string)
+ try:
+ response = json.loads(response)
+ except:
+ pass
+
+ success = response['success']
+ if isinstance(success, basestring):
+ success = (success.lower()=="true")
+
+ if success:
+ eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
+ eta_string = "Please check back for your response in at most {0}.".format(eta)
+ else:
+ eta_string = ""
+
+ return eta_string
+
+
+
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
index 5b639be4f4..42c54f0463 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
@@ -30,8 +30,8 @@ class PeerGradingService(GradingService):
self.system = system
def get_data_for_location(self, problem_location, student_id):
- response = self.get(self.get_data_for_location_url,
- {'location': problem_location, 'student_id': student_id})
+ params = {'location': problem_location, 'student_id': student_id}
+ response = self.get(self.get_data_for_location_url, params)
return self.try_to_decode(response)
def get_next_submission(self, problem_location, grader_id):
@@ -106,7 +106,7 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_grade(self, location, grader_id, submission_id,
- score, feedback, submission_key):
+ score, feedback, submission_key, rubric_scores, submission_flagged):
return json.dumps({'success': True})
def is_student_calibrated(self, problem_location, grader_id):
@@ -122,7 +122,8 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_calibration_essay(self, problem_location, grader_id,
- calibration_essay_id, submission_key, score, feedback):
+ calibration_essay_id, submission_key, score,
+ feedback, rubric_scores):
return {'success': True, 'actual_score': 2}
def get_problem_list(self, course_id, grader_id):
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
index c608eeea06..7ecb3c4d5e 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
@@ -90,7 +90,10 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
}
if dispatch not in handlers:
- return 'Error'
+ #This is a dev_facing_error
+            log.error("Cannot find {0} in handlers in handle_ajax function for self_assessment_module.py".format(dispatch))
+            #This is a student_facing_error
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
before = self.get_progress()
d = handlers[dispatch](get, system)
@@ -123,7 +126,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
elif self.state in (self.POST_ASSESSMENT, self.DONE):
context['read_only'] = True
else:
- raise ValueError("Illegal state '%r'" % self.state)
+ #This is a dev_facing_error
+ raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.state))
return system.render_template('self_assessment_rubric.html', context)
@@ -148,7 +152,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
elif self.state == self.DONE:
context['read_only'] = True
else:
- raise ValueError("Illegal state '%r'" % self.state)
+ #This is a dev_facing_error
+ raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.state))
return system.render_template('self_assessment_hint.html', context)
@@ -177,10 +182,16 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
# add new history element with answer and empty score and hint.
success, get = self.append_image_to_student_answer(get)
if success:
- get['student_answer'] = SelfAssessmentModule.sanitize_html(get['student_answer'])
- self.new_history_entry(get['student_answer'])
- self.change_state(self.ASSESSING)
+ success, allowed_to_submit, error_message = self.check_if_student_can_submit()
+ if allowed_to_submit:
+ get['student_answer'] = SelfAssessmentModule.sanitize_html(get['student_answer'])
+ self.new_history_entry(get['student_answer'])
+ self.change_state(self.ASSESSING)
+ else:
+ #Error message already defined
+ success = False
else:
+ #This is a student_facing_error
error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
return {
@@ -214,7 +225,10 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
for i in xrange(0,len(score_list)):
score_list[i] = int(score_list[i])
except ValueError:
- return {'success': False, 'error': "Non-integer score value, or no score list"}
+ #This is a dev_facing_error
+            log.error("Non-integer score value passed to save_assessment, or no score list present.")
+ #This is a student_facing_error
+ return {'success': False, 'error': "Error saving your score. Please notify course staff."}
#Record score as assessment and rubric scores as post assessment
self.record_latest_score(score)
@@ -256,6 +270,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
try:
rubric_scores = json.loads(latest_post_assessment)
except:
+ #This is a dev_facing_error
log.error("Cannot parse rubric scores in self assessment module from {0}".format(latest_post_assessment))
rubric_scores = []
return [rubric_scores]
@@ -287,7 +302,8 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
expected_children = []
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
- raise ValueError("Self assessment definition must include exactly one '{0}' tag".format(child))
+ #This is a staff_facing_error
+ raise ValueError("Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
def parse(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py
index 457c8ee72a..e262db5615 100644
--- a/common/lib/xmodule/xmodule/peer_grading_module.py
+++ b/common/lib/xmodule/xmodule/peer_grading_module.py
@@ -3,6 +3,7 @@ import logging
from lxml import etree
+from datetime import datetime
from pkg_resources import resource_string
from .capa_module import ComplexEncoder
from .editing_module import EditingDescriptor
@@ -10,6 +11,8 @@ from .stringify import stringify_children
from .x_module import XModule
from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
+from xmodule.modulestore.django import modulestore
+from timeinfo import TimeInfo
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, GradingServiceError
@@ -21,6 +24,8 @@ TRUE_DICT = [True, "True", "true", "TRUE"]
MAX_SCORE = 1
IS_GRADED = True
+EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff."
+
class PeerGradingModule(XModule):
_VERSION = 1
@@ -50,6 +55,7 @@ class PeerGradingModule(XModule):
self.system = system
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system)
+
self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION)
if isinstance(self.use_for_single_location, basestring):
self.use_for_single_location = (self.use_for_single_location in TRUE_DICT)
@@ -58,10 +64,28 @@ class PeerGradingModule(XModule):
if isinstance(self.is_graded, basestring):
self.is_graded = (self.is_graded in TRUE_DICT)
+ display_due_date_string = self.metadata.get('due', None)
+ grace_period_string = self.metadata.get('graceperiod', None)
+
+ try:
+ self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
+ except:
+ log.error("Error parsing due date information in location {0}".format(location))
+ raise
+
+ self.display_due_date = self.timeinfo.display_due_date
+
self.link_to_location = self.metadata.get('link_to_location', USE_FOR_SINGLE_LOCATION)
if self.use_for_single_location == True:
- #This will raise an exception if the location is invalid
- link_to_location_object = Location(self.link_to_location)
+ try:
+ self.linked_problem = modulestore().get_instance(self.system.course_id, self.link_to_location)
+ except:
+ log.error("Linked location {0} for peer grading module {1} does not exist".format(
+ self.link_to_location, self.location))
+ raise
+ due_date = self.linked_problem.metadata.get('peer_grading_due', None)
+ if due_date:
+ self.metadata['due'] = due_date
self.ajax_url = self.system.ajax_url
if not self.ajax_url.endswith("/"):
@@ -73,6 +97,15 @@ class PeerGradingModule(XModule):
#This could result in an exception, but not wrapping in a try catch block so it moves up the stack
self.max_grade = int(self.max_grade)
+ def closed(self):
+ return self._closed(self.timeinfo)
+
+ def _closed(self, timeinfo):
+ if timeinfo.close_date is not None and datetime.utcnow() > timeinfo.close_date:
+ return True
+ return False
+
+
def _err_response(self, msg):
"""
Return a HttpResponse with a json dump with success=False, and the given error message.
@@ -92,6 +125,8 @@ class PeerGradingModule(XModule):
Needs to be implemented by inheritors. Renders the HTML that students see.
@return:
"""
+ if self.closed():
+ return self.peer_grading_closed()
if not self.use_for_single_location:
return self.peer_grading()
else:
@@ -112,7 +147,10 @@ class PeerGradingModule(XModule):
}
if dispatch not in handlers:
- return 'Error'
+ #This is a dev_facing_error
+            log.error("Cannot find {0} in handlers in handle_ajax function for peer_grading_module.py".format(dispatch))
+            #This is a student_facing_error
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
d = handlers[dispatch](get)
@@ -130,6 +168,7 @@ class PeerGradingModule(XModule):
count_required = response['count_required']
success = True
except GradingServiceError:
+ #This is a dev_facing_error
log.exception("Error getting location data from controller for location {0}, student {1}"
.format(location, student_id))
@@ -155,6 +194,7 @@ class PeerGradingModule(XModule):
count_graded = response['count_graded']
count_required = response['count_required']
if count_required > 0 and count_graded >= count_required:
+ #Ensures that once a student receives a final score for peer grading, that it does not change.
self.student_data_for_location = response
score_dict = {
@@ -204,10 +244,12 @@ class PeerGradingModule(XModule):
response = self.peer_gs.get_next_submission(location, grader_id)
return response
except GradingServiceError:
+ #This is a dev_facing_error
log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}"
.format(self.peer_gs.url, location, grader_id))
+ #This is a student_facing_error
return {'success': False,
- 'error': 'Could not connect to grading service'}
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
def save_grade(self, get):
"""
@@ -244,14 +286,16 @@ class PeerGradingModule(XModule):
score, feedback, submission_key, rubric_scores, submission_flagged)
return response
except GradingServiceError:
- log.exception("""Error saving grade. server url: {0}, location: {1}, submission_id:{2},
+ #This is a dev_facing_error
+ log.exception("""Error saving grade to open ended grading service. server url: {0}, location: {1}, submission_id:{2},
submission_key: {3}, score: {4}"""
.format(self.peer_gs.url,
location, submission_id, submission_key, score)
)
+ #This is a student_facing_error
return {
'success': False,
- 'error': 'Could not connect to grading service'
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def is_student_calibrated(self, get):
@@ -284,11 +328,13 @@ class PeerGradingModule(XModule):
response = self.peer_gs.is_student_calibrated(location, grader_id)
return response
except GradingServiceError:
- log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}"
+ #This is a dev_facing_error
+ log.exception("Error from open ended grading service. server url: {0}, grader_id: {0}, location: {1}"
.format(self.peer_gs.url, grader_id, location))
+ #This is a student_facing_error
return {
'success': False,
- 'error': 'Could not connect to grading service'
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def show_calibration_essay(self, get):
@@ -327,16 +373,20 @@ class PeerGradingModule(XModule):
response = self.peer_gs.show_calibration_essay(location, grader_id)
return response
except GradingServiceError:
- log.exception("Error from grading service. server url: {0}, location: {0}"
+ #This is a dev_facing_error
+ log.exception("Error from open ended grading service. server url: {0}, location: {0}"
.format(self.peer_gs.url, location))
+ #This is a student_facing_error
return {'success': False,
- 'error': 'Could not connect to grading service'}
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
+ #This is a dev_facing_error
log.exception("Cannot parse rubric string. Raw string: {0}"
.format(rubric))
+ #This is a student_facing_error
return {'success': False,
- 'error': 'Error displaying submission'}
+ 'error': 'Error displaying submission. Please notify course staff.'}
def save_calibration_essay(self, get):
@@ -375,8 +425,20 @@ class PeerGradingModule(XModule):
submission_key, score, feedback, rubric_scores)
return response
except GradingServiceError:
+ #This is a dev_facing_error
log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
- return self._err_response('Could not connect to grading service')
+ #This is a student_facing_error
+ return self._err_response('There was an error saving your score. Please notify course staff.')
+
+ def peer_grading_closed(self):
+ '''
+ Show the Peer grading closed template
+ '''
+ html = self.system.render_template('peer_grading/peer_grading_closed.html', {
+ 'use_for_single_location': self.use_for_single_location
+ })
+ return html
+
def peer_grading(self, get=None):
'''
@@ -397,13 +459,49 @@ class PeerGradingModule(XModule):
problem_list = problem_list_dict['problem_list']
except GradingServiceError:
- error_text = "Error occured while contacting the grading service"
+ #This is a student_facing_error
+ error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR
success = False
# catch error if if the json loads fails
except ValueError:
- error_text = "Could not get problem list"
+ #This is a student_facing_error
+ error_text = "Could not get list of problems to peer grade. Please notify course staff."
success = False
+
+ def _find_corresponding_module_for_location(location):
+ '''
+ find the peer grading module that links to the given location
+ '''
+ try:
+ return modulestore().get_instance(self.system.course_id, location)
+ except:
+ # the linked problem doesn't exist
+ log.error("Problem {0} does not exist in this course".format(location))
+ raise
+
+
+ for problem in problem_list:
+ problem_location = problem['location']
+ descriptor = _find_corresponding_module_for_location(problem_location)
+ if descriptor:
+ problem['due'] = descriptor.metadata.get('peer_grading_due', None)
+ grace_period_string = descriptor.metadata.get('graceperiod', None)
+ try:
+ problem_timeinfo = TimeInfo(problem['due'], grace_period_string)
+ except:
+ log.error("Malformed due date or grace period string for location {0}".format(problem_location))
+ raise
+ if self._closed(problem_timeinfo):
+ problem['closed'] = True
+ else:
+ problem['closed'] = False
+ else:
+ # if we can't find the due date, assume that it doesn't have one
+ problem['due'] = None
+ problem['closed'] = False
+
+
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading.html', {
'course_id': self.system.course_id,
@@ -425,6 +523,8 @@ class PeerGradingModule(XModule):
if get == None or get.get('location') == None:
if not self.use_for_single_location:
#This is an error case, because it must be set to use a single location to be called without get parameters
+ #This is a dev_facing_error
+ log.error("Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
return {'html': "", 'success': False}
problem_location = self.link_to_location
@@ -489,7 +589,8 @@ class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor):
expected_children = []
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
- raise ValueError("Peer grading definition must include at least one '{0}' tag".format(child))
+ #This is a staff_facing_error
+ raise ValueError("Peer grading definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
def parse_task(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py
index 04e7ee19b1..9474717cb2 100644
--- a/common/lib/xmodule/xmodule/tests/__init__.py
+++ b/common/lib/xmodule/xmodule/tests/__init__.py
@@ -19,6 +19,15 @@ import xmodule
from xmodule.x_module import ModuleSystem
from mock import Mock
+open_ended_grading_interface = {
+ 'url': 'http://sandbox-grader-001.m.edx.org/peer_grading',
+ 'username': 'incorrect_user',
+ 'password': 'incorrect_pass',
+ 'staff_grading' : 'staff_grading',
+ 'peer_grading' : 'peer_grading',
+ 'grading_controller' : 'grading_controller'
+ }
+
test_system = ModuleSystem(
ajax_url='courses/course_id/modx/a_location',
track_function=Mock(),
@@ -31,7 +40,8 @@ test_system = ModuleSystem(
debug=True,
xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10},
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
- anonymous_student_id='student'
+ anonymous_student_id='student',
+ open_ended_grading_interface= open_ended_grading_interface
)
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index a22fcdb5f6..e84267c1e7 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -42,6 +42,7 @@ class CapaFactory(object):
force_save_button=None,
attempts=None,
problem_state=None,
+ correct=False
):
"""
All parameters are optional, and are added to the created problem if specified.
@@ -58,6 +59,7 @@ class CapaFactory(object):
module.
attempts: also added to instance state. Will be converted to an int.
+ correct: if True, the problem will be initialized to be answered correctly.
"""
definition = {'data': CapaFactory.sample_problem_xml, }
location = Location(["i4x", "edX", "capa_test", "problem",
@@ -81,10 +83,19 @@ class CapaFactory(object):
instance_state_dict = {}
if problem_state is not None:
instance_state_dict = problem_state
+
if attempts is not None:
# converting to int here because I keep putting "0" and "1" in the tests
# since everything else is a string.
instance_state_dict['attempts'] = int(attempts)
+
+ if correct:
+ # TODO: make this actually set an answer of 3.14, and mark it correct
+ #instance_state_dict['student_answers'] = {}
+ #instance_state_dict['correct_map'] = {}
+ pass
+
+
if len(instance_state_dict) > 0:
instance_state = json.dumps(instance_state_dict)
else:
@@ -94,13 +105,16 @@ class CapaFactory(object):
definition, descriptor,
instance_state, None, metadata=metadata)
+ if correct:
+ # TODO: probably better to actually set the internal state properly, but...
+ module.get_score = lambda: {'score': 1, 'total': 1}
+
return module
class CapaModuleTest(unittest.TestCase):
-
def setUp(self):
now = datetime.datetime.now()
day_delta = datetime.timedelta(days=1)
@@ -120,6 +134,18 @@ class CapaModuleTest(unittest.TestCase):
self.assertNotEqual(module.url_name, other_module.url_name,
"Factory should be creating unique names for each problem")
+
+ def test_correct(self):
+ """
+ Check that the factory creates correct and incorrect problems properly.
+ """
+ module = CapaFactory.create()
+ self.assertEqual(module.get_score()['score'], 0)
+
+ other_module = CapaFactory.create(correct=True)
+ self.assertEqual(other_module.get_score()['score'], 1)
+
+
def test_showanswer_default(self):
"""
Make sure the show answer logic does the right thing.
@@ -178,7 +204,7 @@ class CapaModuleTest(unittest.TestCase):
for everyone--e.g. after due date + grace period.
"""
- # can see after attempts used up, even with due date in the future
+ # can't see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="1",
@@ -209,3 +235,50 @@ class CapaModuleTest(unittest.TestCase):
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertFalse(still_in_grace.answer_available())
+
+ def test_showanswer_finished(self):
+ """
+ With showanswer="finished" should show answer after the problem is closed,
+ or after the answer is correct.
+ """
+
+ # can see after attempts used up, even with due date in the future
+ used_all_attempts = CapaFactory.create(showanswer='finished',
+ max_attempts="1",
+ attempts="1",
+ due=self.tomorrow_str)
+ self.assertTrue(used_all_attempts.answer_available())
+
+
+ # can see after due date
+ past_due_date = CapaFactory.create(showanswer='finished',
+ max_attempts="1",
+ attempts="0",
+ due=self.yesterday_str)
+ self.assertTrue(past_due_date.answer_available())
+
+
+ # can't see because attempts left and wrong
+ attempts_left_open = CapaFactory.create(showanswer='finished',
+ max_attempts="1",
+ attempts="0",
+ due=self.tomorrow_str)
+ self.assertFalse(attempts_left_open.answer_available())
+
+ # _can_ see because attempts left and right
+ correct_ans = CapaFactory.create(showanswer='finished',
+ max_attempts="1",
+ attempts="0",
+ due=self.tomorrow_str,
+ correct=True)
+ self.assertTrue(correct_ans.answer_available())
+
+
+ # Can see even though grace period hasn't expired, because have no more
+ # attempts.
+ still_in_grace = CapaFactory.create(showanswer='finished',
+ max_attempts="1",
+ attempts="1",
+ due=self.yesterday_str,
+ graceperiod=self.two_day_delta_str)
+ self.assertTrue(still_in_grace.answer_available())
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
index ef6344eb57..5f6496f823 100644
--- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -48,6 +48,7 @@ class OpenEndedChildTest(unittest.TestCase):
'close_date': None,
's3_interface' : "",
'open_ended_grading_interface' : {},
+ 'skip_basic_checks' : False,
}
definition = Mock()
descriptor = Mock()
@@ -167,6 +168,7 @@ class OpenEndedModuleTest(unittest.TestCase):
'close_date': None,
's3_interface' : test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+ 'skip_basic_checks' : False,
}
oeparam = etree.XML('''
@@ -301,6 +303,7 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
'close_date' : "",
's3_interface' : test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+ 'skip_basic_checks' : False,
}
oeparam = etree.XML('''
diff --git a/common/lib/xmodule/xmodule/tests/test_export.py b/common/lib/xmodule/xmodule/tests/test_export.py
index da1b04bd94..e9fb89e9f6 100644
--- a/common/lib/xmodule/xmodule/tests/test_export.py
+++ b/common/lib/xmodule/xmodule/tests/test_export.py
@@ -4,7 +4,7 @@ from fs.osfs import OSFS
from nose.tools import assert_equals, assert_true
from path import path
from tempfile import mkdtemp
-from shutil import copytree
+import shutil
from xmodule.modulestore.xml import XMLModuleStore
@@ -46,11 +46,11 @@ class RoundTripTestCase(unittest.TestCase):
Thus we make sure that export and import work properly.
'''
def check_export_roundtrip(self, data_dir, course_dir):
- root_dir = path(mkdtemp())
+ root_dir = path(self.temp_dir)
print "Copying test course to temp dir {0}".format(root_dir)
data_dir = path(data_dir)
- copytree(data_dir / course_dir, root_dir / course_dir)
+ shutil.copytree(data_dir / course_dir, root_dir / course_dir)
print "Starting import"
initial_import = XMLModuleStore(root_dir, course_dirs=[course_dir])
@@ -108,6 +108,8 @@ class RoundTripTestCase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
+ self.temp_dir = mkdtemp()
+ self.addCleanup(shutil.rmtree, self.temp_dir)
def test_toy_roundtrip(self):
self.check_export_roundtrip(DATA_DIR, "toy")
diff --git a/common/lib/xmodule/xmodule/tests/test_self_assessment.py b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
index fe55c88e82..b9c3076b7c 100644
--- a/common/lib/xmodule/xmodule/tests/test_self_assessment.py
+++ b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
@@ -1,11 +1,10 @@
import json
-from mock import Mock
+from mock import Mock, MagicMock
import unittest
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.modulestore import Location
from lxml import etree
-from nose.plugins.skip import SkipTest
from . import test_system
@@ -51,6 +50,7 @@ class SelfAssessmentTest(unittest.TestCase):
'close_date': None,
's3_interface' : test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+ 'skip_basic_checks' : False,
}
self.module = SelfAssessmentModule(test_system, self.location,
@@ -63,13 +63,29 @@ class SelfAssessmentTest(unittest.TestCase):
self.assertTrue("This is sample prompt text" in html)
def test_self_assessment_flow(self):
- raise SkipTest()
+ responses = {'assessment': '0', 'score_list[]': ['0', '0']}
+ def get_fake_item(name):
+ return responses[name]
+
+ def get_data_for_location(self,location,student):
+ return {
+ 'count_graded' : 0,
+ 'count_required' : 0,
+ 'student_sub_count': 0,
+ }
+
+ mock_query_dict = MagicMock()
+ mock_query_dict.__getitem__.side_effect = get_fake_item
+ mock_query_dict.getlist = get_fake_item
+
+ self.module.peer_gs.get_data_for_location = get_data_for_location
+
self.assertEqual(self.module.get_score()['score'], 0)
self.module.save_answer({'student_answer': "I am an answer"}, test_system)
self.assertEqual(self.module.state, self.module.ASSESSING)
- self.module.save_assessment({'assessment': '0'}, test_system)
+ self.module.save_assessment(mock_query_dict, test_system)
self.assertEqual(self.module.state, self.module.DONE)
@@ -79,5 +95,6 @@ class SelfAssessmentTest(unittest.TestCase):
# if we now assess as right, skip the REQUEST_HINT state
self.module.save_answer({'student_answer': 'answer 4'}, test_system)
- self.module.save_assessment({'assessment': '1'}, test_system)
+ responses['assessment'] = '1'
+ self.module.save_assessment(mock_query_dict, test_system)
self.assertEqual(self.module.state, self.module.DONE)
diff --git a/common/lib/xmodule/xmodule/timeinfo.py b/common/lib/xmodule/xmodule/timeinfo.py
new file mode 100644
index 0000000000..6c6a72e700
--- /dev/null
+++ b/common/lib/xmodule/xmodule/timeinfo.py
@@ -0,0 +1,39 @@
+import dateutil
+import dateutil.parser
+import datetime
+from timeparse import parse_timedelta
+
+import logging
+log = logging.getLogger(__name__)
+
+class TimeInfo(object):
+ """
+ This is a simple object that calculates and stores datetime information for an XModule
+ based on the due date string and the grace period string
+
+ So far it parses out three different pieces of time information:
+ self.display_due_date - the 'official' due date that gets displayed to students
+ self.grace_period - the length of the grace period
+ self.close_date - the real due date
+
+ """
+ def __init__(self, display_due_date_string, grace_period_string):
+ if display_due_date_string is not None:
+ try:
+ self.display_due_date = dateutil.parser.parse(display_due_date_string)
+ except ValueError:
+ log.error("Could not parse due date {0}".format(display_due_date_string))
+ raise
+ else:
+ self.display_due_date = None
+
+ if grace_period_string is not None and self.display_due_date:
+ try:
+ self.grace_period = parse_timedelta(grace_period_string)
+ self.close_date = self.display_due_date + self.grace_period
+ except:
+ log.error("Error parsing the grace period {0}".format(grace_period_string))
+ raise
+ else:
+ self.grace_period = None
+ self.close_date = self.display_due_date
diff --git a/common/lib/xmodule/xmodule/x_module.py b/common/lib/xmodule/xmodule/x_module.py
index b5e9b10ea6..dccc96a7ca 100644
--- a/common/lib/xmodule/xmodule/x_module.py
+++ b/common/lib/xmodule/xmodule/x_module.py
@@ -411,7 +411,6 @@ class ResourceTemplates(object):
return templates
-
class XModuleDescriptor(Plugin, HTMLSnippet, ResourceTemplates):
"""
An XModuleDescriptor is a specification for an element of a course. This
@@ -585,11 +584,11 @@ class XModuleDescriptor(Plugin, HTMLSnippet, ResourceTemplates):
def inherit_metadata(self, metadata):
"""
Updates this module with metadata inherited from a containing module.
- Only metadata specified in self.inheritable_metadata will
+ Only metadata specified in inheritable_metadata will
be inherited
"""
# Set all inheritable metadata from kwargs that are
- # in self.inheritable_metadata and aren't already set in metadata
+ # in inheritable_metadata and aren't already set in metadata
for attr in self.inheritable_metadata:
if attr not in self.metadata and attr in metadata:
self._inherited_metadata.add(attr)
diff --git a/common/lib/xmodule/xmodule/xml_module.py b/common/lib/xmodule/xmodule/xml_module.py
index 64c3aabbcc..773531c528 100644
--- a/common/lib/xmodule/xmodule/xml_module.py
+++ b/common/lib/xmodule/xmodule/xml_module.py
@@ -128,8 +128,7 @@ class XmlDescriptor(XModuleDescriptor):
'graded': bool_map,
'hide_progress_tab': bool_map,
'allow_anonymous': bool_map,
- 'allow_anonymous_to_peers': bool_map,
- 'weight': int_map
+ 'allow_anonymous_to_peers': bool_map
}
diff --git a/common/static/coffee/src/discussion/discussion.coffee b/common/static/coffee/src/discussion/discussion.coffee
index 068cde3de4..9cee068b74 100644
--- a/common/static/coffee/src/discussion/discussion.coffee
+++ b/common/static/coffee/src/discussion/discussion.coffee
@@ -39,6 +39,8 @@ if Backbone?
url = DiscussionUtil.urlFor 'threads'
when 'followed'
url = DiscussionUtil.urlFor 'followed_threads', options.user_id
+ if options['group_id']
+ data['group_id'] = options['group_id']
data['sort_key'] = sort_options.sort_key || 'date'
data['sort_order'] = sort_options.sort_order || 'desc'
DiscussionUtil.safeAjax
diff --git a/common/static/coffee/src/discussion/discussion_module_view.coffee b/common/static/coffee/src/discussion/discussion_module_view.coffee
index 554f292c71..d5f0b2cab3 100644
--- a/common/static/coffee/src/discussion/discussion_module_view.coffee
+++ b/common/static/coffee/src/discussion/discussion_module_view.coffee
@@ -70,10 +70,21 @@ if Backbone?
DiscussionUtil.loadRoles(response.roles)
allow_anonymous = response.allow_anonymous
allow_anonymous_to_peers = response.allow_anonymous_to_peers
+ cohorts = response.cohorts
# $elem.html("Hide Discussion")
@discussion = new Discussion()
@discussion.reset(response.discussion_data, {silent: false})
- $discussion = $(Mustache.render $("script#_inline_discussion").html(), {'threads':response.discussion_data, 'discussionId': discussionId, 'allow_anonymous_to_peers': allow_anonymous_to_peers, 'allow_anonymous': allow_anonymous})
+
+ #use same discussion template but different thread template
+ #determined in the coffeescript based on whether or not there's a
+ #group id
+
+ if response.is_cohorted
+ source = "script#_inline_discussion_cohorted"
+ else
+ source = "script#_inline_discussion"
+
+ $discussion = $(Mustache.render $(source).html(), {'threads':response.discussion_data, 'discussionId': discussionId, 'allow_anonymous_to_peers': allow_anonymous_to_peers, 'allow_anonymous': allow_anonymous, 'cohorts':cohorts})
if @$('section.discussion').length
@$('section.discussion').replaceWith($discussion)
else
diff --git a/common/static/coffee/src/discussion/views/discussion_thread_list_view.coffee b/common/static/coffee/src/discussion/views/discussion_thread_list_view.coffee
index c5f66c87ec..8364963218 100644
--- a/common/static/coffee/src/discussion/views/discussion_thread_list_view.coffee
+++ b/common/static/coffee/src/discussion/views/discussion_thread_list_view.coffee
@@ -9,6 +9,7 @@ if Backbone?
"click .browse-topic-drop-search-input": "ignoreClick"
"click .post-list .list-item a": "threadSelected"
"click .post-list .more-pages a": "loadMorePages"
+ "change .cohort-options": "chooseCohort"
'keyup .browse-topic-drop-search-input': DiscussionFilter.filterDrop
initialize: ->
@@ -128,10 +129,20 @@ if Backbone?
switch @mode
when 'search'
options.search_text = @current_search
+ if @group_id
+ options.group_id = @group_id
when 'followed'
options.user_id = window.user.id
+ options.group_id = "all"
when 'commentables'
options.commentable_ids = @discussionIds
+ if @group_id
+ options.group_id = @group_id
+ when 'all'
+ if @group_id
+ options.group_id = @group_id
+
+
@collection.retrieveAnotherPage(@mode, options, {sort_key: @sortBy})
renderThread: (thread) =>
@@ -263,13 +274,25 @@ if Backbone?
if discussionId == "#all"
@discussionIds = ""
@$(".post-search-field").val("")
+ @$('.cohort').show()
@retrieveAllThreads()
else if discussionId == "#following"
@retrieveFollowed(event)
+ @$('.cohort').hide()
else
discussionIds = _.map item.find(".board-name[data-discussion_id]"), (board) -> $(board).data("discussion_id").id
- @retrieveDiscussions(discussionIds)
-
+
+ if $(event.target).attr('cohorted') == "True"
+ @retrieveDiscussions(discussionIds, "function(){$('.cohort').show();}")
+ else
+ @retrieveDiscussions(discussionIds, "function(){$('.cohort').hide();}")
+
+ chooseCohort: (event) ->
+ @group_id = @$('.cohort-options :selected').val()
+ @collection.current_page = 0
+ @collection.reset()
+ @loadMorePages(event)
+
retrieveDiscussion: (discussion_id, callback=null) ->
url = DiscussionUtil.urlFor("retrieve_discussion", discussion_id)
DiscussionUtil.safeAjax
diff --git a/common/static/coffee/src/discussion/views/discussion_thread_view_inline.coffee b/common/static/coffee/src/discussion/views/discussion_thread_view_inline.coffee
index 7dab9ae342..e648955d08 100644
--- a/common/static/coffee/src/discussion/views/discussion_thread_view_inline.coffee
+++ b/common/static/coffee/src/discussion/views/discussion_thread_view_inline.coffee
@@ -16,7 +16,10 @@ if Backbone?
@$delegateElement = @$local
render: ->
- @template = DiscussionUtil.getTemplate("_inline_thread")
+ if @model.has('group_id')
+ @template = DiscussionUtil.getTemplate("_inline_thread_cohorted")
+ else
+ @template = DiscussionUtil.getTemplate("_inline_thread")
if not @model.has('abbreviatedBody')
@abbreviateBody()
diff --git a/common/static/coffee/src/discussion/views/new_post_inline_vew.coffee b/common/static/coffee/src/discussion/views/new_post_inline_vew.coffee
index ed5ee13919..ffd43ff7bf 100644
--- a/common/static/coffee/src/discussion/views/new_post_inline_vew.coffee
+++ b/common/static/coffee/src/discussion/views/new_post_inline_vew.coffee
@@ -25,6 +25,7 @@ if Backbone?
event.preventDefault()
title = @$(".new-post-title").val()
body = @$(".new-post-body").find(".wmd-input").val()
+ group = @$(".new-post-group option:selected").attr("value")
# TODO tags: commenting out til we know what to do with them
#tags = @$(".new-post-tags").val()
@@ -45,6 +46,7 @@ if Backbone?
data:
title: title
body: body
+ group_id: group
# TODO tags: commenting out til we know what to do with them
#tags: tags
diff --git a/common/static/coffee/src/discussion/views/new_post_view.coffee b/common/static/coffee/src/discussion/views/new_post_view.coffee
index 1c49fdbc8e..606e4f30d7 100644
--- a/common/static/coffee/src/discussion/views/new_post_view.coffee
+++ b/common/static/coffee/src/discussion/views/new_post_view.coffee
@@ -14,8 +14,14 @@ if Backbone?
@setSelectedTopic()
DiscussionUtil.makeWmdEditor @$el, $.proxy(@$, @), "new-post-body"
+
@$(".new-post-tags").tagsInput DiscussionUtil.tagsInputOptions()
-
+
+ if @$($(".topic_menu li a")[0]).attr('cohorted') != "True"
+ $('.choose-cohort').hide();
+
+
+
events:
"submit .new-post-form": "createPost"
"click .topic_dropdown_button": "toggleTopicDropdown"
@@ -65,6 +71,11 @@ if Backbone?
@topicText = @getFullTopicName($target)
@topicId = $target.data('discussion_id')
@setSelectedTopic()
+ if $target.attr('cohorted') == "True"
+ $('.choose-cohort').show();
+ else
+ $('.choose-cohort').hide();
+
setSelectedTopic: ->
@dropdownButton.html(@fitName(@topicText) + ' ▾')
@@ -116,6 +127,7 @@ if Backbone?
title = @$(".new-post-title").val()
body = @$(".new-post-body").find(".wmd-input").val()
tags = @$(".new-post-tags").val()
+ group = @$(".new-post-group option:selected").attr("value")
anonymous = false || @$("input.discussion-anonymous").is(":checked")
anonymous_to_peers = false || @$("input.discussion-anonymous-to-peers").is(":checked")
@@ -137,6 +149,7 @@ if Backbone?
anonymous: anonymous
anonymous_to_peers: anonymous_to_peers
auto_subscribe: follow
+ group_id: group
error: DiscussionUtil.formErrorHandler(@$(".new-post-form-errors"))
success: (response, textStatus) =>
# TODO: Move this out of the callback, this makes it feel sluggish
diff --git a/common/static/css/pdfviewer.css b/common/static/css/pdfviewer.css
new file mode 100644
index 0000000000..656bc47c29
--- /dev/null
+++ b/common/static/css/pdfviewer.css
@@ -0,0 +1,763 @@
+/* Copyright 2012 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+* {
+ padding: 0;
+ margin: 0;
+}
+
+html {
+ height: 100%;
+}
+
+body {
+ height: 100%;
+ background-color: #404040;
+ background-image: url(vendor/pdfjs/images/texture.png);
+}
+
+body,
+input,
+button,
+select {
+ font: message-box;
+}
+
+.hidden {
+ display: none;
+}
+[hidden] {
+ display: none !important;
+}
+
+#viewerContainer.presentationControls {
+ cursor: default;
+}
+*/
+
+/* outer/inner center provides horizontal center */
+.outerCenter {
+ float: right;
+ position: relative;
+ right: 50%;
+}
+
+.innerCenter {
+ float: right;
+ position: relative;
+ right: -50%;
+}
+
+#outerContainer {
+ width: 100%;
+ height: 100%;
+}
+
+#mainContainer {
+/* position: absolute;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;*/
+ -webkit-transition-duration: 200ms;
+ -webkit-transition-timing-function: ease;
+ -moz-transition-duration: 200ms;
+ -moz-transition-timing-function: ease;
+ -ms-transition-duration: 200ms;
+ -ms-transition-timing-function: ease;
+ -o-transition-duration: 200ms;
+ -o-transition-timing-function: ease;
+ transition-duration: 200ms;
+ transition-timing-function: ease;
+}
+
+#viewerContainer {
+/* overflow: auto; */
+ box-shadow: inset 1px 0 0 hsla(0,0%,100%,.05);
+/* position: absolute;
+ top: 32px;
+ right: 0;
+ bottom: 0;
+ left: 0; */
+/* switch to using these instead: */
+ position: relative;
+ overflow: hidden;
+}
+
+.toolbar {
+/* position: absolute; */
+ left: 0;
+ right: 0;
+ height: 32px;
+ z-index: 9999;
+ cursor: default;
+}
+
+#toolbarContainer {
+ width: 100%;
+}
+
+
+#toolbarViewer {
+ position: relative;
+ height: 32px;
+ background-image: url(vendor/pdfjs/images/texture.png),
+ -webkit-linear-gradient(hsla(0,0%,32%,.99), hsla(0,0%,27%,.95));
+ background-image: url(vendor/pdfjs/images/texture.png),
+ -moz-linear-gradient(hsla(0,0%,32%,.99), hsla(0,0%,27%,.95));
+ background-image: url(vendor/pdfjs/images/texture.png),
+ -ms-linear-gradient(hsla(0,0%,32%,.99), hsla(0,0%,27%,.95));
+ background-image: url(vendor/pdfjs/images/texture.png),
+ -o-linear-gradient(hsla(0,0%,32%,.99), hsla(0,0%,27%,.95));
+ background-image: url(vendor/pdfjs/images/texture.png),
+ linear-gradient(hsla(0,0%,32%,.99), hsla(0,0%,27%,.95));
+ box-shadow: inset 1px 0 0 hsla(0,0%,100%,.08),
+ inset 0 1px 1px hsla(0,0%,0%,.15),
+ inset 0 -1px 0 hsla(0,0%,100%,.05),
+ 0 1px 0 hsla(0,0%,0%,.15),
+ 0 1px 1px hsla(0,0%,0%,.1);
+}
+
+#toolbarViewerLeft {
+ margin-left: -1px;
+/* position: absolute; */
+ top: 0;
+ left: 0;
+}
+
+#toolbarViewerRight {
+/* position: absolute; */
+ top: 0;
+ right: 0;
+}
+
+#toolbarViewerLeft > *,
+#toolbarViewerMiddle > *,
+#toolbarViewerRight > * {
+ float: left;
+}
+
+.splitToolbarButton {
+ margin: 3px 2px 4px 0;
+ display: inline-block;
+}
+.splitToolbarButton > .toolbarButton {
+ border-radius: 0;
+ float: left;
+}
+
+.toolbarButton {
+ border: 0 none;
+ background-color: rgba(0, 0, 0, 0);
+ width: 32px;
+ height: 25px;
+}
+
+.toolbarButton > span {
+ display: inline-block;
+ width: 0;
+ height: 0;
+ overflow: hidden;
+}
+
+.toolbarButton[disabled] {
+ opacity: .5;
+}
+
+.toolbarButton.group {
+ margin-right:0;
+}
+
+.splitToolbarButton.toggled .toolbarButton {
+ margin: 0;
+}
+
+.splitToolbarButton:hover > .toolbarButton,
+.splitToolbarButton:focus > .toolbarButton,
+.splitToolbarButton.toggled > .toolbarButton,
+.toolbarButton.textButton {
+ background-color: hsla(0,0%,0%,.12);
+ background-image: -webkit-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -moz-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -ms-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -o-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-clip: padding-box;
+ border: 1px solid hsla(0,0%,0%,.35);
+ border-color: hsla(0,0%,0%,.32) hsla(0,0%,0%,.38) hsla(0,0%,0%,.42);
+ box-shadow: 0 1px 0 hsla(0,0%,100%,.05) inset,
+ 0 0 1px hsla(0,0%,100%,.15) inset,
+ 0 1px 0 hsla(0,0%,100%,.05);
+ -webkit-transition-property: background-color, border-color, box-shadow;
+ -webkit-transition-duration: 150ms;
+ -webkit-transition-timing-function: ease;
+ -moz-transition-property: background-color, border-color, box-shadow;
+ -moz-transition-duration: 150ms;
+ -moz-transition-timing-function: ease;
+ -ms-transition-property: background-color, border-color, box-shadow;
+ -ms-transition-duration: 150ms;
+ -ms-transition-timing-function: ease;
+ -o-transition-property: background-color, border-color, box-shadow;
+ -o-transition-duration: 150ms;
+ -o-transition-timing-function: ease;
+ transition-property: background-color, border-color, box-shadow;
+ transition-duration: 150ms;
+ transition-timing-function: ease;
+
+}
+.splitToolbarButton > .toolbarButton:hover,
+.splitToolbarButton > .toolbarButton:focus,
+.dropdownToolbarButton:hover,
+.toolbarButton.textButton:hover,
+.toolbarButton.textButton:focus {
+ background-color: hsla(0,0%,0%,.2);
+ box-shadow: 0 1px 0 hsla(0,0%,100%,.05) inset,
+ 0 0 1px hsla(0,0%,100%,.15) inset,
+ 0 0 1px hsla(0,0%,0%,.05);
+ z-index: 199;
+}
+.splitToolbarButton > .toolbarButton:first-child {
+ position: relative;
+ margin: 0;
+ margin-right: -1px;
+ border-top-left-radius: 2px;
+ border-bottom-left-radius: 2px;
+ border-right-color: transparent;
+}
+.splitToolbarButton > .toolbarButton:last-child {
+ position: relative;
+ margin: 0;
+ margin-left: -1px;
+ border-top-right-radius: 2px;
+ border-bottom-right-radius: 2px;
+ border-left-color: transparent;
+}
+.splitToolbarButtonSeparator {
+ padding: 8px 0;
+ width: 1px;
+ background-color: hsla(0,0%,00%,.5);
+ z-index: 99;
+ box-shadow: 0 0 0 1px hsla(0,0%,100%,.08);
+ display: inline-block;
+ margin: 5px 0;
+ float:left;
+}
+
+.splitToolbarButton:hover > .splitToolbarButtonSeparator,
+.splitToolbarButton.toggled > .splitToolbarButtonSeparator {
+ padding: 12px 0;
+ margin: 1px 0;
+ box-shadow: 0 0 0 1px hsla(0,0%,100%,.03);
+ -webkit-transition-property: padding;
+ -webkit-transition-duration: 10ms;
+ -webkit-transition-timing-function: ease;
+ -moz-transition-property: padding;
+ -moz-transition-duration: 10ms;
+ -moz-transition-timing-function: ease;
+ -ms-transition-property: padding;
+ -ms-transition-duration: 10ms;
+ -ms-transition-timing-function: ease;
+ -o-transition-property: padding;
+ -o-transition-duration: 10ms;
+ -o-transition-timing-function: ease;
+ transition-property: padding;
+ transition-duration: 10ms;
+ transition-timing-function: ease;
+}
+
+.toolbarButton,
+.dropdownToolbarButton {
+ min-width: 16px;
+ padding: 2px 6px 0;
+ border: 1px solid transparent;
+ border-radius: 2px;
+ color: hsl(0,0%,95%);
+ font-size: 12px;
+ line-height: 14px;
+ -webkit-user-select:none;
+ -moz-user-select:none;
+ -ms-user-select:none;
+ /* Opera does not support user-select, use <... unselectable="on"> instead */
+ cursor: default;
+ -webkit-transition-property: background-color, border-color, box-shadow;
+ -webkit-transition-duration: 150ms;
+ -webkit-transition-timing-function: ease;
+ -moz-transition-property: background-color, border-color, box-shadow;
+ -moz-transition-duration: 150ms;
+ -moz-transition-timing-function: ease;
+ -ms-transition-property: background-color, border-color, box-shadow;
+ -ms-transition-duration: 150ms;
+ -ms-transition-timing-function: ease;
+ -o-transition-property: background-color, border-color, box-shadow;
+ -o-transition-duration: 150ms;
+ -o-transition-timing-function: ease;
+ transition-property: background-color, border-color, box-shadow;
+ transition-duration: 150ms;
+ transition-timing-function: ease;
+}
+
+.toolbarButton {
+ margin: 3px 2px 4px 0;
+}
+
+.toolbarButton:hover,
+.toolbarButton:focus,
+.dropdownToolbarButton {
+ background-color: hsla(0,0%,0%,.12);
+ background-image: -webkit-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -moz-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -ms-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -o-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-clip: padding-box;
+ border: 1px solid hsla(0,0%,0%,.35);
+ border-color: hsla(0,0%,0%,.32) hsla(0,0%,0%,.38) hsla(0,0%,0%,.42);
+ box-shadow: 0 1px 0 hsla(0,0%,100%,.05) inset,
+ 0 0 1px hsla(0,0%,100%,.15) inset,
+ 0 1px 0 hsla(0,0%,100%,.05);
+}
+
+.toolbarButton:hover:active,
+.dropdownToolbarButton:hover:active {
+ background-color: hsla(0,0%,0%,.2);
+ background-image: -webkit-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -moz-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -ms-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -o-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ border-color: hsla(0,0%,0%,.35) hsla(0,0%,0%,.4) hsla(0,0%,0%,.45);
+ box-shadow: 0 1px 1px hsla(0,0%,0%,.1) inset,
+ 0 0 1px hsla(0,0%,0%,.2) inset,
+ 0 1px 0 hsla(0,0%,100%,.05);
+ -webkit-transition-property: background-color, border-color, box-shadow;
+ -webkit-transition-duration: 10ms;
+ -webkit-transition-timing-function: linear;
+ -moz-transition-property: background-color, border-color, box-shadow;
+ -moz-transition-duration: 10ms;
+ -moz-transition-timing-function: linear;
+ -ms-transition-property: background-color, border-color, box-shadow;
+ -ms-transition-duration: 10ms;
+ -ms-transition-timing-function: linear;
+ -o-transition-property: background-color, border-color, box-shadow;
+ -o-transition-duration: 10ms;
+ -o-transition-timing-function: linear;
+ transition-property: background-color, border-color, box-shadow;
+ transition-duration: 10ms;
+ transition-timing-function: linear;
+}
+
+.toolbarButton.toggled,
+.splitToolbarButton.toggled > .toolbarButton.toggled {
+ background-color: hsla(0,0%,0%,.3);
+ background-image: -webkit-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -moz-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -ms-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: -o-linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ background-image: linear-gradient(hsla(0,0%,100%,.05), hsla(0,0%,100%,0));
+ border-color: hsla(0,0%,0%,.4) hsla(0,0%,0%,.45) hsla(0,0%,0%,.5);
+ box-shadow: 0 1px 1px hsla(0,0%,0%,.1) inset,
+ 0 0 1px hsla(0,0%,0%,.2) inset,
+ 0 1px 0 hsla(0,0%,100%,.05);
+ -webkit-transition-property: background-color, border-color, box-shadow;
+ -webkit-transition-duration: 10ms;
+ -webkit-transition-timing-function: linear;
+ -moz-transition-property: background-color, border-color, box-shadow;
+ -moz-transition-duration: 10ms;
+ -moz-transition-timing-function: linear;
+ -ms-transition-property: background-color, border-color, box-shadow;
+ -ms-transition-duration: 10ms;
+ -ms-transition-timing-function: linear;
+ -o-transition-property: background-color, border-color, box-shadow;
+ -o-transition-duration: 10ms;
+ -o-transition-timing-function: linear;
+ transition-property: background-color, border-color, box-shadow;
+ transition-duration: 10ms;
+ transition-timing-function: linear;
+}
+
+.toolbarButton.toggled:hover:active,
+.splitToolbarButton.toggled > .toolbarButton.toggled:hover:active {
+ background-color: hsla(0,0%,0%,.4);
+ border-color: hsla(0,0%,0%,.4) hsla(0,0%,0%,.5) hsla(0,0%,0%,.55);
+ box-shadow: 0 1px 1px hsla(0,0%,0%,.2) inset,
+ 0 0 1px hsla(0,0%,0%,.3) inset,
+ 0 1px 0 hsla(0,0%,100%,.05);
+}
+
+.dropdownToolbarButton {
+ max-width: 120px;
+ padding: 3px 2px 2px;
+ overflow: hidden;
+ background: url(vendor/pdfjs/images/toolbarButton-menuArrows.png) no-repeat;
+ background-position: 95%;
+}
+
+.dropdownToolbarButton > select {
+ -webkit-appearance: none;
+ -moz-appearance: none; /* in the future this might matter, see bugzilla bug #649849 */
+ min-width: 140px;
+ font-size: 12px;
+ color: hsl(0,0%,95%);
+ margin:0;
+ padding:0;
+ border:none;
+ background: rgba(0,0,0,0); /* Opera does not support 'transparent'