+
@@ -56,6 +56,7 @@
diff --git a/cms/templates/widgets/qualaroo.html b/cms/templates/widgets/qualaroo.html
new file mode 100644
index 0000000000..04d10e08d1
--- /dev/null
+++ b/cms/templates/widgets/qualaroo.html
@@ -0,0 +1,13 @@
+% if settings.MITX_FEATURES.get('STUDIO_NPS_SURVEY'):
+
+
+
+
+
+
+% endif
diff --git a/cms/templates/widgets/tender.html b/cms/templates/widgets/tender.html
new file mode 100644
index 0000000000..74318f7dac
--- /dev/null
+++ b/cms/templates/widgets/tender.html
@@ -0,0 +1,13 @@
+% if user.is_authenticated():
+
+
+
+% endif
\ No newline at end of file
diff --git a/cms/urls.py b/cms/urls.py
index 8054757c36..f5b258de16 100644
--- a/cms/urls.py
+++ b/cms/urls.py
@@ -42,36 +42,52 @@ urlpatterns = ('',
'contentstore.views.remove_user', name='remove_user'),
url(r'^(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)/remove_user$',
'contentstore.views.remove_user', name='remove_user'),
- url(r'^(?P[^/]+)/(?P[^/]+)/info/(?P[^/]+)$', 'contentstore.views.course_info', name='course_info'),
- url(r'^(?P[^/]+)/(?P[^/]+)/course_info/updates/(?P.*)$', 'contentstore.views.course_info_updates', name='course_info_json'),
- url(r'^(?P[^/]+)/(?P[^/]+)/settings-details/(?P[^/]+)$', 'contentstore.views.get_course_settings', name='course_settings'),
- url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)$', 'contentstore.views.course_config_graders_page', name='course_settings'),
- url(r'^(?P[^/]+)/(?P[^/]+)/settings-details/(?P[^/]+)/section/(?P[^/]+).*$', 'contentstore.views.course_settings_updates', name='course_settings'),
- url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)/(?P.*)$', 'contentstore.views.course_grader_updates', name='course_settings'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/info/(?P[^/]+)$',
+ 'contentstore.views.course_info', name='course_info'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/course_info/updates/(?P.*)$',
+ 'contentstore.views.course_info_updates', name='course_info_json'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/settings-details/(?P[^/]+)$',
+ 'contentstore.views.get_course_settings', name='settings_details'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)$',
+ 'contentstore.views.course_config_graders_page', name='settings_grading'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/settings-details/(?P[^/]+)/section/(?P[^/]+).*$',
+ 'contentstore.views.course_settings_updates', name='course_settings'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)/(?P.*)$',
+ 'contentstore.views.course_grader_updates', name='course_settings'),
# This is the URL to initially render the course advanced settings.
- url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)$', 'contentstore.views.course_config_advanced_page', name='course_advanced_settings'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)$',
+ 'contentstore.views.course_config_advanced_page', name='course_advanced_settings'),
# This is the URL used by BackBone for updating and re-fetching the model.
- url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)/update.*$', 'contentstore.views.course_advanced_updates', name='course_advanced_settings_updates'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)/update.*$',
+ 'contentstore.views.course_advanced_updates', name='course_advanced_settings_updates'),
- url(r'^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/gradeas.*$', 'contentstore.views.assignment_type_update', name='assignment_type_update'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/gradeas.*$',
+ 'contentstore.views.assignment_type_update', name='assignment_type_update'),
- url(r'^pages/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.static_pages',
+ url(r'^pages/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$',
+ 'contentstore.views.static_pages',
name='static_pages'),
- url(r'^edit_static/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.edit_static', name='edit_static'),
- url(r'^edit_tabs/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.edit_tabs', name='edit_tabs'),
- url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$', 'contentstore.views.asset_index', name='asset_index'),
+ url(r'^edit_static/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$',
+ 'contentstore.views.edit_static', name='edit_static'),
+ url(r'^edit_tabs/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$',
+ 'contentstore.views.edit_tabs', name='edit_tabs'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$',
+ 'contentstore.views.asset_index', name='asset_index'),
# this is a generic method to return the data/metadata associated with a xmodule
- url(r'^module_info/(?P.*)$', 'contentstore.views.module_info', name='module_info'),
+ url(r'^module_info/(?P.*)$',
+ 'contentstore.views.module_info', name='module_info'),
# temporary landing page for a course
- url(r'^edge/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.landing', name='landing'),
+ url(r'^edge/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$',
+ 'contentstore.views.landing', name='landing'),
url(r'^not_found$', 'contentstore.views.not_found', name='not_found'),
url(r'^server_error$', 'contentstore.views.server_error', name='server_error'),
- url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$', 'contentstore.views.asset_index', name='asset_index'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$',
+ 'contentstore.views.asset_index', name='asset_index'),
# temporary landing page for edge
url(r'^edge$', 'contentstore.views.edge', name='edge'),
@@ -84,6 +100,9 @@ urlpatterns = ('',
# User creation and updating views
urlpatterns += (
url(r'^ux-alerts$', 'contentstore.views.ux_alerts', name='ux-alerts'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/checklists/(?P[^/]+)$', 'contentstore.views.get_checklists', name='checklists'),
+ url(r'^(?P[^/]+)/(?P[^/]+)/checklists/(?P[^/]+)/update(/)?(?P.+)?.*$',
+ 'contentstore.views.update_checklist', name='checklists_updates'),
url(r'^howitworks$', 'contentstore.views.howitworks', name='howitworks'),
url(r'^signup$', 'contentstore.views.signup', name='signup'),
diff --git a/cms/xmodule_namespace.py b/cms/xmodule_namespace.py
index cad3110574..c9bb8f4c6e 100644
--- a/cms/xmodule_namespace.py
+++ b/cms/xmodule_namespace.py
@@ -40,7 +40,6 @@ class CmsNamespace(Namespace):
"""
Namespace with fields common to all blocks in Studio
"""
- is_draft = Boolean(help="Whether this module is a draft", default=False, scope=Scope.settings)
published_date = DateTuple(help="Date when the module was published", scope=Scope.settings)
published_by = String(help="Id of the user who published this module", scope=Scope.settings)
empty = StringyBoolean(help="Whether this is an empty template", scope=Scope.settings, default=False)
diff --git a/common/djangoapps/contentserver/middleware.py b/common/djangoapps/contentserver/middleware.py
index c5e887801e..8e9e70046d 100644
--- a/common/djangoapps/contentserver/middleware.py
+++ b/common/djangoapps/contentserver/middleware.py
@@ -5,6 +5,7 @@ from django.http import HttpResponse, Http404, HttpResponseNotModified
from xmodule.contentstore.django import contentstore
from xmodule.contentstore.content import StaticContent, XASSET_LOCATION_TAG
+from xmodule.modulestore import InvalidLocationError
from cache_toolbox.core import get_cached_content, set_cached_content
from xmodule.exceptions import NotFoundError
@@ -13,7 +14,14 @@ class StaticContentServer(object):
def process_request(self, request):
# look to see if the request is prefixed with 'c4x' tag
if request.path.startswith('/' + XASSET_LOCATION_TAG + '/'):
- loc = StaticContent.get_location_from_path(request.path)
+ try:
+ loc = StaticContent.get_location_from_path(request.path)
+ except InvalidLocationError:
+ # return a 'Bad Request' to browser as we have a malformed Location
+ response = HttpResponse()
+ response.status_code = 400
+ return response
+
# first look in our cache so we don't have to round-trip to the DB
content = get_cached_content(loc)
if content is None:
diff --git a/common/djangoapps/request_cache/__init__.py b/common/djangoapps/request_cache/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/common/djangoapps/request_cache/middleware.py b/common/djangoapps/request_cache/middleware.py
new file mode 100644
index 0000000000..9d3dffdf27
--- /dev/null
+++ b/common/djangoapps/request_cache/middleware.py
@@ -0,0 +1,27 @@
+import threading
+
+_request_cache_threadlocal = threading.local()
+_request_cache_threadlocal.data = {}
+
+
+class RequestCache(object):
+    """Middleware providing a dict cache scoped to the current request."""
+
+    @classmethod
+    def get_request_cache(cls):
+        # threading.local attributes exist only in the thread that set them;
+        # the module-level init above covers the importing thread only.
+        if not hasattr(_request_cache_threadlocal, 'data'):
+            _request_cache_threadlocal.data = {}
+        return _request_cache_threadlocal
+
+    def clear_request_cache(self):
+        _request_cache_threadlocal.data = {}
+
+    def process_request(self, request):
+        self.clear_request_cache()
+        return None
+
+    def process_response(self, request, response):
+        self.clear_request_cache()
+        return response
diff --git a/common/djangoapps/student/models.py b/common/djangoapps/student/models.py
index 54bdd77297..56b1293c2d 100644
--- a/common/djangoapps/student/models.py
+++ b/common/djangoapps/student/models.py
@@ -75,10 +75,15 @@ class UserProfile(models.Model):
GENDER_CHOICES = (('m', 'Male'), ('f', 'Female'), ('o', 'Other'))
gender = models.CharField(blank=True, null=True, max_length=6, db_index=True,
choices=GENDER_CHOICES)
- LEVEL_OF_EDUCATION_CHOICES = (('p_se', 'Doctorate in science or engineering'),
- ('p_oth', 'Doctorate in another field'),
+
+ # [03/21/2013] removed these, but leaving comment since there'll still be
+ # p_se and p_oth in the existing data in db.
+ # ('p_se', 'Doctorate in science or engineering'),
+ # ('p_oth', 'Doctorate in another field'),
+ LEVEL_OF_EDUCATION_CHOICES = (('p', 'Doctorate'),
('m', "Master's or professional degree"),
('b', "Bachelor's degree"),
+ ('a', "Associate's degree"),
('hs', "Secondary/high school"),
('jhs', "Junior secondary/junior high/middle school"),
('el', "Elementary/primary school"),
diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py
index 902ec82677..8267816e2c 100644
--- a/common/djangoapps/student/views.py
+++ b/common/djangoapps/student/views.py
@@ -311,7 +311,7 @@ def change_enrollment(request):
course = course_from_id(course_id)
except ItemNotFoundError:
log.warning("User {0} tried to enroll in non-existent course {1}"
- .format(user.username, enrollment.course_id))
+ .format(user.username, course_id))
return {'success': False, 'error': 'The course requested does not exist.'}
if not has_access(user, course, 'enroll'):
@@ -325,7 +325,12 @@ def change_enrollment(request):
"course:{0}".format(course_num),
"run:{0}".format(run)])
- enrollment, created = CourseEnrollment.objects.get_or_create(user=user, course_id=course.id)
+ try:
+ enrollment, created = CourseEnrollment.objects.get_or_create(user=user, course_id=course.id)
+ except IntegrityError:
+ # If we've already created this enrollment in a separate transaction,
+ # then just continue
+ pass
return {'success': True}
elif action == "unenroll":
@@ -369,14 +374,14 @@ def login_user(request, error=""):
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
- log.warning("Login failed - Unknown user email: {0}".format(email))
+ log.warning(u"Login failed - Unknown user email: {0}".format(email))
return HttpResponse(json.dumps({'success': False,
'value': 'Email or password is incorrect.'})) # TODO: User error message
username = user.username
user = authenticate(username=username, password=password)
if user is None:
- log.warning("Login failed - password for {0} is invalid".format(email))
+ log.warning(u"Login failed - password for {0} is invalid".format(email))
return HttpResponse(json.dumps({'success': False,
'value': 'Email or password is incorrect.'}))
@@ -392,7 +397,7 @@ def login_user(request, error=""):
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(e)
- log.info("Login success - {0} ({1})".format(username, email))
+ log.info(u"Login success - {0} ({1})".format(username, email))
try_change_enrollment(request)
@@ -400,7 +405,7 @@ def login_user(request, error=""):
return HttpResponse(json.dumps({'success': True}))
- log.warning("Login failed - Account not active for user {0}, resending activation".format(username))
+ log.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = "This account has not been activated. We have " + \
diff --git a/common/djangoapps/terrain/course_helpers.py b/common/djangoapps/terrain/course_helpers.py
new file mode 100644
index 0000000000..f0df456c80
--- /dev/null
+++ b/common/djangoapps/terrain/course_helpers.py
@@ -0,0 +1,140 @@
+#pylint: disable=C0111
+#pylint: disable=W0621
+
+from lettuce import world, step
+from .factories import *
+from django.conf import settings
+from django.http import HttpRequest
+from django.contrib.auth.models import User
+from django.contrib.auth import authenticate, login
+from django.contrib.auth.middleware import AuthenticationMiddleware
+from django.contrib.sessions.middleware import SessionMiddleware
+from student.models import CourseEnrollment
+from xmodule.modulestore.django import _MODULESTORES, modulestore
+from xmodule.templates import update_templates
+from bs4 import BeautifulSoup
+import os.path
+from urllib import quote_plus
+from lettuce.django import django_url
+
+
+@world.absorb
+def create_user(uname):
+    # If the user already exists, don't try to create it again.
+    # .exists() avoids fetching every matching row just to count them.
+    if User.objects.filter(username=uname).exists():
+        return
+
+    portal_user = UserFactory.build(username=uname, email=uname + '@edx.org')
+    portal_user.set_password('test')
+    portal_user.save()
+
+    registration = world.RegistrationFactory(user=portal_user)
+    registration.register(portal_user)
+    registration.activate()
+
+    user_profile = world.UserProfileFactory(user=portal_user)
+
+
+@world.absorb
+def log_in(username, password):
+    '''
+    Log the user in programmatically
+    '''
+
+    # Authenticate the user
+    user = authenticate(username=username, password=password)
+    assert(user is not None and user.is_active)
+
+    # Send a fake HttpRequest to log the user in
+    # We need to process the request using
+    # Session middleware and Authentication middleware
+    # to ensure that session state can be stored
+    request = HttpRequest()
+    SessionMiddleware().process_request(request)
+    AuthenticationMiddleware().process_request(request)
+    login(request, user)
+
+    # Save the session
+    request.session.save()
+
+    # Retrieve the sessionid and add it to the browser's cookies
+    cookie_dict = {settings.SESSION_COOKIE_NAME: request.session.session_key}
+    try:
+        world.browser.cookies.add(cookie_dict)
+
+    # WebDriver has an issue where we cannot set cookies
+    # before we make a GET request, so if we get an error,
+    # we load the '/' page and try again
+    except Exception:
+        world.browser.visit(django_url('/'))
+        world.browser.cookies.add(cookie_dict)
+
+
+@world.absorb
+def register_by_course_id(course_id, is_staff=False):
+    create_user('robot')
+    robot = User.objects.get(username='robot')
+    if is_staff:
+        robot.is_staff = True
+        robot.save()
+    CourseEnrollment.objects.get_or_create(user=robot, course_id=course_id)
+
+
+
+@world.absorb
+def save_the_course_content(path='/tmp'):
+    import re  # not imported at module level in this file; needed below
+    html = world.browser.html.encode('ascii', 'ignore')
+    soup = BeautifulSoup(html)
+
+    # get rid of the header, we only want to compare the body
+    soup.head.decompose()
+
+    # for now, remove the data-id attributes, because they are
+    # causing mismatches between cms-master and master
+    for item in soup.find_all(attrs={'data-id': re.compile('.*')}):
+        del item['data-id']
+
+    # we also need to remove them from unrendered problems,
+    # where they are contained in the text of divs instead of
+    # in attributes of tags
+    # Be careful of whether or not it was the last attribute
+    # and needs a trailing space
+    for item in soup.find_all(text=re.compile(' data-id=".*?" ')):
+        s = unicode(item.string)
+        item.string.replace_with(re.sub(' data-id=".*?" ', ' ', s))
+
+    for item in soup.find_all(text=re.compile(' data-id=".*?"')):
+        s = unicode(item.string)
+        item.string.replace_with(re.sub(' data-id=".*?"', ' ', s))
+
+    # prettify the html so it will compare better, with
+    # each HTML tag on its own line
+    output = soup.prettify()
+
+    # use string slicing to grab everything after 'courseware/' in the URL
+    u = world.browser.url
+    section_url = u[u.find('courseware/') + 11:]
+
+
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+    filename = '%s.html' % (quote_plus(section_url))
+    with open('%s/%s' % (path, filename), 'w') as f:
+        f.write(output)
+
+
+@world.absorb
+def clear_courses():
+    # Flush and initialize the module store
+    # It needs the templates because it creates new records
+    # by cloning from the template.
+    # Note that if your test module gets in some weird state
+    # (though it shouldn't), do this manually
+    # from the bash shell to drop it:
+    # $ mongo test_xmodule --eval "db.dropDatabase()"
+    _MODULESTORES.clear()  # '= {}' would only rebind a local name
+    modulestore().collection.drop()
+    update_templates()
diff --git a/common/djangoapps/terrain/steps.py b/common/djangoapps/terrain/steps.py
index 890d5fe450..a2db80712f 100644
--- a/common/djangoapps/terrain/steps.py
+++ b/common/djangoapps/terrain/steps.py
@@ -1,20 +1,21 @@
+#pylint: disable=C0111
+#pylint: disable=W0621
+
+# Disable the "wildcard import" warning so we can bring in all methods from
+# course helpers and ui helpers
+#pylint: disable=W0401
+
+# Disable the "Unused import %s from wildcard import" warning
+#pylint: disable=W0614
+
+# Disable the "unused argument" warning because lettuce uses "step"
+#pylint: disable=W0613
+
from lettuce import world, step
-from .factories import *
+from .course_helpers import *
+from .ui_helpers import *
from lettuce.django import django_url
-from django.conf import settings
-from django.http import HttpRequest
-from django.contrib.auth.models import User
-from django.contrib.auth import authenticate, login
-from django.contrib.auth.middleware import AuthenticationMiddleware
-from django.contrib.sessions.middleware import SessionMiddleware
-from student.models import CourseEnrollment
-from urllib import quote_plus
-from nose.tools import assert_equals
-from bs4 import BeautifulSoup
-import time
-import re
-import os.path
-from selenium.common.exceptions import WebDriverException
+from nose.tools import assert_equals, assert_in
from logging import getLogger
logger = getLogger(__name__)
@@ -22,7 +23,7 @@ logger = getLogger(__name__)
@step(u'I wait (?:for )?"(\d+)" seconds?$')
def wait(step, seconds):
- time.sleep(float(seconds))
+ world.wait(seconds)
@step('I reload the page$')
@@ -30,44 +31,49 @@ def reload_the_page(step):
world.browser.reload()
+@step('I press the browser back button$')
+def browser_back(step):
+ world.browser.driver.back()
+
+
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
- world.browser.visit(django_url('/'))
- assert world.browser.is_element_present_by_css('header.global', 10)
+ world.visit('/')
+ assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
- world.browser.visit(django_url('/dashboard'))
- assert world.browser.is_element_present_by_css('section.container.dashboard', 5)
+ world.visit('/dashboard')
+ assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
- assert world.browser.is_element_present_by_css('section.container.dashboard', 5)
+ assert world.is_css_present('section.container.dashboard')
assert world.browser.title == 'Dashboard'
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
- world.browser.visit(django_url('/courses'))
- assert world.browser.is_element_present_by_css('section.courses')
+ world.visit('/courses')
+ assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
- world.browser.find_by_css(button_css).first.click()
+ world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
- world.browser.find_link_by_text(linktext).first.click()
+ world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
- assert world.browser.url == django_url(path)
+ assert world.url_equals(path)
@step(u'the page title should be "([^"]*)"$')
@@ -80,10 +86,15 @@ def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
+@step('I log in$')
+def i_log_in(step):
+ world.log_in('robot', 'test')
+
+
@step('I am a logged in user$')
def i_am_logged_in_user(step):
- create_user('robot')
- log_in('robot', 'test')
+ world.create_user('robot')
+ world.log_in('robot', 'test')
@step('I am not logged in$')
@@ -93,151 +104,41 @@ def i_am_not_logged_in(step):
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
- register_by_course_id(course_id, True)
+ world.register_by_course_id(course_id, True)
-@step('I log in$')
-def i_log_in(step):
- log_in('robot', 'test')
+@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
+def click_the_link_called(step, text):
+ world.click_link(text)
+
+
+@step(r'should see that the url is "([^"]*)"$')
+def should_have_the_url(step, url):
+ assert_equals(world.browser.url, url)
+
+
+@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
+def should_see_a_link_called(step, text):
+ assert len(world.browser.find_link_by_text(text)) > 0
+
+
+@step(r'should see "(.*)" (?:somewhere|anywhere) in (?:the|this) page')
+def should_see_in_the_page(step, text):
+ assert_in(text, world.css_text('body'))
+
+
+@step('I am logged in$')
+def i_am_logged_in(step):
+ world.create_user('robot')
+ world.log_in('robot', 'test')
+ world.browser.visit(django_url('/'))
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
- create_user('robot')
-
-#### helper functions
+ world.create_user('robot')
-@world.absorb
-def scroll_to_bottom():
- # Maximize the browser
- world.browser.execute_script("window.scrollTo(0, screen.height);")
-
-
-@world.absorb
-def create_user(uname):
-
- # If the user already exists, don't try to create it again
- if len(User.objects.filter(username=uname)) > 0:
- return
-
- portal_user = UserFactory.build(username=uname, email=uname + '@edx.org')
- portal_user.set_password('test')
- portal_user.save()
-
- registration = world.RegistrationFactory(user=portal_user)
- registration.register(portal_user)
- registration.activate()
-
- user_profile = world.UserProfileFactory(user=portal_user)
-
-
-@world.absorb
-def log_in(username, password):
- '''
- Log the user in programatically
- '''
-
- # Authenticate the user
- user = authenticate(username=username, password=password)
- assert(user is not None and user.is_active)
-
- # Send a fake HttpRequest to log the user in
- # We need to process the request using
- # Session middleware and Authentication middleware
- # to ensure that session state can be stored
- request = HttpRequest()
- SessionMiddleware().process_request(request)
- AuthenticationMiddleware().process_request(request)
- login(request, user)
-
- # Save the session
- request.session.save()
-
- # Retrieve the sessionid and add it to the browser's cookies
- cookie_dict = {settings.SESSION_COOKIE_NAME: request.session.session_key}
- try:
- world.browser.cookies.add(cookie_dict)
-
- # WebDriver has an issue where we cannot set cookies
- # before we make a GET request, so if we get an error,
- # we load the '/' page and try again
- except:
- world.browser.visit(django_url('/'))
- world.browser.cookies.add(cookie_dict)
-
-
-@world.absorb
-def register_by_course_id(course_id, is_staff=False):
- create_user('robot')
- u = User.objects.get(username='robot')
- if is_staff:
- u.is_staff = True
- u.save()
- CourseEnrollment.objects.get_or_create(user=u, course_id=course_id)
-
-
-@world.absorb
-def save_the_html(path='/tmp'):
- u = world.browser.url
- html = world.browser.html.encode('ascii', 'ignore')
- filename = '%s.html' % quote_plus(u)
- f = open('%s/%s' % (path, filename), 'w')
- f.write(html)
- f.close
-
-
-@world.absorb
-def save_the_course_content(path='/tmp'):
- html = world.browser.html.encode('ascii', 'ignore')
- soup = BeautifulSoup(html)
-
- # get rid of the header, we only want to compare the body
- soup.head.decompose()
-
- # for now, remove the data-id attributes, because they are
- # causing mismatches between cms-master and master
- for item in soup.find_all(attrs={'data-id': re.compile('.*')}):
- del item['data-id']
-
- # we also need to remove them from unrendered problems,
- # where they are contained in the text of divs instead of
- # in attributes of tags
- # Be careful of whether or not it was the last attribute
- # and needs a trailing space
- for item in soup.find_all(text=re.compile(' data-id=".*?" ')):
- s = unicode(item.string)
- item.string.replace_with(re.sub(' data-id=".*?" ', ' ', s))
-
- for item in soup.find_all(text=re.compile(' data-id=".*?"')):
- s = unicode(item.string)
- item.string.replace_with(re.sub(' data-id=".*?"', ' ', s))
-
- # prettify the html so it will compare better, with
- # each HTML tag on its own line
- output = soup.prettify()
-
- # use string slicing to grab everything after 'courseware/' in the URL
- u = world.browser.url
- section_url = u[u.find('courseware/') + 11:]
-
-
- if not os.path.exists(path):
- os.makedirs(path)
-
- filename = '%s.html' % (quote_plus(section_url))
- f = open('%s/%s' % (path, filename), 'w')
- f.write(output)
- f.close
-
-@world.absorb
-def css_click(css_selector):
- try:
- world.browser.find_by_css(css_selector).click()
-
- except WebDriverException:
- # Occassionally, MathJax or other JavaScript can cover up
- # an element temporarily.
- # If this happens, wait a second, then try again
- time.sleep(1)
- world.browser.find_by_css(css_selector).click()
+@step(u'User "([^"]*)" is an edX user$')
+def registered_edx_user(step, uname):
+ world.create_user(uname)
diff --git a/common/djangoapps/terrain/ui_helpers.py b/common/djangoapps/terrain/ui_helpers.py
new file mode 100644
index 0000000000..d4d99e17b5
--- /dev/null
+++ b/common/djangoapps/terrain/ui_helpers.py
@@ -0,0 +1,117 @@
+#pylint: disable=C0111
+#pylint: disable=W0621
+
+from lettuce import world, step
+import time
+from urllib import quote_plus
+from selenium.common.exceptions import WebDriverException
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from lettuce.django import django_url
+
+
+@world.absorb
+def wait(seconds):
+ time.sleep(float(seconds))
+
+
+@world.absorb
+def wait_for(func):
+ WebDriverWait(world.browser.driver, 5).until(func)
+
+
+@world.absorb
+def visit(url):
+ world.browser.visit(django_url(url))
+
+
+@world.absorb
+def url_equals(url):
+ return world.browser.url == django_url(url)
+
+
+@world.absorb
+def is_css_present(css_selector):
+ return world.browser.is_element_present_by_css(css_selector, wait_time=4)
+
+
+@world.absorb
+def css_has_text(css_selector, text):
+ return world.css_text(css_selector) == text
+
+
+@world.absorb
+def css_find(css):
+ def is_visible(driver):
+ return EC.visibility_of_element_located((By.CSS_SELECTOR, css,))
+
+ world.browser.is_element_present_by_css(css, 5)
+ wait_for(is_visible)
+ return world.browser.find_by_css(css)
+
+
+@world.absorb
+def css_click(css_selector):
+ '''
+ First try to use the regular click method,
+ but if clicking in the middle of an element
+ doesn't work it might be that it thinks some other
+ element is on top of it there so click in the upper left
+ '''
+ try:
+ world.browser.find_by_css(css_selector).click()
+
+ except WebDriverException:
+ # Occassionally, MathJax or other JavaScript can cover up
+ # an element temporarily.
+ # If this happens, wait a second, then try again
+ time.sleep(1)
+ world.browser.find_by_css(css_selector).click()
+
+
+@world.absorb
+def css_click_at(css, x=10, y=10):
+ '''
+ A method to click at x,y coordinates of the element
+ rather than in the center of the element
+ '''
+ e = css_find(css).first
+ e.action_chains.move_to_element_with_offset(e._element, x, y)
+ e.action_chains.click()
+ e.action_chains.perform()
+
+
+@world.absorb
+def css_fill(css_selector, text):
+ world.browser.find_by_css(css_selector).first.fill(text)
+
+
+@world.absorb
+def click_link(partial_text):
+ world.browser.find_link_by_partial_text(partial_text).first.click()
+
+
+@world.absorb
+def css_text(css_selector):
+
+ # Wait for the css selector to appear
+ if world.is_css_present(css_selector):
+ return world.browser.find_by_css(css_selector).first.text
+ else:
+ return ""
+
+
+@world.absorb
+def css_visible(css_selector):
+ return world.browser.find_by_css(css_selector).visible
+
+
+@world.absorb
+def save_the_html(path='/tmp'):
+    u = world.browser.url
+    html = world.browser.html.encode('ascii', 'ignore')
+    filename = '%s.html' % quote_plus(u)
+    # 'with' guarantees the file is closed (old code called f.close
+    # without parens, which never actually closed it)
+    with open('%s/%s' % (path, filename), 'w') as f:
+        f.write(html)
diff --git a/common/djangoapps/util/converters.py b/common/djangoapps/util/converters.py
deleted file mode 100644
index ec2d29ecfa..0000000000
--- a/common/djangoapps/util/converters.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import time
-import datetime
-import re
-import calendar
-
-
-def time_to_date(time_obj):
- """
- Convert a time.time_struct to a true universal time (can pass to js Date constructor)
- """
- # TODO change to using the isoformat() function on datetime. js date can parse those
- return calendar.timegm(time_obj) * 1000
-
-
-def jsdate_to_time(field):
- """
- Convert a universal time (iso format) or msec since epoch to a time obj
- """
- if field is None:
- return field
- elif isinstance(field, basestring):
- # ISO format but ignores time zone assuming it's Z.
- d = datetime.datetime(*map(int, re.split('[^\d]', field)[:6])) # stop after seconds. Debatable
- return d.utctimetuple()
- elif isinstance(field, (int, long, float)):
- return time.gmtime(field / 1000)
- elif isinstance(field, time.struct_time):
- return field
- else:
- raise ValueError("Couldn't convert %r to time" % field)
diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py
index 42753fc90b..6580114bcc 100644
--- a/common/lib/capa/capa/capa_problem.py
+++ b/common/lib/capa/capa/capa_problem.py
@@ -16,7 +16,6 @@ This is used by capa_module.
from __future__ import division
from datetime import datetime
-import json
import logging
import math
import numpy
@@ -32,9 +31,9 @@ from xml.sax.saxutils import unescape
from copy import deepcopy
import chem
+import chem.miller
import chem.chemcalc
import chem.chemtools
-import chem.miller
import verifiers
import verifiers.draganddrop
@@ -97,8 +96,13 @@ class LoncapaProblem(object):
- problem_text (string): xml defining the problem
- id (string): identifier for this problem; often a filename (no spaces)
- - state (dict): student state
- - seed (int): random number generator seed (int)
+ - seed (int): random number generator seed (int)
+ - state (dict): containing the following keys:
+ - 'seed' - (int) random number generator seed
+ - 'student_answers' - (dict) maps input id to the stored answer for that input
+ - 'correct_map' (CorrectMap) a map of each input to their 'correctness'
+ - 'done' - (bool) indicates whether or not this problem is considered done
+ - 'input_state' - (dict) maps input_id to a dictionary that holds the state for that input
- system (ModuleSystem): ModuleSystem instance which provides OS,
rendering, and user context
@@ -110,21 +114,23 @@ class LoncapaProblem(object):
self.system = system
if self.system is None:
raise Exception()
- self.seed = seed
- if state:
- if 'seed' in state:
- self.seed = state['seed']
- if 'student_answers' in state:
- self.student_answers = state['student_answers']
- if 'correct_map' in state:
- self.correct_map.set_dict(state['correct_map'])
- if 'done' in state:
- self.done = state['done']
+ state = state if state else {}
- # TODO: Does this deplete the Linux entropy pool? Is this fast enough?
- if not self.seed:
+ # Set seed according to the following priority:
+ # 1. Contained in problem's state
+ # 2. Passed into capa_problem via constructor
+ # 3. Assign from the OS's random number generator
+ self.seed = state.get('seed', seed)
+ if self.seed is None:
self.seed = struct.unpack('i', os.urandom(4))[0]
+ self.student_answers = state.get('student_answers', {})
+ if 'correct_map' in state:
+ self.correct_map.set_dict(state['correct_map'])
+ self.done = state.get('done', False)
+ self.input_state = state.get('input_state', {})
+
+
# Convert startouttext and endouttext to proper
problem_text = re.sub("startouttext\s*/", "text", problem_text)
@@ -188,6 +194,7 @@ class LoncapaProblem(object):
return {'seed': self.seed,
'student_answers': self.student_answers,
'correct_map': self.correct_map.get_dict(),
+ 'input_state': self.input_state,
'done': self.done}
def get_max_score(self):
@@ -237,6 +244,20 @@ class LoncapaProblem(object):
self.correct_map.set_dict(cmap.get_dict())
return cmap
+ def ungraded_response(self, xqueue_msg, queuekey):
+ '''
+ Handle any responses from the xqueue that do not contain grades
+ Will try to pass the queue message to all inputtypes that can handle ungraded responses
+
+ Does not return any value
+ '''
+ # check against each inputtype
+ for the_input in self.inputs.values():
+ # if the input type has an ungraded function, pass in the values
+ if hasattr(the_input, 'ungraded_response'):
+ the_input.ungraded_response(xqueue_msg, queuekey)
+
+
def is_queued(self):
'''
Returns True if any part of the problem has been submitted to an external queue
@@ -351,7 +372,7 @@ class LoncapaProblem(object):
dispatch = get['dispatch']
return self.inputs[input_id].handle_ajax(dispatch, get)
else:
- log.warning("Could not find matching input for id: %s" % problem_id)
+ log.warning("Could not find matching input for id: %s" % input_id)
return {}
@@ -527,11 +548,15 @@ class LoncapaProblem(object):
value = ""
if self.student_answers and problemid in self.student_answers:
value = self.student_answers[problemid]
-
+
+ if input_id not in self.input_state:
+ self.input_state[input_id] = {}
+
# do the rendering
state = {'value': value,
'status': status,
'id': input_id,
+ 'input_state': self.input_state[input_id],
'feedback': {'message': msg,
'hint': hint,
'hintmode': hintmode, }}
diff --git a/common/lib/capa/capa/correctmap.py b/common/lib/capa/capa/correctmap.py
index b726f765d8..950cd199fc 100644
--- a/common/lib/capa/capa/correctmap.py
+++ b/common/lib/capa/capa/correctmap.py
@@ -80,16 +80,17 @@ class CorrectMap(object):
Special migration case:
If correct_map is a one-level dict, then convert it to the new dict of dicts format.
- '''
- if correct_map and not (type(correct_map[correct_map.keys()[0]]) == dict):
- # empty current dict
- self.__init__()
- # create new dict entries
+ '''
+ # empty current dict
+ self.__init__()
+
+ # create new dict entries
+ if correct_map and not isinstance(correct_map.values()[0], dict):
+ # special migration
for k in correct_map:
- self.set(k, correct_map[k])
+ self.set(k, correctness=correct_map[k])
else:
- self.__init__()
for k in correct_map:
self.set(k, **correct_map[k])
diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py
index c2babfa479..b4e9fe1654 100644
--- a/common/lib/capa/capa/inputtypes.py
+++ b/common/lib/capa/capa/inputtypes.py
@@ -37,18 +37,18 @@ graded status as'status'
# makes sense, but a bunch of problems have markup that assumes block. Bigger TODO: figure out a
# general css and layout strategy for capa, document it, then implement it.
-from collections import namedtuple
import json
import logging
from lxml import etree
import re
import shlex # for splitting quoted strings
import sys
-import os
import pyparsing
from .registry import TagRegistry
from capa.chem import chemcalc
+import xqueue_interface
+from datetime import datetime
log = logging.getLogger(__name__)
@@ -97,7 +97,8 @@ class Attribute(object):
"""
val = element.get(self.name)
if self.default == self._sentinel and val is None:
- raise ValueError('Missing required attribute {0}.'.format(self.name))
+ raise ValueError(
+ 'Missing required attribute {0}.'.format(self.name))
if val is None:
# not required, so return default
@@ -132,6 +133,8 @@ class InputTypeBase(object):
* 'id' -- the id of this input, typically
"{problem-location}_{response-num}_{input-num}"
* 'status' (answered, unanswered, unsubmitted)
+ * 'input_state' -- dictionary containing any inputtype-specific state
+ that has been preserved
* 'feedback' (dictionary containing keys for hints, errors, or other
feedback from previous attempt. Specifically 'message', 'hint',
'hintmode'. If 'hintmode' is 'always', the hint is always displayed.)
@@ -149,7 +152,8 @@ class InputTypeBase(object):
self.id = state.get('id', xml.get('id'))
if self.id is None:
- raise ValueError("input id state is None. xml is {0}".format(etree.tostring(xml)))
+ raise ValueError("input id state is None. xml is {0}".format(
+ etree.tostring(xml)))
self.value = state.get('value', '')
@@ -157,6 +161,7 @@ class InputTypeBase(object):
self.msg = feedback.get('message', '')
self.hint = feedback.get('hint', '')
self.hintmode = feedback.get('hintmode', None)
+ self.input_state = state.get('input_state', {})
# put hint above msg if it should be displayed
if self.hintmode == 'always':
@@ -169,14 +174,15 @@ class InputTypeBase(object):
self.process_requirements()
# Call subclass "constructor" -- means they don't have to worry about calling
- # super().__init__, and are isolated from changes to the input constructor interface.
+ # super().__init__, and are isolated from changes to the input
+ # constructor interface.
self.setup()
except Exception as err:
# Something went wrong: add xml to message, but keep the traceback
- msg = "Error in xml '{x}': {err} ".format(x=etree.tostring(xml), err=str(err))
+ msg = "Error in xml '{x}': {err} ".format(
+ x=etree.tostring(xml), err=str(err))
raise Exception, msg, sys.exc_info()[2]
-
@classmethod
def get_attributes(cls):
"""
@@ -186,7 +192,6 @@ class InputTypeBase(object):
"""
return []
-
def process_requirements(self):
"""
Subclasses can declare lists of required and optional attributes. This
@@ -196,7 +201,8 @@ class InputTypeBase(object):
Processes attributes, putting the results in the self.loaded_attributes dictionary. Also creates a set
self.to_render, containing the names of attributes that should be included in the context by default.
"""
- # Use local dicts and sets so that if there are exceptions, we don't end up in a partially-initialized state.
+ # Use local dicts and sets so that if there are exceptions, we don't
+ # end up in a partially-initialized state.
loaded = {}
to_render = set()
for a in self.get_attributes():
@@ -226,7 +232,7 @@ class InputTypeBase(object):
get: a dictionary containing the data that was sent with the ajax call
Output:
- a dictionary object that can be serialized into JSON. This will be sent back to the Javascript.
+ a dictionary object that can be serialized into JSON. This will be sent back to the Javascript.
"""
pass
@@ -247,8 +253,9 @@ class InputTypeBase(object):
'value': self.value,
'status': self.status,
'msg': self.msg,
- }
- context.update((a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render)
+ }
+ context.update((a, v) for (
+ a, v) in self.loaded_attributes.iteritems() if a in self.to_render)
context.update(self._extra_context())
return context
@@ -371,7 +378,6 @@ class ChoiceGroup(InputTypeBase):
return [Attribute("show_correctness", "always"),
Attribute("submitted_message", "Answer received.")]
-
def _extra_context(self):
return {'input_type': self.html_input_type,
'choices': self.choices,
@@ -436,7 +442,6 @@ class JavascriptInput(InputTypeBase):
Attribute('display_class', None),
Attribute('display_file', None), ]
-
def setup(self):
# Need to provide a value that JSON can parse if there is no
# student-supplied value yet.
@@ -459,7 +464,6 @@ class TextLine(InputTypeBase):
template = "textline.html"
tags = ['textline']
-
@classmethod
def get_attributes(cls):
"""
@@ -474,12 +478,12 @@ class TextLine(InputTypeBase):
# Attributes below used in setup(), not rendered directly.
Attribute('math', None, render=False),
- # TODO: 'dojs' flag is temporary, for backwards compatibility with 8.02x
+ # TODO: 'dojs' flag is temporary, for backwards compatibility with
+ # 8.02x
Attribute('dojs', None, render=False),
Attribute('preprocessorClassName', None, render=False),
Attribute('preprocessorSrc', None, render=False),
- ]
-
+ ]
def setup(self):
self.do_math = bool(self.loaded_attributes['math'] or
@@ -490,12 +494,12 @@ class TextLine(InputTypeBase):
self.preprocessor = None
if self.do_math:
# Preprocessor to insert between raw input and Mathjax
- self.preprocessor = {'class_name': self.loaded_attributes['preprocessorClassName'],
- 'script_src': self.loaded_attributes['preprocessorSrc']}
+ self.preprocessor = {
+ 'class_name': self.loaded_attributes['preprocessorClassName'],
+ 'script_src': self.loaded_attributes['preprocessorSrc']}
if None in self.preprocessor.values():
self.preprocessor = None
-
def _extra_context(self):
return {'do_math': self.do_math,
'preprocessor': self.preprocessor, }
@@ -539,7 +543,8 @@ class FileSubmission(InputTypeBase):
"""
# Check if problem has been queued
self.queue_len = 0
- # Flag indicating that the problem has been queued, 'msg' is length of queue
+ # Flag indicating that the problem has been queued, 'msg' is length of
+ # queue
if self.status == 'incomplete':
self.status = 'queued'
self.queue_len = self.msg
@@ -547,7 +552,6 @@ class FileSubmission(InputTypeBase):
def _extra_context(self):
return {'queue_len': self.queue_len, }
- return context
registry.register(FileSubmission)
@@ -562,8 +566,9 @@ class CodeInput(InputTypeBase):
template = "codeinput.html"
tags = ['codeinput',
- 'textbox', # Another (older) name--at some point we may want to make it use a
- # non-codemirror editor.
+ 'textbox',
+ # Another (older) name--at some point we may want to make it use a
+ # non-codemirror editor.
]
# pulled out for testing
@@ -586,22 +591,29 @@ class CodeInput(InputTypeBase):
Attribute('tabsize', 4, transform=int),
]
- def setup(self):
+ def setup_code_response_rendering(self):
"""
Implement special logic: handle queueing state, and default input.
"""
- # if no student input yet, then use the default input given by the problem
- if not self.value:
- self.value = self.xml.text
+ # if no student input yet, then use the default input given by the
+ # problem
+ if not self.value and self.xml.text:
+ self.value = self.xml.text.strip()
# Check if problem has been queued
self.queue_len = 0
- # Flag indicating that the problem has been queued, 'msg' is length of queue
+ # Flag indicating that the problem has been queued, 'msg' is length of
+ # queue
if self.status == 'incomplete':
self.status = 'queued'
self.queue_len = self.msg
self.msg = self.submitted_msg
+
+ def setup(self):
+ ''' setup this input type '''
+ self.setup_code_response_rendering()
+
def _extra_context(self):
"""Defined queue_len, add it """
return {'queue_len': self.queue_len, }
@@ -610,8 +622,164 @@ registry.register(CodeInput)
#-----------------------------------------------------------------------------
+
+
+class MatlabInput(CodeInput):
+ '''
+ InputType for handling Matlab code input
+
+ TODO: API_KEY will go away once we have a way to specify it per-course
+ Example:
+
+ Initial Text
+
+ %api_key=API_KEY
+
+
+ '''
+ template = "matlabinput.html"
+ tags = ['matlabinput']
+
+ plot_submitted_msg = ("Submitted. As soon as a response is returned, "
+ "this message will be replaced by that feedback.")
+
+ def setup(self):
+ '''
+ Handle matlab-specific parsing
+ '''
+ self.setup_code_response_rendering()
+
+ xml = self.xml
+ self.plot_payload = xml.findtext('./plot_payload')
+
+ # Check if problem has been queued
+ self.queuename = 'matlab'
+ self.queue_msg = ''
+ if 'queue_msg' in self.input_state and self.status in ['queued','incomplete', 'unsubmitted']:
+ self.queue_msg = self.input_state['queue_msg']
+ if 'queuestate' in self.input_state and self.input_state['queuestate'] is not None:
+ self.status = 'queued'
+ self.queue_len = 1
+ self.msg = self.plot_submitted_msg
+
+
+ def handle_ajax(self, dispatch, get):
+ '''
+ Handle AJAX calls directed to this input
+
+ Args:
+ - dispatch (str) - indicates how we want this ajax call to be handled
+ - get (dict) - dictionary of key-value pairs that contain useful data
+ Returns:
+
+ '''
+
+ if dispatch == 'plot':
+ return self._plot_data(get)
+ return {}
+
+ def ungraded_response(self, queue_msg, queuekey):
+ '''
+ Handle the response from the XQueue
+ Stores the response in the input_state so it can be rendered later
+
+ Args:
+ - queue_msg (str) - message returned from the queue. The message to be rendered
+ - queuekey (str) - a key passed to the queue. Will be matched up to verify that this is the response we're waiting for
+
+ Returns:
+ nothing
+ '''
+ # check the queuekey against the saved queuekey
+ if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued'
+ and self.input_state['queuekey'] == queuekey):
+ msg = self._parse_data(queue_msg)
+ # save the queue message so that it can be rendered later
+ self.input_state['queue_msg'] = msg
+ self.input_state['queuestate'] = None
+ self.input_state['queuekey'] = None
+
+ def _extra_context(self):
+ ''' Set up additional context variables'''
+ extra_context = {
+ 'queue_len': self.queue_len,
+ 'queue_msg': self.queue_msg
+ }
+ return extra_context
+
+ def _parse_data(self, queue_msg):
+ '''
+ Parses the message out of the queue message
+ Args:
+ queue_msg (str) - a JSON encoded string
+ Returns:
+ returns the value for the key 'msg' in queue_msg
+ '''
+ try:
+ result = json.loads(queue_msg)
+ except (TypeError, ValueError):
+ log.error("External message should be a JSON serialized dict."
+ " Received queue_msg = %s" % queue_msg)
+ raise
+ msg = result['msg']
+ return msg
+
+
+ def _plot_data(self, get):
+ '''
+ AJAX handler for the plot button
+ Args:
+ get (dict) - should have key 'submission' which contains the student submission
+ Returns:
+ dict - 'success' - whether or not we successfully queued this submission
+ - 'message' - message to be rendered in case of error
+ '''
+ # only send data if xqueue exists
+ if self.system.xqueue is None:
+ return {'success': False, 'message': 'Cannot connect to the queue'}
+
+ # pull relevant info out of get
+ response = get['submission']
+
+ # construct xqueue headers
+ qinterface = self.system.xqueue['interface']
+ qtime = datetime.strftime(datetime.utcnow(), xqueue_interface.dateformat)
+ callback_url = self.system.xqueue['construct_callback']('ungraded_response')
+ anonymous_student_id = self.system.anonymous_student_id
+ queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
+ anonymous_student_id +
+ self.id)
+ xheader = xqueue_interface.make_xheader(
+ lms_callback_url = callback_url,
+ lms_key = queuekey,
+ queue_name = self.queuename)
+
+ # save the input state
+ self.input_state['queuekey'] = queuekey
+ self.input_state['queuestate'] = 'queued'
+
+
+ # construct xqueue body
+ student_info = {'anonymous_student_id': anonymous_student_id,
+ 'submission_time': qtime}
+ contents = {'grader_payload': self.plot_payload,
+ 'student_info': json.dumps(student_info),
+ 'student_response': response}
+
+ (error, msg) = qinterface.send_to_queue(header=xheader,
+ body = json.dumps(contents))
+
+ return {'success': error == 0, 'message': msg}
+
+
+registry.register(MatlabInput)
+
+
+#-----------------------------------------------------------------------------
+
class Schematic(InputTypeBase):
"""
+ InputType for the schematic editor
"""
template = "schematicinput.html"
@@ -630,7 +798,6 @@ class Schematic(InputTypeBase):
Attribute('initial_value', None),
Attribute('submit_analyses', None), ]
- return context
registry.register(Schematic)
@@ -660,12 +827,12 @@ class ImageInput(InputTypeBase):
Attribute('height'),
Attribute('width'), ]
-
def setup(self):
"""
if value is of the form [x,y] then parse it and send along coordinates of previous answer
"""
- m = re.match('\[([0-9]+),([0-9]+)]', self.value.strip().replace(' ', ''))
+ m = re.match('\[([0-9]+),([0-9]+)]',
+ self.value.strip().replace(' ', ''))
if m:
# Note: we subtract 15 to compensate for the size of the dot on the screen.
# (is a 30x30 image--lms/static/green-pointer.png).
@@ -673,7 +840,6 @@ class ImageInput(InputTypeBase):
else:
(self.gx, self.gy) = (0, 0)
-
def _extra_context(self):
return {'gx': self.gx,
@@ -730,7 +896,7 @@ class VseprInput(InputTypeBase):
registry.register(VseprInput)
-#--------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
class ChemicalEquationInput(InputTypeBase):
@@ -794,7 +960,8 @@ class ChemicalEquationInput(InputTypeBase):
result['error'] = "Couldn't parse formula: {0}".format(p)
except Exception:
# this is unexpected, so log
- log.warning("Error while previewing chemical formula", exc_info=True)
+ log.warning(
+ "Error while previewing chemical formula", exc_info=True)
result['error'] = "Error while rendering preview"
return result
@@ -843,16 +1010,16 @@ class DragAndDropInput(InputTypeBase):
'can_reuse': ""}
tag_attrs['target'] = {'id': Attribute._sentinel,
- 'x': Attribute._sentinel,
- 'y': Attribute._sentinel,
- 'w': Attribute._sentinel,
- 'h': Attribute._sentinel}
+ 'x': Attribute._sentinel,
+ 'y': Attribute._sentinel,
+ 'w': Attribute._sentinel,
+ 'h': Attribute._sentinel}
dic = dict()
for attr_name in tag_attrs[tag_type].keys():
dic[attr_name] = Attribute(attr_name,
- default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag)
+ default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag)
if tag_type == 'draggable' and not self.no_labels:
dic['label'] = dic['label'] or dic['id']
@@ -865,7 +1032,7 @@ class DragAndDropInput(InputTypeBase):
# add labels to images?:
self.no_labels = Attribute('no_labels',
- default="False").parse_from_xml(self.xml)
+ default="False").parse_from_xml(self.xml)
to_js = dict()
@@ -874,16 +1041,16 @@ class DragAndDropInput(InputTypeBase):
# outline places on image where to drag adn drop
to_js['target_outline'] = Attribute('target_outline',
- default="False").parse_from_xml(self.xml)
+ default="False").parse_from_xml(self.xml)
# one draggable per target?
to_js['one_per_target'] = Attribute('one_per_target',
- default="True").parse_from_xml(self.xml)
+ default="True").parse_from_xml(self.xml)
# list of draggables
to_js['draggables'] = [parse(draggable, 'draggable') for draggable in
- self.xml.iterchildren('draggable')]
+ self.xml.iterchildren('draggable')]
# list of targets
to_js['targets'] = [parse(target, 'target') for target in
- self.xml.iterchildren('target')]
+ self.xml.iterchildren('target')]
# custom background color for labels:
label_bg_color = Attribute('label_bg_color',
@@ -896,7 +1063,7 @@ class DragAndDropInput(InputTypeBase):
registry.register(DragAndDropInput)
-#--------------------------------------------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
class EditAMoleculeInput(InputTypeBase):
@@ -934,6 +1101,7 @@ registry.register(EditAMoleculeInput)
#-----------------------------------------------------------------------------
+
class DesignProtein2dInput(InputTypeBase):
"""
An input type for design of a protein in 2D. Integrates with the Protex java applet.
@@ -969,14 +1137,16 @@ registry.register(DesignProtein2dInput)
#-----------------------------------------------------------------------------
+
class EditAGeneInput(InputTypeBase):
"""
- An input type for editing a gene. Integrates with the genex java applet.
+ An input type for editing a gene.
+ Integrates with the genex GWT application.
Example:
-
- """
+
+ """
template = "editageneinput.html"
tags = ['editageneinput']
@@ -986,9 +1156,7 @@ class EditAGeneInput(InputTypeBase):
"""
Note: width, height, and dna_sequencee are required.
"""
- return [Attribute('width'),
- Attribute('height'),
- Attribute('dna_sequence'),
+ return [Attribute('genex_dna_sequence'),
Attribute('genex_problem_number')
]
@@ -1005,6 +1173,7 @@ registry.register(EditAGeneInput)
#---------------------------------------------------------------------
+
class AnnotationInput(InputTypeBase):
"""
Input type for annotations: students can enter some notes or other text
@@ -1037,13 +1206,14 @@ class AnnotationInput(InputTypeBase):
def setup(self):
xml = self.xml
- self.debug = False # set to True to display extra debug info with input
- self.return_to_annotation = True # return only works in conjunction with annotatable xmodule
+ self.debug = False # set to True to display extra debug info with input
+ self.return_to_annotation = True # return only works in conjunction with annotatable xmodule
self.title = xml.findtext('./title', 'Annotation Exercise')
self.text = xml.findtext('./text')
self.comment = xml.findtext('./comment')
- self.comment_prompt = xml.findtext('./comment_prompt', 'Type a commentary below:')
+ self.comment_prompt = xml.findtext(
+ './comment_prompt', 'Type a commentary below:')
self.tag_prompt = xml.findtext('./tag_prompt', 'Select one tag:')
self.options = self._find_options()
@@ -1061,7 +1231,7 @@ class AnnotationInput(InputTypeBase):
'id': index,
'description': option.text,
'choice': option.get('choice')
- } for (index, option) in enumerate(elements) ]
+ } for (index, option) in enumerate(elements)]
def _validate_options(self):
''' Raises a ValueError if the choice attribute is missing or invalid. '''
@@ -1071,7 +1241,8 @@ class AnnotationInput(InputTypeBase):
if choice is None:
raise ValueError('Missing required choice attribute.')
elif choice not in valid_choices:
- raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(choice, ', '.join(valid_choices)))
+ raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(
+ choice, ', '.join(valid_choices)))
def _unpack(self, json_value):
''' Unpacks the json input state into a dict. '''
@@ -1089,20 +1260,20 @@ class AnnotationInput(InputTypeBase):
return {
'options_value': options_value,
- 'has_options_value': len(options_value) > 0, # for convenience
+ 'has_options_value': len(options_value) > 0, # for convenience
'comment_value': comment_value,
}
def _extra_context(self):
extra_context = {
- 'title': self.title,
- 'text': self.text,
- 'comment': self.comment,
- 'comment_prompt': self.comment_prompt,
- 'tag_prompt': self.tag_prompt,
- 'options': self.options,
- 'return_to_annotation': self.return_to_annotation,
- 'debug': self.debug
+ 'title': self.title,
+ 'text': self.text,
+ 'comment': self.comment,
+ 'comment_prompt': self.comment_prompt,
+ 'tag_prompt': self.tag_prompt,
+ 'options': self.options,
+ 'return_to_annotation': self.return_to_annotation,
+ 'debug': self.debug
}
extra_context.update(self._unpack(self.value))
@@ -1110,4 +1281,3 @@ class AnnotationInput(InputTypeBase):
return extra_context
registry.register(AnnotationInput)
-
diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index 6bf98999d8..5b1b46d858 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -17,6 +17,7 @@ import logging
import numbers
import numpy
import os
+import sys
import random
import re
import requests
@@ -52,12 +53,17 @@ class LoncapaProblemError(Exception):
class ResponseError(Exception):
'''
- Error for failure in processing a response
+ Error for failure in processing a response, including
+ exceptions that occur when executing a custom script.
'''
pass
class StudentInputError(Exception):
+ '''
+ Error for an invalid student input.
+ For example, submitting a string when the problem expects a number
+ '''
pass
#-----------------------------------------------------------------------------
@@ -128,21 +134,25 @@ class LoncapaResponse(object):
for abox in inputfields:
if abox.tag not in self.allowed_inputfields:
- msg = "%s: cannot have input field %s" % (unicode(self), abox.tag)
- msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '')
+ msg = "%s: cannot have input field %s" % (
+ unicode(self), abox.tag)
+ msg += "\nSee XML source line %s" % getattr(
+ xml, 'sourceline', '')
raise LoncapaProblemError(msg)
if self.max_inputfields and len(inputfields) > self.max_inputfields:
msg = "%s: cannot have more than %s input fields" % (
unicode(self), self.max_inputfields)
- msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '')
+ msg += "\nSee XML source line %s" % getattr(
+ xml, 'sourceline', '')
raise LoncapaProblemError(msg)
for prop in self.required_attributes:
if not xml.get(prop):
msg = "Error in problem specification: %s missing required attribute %s" % (
unicode(self), prop)
- msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '')
+ msg += "\nSee XML source line %s" % getattr(
+ xml, 'sourceline', '')
raise LoncapaProblemError(msg)
# ordered list of answer_id values for this response
@@ -163,7 +173,8 @@ class LoncapaResponse(object):
for entry in self.inputfields:
answer = entry.get('correct_answer')
if answer:
- self.default_answer_map[entry.get('id')] = contextualize_text(answer, self.context)
+ self.default_answer_map[entry.get(
+ 'id')] = contextualize_text(answer, self.context)
if hasattr(self, 'setup_response'):
self.setup_response()
@@ -211,7 +222,8 @@ class LoncapaResponse(object):
Returns the new CorrectMap, with (correctness,msg,hint,hintmode) for each answer_id.
'''
new_cmap = self.get_score(student_answers)
- self.get_hints(convert_files_to_filenames(student_answers), new_cmap, old_cmap)
+ self.get_hints(convert_files_to_filenames(
+ student_answers), new_cmap, old_cmap)
# log.debug('new_cmap = %s' % new_cmap)
return new_cmap
@@ -241,14 +253,17 @@ class LoncapaResponse(object):
# callback procedure to a social hint generation system.
if not hintfn in self.context:
msg = 'missing specified hint function %s in script context' % hintfn
- msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline', '')
+ msg += "\nSee XML source line %s" % getattr(
+ self.xml, 'sourceline', '')
raise LoncapaProblemError(msg)
try:
- self.context[hintfn](self.answer_ids, student_answers, new_cmap, old_cmap)
+ self.context[hintfn](
+ self.answer_ids, student_answers, new_cmap, old_cmap)
except Exception as err:
msg = 'Error %s in evaluating hint function %s' % (err, hintfn)
- msg += "\nSee XML source line %s" % getattr(self.xml, 'sourceline', '')
+ msg += "\nSee XML source line %s" % getattr(
+ self.xml, 'sourceline', '')
raise ResponseError(msg)
return
@@ -270,17 +285,19 @@ class LoncapaResponse(object):
if (self.hint_tag is not None
and hintgroup.find(self.hint_tag) is not None
- and hasattr(self, 'check_hint_condition')):
+ and hasattr(self, 'check_hint_condition')):
rephints = hintgroup.findall(self.hint_tag)
- hints_to_show = self.check_hint_condition(rephints, student_answers)
+ hints_to_show = self.check_hint_condition(
+ rephints, student_answers)
# can be 'on_request' or 'always' (default)
hintmode = hintgroup.get('mode', 'always')
for hintpart in hintgroup.findall('hintpart'):
if hintpart.get('on') in hints_to_show:
hint_text = hintpart.find('text').text
- # make the hint appear after the last answer box in this response
+ # make the hint appear after the last answer box in this
+ # response
aid = self.answer_ids[-1]
new_cmap.set_hint_and_mode(aid, hint_text, hintmode)
log.debug('after hint: new_cmap = %s' % new_cmap)
@@ -340,7 +357,6 @@ class LoncapaResponse(object):
response_msg_div = etree.Element('div')
response_msg_div.text = str(response_msg)
-
# Set the css class of the message
response_msg_div.set("class", "response_message")
@@ -384,20 +400,20 @@ class JavascriptResponse(LoncapaResponse):
# until we decide on exactly how to solve this issue. For now, files are
# manually being compiled to DATA_DIR/js/compiled.
- #latestTimestamp = 0
- #basepath = self.system.filestore.root_path + '/js/'
- #for filename in (self.display_dependencies + [self.display]):
+ # latestTimestamp = 0
+ # basepath = self.system.filestore.root_path + '/js/'
+ # for filename in (self.display_dependencies + [self.display]):
# filepath = basepath + filename
# timestamp = os.stat(filepath).st_mtime
# if timestamp > latestTimestamp:
# latestTimestamp = timestamp
#
- #h = hashlib.md5()
- #h.update(self.answer_id + str(self.display_dependencies))
- #compiled_filename = 'compiled/' + h.hexdigest() + '.js'
- #compiled_filepath = basepath + compiled_filename
+ # h = hashlib.md5()
+ # h.update(self.answer_id + str(self.display_dependencies))
+ # compiled_filename = 'compiled/' + h.hexdigest() + '.js'
+ # compiled_filepath = basepath + compiled_filename
- #if not os.path.exists(compiled_filepath) or os.stat(compiled_filepath).st_mtime < latestTimestamp:
+ # if not os.path.exists(compiled_filepath) or os.stat(compiled_filepath).st_mtime < latestTimestamp:
# outfile = open(compiled_filepath, 'w')
# for filename in (self.display_dependencies + [self.display]):
# filepath = basepath + filename
@@ -419,7 +435,7 @@ class JavascriptResponse(LoncapaResponse):
id=self.xml.get('id'))[0]
self.display_xml = self.xml.xpath('//*[@id=$id]//display',
- id=self.xml.get('id'))[0]
+ id=self.xml.get('id'))[0]
self.xml.remove(self.generator_xml)
self.xml.remove(self.grader_xml)
@@ -430,17 +446,20 @@ class JavascriptResponse(LoncapaResponse):
self.display = self.display_xml.get("src")
if self.generator_xml.get("dependencies"):
- self.generator_dependencies = self.generator_xml.get("dependencies").split()
+ self.generator_dependencies = self.generator_xml.get(
+ "dependencies").split()
else:
self.generator_dependencies = []
if self.grader_xml.get("dependencies"):
- self.grader_dependencies = self.grader_xml.get("dependencies").split()
+ self.grader_dependencies = self.grader_xml.get(
+ "dependencies").split()
else:
self.grader_dependencies = []
if self.display_xml.get("dependencies"):
- self.display_dependencies = self.display_xml.get("dependencies").split()
+ self.display_dependencies = self.display_xml.get(
+ "dependencies").split()
else:
self.display_dependencies = []
@@ -461,10 +480,10 @@ class JavascriptResponse(LoncapaResponse):
return subprocess.check_output(subprocess_args, env=self.get_node_env())
-
def generate_problem_state(self):
- generator_file = os.path.dirname(os.path.normpath(__file__)) + '/javascript_problem_generator.js'
+ generator_file = os.path.dirname(os.path.normpath(
+ __file__)) + '/javascript_problem_generator.js'
output = self.call_node([generator_file,
self.generator,
json.dumps(self.generator_dependencies),
@@ -478,17 +497,18 @@ class JavascriptResponse(LoncapaResponse):
params = {}
for param in self.xml.xpath('//*[@id=$id]//responseparam',
- id=self.xml.get('id')):
+ id=self.xml.get('id')):
raw_param = param.get("value")
- params[param.get("name")] = json.loads(contextualize_text(raw_param, self.context))
+ params[param.get("name")] = json.loads(
+ contextualize_text(raw_param, self.context))
return params
def prepare_inputfield(self):
for inputfield in self.xml.xpath('//*[@id=$id]//javascriptinput',
- id=self.xml.get('id')):
+ id=self.xml.get('id')):
escapedict = {'"': '"'}
@@ -501,7 +521,7 @@ class JavascriptResponse(LoncapaResponse):
escapedict)
inputfield.set("problem_state", encoded_problem_state)
- inputfield.set("display_file", self.display_filename)
+ inputfield.set("display_file", self.display_filename)
inputfield.set("display_class", self.display_class)
def get_score(self, student_answers):
@@ -519,7 +539,8 @@ class JavascriptResponse(LoncapaResponse):
if submission is None or submission == '':
submission = json.dumps(None)
- grader_file = os.path.dirname(os.path.normpath(__file__)) + '/javascript_problem_grader.js'
+ grader_file = os.path.dirname(os.path.normpath(
+ __file__)) + '/javascript_problem_grader.js'
outputs = self.call_node([grader_file,
self.grader,
json.dumps(self.grader_dependencies),
@@ -528,8 +549,8 @@ class JavascriptResponse(LoncapaResponse):
json.dumps(self.params)]).split('\n')
all_correct = json.loads(outputs[0].strip())
- evaluation = outputs[1].strip()
- solution = outputs[2].strip()
+ evaluation = outputs[1].strip()
+ solution = outputs[2].strip()
return (all_correct, evaluation, solution)
def get_answers(self):
@@ -539,9 +560,7 @@ class JavascriptResponse(LoncapaResponse):
return {self.answer_id: self.solution}
-
#-----------------------------------------------------------------------------
-
class ChoiceResponse(LoncapaResponse):
"""
This response type is used when the student chooses from a discrete set of
@@ -599,9 +618,10 @@ class ChoiceResponse(LoncapaResponse):
self.assign_choice_names()
correct_xml = self.xml.xpath('//*[@id=$id]//choice[@correct="true"]',
- id=self.xml.get('id'))
+ id=self.xml.get('id'))
- self.correct_choices = set([choice.get('name') for choice in correct_xml])
+ self.correct_choices = set([choice.get(
+ 'name') for choice in correct_xml])
def assign_choice_names(self):
'''
@@ -654,7 +674,8 @@ class MultipleChoiceResponse(LoncapaResponse):
allowed_inputfields = ['choicegroup']
def setup_response(self):
- # call secondary setup for MultipleChoice questions, to set name attributes
+ # call secondary setup for MultipleChoice questions, to set name
+ # attributes
self.mc_setup_response()
# define correct choices (after calling secondary setup)
@@ -692,7 +713,7 @@ class MultipleChoiceResponse(LoncapaResponse):
# log.debug('%s: student_answers=%s, correct_choices=%s' % (
# unicode(self), student_answers, self.correct_choices))
if (self.answer_id in student_answers
- and student_answers[self.answer_id] in self.correct_choices):
+ and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
@@ -760,7 +781,8 @@ class OptionResponse(LoncapaResponse):
return cmap
def get_answers(self):
- amap = dict([(af.get('id'), contextualize_text(af.get('correct'), self.context)) for af in self.answer_fields])
+ amap = dict([(af.get('id'), contextualize_text(af.get(
+ 'correct'), self.context)) for af in self.answer_fields])
# log.debug('%s: expected answers=%s' % (unicode(self),amap))
return amap
@@ -780,8 +802,9 @@ class NumericalResponse(LoncapaResponse):
context = self.context
self.correct_answer = contextualize_text(xml.get('answer'), context)
try:
- self.tolerance_xml = xml.xpath('//*[@id=$id]//responseparam[@type="tolerance"]/@default',
- id=xml.get('id'))[0]
+ self.tolerance_xml = xml.xpath(
+ '//*[@id=$id]//responseparam[@type="tolerance"]/@default',
+ id=xml.get('id'))[0]
self.tolerance = contextualize_text(self.tolerance_xml, context)
except Exception:
self.tolerance = '0'
@@ -798,21 +821,25 @@ class NumericalResponse(LoncapaResponse):
try:
correct_ans = complex(self.correct_answer)
except ValueError:
- log.debug("Content error--answer '{0}' is not a valid complex number".format(self.correct_answer))
- raise StudentInputError("There was a problem with the staff answer to this problem")
+ log.debug("Content error--answer '{0}' is not a valid complex number".format(
+ self.correct_answer))
+ raise StudentInputError(
+ "There was a problem with the staff answer to this problem")
try:
- correct = compare_with_tolerance(evaluator(dict(), dict(), student_answer),
- correct_ans, self.tolerance)
+ correct = compare_with_tolerance(
+ evaluator(dict(), dict(), student_answer),
+ correct_ans, self.tolerance)
# We should catch this explicitly.
# I think this is just pyparsing.ParseException, calc.UndefinedVariable:
# But we'd need to confirm
except:
- # Use the traceback-preserving version of re-raising with a different type
+ # Use the traceback-preserving version of re-raising with a
+ # different type
import sys
type, value, traceback = sys.exc_info()
- raise StudentInputError, ("Invalid input: could not interpret '%s' as a number" %
+ raise StudentInputError, ("Could not interpret '%s' as a number" %
cgi.escape(student_answer)), traceback
if correct:
@@ -837,7 +864,8 @@ class StringResponse(LoncapaResponse):
max_inputfields = 1
def setup_response(self):
- self.correct_answer = contextualize_text(self.xml.get('answer'), self.context).strip()
+ self.correct_answer = contextualize_text(
+ self.xml.get('answer'), self.context).strip()
def get_score(self, student_answers):
'''Grade a string response '''
@@ -846,7 +874,8 @@ class StringResponse(LoncapaResponse):
return CorrectMap(self.answer_id, 'correct' if correct else 'incorrect')
def check_string(self, expected, given):
- if self.xml.get('type') == 'ci': return given.lower() == expected.lower()
+ if self.xml.get('type') == 'ci':
+ return given.lower() == expected.lower()
return given == expected
def check_hint_condition(self, hxml_set, student_answers):
@@ -854,8 +883,10 @@ class StringResponse(LoncapaResponse):
hints_to_show = []
for hxml in hxml_set:
name = hxml.get('name')
- correct_answer = contextualize_text(hxml.get('answer'), self.context).strip()
- if self.check_string(correct_answer, given): hints_to_show.append(name)
+ correct_answer = contextualize_text(
+ hxml.get('answer'), self.context).strip()
+ if self.check_string(correct_answer, given):
+ hints_to_show.append(name)
log.debug('hints_to_show = %s' % hints_to_show)
return hints_to_show
@@ -889,7 +920,7 @@ class CustomResponse(LoncapaResponse):
correct[0] ='incorrect'
"""},
- {'snippet': """
+
diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py
index 89cb5a5ee9..72d82c683b 100644
--- a/common/lib/capa/capa/tests/__init__.py
+++ b/common/lib/capa/capa/tests/__init__.py
@@ -2,7 +2,7 @@ import fs
import fs.osfs
import os
-from mock import Mock
+from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
@@ -16,6 +16,11 @@ def tst_render_template(template, context):
"""
return '
+
+ {payload}
+
+ """.format(r = self.rows,
+ c = self.cols,
+ tabsize = self.tabsize,
+ m = self.mode,
+ payload = self.payload,
+ ln = self.linenumbers)
+ elt = etree.fromstring(self.xml)
+ state = {'value': 'print "good evening"',
+ 'status': 'incomplete',
+ 'feedback': {'message': '3'}, }
+
+ self.input_class = lookup_tag('matlabinput')
+ self.the_input = self.input_class(test_system, elt, state)
+
+
+ def test_rendering(self):
+ context = self.the_input._get_render_context()
+
+ expected = {'id': 'prob_1_2',
+ 'value': 'print "good evening"',
+ 'status': 'queued',
+ 'msg': self.input_class.submitted_msg,
+ 'mode': self.mode,
+ 'rows': self.rows,
+ 'cols': self.cols,
+ 'queue_msg': '',
+ 'linenumbers': 'true',
+ 'hidden': '',
+ 'tabsize': int(self.tabsize),
+ 'queue_len': '3',
+ }
+
+ self.assertEqual(context, expected)
+
+
+ def test_rendering_with_state(self):
+ state = {'value': 'print "good evening"',
+ 'status': 'incomplete',
+ 'input_state': {'queue_msg': 'message'},
+ 'feedback': {'message': '3'}, }
+ elt = etree.fromstring(self.xml)
+
+ input_class = lookup_tag('matlabinput')
+ the_input = self.input_class(test_system, elt, state)
+ context = the_input._get_render_context()
+
+ expected = {'id': 'prob_1_2',
+ 'value': 'print "good evening"',
+ 'status': 'queued',
+ 'msg': self.input_class.submitted_msg,
+ 'mode': self.mode,
+ 'rows': self.rows,
+ 'cols': self.cols,
+ 'queue_msg': 'message',
+ 'linenumbers': 'true',
+ 'hidden': '',
+ 'tabsize': int(self.tabsize),
+ 'queue_len': '3',
+ }
+
+ self.assertEqual(context, expected)
+
+ def test_plot_data(self):
+ get = {'submission': 'x = 1234;'}
+ response = self.the_input.handle_ajax("plot", get)
+
+ test_system.xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY)
+
+ self.assertTrue(response['success'])
+ self.assertTrue(self.the_input.input_state['queuekey'] is not None)
+ self.assertEqual(self.the_input.input_state['queuestate'], 'queued')
+
+
+
class SchematicTest(unittest.TestCase):
'''
diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py
index e024909d75..bf64d3cc69 100644
--- a/common/lib/capa/capa/tests/test_responsetypes.py
+++ b/common/lib/capa/capa/tests/test_responsetypes.py
@@ -13,10 +13,13 @@ import textwrap
from . import test_system
import capa.capa_problem as lcp
+from capa.responsetypes import LoncapaProblemError, \
+ StudentInputError, ResponseError
from capa.correctmap import CorrectMap
from capa.util import convert_files_to_filenames
from capa.xqueue_interface import dateformat
+
class ResponseTest(unittest.TestCase):
""" Base class for tests of capa responses."""
@@ -35,16 +38,21 @@ class ResponseTest(unittest.TestCase):
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness)
+ def assert_answer_format(self, problem):
+ answers = problem.get_question_answers()
+ self.assertTrue(answers['1_2_1'] is not None)
+
def assert_multiple_grade(self, problem, correct_answers, incorrect_answers):
for input_str in correct_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'correct',
- msg="%s should be marked correct" % str(input_str))
+ msg="%s should be marked correct" % str(input_str))
for input_str in incorrect_answers:
result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
self.assertEqual(result, 'incorrect',
- msg="%s should be marked incorrect" % str(input_str))
+ msg="%s should be marked incorrect" % str(input_str))
+
class MultiChoiceResponseTest(ResponseTest):
from response_xml_factory import MultipleChoiceResponseXMLFactory
@@ -60,7 +68,7 @@ class MultiChoiceResponseTest(ResponseTest):
def test_named_multiple_choice_grade(self):
problem = self.build_problem(choices=[False, True, False],
- choice_names=["foil_1", "foil_2", "foil_3"])
+ choice_names=["foil_1", "foil_2", "foil_3"])
# Ensure that we get the expected grades
self.assert_grade(problem, 'choice_foil_1', 'incorrect')
@@ -91,7 +99,7 @@ class TrueFalseResponseTest(ResponseTest):
def test_named_true_false_grade(self):
problem = self.build_problem(choices=[False, True, True],
- choice_names=['foil_1','foil_2','foil_3'])
+ choice_names=['foil_1', 'foil_2', 'foil_3'])
# Check the results
# Mark correct if and only if ALL (and only) correct chocies selected
@@ -107,6 +115,7 @@ class TrueFalseResponseTest(ResponseTest):
self.assert_grade(problem, 'choice_foil_4', 'incorrect')
self.assert_grade(problem, 'not_a_choice', 'incorrect')
+
class ImageResponseTest(ResponseTest):
from response_xml_factory import ImageResponseXMLFactory
xml_factory_class = ImageResponseXMLFactory
@@ -118,7 +127,7 @@ class ImageResponseTest(ResponseTest):
# Anything inside the rectangle (and along the borders) is correct
# Everything else is incorrect
correct_inputs = ["[12,19]", "[10,10]", "[20,20]",
- "[10,15]", "[20,15]", "[15,10]", "[15,20]"]
+ "[10,15]", "[20,15]", "[15,10]", "[15,20]"]
incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
@@ -145,7 +154,7 @@ class ImageResponseTest(ResponseTest):
def test_multiple_regions_grade(self):
# Define multiple regions that the user can select
- region_str="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"
+ region_str = "[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"
# Expect that only points inside the regions are marked correct
problem = self.build_problem(regions=region_str)
@@ -155,7 +164,7 @@ class ImageResponseTest(ResponseTest):
def test_region_and_rectangle_grade(self):
rectangle_str = "(100,100)-(200,200)"
- region_str="[[10,10], [20,10], [20, 30]]"
+ region_str = "[[10,10], [20,10], [20, 30]]"
# Expect that only points inside the rectangle or region are marked correct
problem = self.build_problem(regions=region_str, rectangle=rectangle_str)
@@ -163,6 +172,13 @@ class ImageResponseTest(ResponseTest):
incorrect_inputs = ["[0,0]", "[600,300]"]
self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
+ def test_show_answer(self):
+ rectangle_str = "(100,100)-(200,200)"
+ region_str = "[[10,10], [20,10], [20, 30]]"
+
+ problem = self.build_problem(regions=region_str, rectangle=rectangle_str)
+ self.assert_answer_format(problem)
+
class SymbolicResponseTest(unittest.TestCase):
def test_sr_grade(self):
@@ -171,85 +187,85 @@ class SymbolicResponseTest(unittest.TestCase):
test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system)
correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
'1_2_1_dynamath': '''
-
-''',
+
+ ''',
}
wrong_answers = {'1_2_1': '2',
'1_2_1_dynamath': '''
''',
- }
+
+ 2
+
+ ''',
+ }
self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
@@ -260,7 +276,7 @@ class OptionResponseTest(ResponseTest):
def test_grade(self):
problem = self.build_problem(options=["first", "second", "third"],
- correct_option="second")
+ correct_option="second")
# Assert that we get the expected grades
self.assert_grade(problem, "first", "incorrect")
@@ -281,9 +297,9 @@ class FormulaResponseTest(ResponseTest):
# The expected solution is numerically equivalent to x+2y
problem = self.build_problem(sample_dict=sample_dict,
- num_samples=10,
- tolerance=0.01,
- answer="x+2*y")
+ num_samples=10,
+ tolerance=0.01,
+ answer="x+2*y")
# Expect an equivalent formula to be marked correct
# 2x - x + y + y = x + 2y
@@ -297,33 +313,31 @@ class FormulaResponseTest(ResponseTest):
def test_hint(self):
# Sample variables x and y in the range [-10, 10]
- sample_dict = {'x': (-10, 10), 'y': (-10,10) }
+ sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
# Give a hint if the user leaves off the coefficient
# or leaves out x
hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'),
- ('2*y', 'missing_x', 'Try including the variable x')]
-
+ ('2*y', 'missing_x', 'Try including the variable x')]
# The expected solution is numerically equivalent to x+2y
problem = self.build_problem(sample_dict=sample_dict,
- num_samples=10,
- tolerance=0.01,
- answer="x+2*y",
- hints=hints)
+ num_samples=10,
+ tolerance=0.01,
+ answer="x+2*y",
+ hints=hints)
# Expect to receive a hint if we add an extra y
input_dict = {'1_2_1': "x + 2*y + y"}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
- 'Check the coefficient of y')
+ 'Check the coefficient of y')
# Expect to receive a hint if we leave out x
input_dict = {'1_2_1': "2*y"}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
- 'Try including the variable x')
-
+ 'Try including the variable x')
def test_script(self):
# Calculate the answer using a script
@@ -334,10 +348,10 @@ class FormulaResponseTest(ResponseTest):
# The expected solution is numerically equivalent to 2*x
problem = self.build_problem(sample_dict=sample_dict,
- num_samples=10,
- tolerance=0.01,
- answer="$calculated_ans",
- script=script)
+ num_samples=10,
+ tolerance=0.01,
+ answer="$calculated_ans",
+ script=script)
# Expect that the inputs are graded correctly
self.assert_grade(problem, '2*x', 'correct')
@@ -348,7 +362,6 @@ class StringResponseTest(ResponseTest):
from response_xml_factory import StringResponseXMLFactory
xml_factory_class = StringResponseXMLFactory
-
def test_case_sensitive(self):
problem = self.build_problem(answer="Second", case_sensitive=True)
@@ -372,23 +385,23 @@ class StringResponseTest(ResponseTest):
def test_hints(self):
hints = [("wisconsin", "wisc", "The state capital of Wisconsin is Madison"),
- ("minnesota", "minn", "The state capital of Minnesota is St. Paul")]
+ ("minnesota", "minn", "The state capital of Minnesota is St. Paul")]
problem = self.build_problem(answer="Michigan",
- case_sensitive=False,
- hints=hints)
+ case_sensitive=False,
+ hints=hints)
# We should get a hint for Wisconsin
input_dict = {'1_2_1': 'Wisconsin'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
- "The state capital of Wisconsin is Madison")
+ "The state capital of Wisconsin is Madison")
# We should get a hint for Minnesota
input_dict = {'1_2_1': 'Minnesota'}
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'),
- "The state capital of Minnesota is St. Paul")
+ "The state capital of Minnesota is St. Paul")
# We should NOT get a hint for Michigan (the correct answer)
input_dict = {'1_2_1': 'Michigan'}
@@ -400,6 +413,7 @@ class StringResponseTest(ResponseTest):
correct_map = problem.grade_answers(input_dict)
self.assertEquals(correct_map.get_hint('1_2_1'), "")
+
class CodeResponseTest(ResponseTest):
from response_xml_factory import CodeResponseXMLFactory
xml_factory_class = CodeResponseXMLFactory
@@ -409,9 +423,9 @@ class CodeResponseTest(ResponseTest):
grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
self.problem = self.build_problem(initial_display="def square(x):",
- answer_display="answer",
- grader_payload=grader_payload,
- num_responses=2)
+ answer_display="answer",
+ grader_payload=grader_payload,
+ num_responses=2)
@staticmethod
def make_queuestate(key, time):
@@ -442,7 +456,6 @@ class CodeResponseTest(ResponseTest):
self.assertEquals(self.problem.is_queued(), True)
-
def test_update_score(self):
'''
Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
@@ -495,7 +508,6 @@ class CodeResponseTest(ResponseTest):
else:
self.assertTrue(self.problem.correct_map.is_queued(test_id)) # Should be queued, message undelivered
-
def test_recentmost_queuetime(self):
'''
Test whether the LoncapaProblem knows about the time of queue requests
@@ -538,13 +550,14 @@ class CodeResponseTest(ResponseTest):
self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])
+
class ChoiceResponseTest(ResponseTest):
from response_xml_factory import ChoiceResponseXMLFactory
xml_factory_class = ChoiceResponseXMLFactory
def test_radio_group_grade(self):
problem = self.build_problem(choice_type='radio',
- choices=[False, True, False])
+ choices=[False, True, False])
# Check that we get the expected results
self.assert_grade(problem, 'choice_0', 'incorrect')
@@ -554,10 +567,9 @@ class ChoiceResponseTest(ResponseTest):
# No choice 3 exists --> mark incorrect
self.assert_grade(problem, 'choice_3', 'incorrect')
-
def test_checkbox_group_grade(self):
problem = self.build_problem(choice_type='checkbox',
- choices=[False, True, True])
+ choices=[False, True, True])
# Check that we get the expected results
# (correct if and only if BOTH correct choices chosen)
@@ -581,14 +593,15 @@ class JavascriptResponseTest(ResponseTest):
os.system("coffee -c %s" % (coffee_file_path))
problem = self.build_problem(generator_src="test_problem_generator.js",
- grader_src="test_problem_grader.js",
- display_class="TestProblemDisplay",
- display_src="test_problem_display.js",
- param_dict={'value': '4'})
+ grader_src="test_problem_grader.js",
+ display_class="TestProblemDisplay",
+ display_src="test_problem_display.js",
+ param_dict={'value': '4'})
# Test that we get graded correctly
- self.assert_grade(problem, json.dumps({0:4}), "correct")
- self.assert_grade(problem, json.dumps({0:5}), "incorrect")
+ self.assert_grade(problem, json.dumps({0: 4}), "correct")
+ self.assert_grade(problem, json.dumps({0: 5}), "incorrect")
+
class NumericalResponseTest(ResponseTest):
from response_xml_factory import NumericalResponseXMLFactory
@@ -596,27 +609,26 @@ class NumericalResponseTest(ResponseTest):
def test_grade_exact(self):
problem = self.build_problem(question_text="What is 2 + 2?",
- explanation="The answer is 4",
- answer=4)
+ explanation="The answer is 4",
+ answer=4)
correct_responses = ["4", "4.0", "4.00"]
incorrect_responses = ["", "3.9", "4.1", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
-
def test_grade_decimal_tolerance(self):
problem = self.build_problem(question_text="What is 2 + 2 approximately?",
- explanation="The answer is 4",
- answer=4,
- tolerance=0.1)
+ explanation="The answer is 4",
+ answer=4,
+ tolerance=0.1)
correct_responses = ["4.0", "4.00", "4.09", "3.91"]
incorrect_responses = ["", "4.11", "3.89", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
def test_grade_percent_tolerance(self):
problem = self.build_problem(question_text="What is 2 + 2 approximately?",
- explanation="The answer is 4",
- answer=4,
- tolerance="10%")
+ explanation="The answer is 4",
+ answer=4,
+ tolerance="10%")
correct_responses = ["4.0", "4.3", "3.7", "4.30", "3.70"]
incorrect_responses = ["", "4.5", "3.5", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
@@ -624,9 +636,9 @@ class NumericalResponseTest(ResponseTest):
def test_grade_with_script(self):
script_text = "computed_response = math.sqrt(4)"
problem = self.build_problem(question_text="What is sqrt(4)?",
- explanation="The answer is 2",
- answer="$computed_response",
- script=script_text)
+ explanation="The answer is 2",
+ answer="$computed_response",
+ script=script_text)
correct_responses = ["2", "2.0"]
incorrect_responses = ["", "2.01", "1.99", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
@@ -634,10 +646,10 @@ class NumericalResponseTest(ResponseTest):
def test_grade_with_script_and_tolerance(self):
script_text = "computed_response = math.sqrt(4)"
problem = self.build_problem(question_text="What is sqrt(4)?",
- explanation="The answer is 2",
- answer="$computed_response",
- tolerance="0.1",
- script=script_text)
+ explanation="The answer is 2",
+ answer="$computed_response",
+ tolerance="0.1",
+ script=script_text)
correct_responses = ["2", "2.0", "2.05", "1.95"]
incorrect_responses = ["", "2.11", "1.89", "0"]
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
@@ -651,7 +663,6 @@ class NumericalResponseTest(ResponseTest):
self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
-
class CustomResponseTest(ResponseTest):
from response_xml_factory import CustomResponseXMLFactory
xml_factory_class = CustomResponseXMLFactory
@@ -692,7 +703,6 @@ class CustomResponseTest(ResponseTest):
overall_msg = correctmap.get_overall_message()
self.assertEqual(overall_msg, "Overall message")
-
def test_function_code_single_input(self):
# For function code, we pass in these arguments:
@@ -746,7 +756,7 @@ class CustomResponseTest(ResponseTest):
""")
problem = self.build_problem(script=script, cfn="check_func",
- expect="42", num_inputs=2)
+ expect="42", num_inputs=2)
# Correct answer -- expect both inputs marked correct
input_dict = {'1_2_1': '42', '1_2_2': '42'}
@@ -768,7 +778,6 @@ class CustomResponseTest(ResponseTest):
correctness = correct_map.get_correctness('1_2_2')
self.assertEqual(correctness, 'incorrect')
-
def test_function_code_multiple_inputs(self):
# If the has multiple inputs associated with it,
@@ -794,10 +803,10 @@ class CustomResponseTest(ResponseTest):
""")
problem = self.build_problem(script=script,
- cfn="check_func", num_inputs=3)
+ cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
- input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3' }
+ input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Expect that we receive the overall message (for the whole response)
@@ -813,7 +822,6 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
-
def test_multiple_inputs_return_one_status(self):
# When given multiple inputs, the 'answer_given' argument
# to the check_func() is a list of inputs
@@ -835,10 +843,10 @@ class CustomResponseTest(ResponseTest):
""")
problem = self.build_problem(script=script,
- cfn="check_func", num_inputs=3)
+ cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
- input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3' }
+ input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Everything marked incorrect
@@ -847,7 +855,7 @@ class CustomResponseTest(ResponseTest):
self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect')
# Grade the inputs (everything correct)
- input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3' }
+ input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Everything marked incorrect
@@ -858,7 +866,7 @@ class CustomResponseTest(ResponseTest):
# Message is interpreted as an "overall message"
self.assertEqual(correct_map.get_overall_message(), 'Message text')
- def test_script_exception(self):
+ def test_script_exception_function(self):
# Construct a script that will raise an exception
script = textwrap.dedent("""
@@ -869,7 +877,17 @@ class CustomResponseTest(ResponseTest):
problem = self.build_problem(script=script, cfn="check_func")
# Expect that an exception gets raised when we check the answer
- with self.assertRaises(Exception):
+ with self.assertRaises(ResponseError):
+ problem.grade_answers({'1_2_1': '42'})
+
+ def test_script_exception_inline(self):
+
+ # Construct a script that will raise an exception
+ script = 'raise Exception("Test")'
+ problem = self.build_problem(answer=script)
+
+ # Expect that an exception gets raised when we check the answer
+ with self.assertRaises(ResponseError):
problem.grade_answers({'1_2_1': '42'})
def test_invalid_dict_exception(self):
@@ -883,10 +901,70 @@ class CustomResponseTest(ResponseTest):
problem = self.build_problem(script=script, cfn="check_func")
# Expect that an exception gets raised when we check the answer
- with self.assertRaises(Exception):
+ with self.assertRaises(ResponseError):
problem.grade_answers({'1_2_1': '42'})
+ def test_module_imports_inline(self):
+ '''
+ Check that the correct modules are available to custom
+ response scripts
+ '''
+
+ for module_name in ['random', 'numpy', 'math', 'scipy',
+ 'calc', 'eia', 'chemcalc', 'chemtools',
+ 'miller', 'draganddrop']:
+
+ # Create a script that checks that the name is defined
+ # If the name is not defined, then the script
+ # will raise an exception
+ script = textwrap.dedent('''
+ correct[0] = 'correct'
+ assert('%s' in globals())''' % module_name)
+
+ # Create the problem
+ problem = self.build_problem(answer=script)
+
+ # Expect that we can grade an answer without
+ # getting an exception
+ try:
+ problem.grade_answers({'1_2_1': '42'})
+
+ except ResponseError:
+ self.fail("Could not use name '%s' in custom response"
+ % module_name)
+
+ def test_module_imports_function(self):
+ '''
+ Check that the correct modules are available to custom
+ response scripts
+ '''
+
+ for module_name in ['random', 'numpy', 'math', 'scipy',
+ 'calc', 'eia', 'chemcalc', 'chemtools',
+ 'miller', 'draganddrop']:
+
+ # Create a script that checks that the name is defined
+ # If the name is not defined, then the script
+ # will raise an exception
+ script = textwrap.dedent('''
+ def check_func(expect, answer_given):
+ assert('%s' in globals())
+ return True''' % module_name)
+
+ # Create the problem
+ problem = self.build_problem(script=script, cfn="check_func")
+
+ # Expect that we can grade an answer without
+ # getting an exception
+ try:
+ problem.grade_answers({'1_2_1': '42'})
+
+ except ResponseError:
+ self.fail("Could not use name '%s' in custom response"
+ % module_name)
+
+
class SchematicResponseTest(ResponseTest):
from response_xml_factory import SchematicResponseXMLFactory
xml_factory_class = SchematicResponseXMLFactory
@@ -902,13 +980,13 @@ class SchematicResponseTest(ResponseTest):
# To test that the context is set up correctly,
# we create a script that sets *correct* to true
# if and only if we find the *submission* (list)
- script="correct = ['correct' if 'test' in submission[0] else 'incorrect']"
+ script = "correct = ['correct' if 'test' in submission[0] else 'incorrect']"
problem = self.build_problem(answer=script)
# The actual dictionary would contain schematic information
# sent from the JavaScript simulation
submission_dict = {'test': 'test'}
- input_dict = { '1_2_1': json.dumps(submission_dict) }
+ input_dict = {'1_2_1': json.dumps(submission_dict)}
correct_map = problem.grade_answers(input_dict)
# Expect that the problem is graded as true
@@ -916,6 +994,19 @@ class SchematicResponseTest(ResponseTest):
# is what we expect)
self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
+ def test_script_exception(self):
+
+ # Construct a script that will raise an exception
+ script = "raise Exception('test')"
+ problem = self.build_problem(answer=script)
+
+ # Expect that an exception gets raised when we check the answer
+ with self.assertRaises(ResponseError):
+ submission_dict = {'test': 'test'}
+ input_dict = {'1_2_1': json.dumps(submission_dict)}
+ problem.grade_answers(input_dict)
+
+
class AnnotationResponseTest(ResponseTest):
from response_xml_factory import AnnotationResponseXMLFactory
xml_factory_class = AnnotationResponseXMLFactory
@@ -924,18 +1015,18 @@ class AnnotationResponseTest(ResponseTest):
(correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect')
answer_id = '1_2_1'
- options = (('x', correct),('y', partially),('z', incorrect))
- make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids })}
+ options = (('x', correct), ('y', partially), ('z', incorrect))
+ make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids})}
tests = [
- {'correctness': correct, 'points': 2,'answers': make_answer([0]) },
- {'correctness': partially, 'points': 1, 'answers': make_answer([1]) },
- {'correctness': incorrect, 'points': 0, 'answers': make_answer([2]) },
- {'correctness': incorrect, 'points': 0, 'answers': make_answer([0,1,2]) },
- {'correctness': incorrect, 'points': 0, 'answers': make_answer([]) },
- {'correctness': incorrect, 'points': 0, 'answers': make_answer('') },
- {'correctness': incorrect, 'points': 0, 'answers': make_answer(None) },
- {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null' } },
+ {'correctness': correct, 'points': 2, 'answers': make_answer([0])},
+ {'correctness': partially, 'points': 1, 'answers': make_answer([1])},
+ {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])},
+ {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])},
+ {'correctness': incorrect, 'points': 0, 'answers': make_answer([])},
+ {'correctness': incorrect, 'points': 0, 'answers': make_answer('')},
+ {'correctness': incorrect, 'points': 0, 'answers': make_answer(None)},
+ {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
]
for (index, test) in enumerate(tests):
diff --git a/common/lib/xmodule/xmodule/annotatable_module.py b/common/lib/xmodule/xmodule/annotatable_module.py
index db2aa13cb7..3e5108ed3a 100644
--- a/common/lib/xmodule/xmodule/annotatable_module.py
+++ b/common/lib/xmodule/xmodule/annotatable_module.py
@@ -20,8 +20,7 @@ class AnnotatableModule(AnnotatableFields, XModule):
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/html/display.coffee'),
resource_string(__name__, 'js/src/annotatable/display.coffee')],
- 'js': []
- }
+ 'js': []}
js_module_name = "Annotatable"
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'annotatable'
@@ -49,11 +48,11 @@ class AnnotatableModule(AnnotatableFields, XModule):
if color is not None:
if color in self.highlight_colors:
- cls.append('highlight-'+color)
+ cls.append('highlight-' + color)
attr['_delete'] = highlight_key
attr['value'] = ' '.join(cls)
- return { 'class' : attr }
+ return {'class': attr}
def _get_annotation_data_attr(self, index, el):
""" Returns a dict in which the keys are the HTML data attributes
@@ -73,7 +72,7 @@ class AnnotatableModule(AnnotatableFields, XModule):
if xml_key in el.attrib:
value = el.get(xml_key, '')
html_key = attrs_map[xml_key]
- data_attrs[html_key] = { 'value': value, '_delete': xml_key }
+ data_attrs[html_key] = {'value': value, '_delete': xml_key}
return data_attrs
@@ -91,7 +90,6 @@ class AnnotatableModule(AnnotatableFields, XModule):
delete_key = attr[key]['_delete']
del el.attrib[delete_key]
-
def _render_content(self):
""" Renders annotatable content with annotation spans and returns HTML. """
xmltree = etree.fromstring(self.content)
@@ -132,4 +130,3 @@ class AnnotatableDescriptor(AnnotatableFields, RawDescriptor):
stores_state = True
template_dir_name = "annotatable"
mako_template = "widgets/raw-edit.html"
-
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index e66b1d3495..ca7e052e7e 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -1,25 +1,24 @@
import cgi
import datetime
-import dateutil
-import dateutil.parser
import hashlib
import json
import logging
import traceback
import sys
-from lxml import etree
from pkg_resources import resource_string
from capa.capa_problem import LoncapaProblem
-from capa.responsetypes import StudentInputError
+from capa.responsetypes import StudentInputError,\
+ ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames
from .progress import Progress
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
-from xmodule.exceptions import NotFoundError
-from xblock.core import Integer, Scope, BlockScope, ModelType, String, Boolean, Object, Float
-from .fields import Timedelta
+from xmodule.exceptions import NotFoundError, ProcessingError
+from xblock.core import Integer, Scope, String, Boolean, Object, Float
+from .fields import Timedelta, Date
+from xmodule.util.date_utils import time_to_datetime
log = logging.getLogger("mitx.courseware")
@@ -86,13 +85,14 @@ class ComplexEncoder(json.JSONEncoder):
class CapaFields(object):
attempts = StringyInteger(help="Number of attempts taken by the student on this problem", default=0, scope=Scope.student_state)
max_attempts = StringyInteger(help="Maximum number of attempts that a student is allowed", scope=Scope.settings)
- due = String(help="Date that this problem is due by", scope=Scope.settings)
+ due = Date(help="Date that this problem is due by", scope=Scope.settings)
graceperiod = Timedelta(help="Amount of time after the due date that submissions will be accepted", scope=Scope.settings)
showanswer = String(help="When to show the problem answer to the student", scope=Scope.settings, default="closed")
force_save_button = Boolean(help="Whether to force the save button to appear on the page", scope=Scope.settings, default=False)
rerandomize = Randomization(help="When to rerandomize the problem", default="always", scope=Scope.settings)
data = String(help="XML data for the problem", scope=Scope.content)
correct_map = Object(help="Dictionary with the correctness of current student answers", scope=Scope.student_state, default={})
+ input_state = Object(help="Dictionary for maintaining the state of inputtypes", scope=Scope.student_state)
student_answers = Object(help="Dictionary with the current student responses", scope=Scope.student_state)
done = Boolean(help="Whether the student has answered the problem", scope=Scope.student_state)
display_name = String(help="Display name for this module", scope=Scope.settings)
@@ -123,10 +123,7 @@ class CapaModule(CapaFields, XModule):
def __init__(self, system, location, descriptor, model_data):
XModule.__init__(self, system, location, descriptor, model_data)
- if self.due:
- due_date = dateutil.parser.parse(self.due)
- else:
- due_date = None
+ due_date = time_to_datetime(self.due)
if self.graceperiod is not None and due_date:
self.close_date = due_date + self.graceperiod
@@ -149,6 +146,16 @@ class CapaModule(CapaFields, XModule):
# TODO (vshnayder): move as much as possible of this work and error
# checking to descriptor load time
self.lcp = self.new_lcp(self.get_state_for_lcp())
+
+ # At this point, we need to persist the randomization seed
+ # so that when the problem is re-loaded (to check/view/save)
+ # it stays the same.
+ # However, we do not want to write to the database
+ # every time the module is loaded.
+ # So we set the seed ONLY when there is not one set already
+ if self.seed is None:
+ self.seed = self.lcp.seed
+
except Exception as err:
msg = 'cannot create LoncapaProblem {loc}: {err}'.format(
loc=self.location.url(), err=err)
@@ -188,6 +195,7 @@ class CapaModule(CapaFields, XModule):
'done': self.done,
'correct_map': self.correct_map,
'student_answers': self.student_answers,
+ 'input_state': self.input_state,
'seed': self.seed,
}
@@ -195,6 +203,7 @@ class CapaModule(CapaFields, XModule):
lcp_state = self.lcp.get_state()
self.done = lcp_state['done']
self.correct_map = lcp_state['correct_map']
+ self.input_state = lcp_state['input_state']
self.student_answers = lcp_state['student_answers']
self.seed = lcp_state['seed']
@@ -443,14 +452,22 @@ class CapaModule(CapaFields, XModule):
'problem_save': self.save_problem,
'problem_show': self.get_answer,
'score_update': self.update_score,
- 'input_ajax': self.lcp.handle_input_ajax
+ 'input_ajax': self.handle_input_ajax,
+ 'ungraded_response': self.handle_ungraded_response
}
if dispatch not in handlers:
return 'Error'
before = self.get_progress()
- d = handlers[dispatch](get)
+
+ try:
+ d = handlers[dispatch](get)
+
+ except Exception as err:
+ _, _, traceback_obj = sys.exc_info()
+ raise ProcessingError, err.message, traceback_obj
+
after = self.get_progress()
d.update({
'progress_changed': after != before,
@@ -537,6 +554,43 @@ class CapaModule(CapaFields, XModule):
return dict() # No AJAX return is needed
+ def handle_ungraded_response(self, get):
+ '''
+ Delivers a response from the XQueue to the capa problem
+
+ The score of the problem will not be updated
+
+ Args:
+ - get (dict) must contain keys:
+ queuekey - a key specific to this response
+ xqueue_body - the body of the response
+ Returns:
+ empty dictionary
+
+ No ajax return is needed, so an empty dict is returned
+ '''
+ queuekey = get['queuekey']
+ score_msg = get['xqueue_body']
+ # pass along the xqueue message to the problem
+ self.lcp.ungraded_response(score_msg, queuekey)
+ self.set_state_from_lcp()
+ return dict()
+
+ def handle_input_ajax(self, get):
+ '''
+ Handle ajax calls meant for a particular input in the problem
+
+ Args:
+ - get (dict) - data that should be passed to the input
+ Returns:
+ - dict containing the response from the input
+ '''
+ response = self.lcp.handle_input_ajax(get)
+ # save any state changes that may occur
+ self.set_state_from_lcp()
+ return response
+
+
def get_answer(self, get):
'''
For the "show answer" button.
@@ -684,9 +738,24 @@ class CapaModule(CapaFields, XModule):
try:
correct_map = self.lcp.grade_answers(answers)
self.set_state_from_lcp()
- except StudentInputError as inst:
- log.exception("StudentInputError in capa_module:problem_check")
- return {'success': inst.message}
+
+ except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
+ log.warning("StudentInputError in capa_module:problem_check",
+ exc_info=True)
+
+ # If the user is a staff member, include
+ # the full exception, including traceback,
+ # in the response
+ if self.system.user_is_staff:
+ msg = "Staff debug info: %s" % traceback.format_exc()
+
+ # Otherwise, display just an error message,
+ # without a stack trace
+ else:
+ msg = "Error: %s" % str(inst.message)
+
+ return {'success': msg}
+
except Exception, err:
if self.system.DEBUG:
msg = "Error checking problem: " + str(err)
@@ -737,7 +806,7 @@ class CapaModule(CapaFields, XModule):
event_info['answers'] = answers
# Too late. Cannot submit
- if self.closed() and not self.max_attempts ==0:
+ if self.closed() and not self.max_attempts == 0:
event_info['failure'] = 'closed'
self.system.track_function('save_problem_fail', event_info)
return {'success': False,
@@ -757,7 +826,7 @@ class CapaModule(CapaFields, XModule):
self.system.track_function('save_problem_success', event_info)
msg = "Your answers have been saved"
- if not self.max_attempts ==0:
+ if not self.max_attempts == 0:
msg += " but not graded. Hit 'Check' to grade them."
return {'success': True,
'msg': msg}
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py
index 48fbfcced1..f70cf62d29 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_module.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -1,4 +1,3 @@
-import json
import logging
from lxml import etree
@@ -6,14 +5,16 @@ from pkg_resources import resource_string
from xmodule.raw_module import RawDescriptor
from .x_module import XModule
-from xblock.core import Integer, Scope, BlockScope, ModelType, String, Boolean, Object, Float, List
+from xblock.core import Integer, Scope, String, Boolean, List
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor
from collections import namedtuple
+from .fields import Date
+from xmodule.open_ended_grading_classes.xblock_field_types import StringyFloat
log = logging.getLogger("mitx.courseware")
V1_SETTINGS_ATTRIBUTES = ["display_name", "attempts", "is_graded", "accept_file_upload",
- "skip_spelling_checks", "due", "graceperiod", "max_score"]
+ "skip_spelling_checks", "due", "graceperiod"]
V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state",
"student_attempts", "ready_to_reset"]
@@ -63,12 +64,12 @@ class CombinedOpenEndedFields(object):
scope=Scope.settings)
skip_spelling_checks = Boolean(help="Whether or not to skip initial spelling checks.", default=True,
scope=Scope.settings)
- due = String(help="Date that this problem is due by", default=None, scope=Scope.settings)
+ due = Date(help="Date that this problem is due by", default=None, scope=Scope.settings)
graceperiod = String(help="Amount of time after the due date that submissions will be accepted", default=None,
scope=Scope.settings)
- max_score = Integer(help="Maximum score for the problem.", default=1, scope=Scope.settings)
version = VersionInteger(help="Current version number", default=DEFAULT_VERSION, scope=Scope.settings)
data = String(help="XML data for the problem", scope=Scope.content)
+ weight = StringyFloat(help="How much to weight this problem by", scope=Scope.settings)
class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
@@ -104,10 +105,11 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
icon_class = 'problem'
- js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
- resource_string(__name__, 'js/src/collapsible.coffee'),
- resource_string(__name__, 'js/src/javascript_loader.coffee'),
- ]}
+ js = {'coffee':
+ [resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
+ resource_string(__name__, 'js/src/collapsible.coffee'),
+ resource_string(__name__, 'js/src/javascript_loader.coffee'),
+ ]}
js_module_name = "CombinedOpenEnded"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
@@ -118,7 +120,7 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
Definition file should have one or many task blocks, a rubric block, and a prompt block:
Sample file:
-
+
Blah blah rubric.
@@ -190,8 +192,8 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
def get_score(self):
return self.child_module.get_score()
- #def max_score(self):
- # return self.child_module.max_score()
+ def max_score(self):
+ return self.child_module.max_score()
def get_progress(self):
return self.child_module.get_progress()
@@ -218,4 +220,3 @@ class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
stores_state = True
has_score = True
template_dir_name = "combinedopenended"
-
diff --git a/common/lib/xmodule/xmodule/conditional_module.py b/common/lib/xmodule/xmodule/conditional_module.py
index a9375cae78..b3e0e0e06b 100644
--- a/common/lib/xmodule/xmodule/conditional_module.py
+++ b/common/lib/xmodule/xmodule/conditional_module.py
@@ -40,8 +40,21 @@ class ConditionalModule(ConditionalFields, XModule):
poll_answer - map to `poll_answer` module attribute
voted - map to `voted` module attribute
- tag attributes:
- sources - location id of modules, separated by ';'
+ tag attributes:
+ sources - location id of required modules, separated by ';'
+
+    You can add your own rules for the tag, like
+    "completed", "attempted" etc. To do that you must extend
+    `ConditionalModule.conditions_map` variable and add a pair:
+ my_attr: my_property/my_method
+
+ After that you can use it:
+
+ ...
+
+
+ And my_property/my_method will be called for required modules.
+
"""
js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'),
diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py
index 7c47e0887a..ed5a37e580 100644
--- a/common/lib/xmodule/xmodule/course_module.py
+++ b/common/lib/xmodule/xmodule/course_module.py
@@ -1,25 +1,23 @@
import logging
from cStringIO import StringIO
-from math import exp, erf
+from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
import time
from datetime import datetime
+import dateutil.parser
+
from xmodule.modulestore import Location
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.timeparse import parse_time
from xmodule.util.decorators import lazyproperty
from xmodule.graders import grader_from_conf
-from datetime import datetime
+from xmodule.util.date_utils import time_to_datetime
import json
-import logging
-import requests
-import time
-import copy
-from xblock.core import Scope, ModelType, List, String, Object, Boolean
+from xblock.core import Scope, List, String, Object, Boolean
from .fields import Date
@@ -29,30 +27,30 @@ log = logging.getLogger(__name__)
class StringOrDate(Date):
def from_json(self, value):
"""
- Parse an optional metadata key containing a time: if present, complain
- if it doesn't parse.
- Return None if not present or invalid.
+ Parse an optional metadata key containing a time or a string:
+ if present, assume it's a string if it doesn't parse.
"""
- if value is None:
- return None
-
try:
- return time.strptime(value, self.time_format)
+ result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
+ if result is None:
+ return value
+ else:
+ return result
def to_json(self, value):
"""
- Convert a time struct to a string
+ Convert a time struct or string to a string.
"""
- if value is None:
- return None
-
try:
- return time.strftime(self.time_format, value)
- except (ValueError, TypeError):
+ result = super(StringOrDate, self).to_json(value)
+ except:
return value
-
+ if result is None:
+ return value
+ else:
+ return result
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
@@ -60,6 +58,7 @@ edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
_cached_toc = {}
+
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
@@ -154,7 +153,7 @@ class CourseFields(object):
enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings)
start = Date(help="Start time when this module is visible", scope=Scope.settings)
end = Date(help="Date that this class ends", scope=Scope.settings)
- advertised_start = StringOrDate(help="Date that this course is advertised to start", scope=Scope.settings)
+ advertised_start = String(help="Date that this course is advertised to start", scope=Scope.settings)
grading_policy = Object(help="Grading policy definition for this class", scope=Scope.content)
show_calculator = Boolean(help="Whether to show the calculator in this course", default=False, scope=Scope.settings)
display_name = String(help="Display name for this module", scope=Scope.settings)
@@ -179,7 +178,7 @@ class CourseFields(object):
allow_anonymous_to_peers = Boolean(scope=Scope.settings, default=False)
advanced_modules = List(help="Beta modules used in your course", scope=Scope.settings)
has_children = True
-
+ checklists = List(scope=Scope.settings)
info_sidebar_name = String(scope=Scope.settings, default='Course Handouts')
# An extra property is used rather than the wiki_slug/number because
@@ -367,7 +366,7 @@ class CourseDescriptor(CourseFields, SequenceDescriptor):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
- #Load the wiki tag if it exists
+ # Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
@@ -535,17 +534,17 @@ class CourseDescriptor(CourseFields, SequenceDescriptor):
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
- def to_datetime(timestamp):
- return datetime(*timestamp[:6])
announcement = self.announcement
if announcement is not None:
- announcement = to_datetime(announcement)
- if self.advertised_start is None or isinstance(self.advertised_start, basestring):
- start = to_datetime(self.start)
- else:
- start = to_datetime(self.advertised_start)
- now = to_datetime(time.gmtime())
+ announcement = time_to_datetime(announcement)
+
+ try:
+ start = dateutil.parser.parse(self.advertised_start)
+ except (ValueError, AttributeError):
+ start = time_to_datetime(self.start)
+
+ now = datetime.utcnow()
return announcement, start, now
@@ -635,8 +634,17 @@ class CourseDescriptor(CourseFields, SequenceDescriptor):
@property
def start_date_text(self):
+ def try_parse_iso_8601(text):
+ try:
+ result = datetime.strptime(text, "%Y-%m-%dT%H:%M")
+ result = result.strftime("%b %d, %Y")
+ except ValueError:
+ result = text.title()
+
+ return result
+
if isinstance(self.advertised_start, basestring):
- return self.advertised_start
+ return try_parse_iso_8601(self.advertised_start)
elif self.advertised_start is None and self.start is None:
return 'TBD'
else:
@@ -675,7 +683,7 @@ class CourseDescriptor(CourseFields, SequenceDescriptor):
# *end* of the same day, not the same time. It's going to be used as the
# end of the exam overall, so we don't want the exam to disappear too soon.
# It's also used optionally as the registration end date, so time matters there too.
- self.last_eligible_appointment_date = self._try_parse_time('Last_Eligible_Appointment_Date') # or self.first_eligible_appointment_date
+ self.last_eligible_appointment_date = self._try_parse_time('Last_Eligible_Appointment_Date') # or self.first_eligible_appointment_date
if self.last_eligible_appointment_date is None:
raise ValueError("Last appointment date must be specified")
self.registration_start_date = self._try_parse_time('Registration_Start_Date') or time.gmtime(0)
diff --git a/common/lib/xmodule/xmodule/css/annotatable/display.scss b/common/lib/xmodule/xmodule/css/annotatable/display.scss
index 308b379ec1..e2c095de2d 100644
--- a/common/lib/xmodule/xmodule/css/annotatable/display.scss
+++ b/common/lib/xmodule/xmodule/css/annotatable/display.scss
@@ -1,6 +1,16 @@
+/* TODO: move top-level variables to a common _variables.scss.
+ * NOTE: These variables were only added here because when this was integrated with the CMS,
+ * SASS compilation errors were triggered because the CMS didn't have the same variables defined
+ * that the LMS did, so the quick fix was to localize the LMS variables not shared by the CMS.
+ * -Abarrett and Vshnayder
+ */
$border-color: #C8C8C8;
$body-font-size: em(14);
+.annotatable-wrapper {
+ position: relative;
+}
+
.annotatable-header {
margin-bottom: .5em;
.annotatable-title {
@@ -55,6 +65,7 @@ $body-font-size: em(14);
display: inline;
cursor: pointer;
+ $highlight_index: 0;
@each $highlight in (
(yellow rgba(255,255,10,0.3) rgba(255,255,10,0.9)),
(red rgba(178,19,16,0.3) rgba(178,19,16,0.9)),
@@ -62,12 +73,13 @@ $body-font-size: em(14);
(green rgba(25,255,132,0.3) rgba(25,255,132,0.9)),
(blue rgba(35,163,255,0.3) rgba(35,163,255,0.9)),
(purple rgba(115,9,178,0.3) rgba(115,9,178,0.9))) {
-
+
+ $highlight_index: $highlight_index + 1;
$marker: nth($highlight,1);
$color: nth($highlight,2);
$selected_color: nth($highlight,3);
- @if $marker == yellow {
+ @if $highlight_index == 1 {
&.highlight {
background-color: $color;
&.selected { background-color: $selected_color; }
@@ -127,6 +139,7 @@ $body-font-size: em(14);
font-weight: 400;
padding: 0 10px 10px 10px;
background-color: transparent;
+ border-color: transparent;
}
p {
color: inherit;
@@ -143,6 +156,7 @@ $body-font-size: em(14);
margin: 0px 0px 10px 0;
max-height: 225px;
overflow: auto;
+ line-height: normal;
}
.annotatable-reply {
display: block;
@@ -165,5 +179,3 @@ $body-font-size: em(14);
border-top-color: rgba(0, 0, 0, .85);
}
}
-
-
diff --git a/common/lib/xmodule/xmodule/exceptions.py b/common/lib/xmodule/xmodule/exceptions.py
index 3db5ceccde..d38fbb12bb 100644
--- a/common/lib/xmodule/xmodule/exceptions.py
+++ b/common/lib/xmodule/xmodule/exceptions.py
@@ -1,6 +1,12 @@
class InvalidDefinitionError(Exception):
pass
-
class NotFoundError(Exception):
pass
+
+class ProcessingError(Exception):
+ '''
+ An error occurred while processing a request to the XModule.
+ For example: if an exception occurs while checking a capa problem.
+ '''
+ pass
diff --git a/common/lib/xmodule/xmodule/fields.py b/common/lib/xmodule/xmodule/fields.py
index fb80752e56..ea857933fc 100644
--- a/common/lib/xmodule/xmodule/fields.py
+++ b/common/lib/xmodule/xmodule/fields.py
@@ -4,27 +4,36 @@ import re
from datetime import timedelta
from xblock.core import ModelType
+import datetime
+import dateutil.parser
log = logging.getLogger(__name__)
class Date(ModelType):
- time_format = "%Y-%m-%dT%H:%M"
-
- def from_json(self, value):
+ '''
+ Date fields know how to parse and produce json (iso) compatible formats.
+ '''
+ def from_json(self, field):
"""
Parse an optional metadata key containing a time: if present, complain
if it doesn't parse.
Return None if not present or invalid.
"""
- if value is None:
+ if field is None:
+ return field
+ elif field is "":
return None
-
- try:
- return time.strptime(value, self.time_format)
- except ValueError as e:
- msg = "Field {0} has bad value '{1}': '{2}'".format(
- self._name, value, e)
+ elif isinstance(field, basestring):
+ d = dateutil.parser.parse(field)
+ return d.utctimetuple()
+ elif isinstance(field, (int, long, float)):
+ return time.gmtime(field / 1000)
+ elif isinstance(field, time.struct_time):
+ return field
+ else:
+ msg = "Field {0} has bad value '{1}'".format(
+ self._name, field)
log.warning(msg)
return None
@@ -34,8 +43,11 @@ class Date(ModelType):
"""
if value is None:
return None
-
- return time.strftime(self.time_format, value)
+ if isinstance(value, time.struct_time):
+ # struct_times are always utc
+ return time.strftime('%Y-%m-%dT%H:%M:%SZ', value)
+ elif isinstance(value, datetime.datetime):
+ return value.isoformat() + 'Z'
TIMEDELTA_REGEX = re.compile(r'^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) second(?:s)?)?$')
@@ -66,4 +78,4 @@ class Timedelta(ModelType):
cur_value = getattr(value, attr, 0)
if cur_value > 0:
values.append("%d %s" % (cur_value, attr))
- return ' '.join(values)
\ No newline at end of file
+ return ' '.join(values)
diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py
index 884f9e2df2..1851a4adc2 100644
--- a/common/lib/xmodule/xmodule/foldit_module.py
+++ b/common/lib/xmodule/xmodule/foldit_module.py
@@ -1,6 +1,5 @@
import logging
from lxml import etree
-from dateutil import parser
from pkg_resources import resource_string
@@ -8,6 +7,9 @@ from xmodule.editing_module import EditingDescriptor
from xmodule.x_module import XModule
from xmodule.xml_module import XmlDescriptor
from xblock.core import Scope, Integer, String
+from .fields import Date
+from xmodule.util.date_utils import time_to_datetime
+
log = logging.getLogger(__name__)
@@ -16,7 +18,7 @@ class FolditFields(object):
# default to what Spring_7012x uses
required_level = Integer(default=4, scope=Scope.settings)
required_sublevel = Integer(default=5, scope=Scope.settings)
- due = String(help="Date that this problem is due by", scope=Scope.settings, default='')
+ due = Date(help="Date that this problem is due by", scope=Scope.settings)
show_basic_score = String(scope=Scope.settings, default='false')
show_leaderboard = String(scope=Scope.settings, default='false')
@@ -36,17 +38,8 @@ class FolditModule(FolditFields, XModule):
required_sublevel="3"
show_leaderboard="false"/>
"""
- def parse_due_date():
- """
- Pull out the date, or None
- """
- s = self.due
- if s:
- return parser.parse(s)
- else:
- return None
- self.due_time = parse_due_date()
+ self.due_time = time_to_datetime(self.due)
def is_complete(self):
"""
@@ -178,8 +171,8 @@ class FolditDescriptor(FolditFields, XmlDescriptor, EditingDescriptor):
@classmethod
def definition_from_xml(cls, xml_object, system):
- return ({}, [])
+ return {}, []
- def definition_to_xml(self):
+ def definition_to_xml(self, resource_fs):
xml_object = etree.Element('foldit')
return xml_object
diff --git a/common/lib/xmodule/xmodule/graders.py b/common/lib/xmodule/xmodule/graders.py
index 35318f4f1e..862da791c0 100644
--- a/common/lib/xmodule/xmodule/graders.py
+++ b/common/lib/xmodule/xmodule/graders.py
@@ -45,8 +45,9 @@ def invalid_args(func, argdict):
Given a function and a dictionary of arguments, returns a set of arguments
from argdict that aren't accepted by func
"""
- args, varargs, keywords, defaults = inspect.getargspec(func)
- if keywords: return set() # All accepted
+ args, _, keywords, _ = inspect.getargspec(func)
+ if keywords:
+ return set() # All accepted
return set(argdict) - set(args)
@@ -119,7 +120,7 @@ class CourseGrader(object):
that has the matching section format.
The grader outputs a dictionary with the following keys:
- - percent: Contaisn a float value, which is the final percentage score for the student.
+ - percent: Contains a float value, which is the final percentage score for the student.
- section_breakdown: This is a list of dictionaries which provide details on sections
that were graded. These are used for display in a graph or chart. The format for a
section_breakdown dictionary is explained below.
@@ -150,6 +151,7 @@ class CourseGrader(object):
@abc.abstractmethod
def grade(self, grade_sheet, generate_random_scores=False):
+ '''Given a grade sheet, return a dict containing grading information'''
raise NotImplementedError
@@ -158,7 +160,10 @@ class WeightedSubsectionsGrader(CourseGrader):
This grader takes a list of tuples containing (grader, category_name, weight) and computes
a final grade by totalling the contribution of each sub grader and multiplying it by the
given weight. For example, the sections may be
- [ (homeworkGrader, "Homework", 0.15), (labGrader, "Labs", 0.15), (midtermGrader, "Midterm", 0.30), (finalGrader, "Final", 0.40) ]
+
+ [ (homeworkGrader, "Homework", 0.15), (labGrader, "Labs", 0.15), (midtermGrader, "Midterm", 0.30),
+ (finalGrader, "Final", 0.40) ]
+
All items in section_breakdown for each subgrader will be combined. A grade_breakdown will be
composed using the score from each grader.
@@ -177,12 +182,12 @@ class WeightedSubsectionsGrader(CourseGrader):
for subgrader, category, weight in self.sections:
subgrade_result = subgrader.grade(grade_sheet, generate_random_scores)
- weightedPercent = subgrade_result['percent'] * weight
- section_detail = "{0} = {1:.1%} of a possible {2:.0%}".format(category, weightedPercent, weight)
+ weighted_percent = subgrade_result['percent'] * weight
+ section_detail = "{0} = {1:.1%} of a possible {2:.0%}".format(category, weighted_percent, weight)
- total_percent += weightedPercent
+ total_percent += weighted_percent
section_breakdown += subgrade_result['section_breakdown']
- grade_breakdown.append({'percent': weightedPercent, 'detail': section_detail, 'category': category})
+ grade_breakdown.append({'percent': weighted_percent, 'detail': section_detail, 'category': category})
return {'percent': total_percent,
'section_breakdown': section_breakdown,
@@ -203,32 +208,33 @@ class SingleSectionGrader(CourseGrader):
self.category = category or name
def grade(self, grade_sheet, generate_random_scores=False):
- foundScore = None
+ found_score = None
if self.type in grade_sheet:
for score in grade_sheet[self.type]:
if score.section == self.name:
- foundScore = score
+ found_score = score
break
- if foundScore or generate_random_scores:
+ if found_score or generate_random_scores:
if generate_random_scores: # for debugging!
earned = random.randint(2, 15)
possible = random.randint(earned, 15)
else: # We found the score
- earned = foundScore.earned
- possible = foundScore.possible
+ earned = found_score.earned
+ possible = found_score.possible
percent = earned / float(possible)
detail = "{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(name=self.name,
- percent=percent,
- earned=float(earned),
- possible=float(possible))
+ percent=percent,
+ earned=float(earned),
+ possible=float(possible))
else:
percent = 0.0
detail = "{name} - 0% (?/?)".format(name=self.name)
- breakdown = [{'percent': percent, 'label': self.short_label, 'detail': detail, 'category': self.category, 'prominent': True}]
+ breakdown = [{'percent': percent, 'label': self.short_label,
+ 'detail': detail, 'category': self.category, 'prominent': True}]
return {'percent': percent,
'section_breakdown': breakdown,
@@ -250,6 +256,13 @@ class AssignmentFormatGrader(CourseGrader):
show_only_average is to suppress the display of each assignment in this grader and instead
only show the total score of this grader in the breakdown.
+ hide_average is to suppress the display of the total score in this grader and instead
+ only show each assignment in this grader in the breakdown.
+
+ If there is only a single assignment in this grader, then it acts like a SingleSectionGrader
+ and returns only one entry for the grader. Since the assignment and the total are the same,
+ the total is returned but is not labeled as an average.
+
category should be presentable to the user, but may not appear. When the grade breakdown is
displayed, scores from the same category will be similar (for example, by color).
@@ -263,7 +276,8 @@ class AssignmentFormatGrader(CourseGrader):
min_count = 2 would produce the labels "Assignment 3", "Assignment 4"
"""
- def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False, hide_average=False, starting_index=1):
+ def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None,
+ show_only_average=False, hide_average=False, starting_index=1):
self.type = type
self.min_count = min_count
self.drop_count = drop_count
@@ -275,7 +289,8 @@ class AssignmentFormatGrader(CourseGrader):
self.hide_average = hide_average
def grade(self, grade_sheet, generate_random_scores=False):
- def totalWithDrops(breakdown, drop_count):
+ def total_with_drops(breakdown, drop_count):
+ '''calculates total score for a section while dropping lowest scores'''
#create an array of tuples with (index, mark), sorted by mark['percent'] descending
sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent'])
# A list of the indices of the dropped scores
@@ -308,33 +323,50 @@ class AssignmentFormatGrader(CourseGrader):
section_name = scores[i].section
percentage = earned / float(possible)
- summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index=i + self.starting_index,
- section_type=self.section_type,
- name=section_name,
- percent=percentage,
- earned=float(earned),
- possible=float(possible))
+ summary_format = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})"
+ summary = summary_format.format(index=i + self.starting_index,
+ section_type=self.section_type,
+ name=section_name,
+ percent=percentage,
+ earned=float(earned),
+ possible=float(possible))
else:
percentage = 0
- summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index=i + self.starting_index, section_type=self.section_type)
+ summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index=i + self.starting_index,
+ section_type=self.section_type)
- short_label = "{short_label} {index:02d}".format(index=i + self.starting_index, short_label=self.short_label)
+ short_label = "{short_label} {index:02d}".format(index=i + self.starting_index,
+ short_label=self.short_label)
- breakdown.append({'percent': percentage, 'label': short_label, 'detail': summary, 'category': self.category})
+ breakdown.append({'percent': percentage, 'label': short_label,
+ 'detail': summary, 'category': self.category})
- total_percent, dropped_indices = totalWithDrops(breakdown, self.drop_count)
+ total_percent, dropped_indices = total_with_drops(breakdown, self.drop_count)
for dropped_index in dropped_indices:
- breakdown[dropped_index]['mark'] = {'detail': "The lowest {drop_count} {section_type} scores are dropped.".format(drop_count=self.drop_count, section_type=self.section_type)}
+ breakdown[dropped_index]['mark'] = {'detail': "The lowest {drop_count} {section_type} scores are dropped."
+ .format(drop_count=self.drop_count, section_type=self.section_type)}
- total_detail = "{section_type} Average = {percent:.0%}".format(percent=total_percent, section_type=self.section_type)
- total_label = "{short_label} Avg".format(short_label=self.short_label)
+ if len(breakdown) == 1:
+ # if there is only one entry in a section, suppress the existing individual entry and the average,
+ # and just display a single entry for the section. That way it acts automatically like a
+ # SingleSectionGrader.
+ total_detail = "{section_type} = {percent:.0%}".format(percent=total_percent,
+ section_type=self.section_type)
+ total_label = "{short_label}".format(short_label=self.short_label)
+ breakdown = [{'percent': total_percent, 'label': total_label,
+ 'detail': total_detail, 'category': self.category, 'prominent': True}, ]
+ else:
+ total_detail = "{section_type} Average = {percent:.0%}".format(percent=total_percent,
+ section_type=self.section_type)
+ total_label = "{short_label} Avg".format(short_label=self.short_label)
- if self.show_only_average:
- breakdown = []
+ if self.show_only_average:
+ breakdown = []
- if not self.hide_average:
- breakdown.append({'percent': total_percent, 'label': total_label, 'detail': total_detail, 'category': self.category, 'prominent': True})
+ if not self.hide_average:
+ breakdown.append({'percent': total_percent, 'label': total_label,
+ 'detail': total_detail, 'category': self.category, 'prominent': True})
return {'percent': total_percent,
'section_breakdown': breakdown,
diff --git a/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee
index 2ad49ae6d7..8a32c8f51e 100644
--- a/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee
@@ -1,7 +1,8 @@
class @Annotatable
_debug: false
- # selectors for the annotatable xmodule
+ # selectors for the annotatable xmodule
+ wrapperSelector: '.annotatable-wrapper'
toggleAnnotationsSelector: '.annotatable-toggle-annotations'
toggleInstructionsSelector: '.annotatable-toggle-instructions'
instructionsSelector: '.annotatable-instructions'
@@ -61,7 +62,7 @@ class @Annotatable
my: 'bottom center' # of tooltip
at: 'top center' # of target
target: $(el) # where the tooltip was triggered (i.e. the annotation span)
- container: @$el
+ container: @$(@wrapperSelector)
adjust:
y: -5
show:
@@ -75,6 +76,7 @@ class @Annotatable
classes: 'ui-tooltip-annotatable'
events:
show: @onShowTip
+ move: @onMoveTip
onClickToggleAnnotations: (e) => @toggleAnnotations()
@@ -87,6 +89,55 @@ class @Annotatable
onShowTip: (event, api) =>
event.preventDefault() if @annotationsHidden
+ onMoveTip: (event, api, position) =>
+ ###
+ This method handles an edge case in which a tooltip is displayed above
+ a non-overlapping span like this:
+
+ (( TOOLTIP ))
+ \/
+ text text text ... text text text ......
+
+
+ The problem is that the tooltip looks disconnected from both spans, so
+ we should re-position the tooltip to appear above the span.
+ ###
+
+ tip = api.elements.tooltip
+ adjust_y = api.options.position?.adjust?.y || 0
+ container = api.options.position?.container || $('body')
+ target = api.elements.target
+
+ rects = $(target).get(0).getClientRects()
+ is_non_overlapping = (rects?.length == 2 and rects[0].left > rects[1].right)
+
+ if is_non_overlapping
+ # we want to choose the largest of the two non-overlapping spans and display
+ # the tooltip above the center of it (see api.options.position settings)
+ focus_rect = (if rects[0].width > rects[1].width then rects[0] else rects[1])
+ rect_center = focus_rect.left + (focus_rect.width / 2)
+ rect_top = focus_rect.top
+ tip_width = $(tip).width()
+ tip_height = $(tip).height()
+
+ # tooltip is positioned relative to its container, so we need to factor in offsets
+ container_offset = $(container).offset()
+ offset_left = -container_offset.left
+ offset_top = $(document).scrollTop() - container_offset.top
+
+ tip_left = offset_left + rect_center - (tip_width / 2)
+ tip_top = offset_top + rect_top - tip_height + adjust_y
+
+ # make sure the new tip position doesn't clip the edges of the screen
+ win_width = $(window).width()
+ if tip_left < offset_left
+ tip_left = offset_left
+ else if tip_left + tip_width > win_width + offset_left
+ tip_left = win_width + offset_left - tip_width
+
+ # final step: update the position object (used by qtip2 to show the tip after the move event)
+ $.extend position, 'left': tip_left, 'top': tip_top
+
getSpanForProblemReturn: (el) ->
problem_id = $(@problemReturnSelector).index(el)
@$(@spanSelector).filter("[data-problem-id='#{problem_id}']")
diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
index 158c2b98d0..70704ab247 100644
--- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
@@ -41,6 +41,11 @@ class @Problem
@el.attr progress: response.progress_status
@el.trigger('progressChanged')
+ forceUpdate: (response) =>
+ @el.attr progress: response.progress_status
+ @el.trigger('progressChanged')
+
+
queueing: =>
@queued_items = @$(".xqueue")
@num_queued_items = @queued_items.length
@@ -71,6 +76,7 @@ class @Problem
@num_queued_items = @new_queued_items.length
if @num_queued_items == 0
+ @forceUpdate response
delete window.queuePollerID
else
# TODO: Some logic to dynamically adjust polling rate based on queuelen
diff --git a/common/lib/xmodule/xmodule/modulestore/__init__.py b/common/lib/xmodule/xmodule/modulestore/__init__.py
index 022e016a58..2593b04472 100644
--- a/common/lib/xmodule/xmodule/modulestore/__init__.py
+++ b/common/lib/xmodule/xmodule/modulestore/__init__.py
@@ -10,6 +10,7 @@ from collections import namedtuple
from .exceptions import InvalidLocationError, InsufficientSpecificationError
from xmodule.errortracker import ErrorLog, make_error_tracker
+from bson.son import SON
log = logging.getLogger('mitx.' + 'modulestore')
@@ -457,3 +458,13 @@ class ModuleStoreBase(ModuleStore):
if c.id == course_id:
return c
return None
+
+
+def namedtuple_to_son(namedtuple, prefix=''):
+ """
+ Converts a namedtuple into a SON object with the same key order
+ """
+ son = SON()
+ for idx, field_name in enumerate(namedtuple._fields):
+ son[prefix + field_name] = namedtuple[idx]
+ return son
diff --git a/common/lib/xmodule/xmodule/modulestore/draft.py b/common/lib/xmodule/xmodule/modulestore/draft.py
index 71922c08df..cfce5eb7db 100644
--- a/common/lib/xmodule/xmodule/modulestore/draft.py
+++ b/common/lib/xmodule/xmodule/modulestore/draft.py
@@ -1,7 +1,8 @@
from datetime import datetime
-from . import ModuleStoreBase, Location
+from . import ModuleStoreBase, Location, namedtuple_to_son
from .exceptions import ItemNotFoundError
+import logging
DRAFT = 'draft'
@@ -15,11 +16,11 @@ def as_draft(location):
def wrap_draft(item):
"""
- Sets `item.cms.is_draft` to `True` if the item is a
+ Sets `item.is_draft` to `True` if the item is a
draft, and `False` otherwise. Sets the item's location to the
non-draft location in either case
"""
- item.cms.is_draft = item.location.revision == DRAFT
+ setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location._replace(revision=None)
return item
@@ -55,11 +56,10 @@ class DraftModuleStore(ModuleStoreBase):
get_children() to cache. None indicates to cache all descendents
"""
- # cdodge: we're forcing depth=0 here as the Draft store is not handling caching well
try:
- return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=0))
+ return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=depth))
except ItemNotFoundError:
- return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=0))
+ return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=depth))
def get_instance(self, course_id, location, depth=0):
"""
@@ -67,11 +67,10 @@ class DraftModuleStore(ModuleStoreBase):
TODO (vshnayder): this may want to live outside the modulestore eventually
"""
- # cdodge: we're forcing depth=0 here as the Draft store is not handling caching well
try:
- return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=0))
+ return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=depth))
except ItemNotFoundError:
- return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=0))
+ return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=depth))
def get_items(self, location, course_id=None, depth=0):
"""
@@ -88,9 +87,8 @@ class DraftModuleStore(ModuleStoreBase):
"""
draft_loc = as_draft(location)
- # cdodge: we're forcing depth=0 here as the Draft store is not handling caching well
- draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=0)
- items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=0)
+ draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=depth)
+ items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=depth)
draft_locs_found = set(item.location._replace(revision=None) for item in draft_items)
non_draft_items = [
@@ -118,7 +116,7 @@ class DraftModuleStore(ModuleStoreBase):
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
- if not draft_item.cms.is_draft:
+ if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
return super(DraftModuleStore, self).update_item(draft_loc, data)
@@ -133,7 +131,7 @@ class DraftModuleStore(ModuleStoreBase):
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
- if not draft_item.cms.is_draft:
+ if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
return super(DraftModuleStore, self).update_children(draft_loc, children)
@@ -149,7 +147,7 @@ class DraftModuleStore(ModuleStoreBase):
draft_loc = as_draft(location)
draft_item = self.get_item(location)
- if not draft_item.cms.is_draft:
+ if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
if 'is_draft' in metadata:
@@ -192,3 +190,36 @@ class DraftModuleStore(ModuleStoreBase):
"""
super(DraftModuleStore, self).clone_item(location, as_draft(location))
super(DraftModuleStore, self).delete_item(location)
+
+ def _query_children_for_cache_children(self, items):
+ # first get non-draft in a round-trip
+ queried_children = []
+ to_process_non_drafts = super(DraftModuleStore, self)._query_children_for_cache_children(items)
+
+ to_process_dict = {}
+ for non_draft in to_process_non_drafts:
+ to_process_dict[Location(non_draft["_id"])] = non_draft
+
+ # now query all draft content in another round-trip
+ query = {
+ '_id': {'$in': [namedtuple_to_son(as_draft(Location(item))) for item in items]}
+ }
+ to_process_drafts = list(self.collection.find(query))
+
+ # now we have to go through all drafts and replace the non-draft
+ # with the draft. This is because the semantics of the DraftStore is to
+ # always return the draft - if available
+ for draft in to_process_drafts:
+ draft_loc = Location(draft["_id"])
+ draft_as_non_draft_loc = draft_loc._replace(revision=None)
+
+ # does non-draft exist in the collection
+ # if so, replace it
+ if draft_as_non_draft_loc in to_process_dict:
+ to_process_dict[draft_as_non_draft_loc] = draft
+
+ # convert the dict - which is used for look ups - back into a list
+ for key, value in to_process_dict.iteritems():
+ queried_children.append(value)
+
+ return queried_children
diff --git a/common/lib/xmodule/xmodule/modulestore/mongo.py b/common/lib/xmodule/xmodule/modulestore/mongo.py
index f6fa98fc28..f1e09b024a 100644
--- a/common/lib/xmodule/xmodule/modulestore/mongo.py
+++ b/common/lib/xmodule/xmodule/modulestore/mongo.py
@@ -3,12 +3,12 @@ import sys
import logging
import copy
-from bson.son import SON
from collections import namedtuple
from fs.osfs import OSFS
from itertools import repeat
from path import path
from datetime import datetime
+from operator import attrgetter
from importlib import import_module
from xmodule.errortracker import null_error_tracker, exc_info_to_str
@@ -18,7 +18,7 @@ from xmodule.error_module import ErrorDescriptor
from xblock.runtime import DbModel, KeyValueStore, InvalidScopeError
from xblock.core import Scope
-from . import ModuleStoreBase, Location
+from . import ModuleStoreBase, Location, namedtuple_to_son
from .draft import DraftModuleStore
from .exceptions import (ItemNotFoundError,
DuplicateItemError)
@@ -96,6 +96,7 @@ class MongoKeyValueStore(KeyValueStore):
else:
return False
+
MongoUsage = namedtuple('MongoUsage', 'id, def_id')
@@ -107,7 +108,7 @@ class CachingDescriptorSystem(MakoDescriptorSystem):
references to metadata_inheritance_tree
"""
def __init__(self, modulestore, module_data, default_class, resources_fs,
- error_tracker, render_template, metadata_inheritance_tree = None):
+ error_tracker, render_template, cached_metadata=None):
"""
modulestore: the module store that can be used to retrieve additional modules
@@ -132,9 +133,13 @@ class CachingDescriptorSystem(MakoDescriptorSystem):
# cdodge: other Systems have a course_id attribute defined. To keep things consistent, let's
# define an attribute here as well, even though it's None
self.course_id = None
- self.metadata_inheritance_tree = metadata_inheritance_tree
+ self.cached_metadata = cached_metadata
+
def load_item(self, location):
+ """
+ Return an XModule instance for the specified location
+ """
location = Location(location)
json_data = self.module_data.get(location)
if json_data is None:
@@ -165,8 +170,8 @@ class CachingDescriptorSystem(MakoDescriptorSystem):
model_data = DbModel(kvs, class_, None, MongoUsage(self.course_id, location))
module = class_(self, location, model_data)
- if self.metadata_inheritance_tree is not None:
- metadata_to_inherit = self.metadata_inheritance_tree.get('parent_metadata', {}).get(location.url(), {})
+ if self.cached_metadata is not None:
+ metadata_to_inherit = self.cached_metadata.get(location.url(), {})
inherit_metadata(module, metadata_to_inherit)
return module
except:
@@ -196,14 +201,7 @@ def location_to_query(location, wildcard=True):
return query
-def namedtuple_to_son(namedtuple, prefix=''):
- """
- Converts a namedtuple into a SON object with the same key order
- """
- son = SON()
- for idx, field_name in enumerate(namedtuple._fields):
- son[prefix + field_name] = namedtuple[idx]
- return son
+metadata_cache_key = attrgetter('org', 'course')
class MongoModuleStore(ModuleStoreBase):
@@ -215,7 +213,8 @@ class MongoModuleStore(ModuleStoreBase):
def __init__(self, host, db, collection, fs_root, render_template,
port=27017, default_class=None,
error_tracker=null_error_tracker,
- user=None, password=None, **kwargs):
+ user=None, password=None, request_cache=None,
+ metadata_inheritance_cache_subsystem=None, **kwargs):
ModuleStoreBase.__init__(self)
@@ -228,7 +227,6 @@ class MongoModuleStore(ModuleStoreBase):
if user is not None and password is not None:
self.collection.database.authenticate(user, password)
-
# Force mongo to report errors, at the expense of performance
self.collection.safe = True
@@ -247,8 +245,10 @@ class MongoModuleStore(ModuleStoreBase):
self.error_tracker = error_tracker
self.render_template = render_template
self.ignore_write_events_on_courses = []
+ self.request_cache = request_cache
+ self.metadata_inheritance_cache_subsystem = metadata_inheritance_cache_subsystem
- def get_metadata_inheritance_tree(self, location):
+ def compute_metadata_inheritance_tree(self, location):
'''
TODO (cdodge) This method can be deleted when the 'split module store' work has been completed
'''
@@ -258,10 +258,15 @@ class MongoModuleStore(ModuleStoreBase):
query = {
'_id.org': location.org,
'_id.course': location.course,
- '_id.category': {'$in': [ 'course', 'chapter', 'sequential', 'vertical']}
+ '_id.category': {'$in': ['course', 'chapter', 'sequential', 'vertical']}
}
- # we just want the Location, children, and metadata
- record_filter = {'_id': 1, 'definition.children': 1, 'metadata': 1}
+ # we just want the Location, children, and inheritable metadata
+ record_filter = {'_id': 1, 'definition.children': 1}
+
+ # just get the inheritable metadata since that is all we need for the computation
+ # this minimizes both data pushed over the wire
+ for attr in INHERITABLE_METADATA:
+ record_filter['metadata.{0}'.format(attr)] = 1
# call out to the DB
resultset = self.collection.find(query, record_filter)
@@ -278,7 +283,11 @@ class MongoModuleStore(ModuleStoreBase):
# now traverse the tree and compute down the inherited metadata
metadata_to_inherit = {}
+
def _compute_inherited_metadata(url):
+ """
+ Helper method for computing inherited metadata for a specific location url
+ """
my_metadata = {}
# check for presence of metadata key. Note that a given module may not yet be fully formed.
# example: update_item -> update_children -> update_metadata sequence on new item create
@@ -293,7 +302,7 @@ class MongoModuleStore(ModuleStoreBase):
# go through all the children and recurse, but only if we have
# in the result set. Remember results will not contain leaf nodes
- for child in results_by_url[url].get('definition',{}).get('children',[]):
+ for child in results_by_url[url].get('definition', {}).get('children', []):
if child in results_by_url:
new_child_metadata = copy.deepcopy(my_metadata)
new_child_metadata.update(results_by_url[child].get('metadata', {}))
@@ -304,42 +313,57 @@ class MongoModuleStore(ModuleStoreBase):
# this is likely a leaf node, so let's record what metadata we need to inherit
metadata_to_inherit[child] = my_metadata
-
if root is not None:
_compute_inherited_metadata(root)
- return {'parent_metadata': metadata_to_inherit,
- 'timestamp' : datetime.now()}
+ return metadata_to_inherit
def get_cached_metadata_inheritance_tree(self, location, force_refresh=False):
'''
TODO (cdodge) This method can be deleted when the 'split module store' work has been completed
'''
- key_name = '{0}/{1}'.format(location.org, location.course)
+ key = metadata_cache_key(location)
+ tree = {}
+
+ if not force_refresh:
+ # see if we are first in the request cache (if present)
+ if self.request_cache is not None and key in self.request_cache.data.get('metadata_inheritance', {}):
+ return self.request_cache.data['metadata_inheritance'][key]
- tree = None
- if self.metadata_inheritance_cache is not None:
- tree = self.metadata_inheritance_cache.get(key_name)
- else:
- # This is to help guard against an accident prod runtime without a cache
- logging.warning('Running MongoModuleStore without metadata_inheritance_cache. This should not happen in production!')
+ # then look in any caching subsystem (e.g. memcached)
+ if self.metadata_inheritance_cache_subsystem is not None:
+ tree = self.metadata_inheritance_cache_subsystem.get(key, {})
+ else:
+ logging.warning('Running MongoModuleStore without a metadata_inheritance_cache_subsystem. This is OK in localdev and testing environment. Not OK in production.')
- if tree is None or force_refresh:
- tree = self.get_metadata_inheritance_tree(location)
- if self.metadata_inheritance_cache is not None:
- self.metadata_inheritance_cache.set(key_name, tree)
+ if not tree:
+ # if not in subsystem, or we are on force refresh, then we have to compute
+ tree = self.compute_metadata_inheritance_tree(location)
+
+ # now write out computed tree to caching subsystem (e.g. memcached), if available
+ if self.metadata_inheritance_cache_subsystem is not None:
+ self.metadata_inheritance_cache_subsystem.set(key, tree)
+
+ # now populate a request_cache, if available. NOTE, we are outside of the
+ # scope of the above if: statement so that after a memcache hit, it'll get
+ # put into the request_cache
+ if self.request_cache is not None:
+ # we can't assume the 'metadata_inheritance' part of the request cache dict has been
+ # defined
+ if 'metadata_inheritance' not in self.request_cache.data:
+ self.request_cache.data['metadata_inheritance'] = {}
+ self.request_cache.data['metadata_inheritance'][key] = tree
return tree
def refresh_cached_metadata_inheritance_tree(self, location):
+ """
+ Refresh the cached metadata inheritance tree for the org/course combination
+ for location
+ """
pseudo_course_id = '/'.join([location.org, location.course])
if pseudo_course_id not in self.ignore_write_events_on_courses:
- self.get_cached_metadata_inheritance_tree(location, force_refresh = True)
-
- def clear_cached_metadata_inheritance_tree(self, location):
- key_name = '{0}/{1}'.format(location.org, location.course)
- if self.metadata_inheritance_cache is not None:
- self.metadata_inheritance_cache.delete(key_name)
+ self.get_cached_metadata_inheritance_tree(location, force_refresh=True)
def _clean_item_data(self, item):
"""
@@ -348,6 +372,13 @@ class MongoModuleStore(ModuleStoreBase):
item['location'] = item['_id']
del item['_id']
+ def _query_children_for_cache_children(self, items):
+ # first get non-draft in a round-trip
+ query = {
+ '_id': {'$in': [namedtuple_to_son(Location(item)) for item in items]}
+ }
+ return list(self.collection.find(query))
+
def _cache_children(self, items, depth=0):
"""
Returns a dictionary mapping Location -> item data, populated with json data
@@ -366,23 +397,23 @@ class MongoModuleStore(ModuleStoreBase):
children.extend(item.get('definition', {}).get('children', []))
data[Location(item['location'])] = item
+ if depth == 0:
+ break
+
# Load all children by id. See
# http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24or
# for or-query syntax
+ to_process = []
if children:
- query = {
- '_id': {'$in': [namedtuple_to_son(Location(child)) for child in children]}
- }
- to_process = self.collection.find(query)
- else:
- to_process = []
+ to_process = self._query_children_for_cache_children(children)
+
# If depth is None, then we just recurse until we hit all the descendents
if depth is not None:
depth -= 1
return data
- def _load_item(self, item, data_cache, should_apply_metadata_inheritence=True):
+ def _load_item(self, item, data_cache, apply_cached_metadata=True):
"""
Load an XModuleDescriptor from item, using the children stored in data_cache
"""
@@ -394,10 +425,9 @@ class MongoModuleStore(ModuleStoreBase):
resource_fs = OSFS(root)
- metadata_inheritance_tree = None
-
- if should_apply_metadata_inheritence:
- metadata_inheritance_tree = self.get_cached_metadata_inheritance_tree(Location(item['location']))
+ cached_metadata = {}
+ if apply_cached_metadata:
+ cached_metadata = self.get_cached_metadata_inheritance_tree(Location(item['location']))
# TODO (cdodge): When the 'split module store' work has been completed, we should remove
# the 'metadata_inheritance_tree' parameter
@@ -408,7 +438,7 @@ class MongoModuleStore(ModuleStoreBase):
resource_fs,
self.error_tracker,
self.render_template,
- metadata_inheritance_tree = metadata_inheritance_tree
+ cached_metadata,
)
return system.load_item(item['location'])
@@ -420,9 +450,9 @@ class MongoModuleStore(ModuleStoreBase):
data_cache = self._cache_children(items, depth)
# if we are loading a course object, if we're not prefetching children (depth != 0) then don't
- # bother with the metadata inheritence
- return [self._load_item(item, data_cache,
- should_apply_metadata_inheritence=(item['location']['category'] != 'course' or depth != 0)) for item in items]
+ # bother with the metadata inheritance
+ return [self._load_item(item, data_cache,
+ apply_cached_metadata=(item['location']['category']!='course' or depth !=0)) for item in items]
def get_courses(self):
'''
@@ -556,7 +586,8 @@ class MongoModuleStore(ModuleStoreBase):
raise Exception('Could not find course at {0}'.format(course_search_location))
if found_cnt > 1:
- raise Exception('Found more than one course at {0}. There should only be one!!! Dump = {1}'.format(course_search_location, courses))
+ raise Exception('Found more than one course at {0}. There should only be one!!! '
+ 'Dump = {1}'.format(course_search_location, courses))
return courses[0]
@@ -628,7 +659,7 @@ class MongoModuleStore(ModuleStoreBase):
self._update_single_item(location, {'metadata': metadata})
# recompute (and update) the metadata inheritance tree which is cached
- self.refresh_cached_metadata_inheritance_tree(loc)
+ self.refresh_cached_metadata_inheritance_tree(loc)
def delete_item(self, location):
"""
@@ -651,7 +682,7 @@ class MongoModuleStore(ModuleStoreBase):
# from overriding our default value set in the init method.
safe=self.collection.safe)
# recompute (and update) the metadata inheritance tree which is cached
- self.refresh_cached_metadata_inheritance_tree(Location(location))
+ self.refresh_cached_metadata_inheritance_tree(Location(location))
def get_parent_locations(self, location, course_id):
'''Find all locations that are the parents of this location in this
@@ -672,4 +703,10 @@ class MongoModuleStore(ModuleStoreBase):
# DraftModuleStore is first, because it needs to intercept calls to MongoModuleStore
class DraftMongoModuleStore(DraftModuleStore, MongoModuleStore):
+ """
+ Version of MongoModuleStore with draft capability mixed in
+ """
+ # MRO note: DraftModuleStore is listed first in the bases so that its
+ # overrides intercept calls that would otherwise resolve to
+ # MongoModuleStore (see the comment above this class definition).
pass
diff --git a/common/lib/xmodule/xmodule/modulestore/store_utilities.py b/common/lib/xmodule/xmodule/modulestore/store_utilities.py
index cb3cd375a7..2935069090 100644
--- a/common/lib/xmodule/xmodule/modulestore/store_utilities.py
+++ b/common/lib/xmodule/xmodule/modulestore/store_utilities.py
@@ -136,3 +136,4 @@ def delete_course(modulestore, contentstore, source_location, commit = False):
modulestore.delete_item(source_location)
return True
+
diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py
index 6f6f47ba85..061d70d09f 100644
--- a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py
+++ b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py
@@ -1,6 +1,7 @@
import pymongo
-from nose.tools import assert_equals, assert_raises, assert_not_equals, with_setup
+from mock import Mock
+from nose.tools import assert_equals, assert_raises, assert_not_equals, with_setup, assert_false
from pprint import pprint
from xmodule.modulestore import Location
diff --git a/common/lib/xmodule/xmodule/modulestore/xml_importer.py b/common/lib/xmodule/xmodule/modulestore/xml_importer.py
index 6a4ce5131b..7c1f1fb4f7 100644
--- a/common/lib/xmodule/xmodule/modulestore/xml_importer.py
+++ b/common/lib/xmodule/xmodule/modulestore/xml_importer.py
@@ -356,6 +356,26 @@ def remap_namespace(module, target_location_namespace):
return module
+def validate_no_non_editable_metadata(module_store, course_id, category, allowed=None):
+ '''
+ Assert that there is no metadata within a particular category that we can't support editing
+ However we always allow 'display_name' and 'xml_attributes'
+ '''
+ _allowed = (allowed if allowed is not None else []) + ['xml_attributes', 'display_name']
+
+ err_cnt = 0
+ for module_loc in module_store.modules[course_id]:
+ module = module_store.modules[course_id][module_loc]
+ if module.location.category == category:
+ my_metadata = dict(own_metadata(module))
+ for key in my_metadata.keys():
+ if key not in _allowed:
+ err_cnt = err_cnt + 1
+ print ': found metadata on {0}. Studio will not support editing this piece of metadata, so it is not allowed. Metadata: {1} = {2}'. format(module.location.url(), key, my_metadata[key])
+
+ return err_cnt
+
+
def validate_category_hierarchy(module_store, course_id, parent_category, expected_child_category):
err_cnt = 0
@@ -440,6 +460,13 @@ def perform_xlint(data_dir, course_dirs,
err_cnt += validate_category_hierarchy(module_store, course_id, "chapter", "sequential")
# constrain that sequentials only have 'verticals'
err_cnt += validate_category_hierarchy(module_store, course_id, "sequential", "vertical")
+ # don't allow metadata on verticals, since we can't edit them in studio
+ err_cnt += validate_no_non_editable_metadata(module_store, course_id, "vertical")
+ # don't allow metadata on chapters, since we can't edit them in studio
+ err_cnt += validate_no_non_editable_metadata(module_store, course_id, "chapter",['start'])
+ # don't allow metadata on sequences that we can't edit
+ err_cnt += validate_no_non_editable_metadata(module_store, course_id, "sequential",
+ ['due','format','start','graded'])
# check for a presence of a course marketing video
location_elements = course_id.split('/')
@@ -456,3 +483,5 @@ def perform_xlint(data_dir, course_dirs,
print "This course can be imported, but some errors may occur during the run of the course. It is recommend that you fix your courseware before importing"
else:
print "This course can be imported successfully."
+
+ return err_cnt
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
index 98a54601de..59df481954 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
@@ -19,12 +19,8 @@ log = logging.getLogger("mitx.courseware")
# attempts specified in xml definition overrides this.
MAX_ATTEMPTS = 1
-# Set maximum available number of points.
-# Overriden by max_score specified in xml.
-MAX_SCORE = 1
-
#The highest score allowed for the overall xmodule and for each rubric point
-MAX_SCORE_ALLOWED = 3
+MAX_SCORE_ALLOWED = 50
#If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress
#Metadata overrides this.
@@ -88,7 +84,7 @@ class CombinedOpenEndedV1Module():
Definition file should have one or many task blocks, a rubric block, and a prompt block:
Sample file:
-
+
Blah blah rubric.
@@ -143,23 +139,19 @@ class CombinedOpenEndedV1Module():
self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
- display_due_date_string = self.instance_state.get('due', None)
+ due_date = self.instance_state.get('due', None)
grace_period_string = self.instance_state.get('graceperiod', None)
try:
- self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
+ self.timeinfo = TimeInfo(due_date, grace_period_string)
except:
log.error("Error parsing due date information in location {0}".format(location))
raise
self.display_due_date = self.timeinfo.display_due_date
- # Used for progress / grading. Currently get credit just for
- # completion (doesn't matter if you self-assessed correct/incorrect).
- self._max_score = self.instance_state.get('max_score', MAX_SCORE)
-
self.rubric_renderer = CombinedOpenEndedRubric(system, True)
rubric_string = stringify_children(definition['rubric'])
- self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED, self._max_score)
+ self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
#Static data is passed to the child modules to render
self.static_data = {
@@ -363,7 +355,15 @@ class CombinedOpenEndedV1Module():
"""
self.update_task_states()
html = self.current_task.get_html(self.system)
- return_html = rewrite_links(html, self.rewrite_content_links)
+ return_html = html
+ try:
+ #Without try except block, get this error:
+ # File "/home/vik/mitx_all/mitx/common/lib/xmodule/xmodule/x_module.py", line 263, in rewrite_content_links
+ # if link.startswith(XASSET_SRCREF_PREFIX):
+ # Placing try except so that if the error is fixed, this code will start working again.
+ return_html = rewrite_links(html, self.rewrite_content_links)
+ except:
+ pass
return return_html
def get_current_attributes(self, task_number):
@@ -782,7 +782,7 @@ class CombinedOpenEndedV1Descriptor():
template_dir_name = "combinedopenended"
def __init__(self, system):
- self.system =system
+ self.system = system
@classmethod
def definition_from_xml(cls, xml_object, system):
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
index bceb12e444..6245d4d31c 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
@@ -79,7 +79,7 @@ class CombinedOpenEndedRubric(object):
raise RubricParsingError(error_message)
return {'success': success, 'html': html, 'rubric_scores': rubric_scores}
- def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
+ def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed):
rubric_dict = self.render_rubric(rubric_string)
success = rubric_dict['success']
rubric_feedback = rubric_dict['html']
@@ -101,12 +101,7 @@ class CombinedOpenEndedRubric(object):
log.error(error_message)
raise RubricParsingError(error_message)
- if int(total) != int(max_score):
- #This is a staff_facing_error
- error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}. Contact the learning sciences group for assistance.".format(
- max_score, location, total)
- log.error(error_msg)
- raise RubricParsingError(error_msg)
+ return int(total)
def extract_categories(self, element):
'''
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
index 6956f336a5..2eb9502269 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
@@ -36,7 +36,7 @@ ALLOWABLE_IMAGE_SUFFIXES = [
]
#Maximum allowed dimensions (x and y) for an uploaded image
-MAX_ALLOWED_IMAGE_DIM = 1500
+MAX_ALLOWED_IMAGE_DIM = 2000
#Dimensions to which image is resized before it is evaluated for color count, etc
MAX_IMAGE_DIM = 150
@@ -178,7 +178,7 @@ class URLProperties(object):
Runs all available url tests
@return: True if URL passes tests, false if not.
"""
- url_is_okay = self.check_suffix() and self.check_if_parses() and self.check_domain()
+ url_is_okay = self.check_suffix() and self.check_if_parses()
return url_is_okay
def check_domain(self):
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
index 1f84d2ab8c..8373700837 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
@@ -174,7 +174,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
str(len(self.child_history)))
xheader = xqueue_interface.make_xheader(
- lms_callback_url=system.xqueue['callback_url'],
+ lms_callback_url=system.xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.message_queue_name
)
@@ -224,7 +224,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
anonymous_student_id +
str(len(self.child_history)))
- xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'],
+ xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.queue_name)
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
index 2e49565bec..b9341f0cbe 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
@@ -357,10 +357,6 @@ class OpenEndedChild(object):
if get_data['can_upload_files'] in ['true', '1']:
has_file_to_upload = True
file = get_data['student_file'][0]
- if self.system.track_fuction:
- self.system.track_function('open_ended_image_upload', {'filename': file.name})
- else:
- log.info("No tracking function found when uploading image.")
uploaded_to_s3, image_ok, s3_public_url = self.upload_image_to_s3(file)
if uploaded_to_s3:
image_tag = self.generate_image_tag_from_url(s3_public_url, file.name)
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/xblock_field_types.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/xblock_field_types.py
new file mode 100644
index 0000000000..2dcb7a4cda
--- /dev/null
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/xblock_field_types.py
@@ -0,0 +1,14 @@
+from xblock.core import Integer, Float
+
+
+class StringyFloat(Float):
+ """
+ A model type that converts from string to floats when reading from json
+ """
+
+ def from_json(self, value):
+ try:
+ return float(value)
+ except:
+ return None
+
diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py
index e18f2ceca3..5d064378bf 100644
--- a/common/lib/xmodule/xmodule/peer_grading_module.py
+++ b/common/lib/xmodule/xmodule/peer_grading_module.py
@@ -6,13 +6,13 @@ from lxml import etree
from datetime import datetime
from pkg_resources import resource_string
from .capa_module import ComplexEncoder
-from .stringify import stringify_children
from .x_module import XModule
from xmodule.raw_module import RawDescriptor
-from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from .timeinfo import TimeInfo
from xblock.core import Object, Integer, Boolean, String, Scope
+from xmodule.open_ended_grading_classes.xblock_field_types import StringyFloat
+from xmodule.fields import Date
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, GradingServiceError, MockPeerGradingService
@@ -28,13 +28,18 @@ EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please
class PeerGradingFields(object):
- use_for_single_location = Boolean(help="Whether to use this for a single location or as a panel.", default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings)
- link_to_location = String(help="The location this problem is linked to.", default=LINK_TO_LOCATION, scope=Scope.settings)
- is_graded = Boolean(help="Whether or not this module is scored.",default=IS_GRADED, scope=Scope.settings)
- display_due_date_string = String(help="Due date that should be displayed.", default=None, scope=Scope.settings)
+ use_for_single_location = Boolean(help="Whether to use this for a single location or as a panel.",
+ default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings)
+ link_to_location = String(help="The location this problem is linked to.", default=LINK_TO_LOCATION,
+ scope=Scope.settings)
+ is_graded = Boolean(help="Whether or not this module is scored.", default=IS_GRADED, scope=Scope.settings)
+ due_date = Date(help="Due date that should be displayed.", default=None, scope=Scope.settings)
grace_period_string = String(help="Amount of grace to give on the due date.", default=None, scope=Scope.settings)
- max_grade = Integer(help="The maximum grade that a student can receieve for this problem.", default=MAX_SCORE, scope=Scope.settings)
- student_data_for_location = Object(help="Student data for a given peer grading problem.", default=json.dumps({}),scope=Scope.student_state)
+ max_grade = Integer(help="The maximum grade that a student can receive for this problem.", default=MAX_SCORE,
+ scope=Scope.settings)
+ student_data_for_location = Object(help="Student data for a given peer grading problem.", default=json.dumps({}),
+ scope=Scope.student_state)
+ weight = StringyFloat(help="How much to weight this problem by", scope=Scope.settings)
class PeerGradingModule(PeerGradingFields, XModule):
@@ -72,7 +77,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
self._model_data['due'] = due_date
try:
- self.timeinfo = TimeInfo(self.display_due_date_string, self.grace_period_string)
+ self.timeinfo = TimeInfo(self.due_date, self.grace_period_string)
except:
log.error("Error parsing due date information in location {0}".format(location))
raise
diff --git a/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml
index f2aba0e18b..515d9071b1 100644
--- a/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml
+++ b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml
@@ -2,12 +2,12 @@
metadata:
display_name: Open Ended Response
max_attempts: 1
- max_score: 1
is_graded: False
version: 1
display_name: Open Ended Response
skip_spelling_checks: False
accept_file_upload: False
+ weight: ""
data: |
diff --git a/common/lib/xmodule/xmodule/templates/course/empty.yaml b/common/lib/xmodule/xmodule/templates/course/empty.yaml
index cb2f3bcec6..89f1bfcf21 100644
--- a/common/lib/xmodule/xmodule/templates/course/empty.yaml
+++ b/common/lib/xmodule/xmodule/templates/course/empty.yaml
@@ -2,5 +2,123 @@
metadata:
display_name: Empty
start: 2020-10-10T10:00
+ checklists: [
+ {"short_description" : "Getting Started With Studio",
+ "items" : [{"short_description": "Add Course Team Members",
+ "long_description": "Grant your collaborators permission to edit your course so you can work together.",
+ "is_checked": false,
+ "action_url": "ManageUsers",
+ "action_text": "Edit Course Team",
+ "action_external": false},
+ {"short_description": "Set Important Dates for Your Course",
+ "long_description": "Establish your course's student enrollment and launch dates on the Schedule and Details page.",
+ "is_checked": false,
+ "action_url": "SettingsDetails",
+ "action_text": "Edit Course Details & Schedule",
+ "action_external": false},
+ {"short_description": "Draft Your Course's Grading Policy",
+ "long_description": "Set up your assignment types and grading policy even if you haven't created all your assignments.",
+ "is_checked": false,
+ "action_url": "SettingsGrading",
+ "action_text": "Edit Grading Settings",
+ "action_external": false},
+ {"short_description": "Explore the Other Studio Checklists",
+ "long_description": "Discover other available course authoring tools, and find help when you need it.",
+ "is_checked": false,
+ "action_url": "",
+ "action_text": "",
+ "action_external": false}]
+ },
+ {"short_description" : "Draft a Rough Course Outline",
+ "items" : [{"short_description": "Create Your First Section and Subsection",
+ "long_description": "Use your course outline to build your first Section and Subsection.",
+ "is_checked": false,
+ "action_url": "CourseOutline",
+ "action_text": "Edit Course Outline",
+ "action_external": false},
+ {"short_description": "Set Section Release Dates",
+ "long_description": "Specify the release dates for each Section in your course. Sections become visible to students on their release dates.",
+ "is_checked": false,
+ "action_url": "CourseOutline",
+ "action_text": "Edit Course Outline",
+ "action_external": false},
+ {"short_description": "Designate a Subsection as Graded",
+ "long_description": "Set a Subsection to be graded as a specific assignment type. Assignments within graded Subsections count toward a student's final grade.",
+ "is_checked": false,
+ "action_url": "CourseOutline",
+ "action_text": "Edit Course Outline",
+ "action_external": false},
+ {"short_description": "Reordering Course Content",
+ "long_description": "Use drag and drop to reorder the content in your course.",
+ "is_checked": false,
+ "action_url": "CourseOutline",
+ "action_text": "Edit Course Outline",
+ "action_external": false},
+ {"short_description": "Renaming Sections",
+ "long_description": "Rename Sections by clicking the Section name from the Course Outline.",
+ "is_checked": false,
+ "action_url": "CourseOutline",
+ "action_text": "Edit Course Outline",
+ "action_external": false},
+ {"short_description": "Deleting Course Content",
+ "long_description": "Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is no Undo function.",
+ "is_checked": false,
+ "action_url": "CourseOutline",
+ "action_text": "Edit Course Outline",
+ "action_external": false},
+ {"short_description": "Add an Instructor-Only Section to Your Outline",
+ "long_description": "Some course authors find using a section for unsorted, in-progress work useful. To do this, create a section and set the release date to the distant future.",
+ "is_checked": false,
+ "action_url": "CourseOutline",
+ "action_text": "Edit Course Outline",
+ "action_external": false}]
+ },
+ {"short_description" : "Explore edX's Support Tools",
+ "items" : [{"short_description": "Explore the Studio Help Forum",
+ "long_description": "Access the Studio Help forum from the menu that appears when you click your user name in the top right corner of Studio.",
+ "is_checked": false,
+ "action_url": "http://help.edge.edx.org/",
+ "action_text": "Visit Studio Help",
+ "action_external": true},
+ {"short_description": "Enroll in edX 101",
+ "long_description": "Register for edX 101, edX's primer for course creation.",
+ "is_checked": false,
+ "action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
+ "action_text": "Register for edX 101",
+ "action_external": true},
+ {"short_description": "Download the Studio Documentation",
+ "long_description": "Download the searchable Studio reference documentation in PDF form.",
+ "is_checked": false,
+ "action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
+ "action_text": "Download Documentation",
+ "action_external": true}]
+ },
+ {"short_description" : "Draft Your Course About Page",
+ "items" : [{"short_description": "Draft a Course Description",
+ "long_description": "Courses on edX have an About page that includes a course video, description, and more. Draft the text students will read before deciding to enroll in your course.",
+ "is_checked": false,
+ "action_url": "SettingsDetails",
+ "action_text": "Edit Course Schedule & Details",
+ "action_external": false},
+ {"short_description": "Add Staff Bios",
+ "long_description": "Showing prospective students who their instructor will be is helpful. Include staff bios on the course About page.",
+ "is_checked": false,
+ "action_url": "SettingsDetails",
+ "action_text": "Edit Course Schedule & Details",
+ "action_external": false},
+ {"short_description": "Add Course FAQs",
+ "long_description": "Include a short list of frequently asked questions about your course.",
+ "is_checked": false,
+ "action_url": "SettingsDetails",
+ "action_text": "Edit Course Schedule & Details",
+ "action_external": false},
+ {"short_description": "Add Course Prerequisites",
+ "long_description": "Let students know what knowledge and/or skills they should have before they enroll in your course.",
+ "is_checked": false,
+ "action_url": "SettingsDetails",
+ "action_text": "Edit Course Schedule & Details",
+ "action_external": false}]
+ }
+ ]
data: { 'textbooks' : [ ], 'wiki_slug' : null }
children: []
diff --git a/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml b/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml
index cb8e29dfa2..1ba8f978d6 100644
--- a/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml
+++ b/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml
@@ -6,6 +6,7 @@ metadata:
link_to_location: None
is_graded: False
max_grade: 1
+ weight: ""
data: |
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index d2458cb3d0..1fefbb64cd 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -7,6 +7,8 @@ import random
import xmodule
import capa
+from capa.responsetypes import StudentInputError, \
+ LoncapaProblemError, ResponseError
from xmodule.capa_module import CapaModule
from xmodule.modulestore import Location
from lxml import etree
@@ -407,7 +409,7 @@ class CapaModuleTest(unittest.TestCase):
mock_html.return_value = "Test HTML"
# Check the problem
- get_request_dict = { CapaFactory.input_key(): '3.14'}
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect that the problem is marked correct
@@ -428,7 +430,7 @@ class CapaModuleTest(unittest.TestCase):
mock_is_correct.return_value = False
# Check the problem
- get_request_dict = { CapaFactory.input_key(): '0'}
+ get_request_dict = {CapaFactory.input_key(): '0'}
result = module.check_problem(get_request_dict)
# Expect that the problem is marked correct
@@ -446,7 +448,7 @@ class CapaModuleTest(unittest.TestCase):
with patch('xmodule.capa_module.CapaModule.closed') as mock_closed:
mock_closed.return_value = True
with self.assertRaises(xmodule.exceptions.NotFoundError):
- get_request_dict = { CapaFactory.input_key(): '3.14'}
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
module.check_problem(get_request_dict)
# Expect that number of attempts NOT incremented
@@ -492,7 +494,7 @@ class CapaModuleTest(unittest.TestCase):
mock_is_queued.return_value = True
mock_get_queuetime.return_value = datetime.datetime.now()
- get_request_dict = { CapaFactory.input_key(): '3.14'}
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
@@ -502,21 +504,61 @@ class CapaModuleTest(unittest.TestCase):
self.assertEqual(module.attempts, 1)
- def test_check_problem_student_input_error(self):
- module = CapaFactory.create(attempts=1)
+ def test_check_problem_error(self):
- # Simulate a student input exception
- with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
- mock_grade.side_effect = capa.responsetypes.StudentInputError('test error')
+ # Try each exception that capa_module should handle
+ for exception_class in [StudentInputError,
+ LoncapaProblemError,
+ ResponseError]:
- get_request_dict = { CapaFactory.input_key(): '3.14'}
- result = module.check_problem(get_request_dict)
+ # Create the module
+ module = CapaFactory.create(attempts=1)
+
+ # Ensure that the user is NOT staff
+ module.system.user_is_staff = False
+
+ # Simulate answering a problem that raises the exception
+ with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
+ mock_grade.side_effect = exception_class('test error')
+
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
+ result = module.check_problem(get_request_dict)
+
+ # Expect an AJAX alert message in 'success'
+ expected_msg = 'Error: test error'
+ self.assertEqual(expected_msg, result['success'])
+
+ # Expect that the number of attempts is NOT incremented
+ self.assertEqual(module.attempts, 1)
+
+ def test_check_problem_error_with_staff_user(self):
+
+ # Try each exception that capa module should handle
+ for exception_class in [StudentInputError,
+ LoncapaProblemError,
+ ResponseError]:
+
+ # Create the module
+ module = CapaFactory.create(attempts=1)
+
+ # Ensure that the user IS staff
+ module.system.user_is_staff = True
+
+ # Simulate answering a problem that raises an exception
+ with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
+ mock_grade.side_effect = exception_class('test error')
+
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
+ result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
self.assertTrue('test error' in result['success'])
- # Expect that the number of attempts is NOT incremented
- self.assertEqual(module.attempts, 1)
+ # We DO include traceback information for staff users
+ self.assertTrue('Traceback' in result['success'])
+
+ # Expect that the number of attempts is NOT incremented
+ self.assertEqual(module.attempts, 1)
def test_reset_problem(self):
@@ -573,11 +615,11 @@ class CapaModuleTest(unittest.TestCase):
module = CapaFactory.create(done=False)
# Save the problem
- get_request_dict = { CapaFactory.input_key(): '3.14'}
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that answers are saved to the problem
- expected_answers = { CapaFactory.answer_key(): '3.14'}
+ expected_answers = {CapaFactory.answer_key(): '3.14'}
self.assertEqual(module.lcp.student_answers, expected_answers)
# Expect that the result is success
@@ -592,7 +634,7 @@ class CapaModuleTest(unittest.TestCase):
mock_closed.return_value = True
# Try to save the problem
- get_request_dict = { CapaFactory.input_key(): '3.14'}
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that the result is failure
@@ -603,7 +645,7 @@ class CapaModuleTest(unittest.TestCase):
module = CapaFactory.create(rerandomize='always', done=True)
# Try to save
- get_request_dict = { CapaFactory.input_key(): '3.14'}
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that we cannot save
@@ -614,7 +656,7 @@ class CapaModuleTest(unittest.TestCase):
module = CapaFactory.create(rerandomize='never', done=True)
# Try to save
- get_request_dict = { CapaFactory.input_key(): '3.14'}
+ get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that we succeed
@@ -626,7 +668,7 @@ class CapaModuleTest(unittest.TestCase):
# Just in case, we also check what happens if we have
# more attempts than allowed.
attempts = random.randint(1, 10)
- module = CapaFactory.create(attempts=attempts -1, max_attempts=attempts)
+ module = CapaFactory.create(attempts=attempts - 1, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Final Check")
module = CapaFactory.create(attempts=attempts, max_attempts=attempts)
@@ -636,14 +678,14 @@ class CapaModuleTest(unittest.TestCase):
self.assertEqual(module.check_button_name(), "Final Check")
# Otherwise, button name is "Check"
- module = CapaFactory.create(attempts=attempts -2, max_attempts=attempts)
+ module = CapaFactory.create(attempts=attempts - 2, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Check")
- module = CapaFactory.create(attempts=attempts -3, max_attempts=attempts)
+ module = CapaFactory.create(attempts=attempts - 3, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Check")
# If no limit on attempts, then always show "Check"
- module = CapaFactory.create(attempts=attempts -3)
+ module = CapaFactory.create(attempts=attempts - 3)
self.assertEqual(module.check_button_name(), "Check")
module = CapaFactory.create(attempts=0)
@@ -859,3 +901,97 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the module has created a new dummy problem with the error
self.assertNotEqual(original_problem, module.lcp)
+
+
+ def test_random_seed_no_change(self):
+
+ # Run the test for each possible rerandomize value
+ for rerandomize in ['never', 'per_student', 'always', 'onreset']:
+ module = CapaFactory.create(rerandomize=rerandomize)
+
+ # Get the seed
+ # By this point, the module should have persisted the seed
+ seed = module.seed
+ self.assertTrue(seed is not None)
+
+ # If we're not rerandomizing, the seed is always set
+ # to the same value (1)
+ if rerandomize == 'never':
+ self.assertEqual(seed, 1)
+
+ # Check the problem
+ get_request_dict = { CapaFactory.input_key(): '3.14'}
+ module.check_problem(get_request_dict)
+
+ # Expect that the seed is the same
+ self.assertEqual(seed, module.seed)
+
+ # Save the problem
+ module.save_problem(get_request_dict)
+
+ # Expect that the seed is the same
+ self.assertEqual(seed, module.seed)
+
+ def test_random_seed_with_reset(self):
+
+ def _reset_and_get_seed(module):
+ '''
+ Reset the XModule and return the module's seed
+ '''
+
+ # Simulate submitting an attempt
+ # We need to do this, or reset_problem() will
+ # fail with a complaint that we haven't submitted
+ # the problem yet.
+ module.done = True
+
+ # Reset the problem
+ module.reset_problem({})
+
+ # Return the seed
+ return module.seed
+
+ def _retry_and_check(num_tries, test_func):
+ '''
+ Returns True if *test_func* was successful
+ (returned True) within *num_tries* attempts
+
+ *test_func* must be a function
+ of the form test_func() -> bool
+ '''
+ success = False
+ for i in range(num_tries):
+ if test_func() is True:
+ success = True
+ break
+ return success
+
+ # Run the test for each possible rerandomize value
+ for rerandomize in ['never', 'per_student', 'always', 'onreset']:
+ module = CapaFactory.create(rerandomize=rerandomize)
+
+ # Get the seed
+ # By this point, the module should have persisted the seed
+ seed = module.seed
+ self.assertTrue(seed is not None)
+
+ # We do NOT want the seed to reset if rerandomize
+ # is set to 'never' -- it should still be 1
+ # The seed also stays the same if we're randomizing
+ # 'per_student': the same student should see the same problem
+ if rerandomize in ['never', 'per_student']:
+ self.assertEqual(seed, _reset_and_get_seed(module))
+
+ # Otherwise, we expect the seed to change
+ # to another valid seed
+ else:
+
+ # Since there's a small chance we might get the
+ # same seed again, give it 5 chances
+ # to generate a different seed
+ success = _retry_and_check(5,
+ lambda: _reset_and_get_seed(module) != seed)
+
+ self.assertTrue(module.seed != None)
+ msg = 'Could not get a new seed from reset after 5 tries'
+ self.assertTrue(success, msg)
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
index 09c86baf27..1950389399 100644
--- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -5,11 +5,15 @@ import unittest
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
+from xmodule.combined_open_ended_module import CombinedOpenEndedModule
from xmodule.modulestore import Location
from lxml import etree
import capa.xqueue_interface as xqueue_interface
from datetime import datetime
+import logging
+
+log = logging.getLogger(__name__)
from . import test_system
@@ -57,7 +61,7 @@ class OpenEndedChildTest(unittest.TestCase):
def setUp(self):
self.test_system = test_system()
self.openendedchild = OpenEndedChild(self.test_system, self.location,
- self.definition, self.descriptor, self.static_data, self.metadata)
+ self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
@@ -183,7 +187,12 @@ class OpenEndedModuleTest(unittest.TestCase):
self.test_system.location = self.location
self.mock_xqueue = MagicMock()
self.mock_xqueue.send_to_queue.return_value = (None, "Message")
- self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue',
+
+ def constructed_callback(dispatch="score_update"):
+ return dispatch
+
+ self.test_system.xqueue = {'interface': self.mock_xqueue, 'construct_callback': constructed_callback,
+ 'default_queuename': 'testqueue',
'waittime': 1}
self.openendedmodule = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
@@ -278,7 +287,18 @@ class OpenEndedModuleTest(unittest.TestCase):
class CombinedOpenEndedModuleTest(unittest.TestCase):
location = Location(["i4x", "edX", "open_ended", "combinedopenended",
"SampleQuestion"])
-
+ definition_template = """
+
+ {rubric}
+ {prompt}
+
+ {task1}
+
+
+ {task2}
+
+
+ """
prompt = "This is a question prompt "
rubric = '''
@@ -332,10 +352,15 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
'''
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
- descriptor = Mock()
+ full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
+ descriptor = Mock(data=full_definition)
+ test_system = test_system()
+ combinedoe_container = CombinedOpenEndedModule(test_system,
+ location,
+ descriptor,
+ model_data={'data': full_definition, 'weight' : '1'})
def setUp(self):
- self.test_system = test_system()
# TODO: this constructor call is definitely wrong, but neither branch
# of the merge matches the module constructor. Someone (Vik?) should fix this.
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
@@ -365,3 +390,19 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
changed = self.combinedoe.update_task_states()
self.assertTrue(changed)
+
+ def test_get_max_score(self):
+ changed = self.combinedoe.update_task_states()
+ self.combinedoe.state = "done"
+ self.combinedoe.is_scored = True
+ max_score = self.combinedoe.max_score()
+ self.assertEqual(max_score, 1)
+
+ def test_container_get_max_score(self):
+ #The progress view requires that this function be exposed
+ max_score = self.combinedoe_container.max_score()
+ self.assertEqual(max_score, None)
+
+ def test_container_weight(self):
+ weight = self.combinedoe_container.weight
+ self.assertEqual(weight,1)
diff --git a/common/lib/xmodule/xmodule/tests/test_course_module.py b/common/lib/xmodule/xmodule/tests/test_course_module.py
index 59099b0dff..eda9cf386c 100644
--- a/common/lib/xmodule/xmodule/tests/test_course_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_course_module.py
@@ -1,5 +1,6 @@
import unittest
from time import strptime
+
from fs.memoryfs import MemoryFS
from mock import Mock, patch
@@ -89,25 +90,41 @@ class IsNewCourseTestCase(unittest.TestCase):
((day2, None, None), (day1, None, None), self.assertLess),
((day1, None, None), (day1, None, None), self.assertEqual),
- # Non-parseable advertised starts are ignored in preference
- # to actual starts
- ((day2, None, "Spring 2013"), (day1, None, "Fall 2012"), self.assertLess),
- ((day1, None, "Spring 2013"), (day1, None, "Fall 2012"), self.assertEqual),
+ # Non-parseable advertised starts are ignored in preference to actual starts
+ ((day2, None, "Spring"), (day1, None, "Fall"), self.assertLess),
+ ((day1, None, "Spring"), (day1, None, "Fall"), self.assertEqual),
+
+ # Partially parsable advertised starts should take priority over start dates
+ ((day2, None, "October 2013"), (day2, None, "October 2012"), self.assertLess),
+ ((day2, None, "October 2013"), (day1, None, "October 2013"), self.assertEqual),
# Parseable advertised starts take priority over start dates
((day1, None, day2), (day1, None, day1), self.assertLess),
((day2, None, day2), (day1, None, day2), self.assertEqual),
-
]
- data = []
for a, b, assertion in dates:
a_score = self.get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score
b_score = self.get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score
print "Comparing %s to %s" % (a, b)
assertion(a_score, b_score)
+ @patch('xmodule.course_module.time.gmtime')
+ def test_start_date_text(self, gmtime_mock):
+ gmtime_mock.return_value = NOW
+ settings = [
+ # start, advertized, result
+ ('2012-12-02T12:00', None, 'Dec 02, 2012'),
+ ('2012-12-02T12:00', '2011-11-01T12:00', 'Nov 01, 2011'),
+ ('2012-12-02T12:00', 'Spring 2012', 'Spring 2012'),
+ ('2012-12-02T12:00', 'November, 2011', 'November, 2011'),
+ ]
+
+ for s in settings:
+ d = self.get_dummy_course(start=s[0], advertised_start=s[1])
+ print "Checking start=%s advertised=%s" % (s[0], s[1])
+ self.assertEqual(d.start_date_text, s[2])
@patch('xmodule.course_module.time.gmtime')
def test_is_newish(self, gmtime_mock):
@@ -125,7 +142,7 @@ class IsNewCourseTestCase(unittest.TestCase):
descriptor = self.get_dummy_course(start='2013-01-15T12:00')
assert(descriptor.is_newish is True)
- descriptor = self.get_dummy_course(start='2013-03-00T12:00')
+ descriptor = self.get_dummy_course(start='2013-03-01T12:00')
assert(descriptor.is_newish is True)
descriptor = self.get_dummy_course(start='2012-10-15T12:00')
diff --git a/common/lib/xmodule/xmodule/tests/test_date_utils.py b/common/lib/xmodule/xmodule/tests/test_date_utils.py
new file mode 100644
index 0000000000..2b294e028f
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_date_utils.py
@@ -0,0 +1,26 @@
+# Tests for xmodule.util.date_utils
+
+from nose.tools import assert_equals
+from xmodule.util import date_utils
+import datetime
+import time
+
+def test_get_time_struct_display():
+ assert_equals("", date_utils.get_time_struct_display(None, ""))
+ test_time = time.struct_time((1992, 3, 12, 15, 3, 30, 1, 71, 0))
+ assert_equals("03/12/1992", date_utils.get_time_struct_display(test_time, '%m/%d/%Y'))
+ assert_equals("15:03", date_utils.get_time_struct_display(test_time, '%H:%M'))
+
+
+def test_get_default_time_display():
+ assert_equals("", date_utils.get_default_time_display(None))
+ test_time = time.struct_time((1992, 3, 12, 15, 3, 30, 1, 71, 0))
+ assert_equals("Mar 12, 1992 at 03:03 PM",
+ date_utils.get_default_time_display(test_time))
+
+
+def test_time_to_datetime():
+ assert_equals(None, date_utils.time_to_datetime(None))
+ test_time = time.struct_time((1992, 3, 12, 15, 3, 30, 1, 71, 0))
+ assert_equals(datetime.datetime(1992, 3, 12, 15, 3, 30),
+ date_utils.time_to_datetime(test_time))
diff --git a/common/lib/xmodule/xmodule/tests/test_fields.py b/common/lib/xmodule/xmodule/tests/test_fields.py
new file mode 100644
index 0000000000..7c8872efc1
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_fields.py
@@ -0,0 +1,80 @@
+"""Tests for Date class defined in fields.py."""
+import datetime
+import unittest
+from django.utils.timezone import UTC
+from xmodule.fields import Date
+import time
+
+class DateTest(unittest.TestCase):
+ date = Date()
+
+ @staticmethod
+ def struct_to_datetime(struct_time):
+ return datetime.datetime(struct_time.tm_year, struct_time.tm_mon,
+ struct_time.tm_mday, struct_time.tm_hour,
+ struct_time.tm_min, struct_time.tm_sec, tzinfo=UTC())
+
+ def compare_dates(self, date1, date2, expected_delta):
+ dt1 = DateTest.struct_to_datetime(date1)
+ dt2 = DateTest.struct_to_datetime(date2)
+ self.assertEqual(dt1 - dt2, expected_delta, str(date1) + "-"
+ + str(date2) + "!=" + str(expected_delta))
+
+ def test_from_json(self):
+ '''Test conversion from iso compatible date strings to struct_time'''
+ self.compare_dates(
+ DateTest.date.from_json("2013-01-01"),
+ DateTest.date.from_json("2012-12-31"),
+ datetime.timedelta(days=1))
+ self.compare_dates(
+ DateTest.date.from_json("2013-01-01T00"),
+ DateTest.date.from_json("2012-12-31T23"),
+ datetime.timedelta(hours=1))
+ self.compare_dates(
+ DateTest.date.from_json("2013-01-01T00:00"),
+ DateTest.date.from_json("2012-12-31T23:59"),
+ datetime.timedelta(minutes=1))
+ self.compare_dates(
+ DateTest.date.from_json("2013-01-01T00:00:00"),
+ DateTest.date.from_json("2012-12-31T23:59:59"),
+ datetime.timedelta(seconds=1))
+ self.compare_dates(
+ DateTest.date.from_json("2013-01-01T00:00:00Z"),
+ DateTest.date.from_json("2012-12-31T23:59:59Z"),
+ datetime.timedelta(seconds=1))
+ self.compare_dates(
+ DateTest.date.from_json("2012-12-31T23:00:01-01:00"),
+ DateTest.date.from_json("2013-01-01T00:00:00+01:00"),
+ datetime.timedelta(hours=1, seconds=1))
+
+ def test_return_None(self):
+ self.assertIsNone(DateTest.date.from_json(""))
+ self.assertIsNone(DateTest.date.from_json(None))
+ self.assertIsNone(DateTest.date.from_json(['unknown value']))
+
+ def test_old_due_date_format(self):
+ current = datetime.datetime.today()
+ self.assertEqual(
+ time.struct_time((current.year, 3, 12, 12, 0, 0, 1, 71, 0)),
+ DateTest.date.from_json("March 12 12:00"))
+ self.assertEqual(
+ time.struct_time((current.year, 12, 4, 16, 30, 0, 2, 338, 0)),
+ DateTest.date.from_json("December 4 16:30"))
+
+ def test_to_json(self):
+ '''
+ Test converting time reprs to iso dates
+ '''
+ self.assertEqual(
+ DateTest.date.to_json(
+ time.strptime("2012-12-31T23:59:59Z", "%Y-%m-%dT%H:%M:%SZ")),
+ "2012-12-31T23:59:59Z")
+ self.assertEqual(
+ DateTest.date.to_json(
+ DateTest.date.from_json("2012-12-31T23:59:59Z")),
+ "2012-12-31T23:59:59Z")
+ self.assertEqual(
+ DateTest.date.to_json(
+ DateTest.date.from_json("2012-12-31T23:00:01-01:00")),
+ "2013-01-01T00:00:01Z")
+
diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py b/common/lib/xmodule/xmodule/tests/test_graders.py
index 27416b1d5c..1a9ba50dc4 100644
--- a/common/lib/xmodule/xmodule/tests/test_graders.py
+++ b/common/lib/xmodule/xmodule/tests/test_graders.py
@@ -6,32 +6,34 @@ from xmodule.graders import Score, aggregate_scores
class GradesheetTest(unittest.TestCase):
+ '''Tests the aggregate_scores method'''
def test_weighted_grading(self):
scores = []
Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
- all, graded = aggregate_scores(scores)
- self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary"))
- self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
+ all_total, graded_total = aggregate_scores(scores)
+ self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary"))
+ self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary"))
scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
- all, graded = aggregate_scores(scores)
- self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary"))
- self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
+ all_total, graded_total = aggregate_scores(scores)
+ self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary"))
+ self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary"))
scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
- all, graded = aggregate_scores(scores)
- self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary"))
- self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary"))
+ all_total, graded_total = aggregate_scores(scores)
+ self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary"))
+ self.assertAlmostEqual(graded_total, Score(earned=3, possible=5, graded=True, section="summary"))
scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
- all, graded = aggregate_scores(scores)
- self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary"))
- self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary"))
+ all_total, graded_total = aggregate_scores(scores)
+ self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary"))
+ self.assertAlmostEqual(graded_total, Score(earned=5, possible=10, graded=True, section="summary"))
class GraderTest(unittest.TestCase):
+ '''Tests grader implementations'''
empty_gradesheet = {
}
@@ -44,136 +46,152 @@ class GraderTest(unittest.TestCase):
test_gradesheet = {
'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
- Score(earned=16, possible=16.0, graded=True, section='hw2')],
- #The dropped scores should be from the assignments that don't exist yet
+ Score(earned=16, possible=16.0, graded=True, section='hw2')],
+ # The dropped scores should be from the assignments that don't exist yet
'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped
- Score(earned=1, possible=1.0, graded=True, section='lab2'),
- Score(earned=1, possible=1.0, graded=True, section='lab3'),
- Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped
- Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped
- Score(earned=6, possible=7.0, graded=True, section='lab6'),
- Score(earned=5, possible=6.0, graded=True, section='lab7')],
+ Score(earned=1, possible=1.0, graded=True, section='lab2'),
+ Score(earned=1, possible=1.0, graded=True, section='lab3'),
+ Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped
+ Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped
+ Score(earned=6, possible=7.0, graded=True, section='lab6'),
+ Score(earned=5, possible=6.0, graded=True, section='lab7')],
'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ],
}
- def test_SingleSectionGrader(self):
- midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
- lab4Grader = graders.SingleSectionGrader("Lab", "lab4")
- badLabGrader = graders.SingleSectionGrader("Lab", "lab42")
+ def test_single_section_grader(self):
+ midterm_grader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
+ lab4_grader = graders.SingleSectionGrader("Lab", "lab4")
+ bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42")
- for graded in [midtermGrader.grade(self.empty_gradesheet),
- midtermGrader.grade(self.incomplete_gradesheet),
- badLabGrader.grade(self.test_gradesheet)]:
+ for graded in [midterm_grader.grade(self.empty_gradesheet),
+ midterm_grader.grade(self.incomplete_gradesheet),
+ bad_lab_grader.grade(self.test_gradesheet)]:
self.assertEqual(len(graded['section_breakdown']), 1)
self.assertEqual(graded['percent'], 0.0)
- graded = midtermGrader.grade(self.test_gradesheet)
+ graded = midterm_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.505)
self.assertEqual(len(graded['section_breakdown']), 1)
- graded = lab4Grader.grade(self.test_gradesheet)
+ graded = lab4_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.2)
self.assertEqual(len(graded['section_breakdown']), 1)
- def test_AssignmentFormatGrader(self):
- homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
- noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0)
- #Even though the minimum number is 3, this should grade correctly when 7 assignments are found
- overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2)
- labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
+ def test_assignment_format_grader(self):
+ homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
+ no_drop_grader = graders.AssignmentFormatGrader("Homework", 12, 0)
+ # Even though the minimum number is 3, this should grade correctly when 7 assignments are found
+ overflow_grader = graders.AssignmentFormatGrader("Lab", 3, 2)
+ lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
- #Test the grading of an empty gradesheet
- for graded in [homeworkGrader.grade(self.empty_gradesheet),
- noDropGrader.grade(self.empty_gradesheet),
- homeworkGrader.grade(self.incomplete_gradesheet),
- noDropGrader.grade(self.incomplete_gradesheet)]:
+ # Test the grading of an empty gradesheet
+ for graded in [homework_grader.grade(self.empty_gradesheet),
+ no_drop_grader.grade(self.empty_gradesheet),
+ homework_grader.grade(self.incomplete_gradesheet),
+ no_drop_grader.grade(self.incomplete_gradesheet)]:
self.assertAlmostEqual(graded['percent'], 0.0)
- #Make sure the breakdown includes 12 sections, plus one summary
+ # Make sure the breakdown includes 12 sections, plus one summary
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
- graded = homeworkGrader.grade(self.test_gradesheet)
+ graded = homework_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
- graded = noDropGrader.grade(self.test_gradesheet)
+ graded = no_drop_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
- graded = overflowGrader.grade(self.test_gradesheet)
+ graded = overflow_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments
self.assertEqual(len(graded['section_breakdown']), 7 + 1)
- graded = labGrader.grade(self.test_gradesheet)
+ graded = lab_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
self.assertEqual(len(graded['section_breakdown']), 7 + 1)
- def test_WeightedSubsectionsGrader(self):
- #First, a few sub graders
- homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
- labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
- midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
+ def test_assignment_format_grader_on_single_section_entry(self):
+ midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
+ # Test the grading on a section with one item:
+ for graded in [midterm_grader.grade(self.empty_gradesheet),
+ midterm_grader.grade(self.incomplete_gradesheet)]:
+ self.assertAlmostEqual(graded['percent'], 0.0)
+ # Make sure the breakdown includes just the one summary
+ self.assertEqual(len(graded['section_breakdown']), 0 + 1)
+ self.assertEqual(graded['section_breakdown'][0]['label'], 'Midterm')
- weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25),
- (labGrader, labGrader.category, 0.25),
- (midtermGrader, midtermGrader.category, 0.5)])
+ graded = midterm_grader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.505)
+ self.assertEqual(len(graded['section_breakdown']), 0 + 1)
- overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5),
- (labGrader, labGrader.category, 0.5),
- (midtermGrader, midtermGrader.category, 0.5)])
+ def test_weighted_subsections_grader(self):
+ # First, a few sub graders
+ homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
+ lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
+ # phasing out the use of SingleSectionGraders, and instead using AssignmentFormatGraders that
+ # will act like SingleSectionGraders on single sections.
+ midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
- #The midterm should have all weight on this one
- zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0),
- (labGrader, labGrader.category, 0.0),
- (midtermGrader, midtermGrader.category, 0.5)])
+ weighted_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.25),
+ (lab_grader, lab_grader.category, 0.25),
+ (midterm_grader, midterm_grader.category, 0.5)])
- #This should always have a final percent of zero
- allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0),
- (labGrader, labGrader.category, 0.0),
- (midtermGrader, midtermGrader.category, 0.0)])
+ over_one_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.5),
+ (lab_grader, lab_grader.category, 0.5),
+ (midterm_grader, midterm_grader.category, 0.5)])
- emptyGrader = graders.WeightedSubsectionsGrader([])
+ # The midterm should have all weight on this one
+ zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
+ (lab_grader, lab_grader.category, 0.0),
+ (midterm_grader, midterm_grader.category, 0.5)])
- graded = weightedGrader.grade(self.test_gradesheet)
+ # This should always have a final percent of zero
+ all_zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
+ (lab_grader, lab_grader.category, 0.0),
+ (midterm_grader, midterm_grader.category, 0.0)])
+
+ empty_grader = graders.WeightedSubsectionsGrader([])
+
+ graded = weighted_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
- graded = overOneWeightsGrader.grade(self.test_gradesheet)
+ graded = over_one_weights_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
- graded = zeroWeightsGrader.grade(self.test_gradesheet)
+ graded = zero_weights_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.2525)
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
- graded = allZeroWeightsGrader.grade(self.test_gradesheet)
+ graded = all_zero_weights_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.0)
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
- for graded in [weightedGrader.grade(self.empty_gradesheet),
- weightedGrader.grade(self.incomplete_gradesheet),
- zeroWeightsGrader.grade(self.empty_gradesheet),
- allZeroWeightsGrader.grade(self.empty_gradesheet)]:
+ for graded in [weighted_grader.grade(self.empty_gradesheet),
+ weighted_grader.grade(self.incomplete_gradesheet),
+ zero_weights_grader.grade(self.empty_gradesheet),
+ all_zero_weights_grader.grade(self.empty_gradesheet)]:
self.assertAlmostEqual(graded['percent'], 0.0)
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
- graded = emptyGrader.grade(self.test_gradesheet)
+ graded = empty_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.0)
self.assertEqual(len(graded['section_breakdown']), 0)
self.assertEqual(len(graded['grade_breakdown']), 0)
- def test_graderFromConf(self):
+ def test_grader_from_conf(self):
- #Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
- #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.
+ # Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
+ # in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.
- weightedGrader = graders.grader_from_conf([
+ weighted_grader = graders.grader_from_conf([
{
'type': "Homework",
'min_count': 12,
@@ -196,25 +214,25 @@ class GraderTest(unittest.TestCase):
},
])
- emptyGrader = graders.grader_from_conf([])
+ empty_grader = graders.grader_from_conf([])
- graded = weightedGrader.grade(self.test_gradesheet)
+ graded = weighted_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
self.assertEqual(len(graded['grade_breakdown']), 3)
- graded = emptyGrader.grade(self.test_gradesheet)
+ graded = empty_grader.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.0)
self.assertEqual(len(graded['section_breakdown']), 0)
self.assertEqual(len(graded['grade_breakdown']), 0)
- #Test that graders can also be used instead of lists of dictionaries
- homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
- homeworkGrader2 = graders.grader_from_conf(homeworkGrader)
+ # Test that graders can also be used instead of lists of dictionaries
+ homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
+ homework_grader2 = graders.grader_from_conf(homework_grader)
- graded = homeworkGrader2.grade(self.test_gradesheet)
+ graded = homework_grader2.grade(self.test_gradesheet)
self.assertAlmostEqual(graded['percent'], 0.11)
self.assertEqual(len(graded['section_breakdown']), 12 + 1)
- #TODO: How do we test failure cases? The parser only logs an error when
- #it can't parse something. Maybe it should throw exceptions?
+ # TODO: How do we test failure cases? The parser only logs an error when
+ # it can't parse something. Maybe it should throw exceptions?
diff --git a/common/lib/xmodule/xmodule/tests/test_import.py b/common/lib/xmodule/xmodule/tests/test_import.py
index 37b1d35938..9d73fdcc17 100644
--- a/common/lib/xmodule/xmodule/tests/test_import.py
+++ b/common/lib/xmodule/xmodule/tests/test_import.py
@@ -1,20 +1,16 @@
# -*- coding: utf-8 -*-
-from path import path
import unittest
from fs.memoryfs import MemoryFS
from lxml import etree
from mock import Mock, patch
-from collections import defaultdict
-from xmodule.x_module import XMLParsingSystem, XModuleDescriptor
from xmodule.xml_module import is_pointer_tag
-from xmodule.errortracker import make_error_tracker
from xmodule.modulestore import Location
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore
-from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import compute_inherited_metadata
+from xmodule.fields import Date
from .test_export import DATA_DIR
@@ -137,7 +133,7 @@ class ImportTestCase(BaseCourseTestCase):
- inherited metadata doesn't leak to children.
"""
system = self.get_system()
- v = '1 hour'
+ v = 'March 20 17:00'
url_name = 'test1'
start_xml = '''
-
-
-
{0}
'.format(saxutils.escape(repr(context)))
+def calledback_url(dispatch = 'score_update'):
+ return dispatch
+
+xqueue_interface = MagicMock()
+xqueue_interface.send_to_queue.return_value = (0, 'Success!')
test_system = Mock(
ajax_url='courses/course_id/modx/a_location',
@@ -26,7 +31,7 @@ test_system = Mock(
user=Mock(),
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
debug=True,
- xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10},
+ xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
anonymous_student_id='student'
)
diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py
index 360fd9f2f6..250cedd549 100644
--- a/common/lib/capa/capa/tests/test_inputtypes.py
+++ b/common/lib/capa/capa/tests/test_inputtypes.py
@@ -23,6 +23,7 @@ import xml.sax.saxutils as saxutils
from . import test_system
from capa import inputtypes
+from mock import ANY
# just a handy shortcut
lookup_tag = inputtypes.registry.get_class_for_tag
@@ -300,6 +301,98 @@ class CodeInputTest(unittest.TestCase):
self.assertEqual(context, expected)
+class MatlabTest(unittest.TestCase):
+ '''
+ Test Matlab input types
+ '''
+ def setUp(self):
+ self.rows = '10'
+ self.cols = '80'
+ self.tabsize = '4'
+ self.mode = ""
+ self.payload = "payload"
+ self.linenumbers = 'true'
+ self.xml = """