Merge branch 'master' into jonahstanley/add-courseteam-tests

Conflicts:
	cms/djangoapps/contentstore/features/common.py
This commit is contained in:
JonahStanley
2013-06-19 11:29:43 -04:00
80 changed files with 9672 additions and 674 deletions

View File

@@ -5,6 +5,13 @@ These are notable changes in edx-platform. This is a rolling list of changes,
in roughly chronological order, most recent first. Add your entries at or near
the top. Include a label indicating the component affected.
LMS: Problem rescoring. Added options on the Grades tab of the
Instructor Dashboard to allow all students' submissions for a
particular problem to be rescored. Also supports resetting all
students' number of attempts to zero. Provides a list of background
tasks that are currently running for the course, and an option to
see a history of background tasks for a given problem.
LMS: Forums. Added handling for case where discussion module can get `None` as
value of lms.start in `lms/djangoapps/django_comment_client/utils.py`

View File

@@ -39,8 +39,6 @@ def get_users_in_course_group_by_role(location, role):
'''
Create all permission groups for a new course and subscribe the caller into those roles
'''
def create_all_course_groups(creator, location):
create_new_course_group(creator, location, INSTRUCTOR_ROLE_NAME)
create_new_course_group(creator, location, STAFF_ROLE_NAME)
@@ -57,13 +55,11 @@ def create_new_course_group(creator, location, role):
return
'''
This is to be called only by either a command line code path or through a app which has already
asserted permissions
'''
def _delete_course_group(location):
'''
This is to be called only by either a command line code path or through a app which has already
asserted permissions
'''
# remove all memberships
instructors = Group.objects.get(name=get_course_groupname_for_role(location, INSTRUCTOR_ROLE_NAME))
for user in instructors.user_set.all():
@@ -75,13 +71,11 @@ def _delete_course_group(location):
user.groups.remove(staff)
user.save()
'''
This is to be called only by either a command line code path or through an app which has already
asserted permissions to do this action
'''
def _copy_course_group(source, dest):
'''
This is to be called only by either a command line code path or through an app which has already
asserted permissions to do this action
'''
instructors = Group.objects.get(name=get_course_groupname_for_role(source, INSTRUCTOR_ROLE_NAME))
new_instructors_group = Group.objects.get(name=get_course_groupname_for_role(dest, INSTRUCTOR_ROLE_NAME))
for user in instructors.user_set.all():

View File

@@ -1,5 +1,5 @@
#pylint: disable=C0111
#pylint: disable=W0621
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from nose.tools import assert_true
@@ -20,7 +20,7 @@ COURSE_ORG = 'MITx'
########### STEP HELPERS ##############
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(step):
def i_visit_the_studio_homepage(_step):
# To make this go to port 8001, put
# LETTUCE_SERVER_PORT = 8001
# in your settings.py file.
@@ -30,17 +30,17 @@ def i_visit_the_studio_homepage(step):
@step('I am logged into Studio$')
def i_am_logged_into_studio(step):
def i_am_logged_into_studio(_step):
log_into_studio()
@step('I confirm the alert$')
def i_confirm_with_ok(step):
def i_confirm_with_ok(_step):
world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(step, category):
def i_press_the_category_delete_icon(_step, category):
if category == 'section':
css = 'a.delete-button.delete-section-button span.delete-icon'
elif category == 'subsection':
@@ -51,7 +51,7 @@ def i_press_the_category_delete_icon(step, category):
@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(step):
def i_have_opened_a_new_course(_step):
open_new_course()
@@ -78,7 +78,6 @@ def create_studio_user(
registration.register(studio_user)
registration.activate()
def fill_in_course_info(
name=COURSE_NAME,
org=COURSE_ORG,
@@ -149,6 +148,7 @@ def set_date_and_time(date_css, desired_date, time_css, desired_time):
world.css_fill(date_css, desired_date)
# hit TAB to get to the time field
e = world.css_find(date_css).first
# pylint: disable=W0212
e._element.send_keys(Keys.TAB)
world.css_fill(time_css, desired_time)
e = world.css_find(time_css).first

View File

@@ -1,5 +1,5 @@
#pylint: disable=C0111
#pylint: disable=W0621
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from common import *
@@ -8,7 +8,7 @@ from nose.tools import assert_equal
############### ACTIONS ####################
@step('I click the new section link$')
@step('I click the New Section link$')
def i_click_new_section_link(_step):
link_css = 'a.new-courseware-section-button'
world.css_click(link_css)

View File

@@ -1,5 +1,5 @@
#pylint: disable=C0111
#pylint: disable=W0621
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from common import *

View File

@@ -6,13 +6,13 @@ from lettuce import world, step
@step('when I view the video it does not have autoplay enabled')
def does_not_autoplay(step):
def does_not_autoplay(_step):
assert world.css_find('.video')[0]['data-autoplay'] == 'False'
assert world.css_find('.video_control')[0].has_class('play')
@step('creating a video takes a single click')
def video_takes_a_single_click(step):
def video_takes_a_single_click(_step):
assert(not world.is_css_present('.xmodule_VideoModule'))
world.css_click("a[data-location='i4x://edx/templates/video/default']")
assert(world.is_css_present('.xmodule_VideoModule'))

View File

@@ -39,10 +39,7 @@ def get_module_info(store, location, parent_location=None, rewrite_static_links=
def set_module_info(store, location, post_data):
module = None
try:
if location.revision is None:
module = store.get_item(location)
else:
module = store.get_item(location)
module = store.get_item(location)
except:
pass

View File

@@ -99,6 +99,7 @@ class ChecklistTestCase(CourseTestCase):
'name': self.course.location.name,
'checklist_index': 2})
def get_first_item(checklist):
return checklist['items'][0]

View File

@@ -132,7 +132,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
# just pick one vertical
descriptor = store.get_items(Location('i4x', 'edX', 'simple', 'vertical', None, None))[0]
location = descriptor.location._replace(name='.' + descriptor.location.name)
location = descriptor.location.replace(name='.' + descriptor.location.name)
resp = self.client.get(reverse('edit_unit', kwargs={'location': location.url()}))
self.assertEqual(resp.status_code, 400)
@@ -224,7 +224,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
draft_store.clone_item(html_module.location, html_module.location)
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
new_graceperiod = timedelta(**{'hours': 1})
new_graceperiod = timedelta(hours=1)
self.assertNotIn('graceperiod', own_metadata(html_module))
html_module.lms.graceperiod = new_graceperiod
@@ -369,7 +369,6 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
'''
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['full'])
effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None]))
self.assertEqual(effort.data, '6 hours')
@@ -617,12 +616,12 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
self.assertEqual(len(items), 0)
def verify_content_existence(self, modulestore, root_dir, location, dirname, category_name, filename_suffix=''):
def verify_content_existence(self, store, root_dir, location, dirname, category_name, filename_suffix=''):
filesystem = OSFS(root_dir / 'test_export')
self.assertTrue(filesystem.exists(dirname))
query_loc = Location('i4x', location.org, location.course, category_name, None)
items = modulestore.get_items(query_loc)
items = store.get_items(query_loc)
for item in items:
filesystem = OSFS(root_dir / ('test_export/' + dirname))
@@ -768,7 +767,6 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
def test_prefetch_children(self):
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['full'])
location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
wrapper = MongoCollectionFindWrapper(module_store.collection.find)
@@ -864,7 +862,7 @@ class ContentStoreTest(ModuleStoreTestCase):
def test_create_course_duplicate_course(self):
"""Test new course creation - error path"""
resp = self.client.post(reverse('create_new_course'), self.course_data)
self.client.post(reverse('create_new_course'), self.course_data)
resp = self.client.post(reverse('create_new_course'), self.course_data)
data = parse_json(resp)
self.assertEqual(resp.status_code, 200)
@@ -872,7 +870,7 @@ class ContentStoreTest(ModuleStoreTestCase):
def test_create_course_duplicate_number(self):
"""Test new course creation - error path"""
resp = self.client.post(reverse('create_new_course'), self.course_data)
self.client.post(reverse('create_new_course'), self.course_data)
self.course_data['display_name'] = 'Robot Super Course Two'
resp = self.client.post(reverse('create_new_course'), self.course_data)
@@ -1090,11 +1088,9 @@ class ContentStoreTest(ModuleStoreTestCase):
json.dumps({'id': del_loc.url()}), "application/json")
self.assertEqual(200, resp.status_code)
def test_import_metadata_with_attempts_empty_string(self):
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['simple'])
did_load_item = False
try:
module_store.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None]))

View File

@@ -224,14 +224,14 @@ def add_extra_panel_tab(tab_type, course):
@param course: A course object from the modulestore.
@return: Boolean indicating whether or not a tab was added and a list of tabs for the course.
"""
#Copy course tabs
# Copy course tabs
course_tabs = copy.copy(course.tabs)
changed = False
#Check to see if open ended panel is defined in the course
# Check to see if open ended panel is defined in the course
tab_panel = EXTRA_TAB_PANELS.get(tab_type)
if tab_panel not in course_tabs:
#Add panel to the tabs if it is not defined
# Add panel to the tabs if it is not defined
course_tabs.append(tab_panel)
changed = True
return changed, course_tabs
@@ -244,14 +244,14 @@ def remove_extra_panel_tab(tab_type, course):
@param course: A course object from the modulestore.
@return: Boolean indicating whether or not a tab was added and a list of tabs for the course.
"""
#Copy course tabs
# Copy course tabs
course_tabs = copy.copy(course.tabs)
changed = False
#Check to see if open ended panel is defined in the course
# Check to see if open ended panel is defined in the course
tab_panel = EXTRA_TAB_PANELS.get(tab_type)
if tab_panel in course_tabs:
#Add panel to the tabs if it is not defined
# Add panel to the tabs if it is not defined
course_tabs = [ct for ct in course_tabs if ct != tab_panel]
changed = True
return changed, course_tabs

View File

@@ -12,8 +12,8 @@ from django.core.urlresolvers import reverse
from mitxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
from xmodule.modulestore.exceptions import ItemNotFoundError, \
InvalidLocationError
from xmodule.modulestore import Location
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
@@ -33,9 +33,6 @@ from .component import OPEN_ENDED_COMPONENT_TYPES, \
from django_comment_common.utils import seed_permissions_roles
import datetime
from django.utils.timezone import UTC
# TODO: should explicitly enumerate exports with __all__
__all__ = ['course_index', 'create_new_course', 'course_info',
'course_info_updates', 'get_course_settings',
'course_config_graders_page',

View File

@@ -103,7 +103,7 @@ def clone_item(request):
@expect_json
def delete_item(request):
item_location = request.POST['id']
item_loc = Location(item_location)
item_location = Location(item_location)
# check permissions for this user within this course
if not has_access(request.user, item_location):
@@ -124,11 +124,11 @@ def delete_item(request):
# cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
if delete_all_versions:
parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
parent_locs = modulestore('direct').get_parent_locations(item_location, None)
for parent_loc in parent_locs:
parent = modulestore('direct').get_item(parent_loc)
item_url = item_loc.url()
item_url = item_location.url()
if item_url in parent.children:
children = parent.children
children.remove(item_url)

View File

@@ -41,25 +41,25 @@ class CourseDetails(object):
course.enrollment_start = descriptor.enrollment_start
course.enrollment_end = descriptor.enrollment_end
temploc = course_location._replace(category='about', name='syllabus')
temploc = course_location.replace(category='about', name='syllabus')
try:
course.syllabus = get_modulestore(temploc).get_item(temploc).data
except ItemNotFoundError:
pass
temploc = temploc._replace(name='overview')
temploc = temploc.replace(name='overview')
try:
course.overview = get_modulestore(temploc).get_item(temploc).data
except ItemNotFoundError:
pass
temploc = temploc._replace(name='effort')
temploc = temploc.replace(name='effort')
try:
course.effort = get_modulestore(temploc).get_item(temploc).data
except ItemNotFoundError:
pass
temploc = temploc._replace(name='video')
temploc = temploc.replace(name='video')
try:
raw_video = get_modulestore(temploc).get_item(temploc).data
course.intro_video = CourseDetails.parse_video_tag(raw_video)
@@ -126,16 +126,16 @@ class CourseDetails(object):
# NOTE: below auto writes to the db w/o verifying that any of the fields actually changed
# to make faster, could compare against db or could have client send over a list of which fields changed.
temploc = Location(course_location)._replace(category='about', name='syllabus')
temploc = Location(course_location).replace(category='about', name='syllabus')
update_item(temploc, jsondict['syllabus'])
temploc = temploc._replace(name='overview')
temploc = temploc.replace(name='overview')
update_item(temploc, jsondict['overview'])
temploc = temploc._replace(name='effort')
temploc = temploc.replace(name='effort')
update_item(temploc, jsondict['effort'])
temploc = temploc._replace(name='video')
temploc = temploc.replace(name='video')
recomposed_video_tag = CourseDetails.recompose_video_tag(jsondict['intro_video'])
update_item(temploc, recomposed_video_tag)
@@ -174,10 +174,10 @@ class CourseDetails(object):
return result
# TODO move to a more general util? Is there a better way to do the isinstance model check?
# TODO move to a more general util?
class CourseSettingsEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, CourseDetails) or isinstance(obj, course_grading.CourseGradingModel):
if isinstance(obj, (CourseDetails, course_grading.CourseGradingModel)):
return obj.__dict__
elif isinstance(obj, Location):
return obj.dict()

View File

@@ -235,8 +235,7 @@ PIPELINE_JS = {
'source_filenames': sorted(
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/**/*.js') +
rooted_glob(PROJECT_ROOT / 'static/', 'coffee/src/**/*.js')
) + ['js/hesitate.js', 'js/base.js',
'js/models/feedback.js', 'js/views/feedback.js',
) + ['js/hesitate.js', 'js/base.js', 'js/views/feedback.js',
'js/models/section.js', 'js/views/section.js',
'js/models/metadata_model.js', 'js/views/metadata_editor_view.js',
'js/views/assets.js'],

View File

@@ -7,6 +7,7 @@
"js/vendor/jquery.cookie.js",
"js/vendor/json2.js",
"js/vendor/underscore-min.js",
"js/vendor/underscore.string.min.js",
"js/vendor/backbone-min.js",
"js/vendor/jquery.leanModal.min.js",
"js/vendor/sinon-1.7.1.js",

View File

@@ -1,34 +0,0 @@
describe "CMS.Models.SystemFeedback", ->
beforeEach ->
@model = new CMS.Models.SystemFeedback()
it "should have an empty message by default", ->
expect(@model.get("message")).toEqual("")
it "should have an empty title by default", ->
expect(@model.get("title")).toEqual("")
it "should not have an intent set by default", ->
expect(@model.get("intent")).toBeNull()
describe "CMS.Models.WarningMessage", ->
beforeEach ->
@model = new CMS.Models.WarningMessage()
it "should have the correct intent", ->
expect(@model.get("intent")).toEqual("warning")
describe "CMS.Models.ErrorMessage", ->
beforeEach ->
@model = new CMS.Models.ErrorMessage()
it "should have the correct intent", ->
expect(@model.get("intent")).toEqual("error")
describe "CMS.Models.ConfirmationMessage", ->
beforeEach ->
@model = new CMS.Models.ConfirmationMessage()
it "should have the correct intent", ->
expect(@model.get("intent")).toEqual("confirmation")

View File

@@ -18,79 +18,105 @@ beforeEach ->
else
return trimmedText.indexOf(text) != -1;
describe "CMS.Views.Alert as base class", ->
describe "CMS.Views.SystemFeedback", ->
beforeEach ->
@model = new CMS.Models.ConfirmationMessage({
@options =
title: "Portal"
message: "Welcome to the Aperture Science Computer-Aided Enrichment Center"
})
# it will be interesting to see when this.render is called, so lets spy on it
spyOn(CMS.Views.Alert.prototype, 'render').andCallThrough()
@renderSpy = spyOn(CMS.Views.Alert.Confirmation.prototype, 'render').andCallThrough()
@showSpy = spyOn(CMS.Views.Alert.Confirmation.prototype, 'show').andCallThrough()
@hideSpy = spyOn(CMS.Views.Alert.Confirmation.prototype, 'hide').andCallThrough()
it "renders on initalize", ->
view = new CMS.Views.Alert({model: @model})
expect(view.render).toHaveBeenCalled()
it "requires a type and an intent", ->
neither = =>
new CMS.Views.SystemFeedback(@options)
noType = =>
options = $.extend({}, @options)
options.intent = "confirmation"
new CMS.Views.SystemFeedback(options)
noIntent = =>
options = $.extend({}, @options)
options.type = "alert"
new CMS.Views.SystemFeedback(options)
both = =>
options = $.extend({}, @options)
options.type = "alert"
options.intent = "confirmation"
new CMS.Views.SystemFeedback(options)
expect(neither).toThrow()
expect(noType).toThrow()
expect(noIntent).toThrow()
expect(both).not.toThrow()
# for simplicity, we'll use CMS.Views.Alert.Confirmation from here on,
# which extends and proxies to CMS.Views.SystemFeedback
it "does not show on initalize", ->
view = new CMS.Views.Alert.Confirmation(@options)
expect(@renderSpy).not.toHaveBeenCalled()
expect(@showSpy).not.toHaveBeenCalled()
it "renders the template", ->
view = new CMS.Views.Alert({model: @model})
view = new CMS.Views.Alert.Confirmation(@options)
view.show()
expect(view.$(".action-close")).toBeDefined()
expect(view.$('.wrapper')).toBeShown()
expect(view.$el).toContainText(@model.get("title"))
expect(view.$el).toContainText(@model.get("message"))
expect(view.$el).toContainText(@options.title)
expect(view.$el).toContainText(@options.message)
it "close button sends a .hide() message", ->
spyOn(CMS.Views.Alert.prototype, 'hide').andCallThrough()
view = new CMS.Views.Alert({model: @model})
view = new CMS.Views.Alert.Confirmation(@options).show()
view.$(".action-close").click()
expect(CMS.Views.Alert.prototype.hide).toHaveBeenCalled()
expect(@hideSpy).toHaveBeenCalled()
expect(view.$('.wrapper')).toBeHiding()
describe "CMS.Views.Prompt", ->
beforeEach ->
@model = new CMS.Models.ConfirmationMessage({
title: "Portal"
message: "Welcome to the Aperture Science Computer-Aided Enrichment Center"
})
# for some reason, expect($("body")) blows up the test runner, so this test
# just exercises the Prompt rather than asserting on anything. Best I can
# do for now. :(
it "changes class on body", ->
# expect($("body")).not.toHaveClass("prompt-is-shown")
view = new CMS.Views.Prompt({model: @model})
view = new CMS.Views.Prompt.Confirmation({
title: "Portal"
message: "Welcome to the Aperture Science Computer-Aided Enrichment Center"
})
# expect($("body")).toHaveClass("prompt-is-shown")
view.hide()
# expect($("body")).not.toHaveClass("prompt-is-shown")
describe "CMS.Views.Alert click events", ->
describe "CMS.Views.SystemFeedback click events", ->
beforeEach ->
@model = new CMS.Models.WarningMessage(
@primaryClickSpy = jasmine.createSpy('primaryClick')
@secondaryClickSpy = jasmine.createSpy('secondaryClick')
@view = new CMS.Views.Notification.Warning(
title: "Unsaved",
message: "Your content is currently Unsaved.",
actions:
primary:
text: "Save",
class: "save-button",
click: jasmine.createSpy('primaryClick')
click: @primaryClickSpy
secondary: [{
text: "Revert",
class: "cancel-button",
click: jasmine.createSpy('secondaryClick')
click: @secondaryClickSpy
}]
)
@view = new CMS.Views.Alert({model: @model})
@view.show()
it "should trigger the primary event on a primary click", ->
@view.primaryClick()
expect(@model.get('actions').primary.click).toHaveBeenCalled()
@view.$(".action-primary").click()
expect(@primaryClickSpy).toHaveBeenCalled()
expect(@secondaryClickSpy).not.toHaveBeenCalled()
it "should trigger the secondary event on a secondary click", ->
@view.secondaryClick()
expect(@model.get('actions').secondary[0].click).toHaveBeenCalled()
@view.$(".action-secondary").click()
expect(@secondaryClickSpy).toHaveBeenCalled()
expect(@primaryClickSpy).not.toHaveBeenCalled()
it "should apply class to primary action", ->
expect(@view.$(".action-primary")).toHaveClass("save-button")
@@ -100,20 +126,18 @@ describe "CMS.Views.Alert click events", ->
describe "CMS.Views.Notification minShown and maxShown", ->
beforeEach ->
@model = new CMS.Models.SystemFeedback(
intent: "saving"
title: "Saving"
)
spyOn(CMS.Views.Notification.prototype, 'show').andCallThrough()
spyOn(CMS.Views.Notification.prototype, 'hide').andCallThrough()
@showSpy = spyOn(CMS.Views.Notification.Saving.prototype, 'show')
@showSpy.andCallThrough()
@hideSpy = spyOn(CMS.Views.Notification.Saving.prototype, 'hide')
@hideSpy.andCallThrough()
@clock = sinon.useFakeTimers()
afterEach ->
@clock.restore()
it "a minShown view should not hide too quickly", ->
view = new CMS.Views.Notification({model: @model, minShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled()
view = new CMS.Views.Notification.Saving({minShown: 1000})
view.show()
expect(view.$('.wrapper')).toBeShown()
# call hide() on it, but the minShown should prevent it from hiding right away
@@ -125,8 +149,8 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding()
it "a maxShown view should hide by itself", ->
view = new CMS.Views.Notification({model: @model, maxShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled()
view = new CMS.Views.Notification.Saving({maxShown: 1000})
view.show()
expect(view.$('.wrapper')).toBeShown()
# wait for the maxShown timeout to expire, and check again
@@ -134,13 +158,13 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding()
it "a minShown view can stay visible longer", ->
view = new CMS.Views.Notification({model: @model, minShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled()
view = new CMS.Views.Notification.Saving({minShown: 1000})
view.show()
expect(view.$('.wrapper')).toBeShown()
# wait for the minShown timeout to expire, and check again
@clock.tick(1001)
expect(CMS.Views.Notification.prototype.hide).not.toHaveBeenCalled()
expect(@hideSpy).not.toHaveBeenCalled()
expect(view.$('.wrapper')).toBeShown()
# can now hide immediately
@@ -148,8 +172,8 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding()
it "a maxShown view can hide early", ->
view = new CMS.Views.Notification({model: @model, maxShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled()
view = new CMS.Views.Notification.Saving({maxShown: 1000})
view.show()
expect(view.$('.wrapper')).toBeShown()
# wait 50 milliseconds, and hide it early
@@ -162,7 +186,8 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding()
it "a view can have both maxShown and minShown", ->
view = new CMS.Views.Notification({model: @model, minShown: 1000, maxShown: 2000})
view = new CMS.Views.Notification.Saving({minShown: 1000, maxShown: 2000})
view.show()
# can't hide early
@clock.tick(50)

View File

@@ -18,11 +18,15 @@ $ ->
$(document).ajaxError (event, jqXHR, ajaxSettings, thrownError) ->
if ajaxSettings.notifyOnError is false
return
msg = new CMS.Models.ErrorMessage(
if jqXHR.responseText
message = _.str.truncate(jqXHR.responseText, 300)
else
message = gettext("This may be happening because of an error with our server or your internet connection. Try refreshing the page or making sure you are online.")
msg = new CMS.Views.Notification.Error(
"title": gettext("Studio's having trouble saving your work")
"message": jqXHR.responseText || gettext("This may be happening because of an error with our server or your internet connection. Try refreshing the page or making sure you are online.")
"message": message
)
new CMS.Views.Notification({model: msg})
msg.show()
window.onTouchBasedDevice = ->
navigator.userAgent.match /iPhone|iPod|iPad/i

View File

@@ -1,55 +0,0 @@
CMS.Models.SystemFeedback = Backbone.Model.extend({
defaults: {
"intent": null, // "warning", "confirmation", "error", "announcement", "step-required", etc
"title": "",
"message": ""
/* could also have an "actions" hash: here is an example demonstrating
the expected structure
"actions": {
"primary": {
"text": "Save",
"class": "action-save",
"click": function() {
// do something when Save is clicked
// `this` refers to the model
}
},
"secondary": [
{
"text": "Cancel",
"class": "action-cancel",
"click": function() {}
}, {
"text": "Discard Changes",
"class": "action-discard",
"click": function() {}
}
]
}
*/
}
});
CMS.Models.WarningMessage = CMS.Models.SystemFeedback.extend({
defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
"intent": "warning"
})
});
CMS.Models.ErrorMessage = CMS.Models.SystemFeedback.extend({
defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
"intent": "error"
})
});
CMS.Models.ConfirmAssetDeleteMessage = CMS.Models.SystemFeedback.extend({
defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
"intent": "warning"
})
});
CMS.Models.ConfirmationMessage = CMS.Models.SystemFeedback.extend({
defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
"intent": "confirmation"
})
});

View File

@@ -22,22 +22,16 @@ CMS.Models.Section = Backbone.Model.extend({
},
showNotification: function() {
if(!this.msg) {
this.msg = new CMS.Models.SystemFeedback({
intent: "saving",
title: gettext("Saving…")
});
}
if(!this.msgView) {
this.msgView = new CMS.Views.Notification({
model: this.msg,
this.msg = new CMS.Views.Notification.Saving({
title: gettext("Saving…"),
closeIcon: false,
minShown: 1250
});
}
this.msgView.show();
this.msg.show();
},
hideNotification: function() {
if(!this.msgView) { return; }
this.msgView.hide();
if(!this.msg) { return; }
this.msg.hide();
}
});

View File

@@ -1,39 +1,64 @@
CMS.Views.Alert = Backbone.View.extend({
CMS.Views.SystemFeedback = Backbone.View.extend({
options: {
type: "alert",
title: "",
message: "",
intent: null, // "warning", "confirmation", "error", "announcement", "step-required", etc
type: null, // "alert", "notification", or "prompt": set by subclass
shown: true, // is this view currently being shown?
icon: true, // should we render an icon related to the message intent?
closeIcon: true, // should we render a close button in the top right corner?
minShown: 0, // length of time after this view has been shown before it can be hidden (milliseconds)
maxShown: Infinity // length of time after this view has been shown before it will be automatically hidden (milliseconds)
/* could also have an "actions" hash: here is an example demonstrating
the expected structure
actions: {
primary: {
"text": "Save",
"class": "action-save",
"click": function(view) {
// do something when Save is clicked
}
},
secondary: [
{
"text": "Cancel",
"class": "action-cancel",
"click": function(view) {}
}, {
"text": "Discard Changes",
"class": "action-discard",
"click": function(view) {}
}
]
}
*/
},
initialize: function() {
if(!this.options.type) {
throw "SystemFeedback: type required (given " +
JSON.stringify(this.options) + ")";
}
if(!this.options.intent) {
throw "SystemFeedback: intent required (given " +
JSON.stringify(this.options) + ")";
}
var tpl = $("#system-feedback-tpl").text();
if(!tpl) {
console.error("Couldn't load system-feedback template");
}
this.template = _.template(tpl);
this.setElement($("#page-"+this.options.type));
this.listenTo(this.model, 'change', this.render);
return this.show();
},
render: function() {
var attrs = $.extend({}, this.options, this.model.attributes);
this.$el.html(this.template(attrs));
return this;
},
events: {
"click .action-close": "hide",
"click .action-primary": "primaryClick",
"click .action-secondary": "secondaryClick"
},
// public API: show() and hide()
show: function() {
clearTimeout(this.hideTimeout);
this.options.shown = true;
this.shownAt = new Date();
this.render();
if($.isNumeric(this.options.maxShown)) {
this.hideTimeout = setTimeout($.proxy(this.hide, this),
this.hideTimeout = setTimeout(_.bind(this.hide, this),
this.options.maxShown);
}
return this;
@@ -43,7 +68,7 @@ CMS.Views.Alert = Backbone.View.extend({
this.options.minShown > new Date() - this.shownAt)
{
clearTimeout(this.hideTimeout);
this.hideTimeout = setTimeout($.proxy(this.hide, this),
this.hideTimeout = setTimeout(_.bind(this.hide, this),
this.options.minShown - (new Date() - this.shownAt));
} else {
this.options.shown = false;
@@ -52,40 +77,63 @@ CMS.Views.Alert = Backbone.View.extend({
}
return this;
},
primaryClick: function() {
var actions = this.model.get("actions");
// the rest of the API should be considered semi-private
events: {
"click .action-close": "hide",
"click .action-primary": "primaryClick",
"click .action-secondary": "secondaryClick"
},
render: function() {
// there can be only one active view of a given type at a time: only
// one alert, only one notification, only one prompt. Therefore, we'll
// use a singleton approach.
var parent = CMS.Views[_.str.capitalize(this.options.type)];
if(parent && parent.active && parent.active !== this) {
parent.active.stopListening();
}
this.$el.html(this.template(this.options));
parent.active = this;
return this;
},
primaryClick: function(event) {
var actions = this.options.actions;
if(!actions) { return; }
var primary = actions.primary;
if(!primary) { return; }
if(primary.click) {
primary.click.call(this.model, this);
primary.click.call(event.target, this, event);
}
},
secondaryClick: function(e) {
var actions = this.model.get("actions");
secondaryClick: function(event) {
var actions = this.options.actions;
if(!actions) { return; }
var secondaryList = actions.secondary;
if(!secondaryList) { return; }
// which secondary action was clicked?
var i = 0; // default to the first secondary action (easier for testing)
if(e && e.target) {
i = _.indexOf(this.$(".action-secondary"), e.target);
if(event && event.target) {
i = _.indexOf(this.$(".action-secondary"), event.target);
}
var secondary = this.model.get("actions").secondary[i];
var secondary = secondaryList[i];
if(secondary.click) {
secondary.click.call(this.model, this);
secondary.click.call(event.target, this, event);
}
}
});
CMS.Views.Notification = CMS.Views.Alert.extend({
options: $.extend({}, CMS.Views.Alert.prototype.options, {
CMS.Views.Alert = CMS.Views.SystemFeedback.extend({
options: $.extend({}, CMS.Views.SystemFeedback.prototype.options, {
type: "alert"
})
});
CMS.Views.Notification = CMS.Views.SystemFeedback.extend({
options: $.extend({}, CMS.Views.SystemFeedback.prototype.options, {
type: "notification",
closeIcon: false
})
});
CMS.Views.Prompt = CMS.Views.Alert.extend({
options: $.extend({}, CMS.Views.Alert.prototype.options, {
CMS.Views.Prompt = CMS.Views.SystemFeedback.extend({
options: $.extend({}, CMS.Views.SystemFeedback.prototype.options, {
type: "prompt",
closeIcon: false,
icon: false
@@ -98,6 +146,27 @@ CMS.Views.Prompt = CMS.Views.Alert.extend({
$body.removeClass('prompt-is-shown');
}
// super() in Javascript has awkward syntax :(
return CMS.Views.Alert.prototype.render.apply(this, arguments);
return CMS.Views.SystemFeedback.prototype.render.apply(this, arguments);
}
});
// create CMS.Views.Alert.Warning, CMS.Views.Notification.Confirmation,
// CMS.Views.Prompt.StepRequired, etc
var capitalCamel, types, intents;
capitalCamel = _.compose(_.str.capitalize, _.str.camelize);
types = ["alert", "notification", "prompt"];
intents = ["warning", "error", "confirmation", "announcement", "step-required", "help", "saving"];
_.each(types, function(type) {
_.each(intents, function(intent) {
// "class" is a reserved word in Javascript, so use "klass" instead
var klass, subklass;
klass = CMS.Views[capitalCamel(type)];
subklass = klass.extend({
options: $.extend({}, klass.prototype.options, {
type: type,
intent: intent
})
});
klass[capitalCamel(intent)] = subklass;
});
});

View File

@@ -67,7 +67,7 @@ CMS.Views.SectionEdit = Backbone.View.extend({
showInvalidMessage: function(model, error, options) {
model.set("name", model.previous("name"));
var that = this;
var msg = new CMS.Models.ErrorMessage({
var prompt = new CMS.Views.Prompt.Error({
title: gettext("Your change could not be saved"),
message: error,
actions: {
@@ -80,6 +80,6 @@ CMS.Views.SectionEdit = Backbone.View.extend({
}
}
});
new CMS.Views.Prompt({model: msg});
prompt.show();
}
});

View File

@@ -8,7 +8,6 @@
<%block name="jsextra">
<script src="${static.url('js/vendor/mustache.js')}"></script>
<script type="text/javascript" src="${static.url('js/views/assets.js')}"></script>
<script type='text/javascript'>
// we just want a singleton

View File

@@ -38,6 +38,7 @@
<script type="text/javascript" src="/jsi18n/"></script>
<script type="text/javascript" src="${static.url('js/vendor/json2.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/underscore-min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/underscore.string.min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/backbone-min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/markitup/jquery.markitup.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/markitup/sets/wiki/set.js')}"></script>
@@ -54,7 +55,6 @@
<script type="text/javascript" src="${static.url('js/vendor/CodeMirror/css.js')}"></script>
<script type="text/javascript" src="//www.youtube.com/player_api"></script>
<script src="${static.url('js/models/feedback.js')}"></script>
<script src="${static.url('js/views/feedback.js')}"></script>
<!-- view -->

View File

@@ -1,5 +1,5 @@
#pylint: disable=C0111
#pylint: disable=W0621
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from .factories import *

View File

@@ -1,13 +1,11 @@
import json
import logging
import os
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
@@ -22,6 +20,7 @@ LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', '
def log_event(event):
"""Write tracking event to log file, and optionally to TrackingLog model."""
event_str = json.dumps(event)
log.info(event_str[:settings.TRACK_MAX_EVENT])
if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
@@ -34,6 +33,11 @@ def log_event(event):
def user_track(request):
"""
Log when GET call to "event" URL is made by a user.
GET call should provide "event_type", "event", and "page" arguments.
"""
try: # TODO: Do the same for many of the optional META parameters
username = request.user.username
except:
@@ -50,7 +54,6 @@ def user_track(request):
except:
agent = ''
# TODO: Move a bunch of this into log_event
event = {
"username": username,
"session": scookie,
@@ -68,6 +71,7 @@ def user_track(request):
def server_track(request, event_type, event, page=None):
"""Log events related to server requests."""
try:
username = request.user.username
except:
@@ -95,9 +99,52 @@ def server_track(request, event_type, event, page=None):
log_event(event)
def task_track(request_info, task_info, event_type, event, page=None):
"""
Logs tracking information for events occuring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow().isoformat(),
"host": request_info.get('host', 'unknown')
}
log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100

View File

@@ -1,4 +1,3 @@
import re
import json
import logging
import static_replace

View File

@@ -15,25 +15,22 @@ This is used by capa_module.
from datetime import datetime
import logging
import math
import numpy
import os.path
import re
import sys
from lxml import etree
from xml.sax.saxutils import unescape
from copy import deepcopy
from .correctmap import CorrectMap
import inputtypes
import customrender
from .util import contextualize_text, convert_files_to_filenames
import xqueue_interface
from capa.correctmap import CorrectMap
import capa.inputtypes as inputtypes
import capa.customrender as customrender
from capa.util import contextualize_text, convert_files_to_filenames
import capa.xqueue_interface as xqueue_interface
# to be replaced with auto-registering
import responsetypes
import safe_exec
import capa.responsetypes as responsetypes
from capa.safe_exec import safe_exec
# dict of tagname, Response Class -- this should come from auto-registering
response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
@@ -46,8 +43,8 @@ response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
# special problem tags which should be turned into innocuous HTML
html_transforms = {'problem': {'tag': 'div'},
"text": {'tag': 'span'},
"math": {'tag': 'span'},
'text': {'tag': 'span'},
'math': {'tag': 'span'},
}
# These should be removed from HTML output, including all subelements
@@ -134,7 +131,6 @@ class LoncapaProblem(object):
self.extracted_tree = self._extract_html(self.tree)
def do_reset(self):
'''
Reset internal state to unfinished, with no answers
@@ -175,7 +171,7 @@ class LoncapaProblem(object):
Return the maximum score for this problem.
'''
maxscore = 0
for response, responder in self.responders.iteritems():
for responder in self.responders.values():
maxscore += responder.get_max_score()
return maxscore
@@ -220,7 +216,7 @@ class LoncapaProblem(object):
def ungraded_response(self, xqueue_msg, queuekey):
'''
Handle any responses from the xqueue that do not contain grades
Will try to pass the queue message to all inputtypes that can handle ungraded responses
Will try to pass the queue message to all inputtypes that can handle ungraded responses
Does not return any value
'''
@@ -230,7 +226,6 @@ class LoncapaProblem(object):
if hasattr(the_input, 'ungraded_response'):
the_input.ungraded_response(xqueue_msg, queuekey)
def is_queued(self):
'''
Returns True if any part of the problem has been submitted to an external queue
@@ -238,7 +233,6 @@ class LoncapaProblem(object):
'''
return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)
def get_recentmost_queuetime(self):
'''
Returns a DateTime object that represents the timestamp of the most recent
@@ -256,11 +250,11 @@ class LoncapaProblem(object):
return max(queuetimes)
def grade_answers(self, answers):
'''
Grade student responses. Called by capa_module.check_problem.
answers is a dict of all the entries from request.POST, but with the first part
`answers` is a dict of all the entries from request.POST, but with the first part
of each key removed (the string before the first "_").
Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
@@ -270,24 +264,72 @@ class LoncapaProblem(object):
# if answers include File objects, convert them to filenames.
self.student_answers = convert_files_to_filenames(answers)
return self._grade_answers(answers)
def supports_rescoring(self):
"""
Checks that the current problem definition permits rescoring.
More precisely, it checks that there are no response types in
the current problem that are not fully supported (yet) for rescoring.
This includes responsetypes for which the student's answer
is not properly stored in state, i.e. file submissions. At present,
we have no way to know if an existing response was actually a real
answer or merely the filename of a file submitted as an answer.
It turns out that because rescoring is a background task, limiting
it to responsetypes that don't support file submissions also means
that the responsetypes are synchronous. This is convenient as it
permits rescoring to be complete when the rescoring call returns.
"""
return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
def rescore_existing_answers(self):
"""
Rescore student responses. Called by capa_module.rescore_problem.
"""
return self._grade_answers(None)
def _grade_answers(self, student_answers):
"""
Internal grading call used for checking new 'student_answers' and also
rescoring existing student_answers.
For new student_answers being graded, `student_answers` is a dict of all the
entries from request.POST, but with the first part of each key removed
(the string before the first "_"). Thus, for example,
input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.
For rescoring, `student_answers` is None.
Calls the Response for each question in this problem, to do the actual grading.
"""
# old CorrectMap
oldcmap = self.correct_map
# start new with empty CorrectMap
newcmap = CorrectMap()
# log.debug('Responders: %s' % self.responders)
# Call each responsetype instance to do actual grading
for responder in self.responders.values():
# File objects are passed only if responsetype explicitly allows for file
# submissions
if 'filesubmission' in responder.allowed_inputfields:
results = responder.evaluate_answers(answers, oldcmap)
# File objects are passed only if responsetype explicitly allows
# for file submissions. But we have no way of knowing if
# student_answers contains a proper answer or the filename of
# an earlier submission, so for now skip these entirely.
# TODO: figure out where to get file submissions when rescoring.
if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
raise Exception("Cannot rescore problems with possible file submissions")
# use 'student_answers' only if it is provided, and if it might contain a file
# submission that would not exist in the persisted "student_answers".
if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
results = responder.evaluate_answers(student_answers, oldcmap)
else:
results = responder.evaluate_answers(convert_files_to_filenames(answers), oldcmap)
results = responder.evaluate_answers(self.student_answers, oldcmap)
newcmap.update(results)
self.correct_map = newcmap
# log.debug('%s: in grade_answers, answers=%s, cmap=%s' % (self,answers,newcmap))
return newcmap
def get_question_answers(self):
@@ -331,7 +373,6 @@ class LoncapaProblem(object):
html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
return html
def handle_input_ajax(self, get):
'''
InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data
@@ -348,8 +389,6 @@ class LoncapaProblem(object):
log.warning("Could not find matching input for id: %s" % input_id)
return {}
# ======= Private Methods Below ========
def _process_includes(self):
@@ -359,16 +398,16 @@ class LoncapaProblem(object):
'''
includes = self.tree.findall('.//include')
for inc in includes:
file = inc.get('file')
if file is not None:
filename = inc.get('file')
if filename is not None:
try:
# open using ModuleSystem OSFS filestore
ifp = self.system.filestore.open(file)
ifp = self.system.filestore.open(filename)
except Exception as err:
log.warning('Error %s in problem xml include: %s' % (
err, etree.tostring(inc, pretty_print=True)))
log.warning('Cannot find file %s in %s' % (
file, self.system.filestore))
filename, self.system.filestore))
# if debugging, don't fail - just log error
# TODO (vshnayder): need real error handling, display to users
if not self.system.get('DEBUG'):
@@ -381,7 +420,7 @@ class LoncapaProblem(object):
except Exception as err:
log.warning('Error %s in problem xml include: %s' % (
err, etree.tostring(inc, pretty_print=True)))
log.warning('Cannot parse XML in %s' % (file))
log.warning('Cannot parse XML in %s' % (filename))
# if debugging, don't fail - just log error
# TODO (vshnayder): same as above
if not self.system.get('DEBUG'):
@@ -389,11 +428,11 @@ class LoncapaProblem(object):
else:
continue
# insert new XML into tree in place of inlcude
# insert new XML into tree in place of include
parent = inc.getparent()
parent.insert(parent.index(inc), incxml)
parent.remove(inc)
log.debug('Included %s into %s' % (file, self.problem_id))
log.debug('Included %s into %s' % (filename, self.problem_id))
def _extract_system_path(self, script):
"""
@@ -463,7 +502,7 @@ class LoncapaProblem(object):
if all_code:
try:
safe_exec.safe_exec(
safe_exec(
all_code,
context,
random_seed=self.seed,
@@ -519,18 +558,18 @@ class LoncapaProblem(object):
value = ""
if self.student_answers and problemid in self.student_answers:
value = self.student_answers[problemid]
if input_id not in self.input_state:
self.input_state[input_id] = {}
# do the rendering
state = {'value': value,
'status': status,
'id': input_id,
'input_state': self.input_state[input_id],
'feedback': {'message': msg,
'hint': hint,
'hintmode': hintmode, }}
'status': status,
'id': input_id,
'input_state': self.input_state[input_id],
'feedback': {'message': msg,
'hint': hint,
'hintmode': hintmode, }}
input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)
# save the input type so that we can make ajax calls on it if we need to
@@ -554,7 +593,7 @@ class LoncapaProblem(object):
for item in problemtree:
item_xhtml = self._extract_html(item)
if item_xhtml is not None:
tree.append(item_xhtml)
tree.append(item_xhtml)
if tree.tag in html_transforms:
tree.tag = html_transforms[problemtree.tag]['tag']

View File

@@ -4,7 +4,6 @@ Tests of responsetypes
from datetime import datetime
import json
from nose.plugins.skip import SkipTest
import os
import random
import unittest
@@ -56,9 +55,18 @@ class ResponseTest(unittest.TestCase):
self.assertEqual(result, 'incorrect',
msg="%s should be marked incorrect" % str(input_str))
def _get_random_number_code(self):
"""Returns code to be used to generate a random result."""
return "str(random.randint(0, 1e9))"
def _get_random_number_result(self, seed_value):
"""Returns a result that should be generated using the random_number_code."""
rand = random.Random(seed_value)
return str(rand.randint(0, 1e9))
class MultiChoiceResponseTest(ResponseTest):
from response_xml_factory import MultipleChoiceResponseXMLFactory
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
xml_factory_class = MultipleChoiceResponseXMLFactory
def test_multiple_choice_grade(self):
@@ -80,7 +88,7 @@ class MultiChoiceResponseTest(ResponseTest):
class TrueFalseResponseTest(ResponseTest):
from response_xml_factory import TrueFalseResponseXMLFactory
from capa.tests.response_xml_factory import TrueFalseResponseXMLFactory
xml_factory_class = TrueFalseResponseXMLFactory
def test_true_false_grade(self):
@@ -120,7 +128,7 @@ class TrueFalseResponseTest(ResponseTest):
class ImageResponseTest(ResponseTest):
from response_xml_factory import ImageResponseXMLFactory
from capa.tests.response_xml_factory import ImageResponseXMLFactory
xml_factory_class = ImageResponseXMLFactory
def test_rectangle_grade(self):
@@ -184,7 +192,7 @@ class ImageResponseTest(ResponseTest):
class SymbolicResponseTest(ResponseTest):
from response_xml_factory import SymbolicResponseXMLFactory
from capa.tests.response_xml_factory import SymbolicResponseXMLFactory
xml_factory_class = SymbolicResponseXMLFactory
def test_grade_single_input(self):
@@ -224,8 +232,8 @@ class SymbolicResponseTest(ResponseTest):
def test_complex_number_grade(self):
problem = self.build_problem(math_display=True,
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
options=["matrix", "imaginary"])
expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
options=["matrix", "imaginary"])
# For LaTeX-style inputs, symmath_check() will try to contact
# a server to convert the input to MathML.
@@ -312,16 +320,16 @@ class SymbolicResponseTest(ResponseTest):
# Should not allow multiple inputs, since we specify
# only one "expect" value
with self.assertRaises(Exception):
problem = self.build_problem(math_display=True,
expect="2*x+3*y",
num_inputs=3)
self.build_problem(math_display=True,
expect="2*x+3*y",
num_inputs=3)
def _assert_symbolic_grade(self, problem,
student_input,
dynamath_input,
expected_correctness):
student_input,
dynamath_input,
expected_correctness):
input_dict = {'1_2_1': str(student_input),
'1_2_1_dynamath': str(dynamath_input)}
'1_2_1_dynamath': str(dynamath_input)}
correct_map = problem.grade_answers(input_dict)
@@ -330,7 +338,7 @@ class SymbolicResponseTest(ResponseTest):
class OptionResponseTest(ResponseTest):
from response_xml_factory import OptionResponseXMLFactory
from capa.tests.response_xml_factory import OptionResponseXMLFactory
xml_factory_class = OptionResponseXMLFactory
def test_grade(self):
@@ -350,7 +358,7 @@ class FormulaResponseTest(ResponseTest):
"""
Test the FormulaResponse class
"""
from response_xml_factory import FormulaResponseXMLFactory
from capa.tests.response_xml_factory import FormulaResponseXMLFactory
xml_factory_class = FormulaResponseXMLFactory
def test_grade(self):
@@ -570,7 +578,7 @@ class FormulaResponseTest(ResponseTest):
class StringResponseTest(ResponseTest):
from response_xml_factory import StringResponseXMLFactory
from capa.tests.response_xml_factory import StringResponseXMLFactory
xml_factory_class = StringResponseXMLFactory
def test_case_sensitive(self):
@@ -647,19 +655,18 @@ class StringResponseTest(ResponseTest):
hintfn="gimme_a_random_hint",
script=textwrap.dedent("""
def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap):
answer = str(random.randint(0, 1e9))
answer = {code}
new_cmap.set_hint_and_mode(answer_ids[0], answer, "always")
""")
""".format(code=self._get_random_number_code()))
)
correct_map = problem.grade_answers({'1_2_1': '2'})
hint = correct_map.get_hint('1_2_1')
r = random.Random(problem.seed)
self.assertEqual(hint, str(r.randint(0, 1e9)))
self.assertEqual(hint, self._get_random_number_result(problem.seed))
class CodeResponseTest(ResponseTest):
from response_xml_factory import CodeResponseXMLFactory
from capa.tests.response_xml_factory import CodeResponseXMLFactory
xml_factory_class = CodeResponseXMLFactory
def setUp(self):
@@ -673,6 +680,7 @@ class CodeResponseTest(ResponseTest):
@staticmethod
def make_queuestate(key, time):
"""Create queuestate dict"""
timestr = datetime.strftime(time, dateformat)
return {'key': key, 'time': timestr}
@@ -710,7 +718,7 @@ class CodeResponseTest(ResponseTest):
old_cmap = CorrectMap()
for i, answer_id in enumerate(answer_ids):
queuekey = 1000 + i
queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now())
queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now())
old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
# Message format common to external graders
@@ -771,7 +779,7 @@ class CodeResponseTest(ResponseTest):
for i, answer_id in enumerate(answer_ids):
queuekey = 1000 + i
latest_timestamp = datetime.now()
queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp)
queuestate = CodeResponseTest.make_queuestate(queuekey, latest_timestamp)
cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
self.problem.correct_map.update(cmap)
@@ -796,7 +804,7 @@ class CodeResponseTest(ResponseTest):
class ChoiceResponseTest(ResponseTest):
from response_xml_factory import ChoiceResponseXMLFactory
from capa.tests.response_xml_factory import ChoiceResponseXMLFactory
xml_factory_class = ChoiceResponseXMLFactory
def test_radio_group_grade(self):
@@ -828,7 +836,7 @@ class ChoiceResponseTest(ResponseTest):
class JavascriptResponseTest(ResponseTest):
from response_xml_factory import JavascriptResponseXMLFactory
from capa.tests.response_xml_factory import JavascriptResponseXMLFactory
xml_factory_class = JavascriptResponseXMLFactory
def test_grade(self):
@@ -858,7 +866,7 @@ class JavascriptResponseTest(ResponseTest):
system.can_execute_unsafe_code = lambda: False
with self.assertRaises(LoncapaProblemError):
problem = self.build_problem(
self.build_problem(
system=system,
generator_src="test_problem_generator.js",
grader_src="test_problem_grader.js",
@@ -869,7 +877,7 @@ class JavascriptResponseTest(ResponseTest):
class NumericalResponseTest(ResponseTest):
from response_xml_factory import NumericalResponseXMLFactory
from capa.tests.response_xml_factory import NumericalResponseXMLFactory
xml_factory_class = NumericalResponseXMLFactory
def test_grade_exact(self):
@@ -961,7 +969,7 @@ class NumericalResponseTest(ResponseTest):
class CustomResponseTest(ResponseTest):
from response_xml_factory import CustomResponseXMLFactory
from capa.tests.response_xml_factory import CustomResponseXMLFactory
xml_factory_class = CustomResponseXMLFactory
def test_inline_code(self):
@@ -1000,15 +1008,14 @@ class CustomResponseTest(ResponseTest):
def test_inline_randomization(self):
# Make sure the seed from the problem gets fed into the script execution.
inline_script = """messages[0] = str(random.randint(0, 1e9))"""
inline_script = "messages[0] = {code}".format(code=self._get_random_number_code())
problem = self.build_problem(answer=inline_script)
input_dict = {'1_2_1': '0'}
correctmap = problem.grade_answers(input_dict)
input_msg = correctmap.get_msg('1_2_1')
r = random.Random(problem.seed)
self.assertEqual(input_msg, str(r.randint(0, 1e9)))
self.assertEqual(input_msg, self._get_random_number_result(problem.seed))
def test_function_code_single_input(self):
# For function code, we pass in these arguments:
@@ -1241,25 +1248,23 @@ class CustomResponseTest(ResponseTest):
def test_setup_randomization(self):
# Ensure that the problem setup script gets the random seed from the problem.
script = textwrap.dedent("""
num = random.randint(0, 1e9)
""")
num = {code}
""".format(code=self._get_random_number_code()))
problem = self.build_problem(script=script)
r = random.Random(problem.seed)
self.assertEqual(r.randint(0, 1e9), problem.context['num'])
self.assertEqual(problem.context['num'], self._get_random_number_result(problem.seed))
def test_check_function_randomization(self):
# The check function should get random-seeded from the problem.
script = textwrap.dedent("""
def check_func(expect, answer_given):
return {'ok': True, 'msg': str(random.randint(0, 1e9))}
""")
return {{'ok': True, 'msg': {code} }}
""".format(code=self._get_random_number_code()))
problem = self.build_problem(script=script, cfn="check_func", expect="42")
input_dict = {'1_2_1': '42'}
correct_map = problem.grade_answers(input_dict)
msg = correct_map.get_msg('1_2_1')
r = random.Random(problem.seed)
self.assertEqual(msg, str(r.randint(0, 1e9)))
self.assertEqual(msg, self._get_random_number_result(problem.seed))
def test_module_imports_inline(self):
'''
@@ -1320,7 +1325,7 @@ class CustomResponseTest(ResponseTest):
class SchematicResponseTest(ResponseTest):
from response_xml_factory import SchematicResponseXMLFactory
from capa.tests.response_xml_factory import SchematicResponseXMLFactory
xml_factory_class = SchematicResponseXMLFactory
def test_grade(self):
@@ -1349,11 +1354,10 @@ class SchematicResponseTest(ResponseTest):
def test_check_function_randomization(self):
# The check function should get a random seed from the problem.
script = "correct = ['correct' if (submission[0]['num'] == random.randint(0, 1e9)) else 'incorrect']"
script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format(code=self._get_random_number_code())
problem = self.build_problem(answer=script)
r = random.Random(problem.seed)
submission_dict = {'num': r.randint(0, 1e9)}
submission_dict = {'num': self._get_random_number_result(problem.seed)}
input_dict = {'1_2_1': json.dumps(submission_dict)}
correct_map = problem.grade_answers(input_dict)
@@ -1372,7 +1376,7 @@ class SchematicResponseTest(ResponseTest):
class AnnotationResponseTest(ResponseTest):
from response_xml_factory import AnnotationResponseXMLFactory
from capa.tests.response_xml_factory import AnnotationResponseXMLFactory
xml_factory_class = AnnotationResponseXMLFactory
def test_grade(self):
@@ -1393,7 +1397,7 @@ class AnnotationResponseTest(ResponseTest):
{'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
]
for (index, test) in enumerate(tests):
for test in tests:
expected_correctness = test['correctness']
expected_points = test['points']
answers = test['answers']

View File

@@ -424,7 +424,7 @@ class CapaModule(CapaFields, XModule):
# If we cannot construct the problem HTML,
# then generate an error message instead.
except Exception, err:
except Exception as err:
html = self.handle_problem_html_error(err)
# The convention is to pass the name of the check button
@@ -655,7 +655,7 @@ class CapaModule(CapaFields, XModule):
@staticmethod
def make_dict_of_responses(get):
'''Make dictionary of student responses (aka "answers")
get is POST dictionary (Djano QueryDict).
get is POST dictionary (Django QueryDict).
The *get* dict has keys of the form 'x_y', which are mapped
to key 'y' in the returned dict. For example,
@@ -739,13 +739,13 @@ class CapaModule(CapaFields, XModule):
# Too late. Cannot submit
if self.closed():
event_info['failure'] = 'closed'
self.system.track_function('save_problem_check_fail', event_info)
self.system.track_function('problem_check_fail', event_info)
raise NotFoundError('Problem is closed')
# Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == "always":
event_info['failure'] = 'unreset'
self.system.track_function('save_problem_check_fail', event_info)
self.system.track_function('problem_check_fail', event_info)
raise NotFoundError('Problem must be reset before it can be checked again')
# Problem queued. Students must wait a specified waittime before they are allowed to submit
@@ -759,6 +759,8 @@ class CapaModule(CapaFields, XModule):
try:
correct_map = self.lcp.grade_answers(answers)
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
@@ -778,17 +780,13 @@ class CapaModule(CapaFields, XModule):
return {'success': msg}
except Exception, err:
except Exception as err:
if self.system.DEBUG:
msg = "Error checking problem: " + str(err)
msg += '\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.publish_grade()
# success = correct if ALL questions in this problem are correct
@@ -802,7 +800,7 @@ class CapaModule(CapaFields, XModule):
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.system.track_function('save_problem_check', event_info)
self.system.track_function('problem_check', event_info)
if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback
self.system.psychometrics_handler(self.get_state_for_lcp())
@@ -814,12 +812,92 @@ class CapaModule(CapaFields, XModule):
'contents': html,
}
def rescore_problem(self):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the rescoring, rather than throwing them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.url()}
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.system.track_function('problem_rescore_fail', event_info)
raise NotImplementedError("Problem's definition does not support rescoring")
if not self.done:
event_info['failure'] = 'unanswered'
self.system.track_function('problem_rescore_fail', event_info)
raise NotFoundError('Problem must be answered before it can be graded again')
# get old score, for comparison:
orig_score = self.lcp.get_score()
event_info['orig_score'] = orig_score['score']
event_info['orig_total'] = orig_score['total']
try:
correct_map = self.lcp.rescore_existing_answers()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.system.track_function('problem_rescore_fail', event_info)
return {'success': u"Error: {0}".format(inst.message)}
except Exception as err:
event_info['failure'] = 'unexpected'
self.system.track_function('problem_rescore_fail', event_info)
if self.system.DEBUG:
msg = u"Error checking problem: {0}".format(err.message)
msg += u'\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade()
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
event_info['new_total'] = new_score['total']
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.system.track_function('problem_rescore', event_info)
# psychometrics should be called on rescoring requests in the same way as check-problem
if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback
self.system.psychometrics_handler(self.get_state_for_lcp())
return {'success': success}
def save_problem(self, get):
'''
"""
Save the passed in answers.
Returns a dict { 'success' : bool, ['error' : error-msg]},
with the error key only present if success is False.
'''
Returns a dict { 'success' : bool, 'msg' : message }
The message is informative on success, and an error message on failure.
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.url()

View File

@@ -58,7 +58,7 @@ class CombinedOpenEndedFields(object):
state = String(help="Which step within the current task that the student is on.", default="initial",
scope=Scope.user_state)
student_attempts = Integer(help="Number of attempts taken by the student on this problem", default=0,
scope=Scope.user_state)
scope=Scope.user_state)
ready_to_reset = Boolean(
help="If the problem is ready to be reset or not.", default=False,
scope=Scope.user_state
@@ -66,7 +66,7 @@ class CombinedOpenEndedFields(object):
attempts = Integer(
display_name="Maximum Attempts",
help="The number of times the student can try to answer this problem.", default=1,
scope=Scope.settings, values = {"min" : 1 }
scope=Scope.settings, values={"min" : 1 }
)
is_graded = Boolean(display_name="Graded", help="Whether or not the problem is graded.", default=False, scope=Scope.settings)
accept_file_upload = Boolean(
@@ -89,7 +89,7 @@ class CombinedOpenEndedFields(object):
weight = Float(
display_name="Problem Weight",
help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
scope=Scope.settings, values = {"min" : 0 , "step": ".1"}
scope=Scope.settings, values={"min" : 0 , "step": ".1"}
)
markdown = String(help="Markdown source of this module", scope=Scope.settings)

View File

@@ -77,10 +77,8 @@ class Date(ModelType):
else:
return value.isoformat()
TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')
class Timedelta(ModelType):
def from_json(self, time_str):
"""

View File

@@ -84,7 +84,7 @@ class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule):
xml = html.fromstring(html_string)
#substitute plot, if presented
# substitute plot, if presented
plot_div = '<div class="{element_class}_plot" id="{element_id}_plot" \
style="{style}"></div>'
plot_el = xml.xpath('//plot')
@@ -95,7 +95,7 @@ class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule):
element_id=self.html_id,
style=plot_el.get('style', ""))))
#substitute sliders
# substitute sliders
slider_div = '<div class="{element_class}_slider" \
id="{element_id}_slider_{var}" \
data-var="{var}" \

View File

@@ -57,7 +57,7 @@ class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
if path.endswith('.html.xml'):
path = path[:-9] + '.html' # backcompat--look for html instead of xml
if path.endswith('.html.html'):
path = path[:-5] # some people like to include .html in filenames..
path = path[:-5] # some people like to include .html in filenames..
candidates = []
while os.sep in path:
candidates.append(path)
@@ -100,9 +100,9 @@ class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
pointer_path = "{category}/{url_path}".format(category='html',
url_path=name_to_pathname(location.name))
base = path(pointer_path).dirname()
#log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
# log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
filepath = "{base}/{name}.html".format(base=base, name=filename)
#log.debug("looking for html file for {0} at {1}".format(location, filepath))
# log.debug("looking for html file for {0} at {1}".format(location, filepath))
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
@@ -111,7 +111,7 @@ class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath):
candidates = cls.backcompat_paths(filepath)
#log.debug("candidates = {0}".format(candidates))
# log.debug("candidates = {0}".format(candidates))
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate

View File

@@ -196,7 +196,7 @@ class Location(_LocationBase):
raise InvalidLocationError(location)
if len(location) == 5:
args = tuple(location) + (None, )
args = tuple(location) + (None,)
else:
args = tuple(location)
@@ -415,7 +415,7 @@ class ModuleStoreBase(ModuleStore):
'''
Set up the error-tracking logic.
'''
self._location_errors = {} # location -> ErrorLog
self._location_errors = {} # location -> ErrorLog
self.metadata_inheritance_cache = None
self.modulestore_update_signal = None # can be set by runtime to route notifications of datastore changes
@@ -440,7 +440,7 @@ class ModuleStoreBase(ModuleStore):
"""
# check that item is present and raise the promised exceptions if needed
# TODO (vshnayder): post-launch, make errors properties of items
#self.get_item(location)
# self.get_item(location)
errorlog = self._get_errorlog(location)
return errorlog.errors

View File

@@ -15,14 +15,14 @@ def as_draft(location):
"""
Returns the Location that is the draft for `location`
"""
return Location(location)._replace(revision=DRAFT)
return Location(location).replace(revision=DRAFT)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return Location(location)._replace(revision=None)
return Location(location).replace(revision=None)
def wrap_draft(item):
@@ -32,7 +32,7 @@ def wrap_draft(item):
non-draft location in either case
"""
setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location._replace(revision=None)
item.location = item.location.replace(revision=None)
return item
@@ -234,7 +234,7 @@ class DraftModuleStore(ModuleStoreBase):
# always return the draft - if available
for draft in to_process_drafts:
draft_loc = Location(draft["_id"])
draft_as_non_draft_loc = draft_loc._replace(revision=None)
draft_as_non_draft_loc = draft_loc.replace(revision=None)
# does non-draft exist in the collection
# if so, replace it

View File

@@ -307,7 +307,7 @@ class MongoModuleStore(ModuleStoreBase):
location = Location(result['_id'])
# We need to collate between draft and non-draft
# i.e. draft verticals can have children which are not in non-draft versions
location = location._replace(revision=None)
location = location.replace(revision=None)
location_url = location.url()
if location_url in results_by_url:
existing_children = results_by_url[location_url].get('definition', {}).get('children', [])

View File

@@ -19,18 +19,18 @@ log = logging.getLogger("mitx.courseware")
# attempts specified in xml definition overrides this.
MAX_ATTEMPTS = 1
#The highest score allowed for the overall xmodule and for each rubric point
# The highest score allowed for the overall xmodule and for each rubric point
MAX_SCORE_ALLOWED = 50
#If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress
#Metadata overrides this.
# If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress
# Metadata overrides this.
IS_SCORED = False
#If true, then default behavior is to require a file upload or pasted link from a student for this problem.
#Metadata overrides this.
# If true, then default behavior is to require a file upload or pasted link from a student for this problem.
# Metadata overrides this.
ACCEPT_FILE_UPLOAD = False
#Contains all reasonable bool and case combinations of True
# Contains all reasonable bool and case combinations of True
TRUE_DICT = ["True", True, "TRUE", "true"]
HUMAN_TASK_TYPE = {
@@ -38,8 +38,8 @@ HUMAN_TASK_TYPE = {
'openended': "edX Assessment",
}
#Default value that controls whether or not to skip basic spelling checks in the controller
#Metadata overrides this
# Default value that controls whether or not to skip basic spelling checks in the controller
# Metadata overrides this
SKIP_BASIC_CHECKS = False
@@ -74,7 +74,7 @@ class CombinedOpenEndedV1Module():
INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done'
#Where the templates live for this problem
# Where the templates live for this problem
TEMPLATE_DIR = "combinedopenended"
def __init__(self, system, location, definition, descriptor,
@@ -118,21 +118,21 @@ class CombinedOpenEndedV1Module():
self.instance_state = instance_state
self.display_name = instance_state.get('display_name', "Open Ended")
#We need to set the location here so the child modules can use it
# We need to set the location here so the child modules can use it
system.set('location', location)
self.system = system
#Tells the system which xml definition to load
# Tells the system which xml definition to load
self.current_task_number = instance_state.get('current_task_number', 0)
#This loads the states of the individual children
# This loads the states of the individual children
self.task_states = instance_state.get('task_states', [])
#Overall state of the combined open ended module
# Overall state of the combined open ended module
self.state = instance_state.get('state', self.INITIAL)
self.student_attempts = instance_state.get('student_attempts', 0)
self.weight = instance_state.get('weight', 1)
#Allow reset is true if student has failed the criteria to move to the next child task
# Allow reset is true if student has failed the criteria to move to the next child task
self.ready_to_reset = instance_state.get('ready_to_reset', False)
self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS)
self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT
@@ -153,7 +153,7 @@ class CombinedOpenEndedV1Module():
rubric_string = stringify_children(definition['rubric'])
self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
#Static data is passed to the child modules to render
# Static data is passed to the child modules to render
self.static_data = {
'max_score': self._max_score,
'max_attempts': self.attempts,
@@ -243,11 +243,11 @@ class CombinedOpenEndedV1Module():
self.current_task_descriptor = children['descriptors'][current_task_type](self.system)
#This is the xml object created from the xml definition of the current task
# This is the xml object created from the xml definition of the current task
etree_xml = etree.fromstring(self.current_task_xml)
#This sends the etree_xml object through the descriptor module of the current task, and
#returns the xml parsed by the descriptor
# This sends the etree_xml object through the descriptor module of the current task, and
# returns the xml parsed by the descriptor
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0:
self.current_task = child_task_module(self.system, self.location,
@@ -293,8 +293,9 @@ class CombinedOpenEndedV1Module():
if self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number)
if (current_response_data['min_score_to_attempt'] > last_response_data['score']
or current_response_data['max_score_to_attempt'] < last_response_data['score']):
or current_response_data['max_score_to_attempt'] < last_response_data['score']):
self.state = self.DONE
self.ready_to_reset = True
@@ -307,7 +308,7 @@ class CombinedOpenEndedV1Module():
Output: A dictionary that can be rendered into the combined open ended template.
"""
task_html = self.get_html_base()
#set context variables and render template
# set context variables and render template
context = {
'items': [{'content': task_html}],
@@ -499,7 +500,6 @@ class CombinedOpenEndedV1Module():
"""
changed = self.update_task_states()
if changed:
#return_html=self.get_html()
pass
return return_html
@@ -730,15 +730,15 @@ class CombinedOpenEndedV1Module():
max_score = None
score = None
if self.is_scored and self.weight is not None:
#Finds the maximum score of all student attempts and keeps it.
# Finds the maximum score of all student attempts and keeps it.
score_mat = []
for i in xrange(0, len(self.task_states)):
#For each task, extract all student scores on that task (each attempt for each task)
# For each task, extract all student scores on that task (each attempt for each task)
last_response = self.get_last_response(i)
max_score = last_response.get('max_score', None)
score = last_response.get('all_scores', None)
if score is not None:
#Convert none scores and weight scores properly
# Convert none scores and weight scores properly
for z in xrange(0, len(score)):
if score[z] is None:
score[z] = 0
@@ -746,19 +746,19 @@ class CombinedOpenEndedV1Module():
score_mat.append(score)
if len(score_mat) > 0:
#Currently, assume that the final step is the correct one, and that those are the final scores.
#This will change in the future, which is why the machinery above exists to extract all scores on all steps
#TODO: better final score handling.
# Currently, assume that the final step is the correct one, and that those are the final scores.
# This will change in the future, which is why the machinery above exists to extract all scores on all steps
# TODO: better final score handling.
scores = score_mat[-1]
score = max(scores)
else:
score = 0
if max_score is not None:
#Weight the max score if it is not None
# Weight the max score if it is not None
max_score *= float(self.weight)
else:
#Without a max_score, we cannot have a score!
# Without a max_score, we cannot have a score!
score = None
score_dict = {
@@ -833,7 +833,7 @@ class CombinedOpenEndedV1Descriptor():
expected_children = ['task', 'rubric', 'prompt']
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error
# This is a staff_facing_error
raise ValueError(
"Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. {1}".format(
child, xml_object))
@@ -848,6 +848,7 @@ class CombinedOpenEndedV1Descriptor():
return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('combinedopenended')

View File

@@ -57,13 +57,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE)
self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
#This is needed to attach feedback to specific responses later
# This is needed to attach feedback to specific responses later
self.submission_id = None
self.grader_id = None
error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance."
if oeparam is None:
#This is a staff_facing_error
# This is a staff_facing_error
raise ValueError(error_message.format('oeparam'))
if self.child_prompt is None:
raise ValueError(error_message.format('prompt'))
@@ -95,14 +95,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
grader_payload = oeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
#Update grader payload with student id. If grader payload not json, error.
# Update grader payload with student id. If grader payload not json, error.
try:
parsed_grader_payload = json.loads(grader_payload)
# NOTE: self.system.location is valid because the capa_module
# __init__ adds it (easiest way to get problem location into
# response types)
except TypeError, ValueError:
#This is a dev_facing_error
# This is a dev_facing_error
log.exception(
"Grader payload from external open ended grading server is not a json object! Object: {0}".format(
grader_payload))
@@ -148,7 +148,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
#This is a student_facing_error
# This is a student_facing_error
return {'success': False,
'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
tag)}
@@ -158,14 +158,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score'])
except:
#This is a dev_facing_error
# This is a dev_facing_error
error_message = (
"Could not parse submission id, grader id, "
"or feedback from message_post ajax call. "
"Here is the message data: {0}".format(survey_responses)
)
log.exception(error_message)
#This is a student_facing_error
# This is a student_facing_error
return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."}
xqueue = system.get('xqueue')
@@ -201,14 +201,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
body=json.dumps(contents)
)
#Convert error to a success value
# Convert error to a success value
success = True
if error:
success = False
self.child_state = self.DONE
#This is a student_facing_message
# This is a student_facing_message
return {'success': success, 'msg': "Successfully submitted your feedback."}
def send_to_grader(self, submission, system):
@@ -249,7 +249,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'submission_time': qtime,
}
#Update contents with student response and student info
# Update contents with student response and student info
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
@@ -369,21 +369,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
if tag not in response_items:
#This is a student_facing_error
# This is a student_facing_error
return format_feedback('errors', 'Error getting feedback from grader.')
feedback_items = response_items['feedback']
try:
feedback = json.loads(feedback_items)
except (TypeError, ValueError):
#This is a dev_facing_error
# This is a dev_facing_error
log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items))
#This is a student_facing_error
# This is a student_facing_error
return format_feedback('errors', 'Error getting feedback from grader.')
if response_items['success']:
if len(feedback) == 0:
#This is a student_facing_error
# This is a student_facing_error
return format_feedback('errors', 'No feedback available from grader.')
for tag in do_not_render:
@@ -393,7 +393,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
feedback_lst = sorted(feedback.items(), key=get_priority)
feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
else:
#This is a student_facing_error
# This is a student_facing_error
feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
@@ -470,7 +470,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
#This is a dev_facing_error
# This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
log.error(error_message)
@@ -478,7 +478,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return fail
if not isinstance(score_result, dict):
#This is a dev_facing_error
# This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
log.error(error_message)
@@ -487,13 +487,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result:
#This is a dev_facing_error
# This is a dev_facing_error
error_message = ("External open ended grader message is missing required tag: {0}"
.format(tag))
log.error(error_message)
fail['feedback'] = error_message
return fail
#This is to support peer grading
# This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
rubric_scores = []
@@ -529,7 +529,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
feedback = feedback_items
score = int(median(score_result['score']))
else:
#This is for instructor and ML grading
# This is for instructor and ML grading
feedback, rubric_score = self._format_feedback(score_result, system)
score = score_result['score']
rubric_scores = [rubric_score]
@@ -608,9 +608,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
}
if dispatch not in handlers:
#This is a dev_facing_error
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
# This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
@@ -659,10 +659,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.send_to_grader(get['student_answer'], system)
self.change_state(self.ASSESSING)
else:
#Error message already defined
# Error message already defined
success = False
else:
#This is a student_facing_error
# This is a student_facing_error
error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
return {
@@ -679,7 +679,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
"""
queuekey = get['queuekey']
score_msg = get['xqueue_body']
#TODO: Remove need for cmap
# TODO: Remove need for cmap
self._update_score(score_msg, queuekey, system)
return dict() # No AJAX return is needed
@@ -690,7 +690,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
Input: Modulesystem object
Output: Rendered HTML
"""
#set context variables and render template
# set context variables and render template
eta_string = None
if self.child_state != self.INITIAL:
latest = self.latest_answer()
@@ -749,7 +749,7 @@ class OpenEndedDescriptor():
"""
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
# This is a staff_facing_error
raise ValueError(
"Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))

View File

@@ -54,7 +54,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
@param system: Modulesystem
@return: Rendered HTML
"""
#set context variables and render template
# set context variables and render template
if self.child_state != self.INITIAL:
latest = self.latest_answer()
previous_answer = latest if latest is not None else ''
@@ -93,9 +93,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
}
if dispatch not in handlers:
#This is a dev_facing_error
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
# This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
@@ -129,7 +129,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
elif self.child_state in (self.POST_ASSESSMENT, self.DONE):
context['read_only'] = True
else:
#This is a dev_facing_error
# This is a dev_facing_error
raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state))
return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context)
@@ -155,7 +155,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
elif self.child_state == self.DONE:
context['read_only'] = True
else:
#This is a dev_facing_error
# This is a dev_facing_error
raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state))
return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
@@ -190,10 +190,10 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
self.new_history_entry(get['student_answer'])
self.change_state(self.ASSESSING)
else:
#Error message already defined
# Error message already defined
success = False
else:
#This is a student_facing_error
# This is a student_facing_error
error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
return {
@@ -227,12 +227,12 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
for i in xrange(0, len(score_list)):
score_list[i] = int(score_list[i])
except ValueError:
#This is a dev_facing_error
# This is a dev_facing_error
log.error("Non-integer score value passed to save_assessment ,or no score list present.")
#This is a student_facing_error
# This is a student_facing_error
return {'success': False, 'error': "Error saving your score. Please notify course staff."}
#Record score as assessment and rubric scores as post assessment
# Record score as assessment and rubric scores as post assessment
self.record_latest_score(score)
self.record_latest_post_assessment(json.dumps(score_list))
@@ -272,7 +272,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
try:
rubric_scores = json.loads(latest_post_assessment)
except:
#This is a dev_facing_error
# This is a dev_facing_error
log.error("Cannot parse rubric scores in self assessment module from {0}".format(latest_post_assessment))
rubric_scores = []
return [rubric_scores]
@@ -306,7 +306,7 @@ class SelfAssessmentDescriptor():
expected_children = []
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
# This is a staff_facing_error
raise ValueError(
"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))

View File

@@ -62,7 +62,7 @@ class SequenceModule(SequenceFields, XModule):
progress = reduce(Progress.add_counts, progresses)
return progress
def handle_ajax(self, dispatch, get): # TODO: bounds checking
def handle_ajax(self, dispatch, get): # TODO: bounds checking
''' get = request.POST instance '''
if dispatch == 'goto_position':
self.position = int(get['position'])

View File

@@ -55,7 +55,7 @@ class CustomTagDescriptor(RawDescriptor):
params = dict(xmltree.items())
# cdodge: look up the template as a module
template_loc = self.location._replace(category='custom_tag_template', name=template_name)
template_loc = self.location.replace(category='custom_tag_template', name=template_name)
template_module = modulestore().get_instance(system.course_id, template_loc)
template_module_data = template_module.data

View File

@@ -19,6 +19,7 @@ from django.http import QueryDict
from . import test_system
from pytz import UTC
from capa.correctmap import CorrectMap
class CapaFactory(object):
@@ -597,6 +598,85 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success'])
def test_rescore_problem_correct(self):
# Rescoring an already-answered problem, with every answer forced to be
# correct, should report 'success' == 'correct', return no rendered HTML,
# and leave the attempt counter untouched (rescoring is not an attempt).
module = CapaFactory.create(attempts=1, done=True)
# Simulate that all answers are marked correct, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.rescore_problem()
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get no HTML
self.assertFalse('contents' in result)
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_incorrect(self):
# Rescoring with every answer forced to be incorrect should report
# 'success' == 'incorrect' and, as above, not change the attempt counter.
# Using attempts=0 also checks the case where attempts have been reset.
# make sure it also works when attempts have been reset,
# so add this to the test:
module = CapaFactory.create(attempts=0, done=True)
# Simulate that all answers are marked incorrect, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.rescore_problem()
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 0)
def test_rescore_problem_not_done(self):
# Rescoring an unanswered problem is an error: rescore_problem() is
# expected to raise NotFoundError when the problem has never been done.
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.rescore_problem()
def test_rescore_problem_not_supported(self):
# If the underlying LoncapaProblem reports that it cannot be rescored
# (supports_rescoring() is False), rescore_problem() should raise
# NotImplementedError rather than attempt a rescore.
module = CapaFactory.create(done=True)
# Try to rescore the problem, and get exception
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.rescore_problem()
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
# Any of the handled grading exceptions raised by
# rescore_existing_answers() should be converted into an AJAX-friendly
# {'success': <error message>} dict instead of propagating, and must not
# change the attempt counter. The non-ASCII character in the message
# also exercises unicode handling of the error path.
# Create the module
module = CapaFactory.create(attempts=1, done=True)
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = exception_class(u'test error \u03a9')
result = module.rescore_problem()
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: test error \u03a9'
self.assertEqual(result['success'], expected_msg)
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_student_input_error(self):
# StudentInputError during rescoring should surface as an AJAX error message.
self._rescore_problem_error_helper(StudentInputError)
def test_rescore_problem_problem_error(self):
# LoncapaProblemError during rescoring should surface as an AJAX error message.
self._rescore_problem_error_helper(LoncapaProblemError)
def test_rescore_problem_response_error(self):
# ResponseError during rescoring should surface as an AJAX error message.
self._rescore_problem_error_helper(ResponseError)
def test_save_problem(self):
module = CapaFactory.create(done=False)

View File

@@ -20,7 +20,7 @@ from . import test_system
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
@patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules)
@@ -41,7 +41,8 @@ class DummySystem(ImportSystem):
)
def render_template(self, template, context):
raise Exception("Shouldn't be called")
raise Exception("Shouldn't be called")
class ConditionalFactory(object):
"""
@@ -93,7 +94,7 @@ class ConditionalFactory(object):
# return dict:
return {'cond_module': cond_module,
'source_module': source_module,
'child_module': child_module }
'child_module': child_module}
class ConditionalModuleBasicTest(unittest.TestCase):
@@ -109,12 +110,11 @@ class ConditionalModuleBasicTest(unittest.TestCase):
'''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]:
for icon_class in [ 'other', 'problem', 'video']:
for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self):
modules = ConditionalFactory.create(self.test_system)
# because test_system returns the repr of the context dict passed to render_template,
@@ -224,4 +224,3 @@ class ConditionalModuleXmlTest(unittest.TestCase):
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))

View File

@@ -15,7 +15,7 @@ from xblock.core import XBlock, Scope, String, Integer, Float, ModelType
log = logging.getLogger(__name__)
def dummy_track(event_type, event):
def dummy_track(_event_type, _event):
pass
@@ -231,7 +231,7 @@ class XModule(XModuleFields, HTMLSnippet, XBlock):
'''
return self.icon_class
### Functions used in the LMS
# Functions used in the LMS
def get_score(self):
"""
@@ -272,7 +272,7 @@ class XModule(XModuleFields, HTMLSnippet, XBlock):
'''
return None
def handle_ajax(self, dispatch, get):
def handle_ajax(self, _dispatch, _get):
''' dispatch is last part of the URL.
get is a dictionary-like object '''
return ""
@@ -647,13 +647,13 @@ class XModuleDescriptor(XModuleFields, HTMLSnippet, ResourceTemplates, XBlock):
# 1. A select editor for fields with a list of possible values (includes Booleans).
# 2. Number editors for integers and floats.
# 3. A generic string editor for anything else (editing JSON representation of the value).
type = "Generic"
editor_type = "Generic"
values = [] if field.values is None else copy.deepcopy(field.values)
if isinstance(values, tuple):
values = list(values)
if isinstance(values, list):
if len(values) > 0:
type = "Select"
editor_type = "Select"
for index, choice in enumerate(values):
json_choice = copy.deepcopy(choice)
if isinstance(json_choice, dict) and 'value' in json_choice:
@@ -662,11 +662,11 @@ class XModuleDescriptor(XModuleFields, HTMLSnippet, ResourceTemplates, XBlock):
json_choice = field.to_json(json_choice)
values[index] = json_choice
elif isinstance(field, Integer):
type = "Integer"
editor_type = "Integer"
elif isinstance(field, Float):
type = "Float"
editor_type = "Float"
metadata_fields[field.name] = {'field_name': field.name,
'type': type,
'type': editor_type,
'display_name': field.display_name,
'value': field.to_json(value),
'options': values,
@@ -862,7 +862,7 @@ class ModuleSystem(object):
class DoNothingCache(object):
"""A duck-compatible object to use in ModuleSystem when there's no cache."""
def get(self, key):
def get(self, _key):
return None
def set(self, key, value, timeout=None):

View File

@@ -56,7 +56,6 @@ def get_metadata_from_xml(xml_object, remove=True):
if meta is None:
return ''
dmdata = meta.text
#log.debug('meta for %s loaded: %s' % (xml_object,dmdata))
if remove:
xml_object.remove(meta)
return dmdata

View File

@@ -3,6 +3,11 @@ describe 'Logger', ->
expect(window.log_event).toBe Logger.log
describe 'log', ->
it 'sends an event to Segment.io, if the event is whitelisted', ->
spyOn(analytics, 'track')
Logger.log 'seq_goto', 'data'
expect(analytics.track).toHaveBeenCalledWith 'seq_goto', 'data'
it 'send a request to log event', ->
spyOn $, 'getWithPrefix'
Logger.log 'example', 'data'

View File

@@ -1,5 +1,12 @@
class @Logger
# events we want sent to Segment.io for tracking
SEGMENT_IO_WHITELIST = ["seq_goto", "seq_next", "seq_prev"]
@log: (event_type, data) ->
if event_type in SEGMENT_IO_WHITELIST
# Segment.io event tracking
analytics.track event_type, data
$.getWithPrefix '/event',
event_type: event_type
event: JSON.stringify(data)

5538
common/static/js/vendor/analytics.js vendored Normal file
View File

@@ -0,0 +1,5538 @@
;(function(){
/**
 * Require the module registered at the given `path`.
 *
 * @param {String} path module id to resolve and load
 * @param {String} [parent] id of the requiring module (error reporting only)
 * @param {String} [orig] original un-resolved path (error reporting only)
 * @return {Object} exports
 * @api public
 */

function require(path, parent, orig) {
  var resolved = require.resolve(path);

  // lookup failed
  if (null == resolved) {
    orig = orig || path;
    parent = parent || 'root';
    var err = new Error('Failed to require "' + orig + '" from "' + parent + '"');
    err.path = orig;
    err.parent = parent;
    err.require = true;
    throw err;
  }

  var module = require.modules[resolved];

  // perform real require()
  // by invoking the module's
  // registered function.  The definition function doubles as the module
  // object: `exports` is cached directly on it, so subsequent requires of
  // the same id return the memoized exports without re-running the body.
  if (!module.exports) {
    module.exports = {};
    module.client = module.component = true;
    module.call(this, module.exports, require.relative(resolved), module);
  }

  return module.exports;
}
/**
 * Registered modules.
 * Maps module id -> definition function (which also carries the cached
 * `exports` after the first require; see `require()` above).
 */

require.modules = {};

/**
 * Registered aliases.
 * Maps alias id -> canonical module id.
 */

require.aliases = {};
/**
 * Resolve `path` to a registered module id or alias.
 *
 * Lookup order:
 *
 *   - PATH
 *   - PATH.js
 *   - PATH.json
 *   - PATH/index.js
 *   - PATH/index.json
 *
 * @param {String} path
 * @return {String} resolved id, or undefined when nothing matches
 *   (the JSDoc above the caller says "null"; the code actually falls off
 *   the end and yields undefined — `require()` checks with `null ==`,
 *   which matches both)
 * @api private
 */

require.resolve = function(path) {
  // leading slash means "absolute within the bundle": strip it
  if (path.charAt(0) === '/') path = path.slice(1);

  var paths = [
    path,
    path + '.js',
    path + '.json',
    path + '/index.js',
    path + '/index.json'
  ];

  for (var i = 0; i < paths.length; i++) {
    // NOTE: `var path` re-declares the parameter — with var hoisting this
    // is the same binding being reassigned, which is harmless here but
    // deliberate-looking shadowing; left as-is (vendored code).
    var path = paths[i];
    if (require.modules.hasOwnProperty(path)) return path;
    if (require.aliases.hasOwnProperty(path)) return require.aliases[path];
  }
};
/**
 * Normalize `path` relative to the current path `curr`.
 *
 * Non-relative paths (not starting with '.') pass through unchanged.
 * '..' segments pop from `curr`; '.' and empty segments are dropped.
 *
 * @param {String} curr
 * @param {String} path
 * @return {String}
 * @api private
 */

require.normalize = function(curr, path) {
  var segs = [];

  // absolute / bare ids are returned untouched
  if ('.' != path.charAt(0)) return path;

  curr = curr.split('/');
  path = path.split('/');

  for (var i = 0; i < path.length; ++i) {
    if ('..' == path[i]) {
      curr.pop();
    } else if ('.' != path[i] && '' != path[i]) {
      segs.push(path[i]);
    }
  }

  return curr.concat(segs).join('/');
};
/**
 * Register module at `path` with callback `definition`.
 * The definition is stored lazily; it only runs on first `require(path)`.
 *
 * @param {String} path
 * @param {Function} definition receives (exports, require, module)
 * @api private
 */

require.register = function(path, definition) {
  require.modules[path] = definition;
};
/**
 * Alias a module definition: future lookups of `to` resolve to `from`.
 *
 * @param {String} from canonical, already-registered module id
 * @param {String} to alias id
 * @throws {Error} when `from` has not been registered
 * @api private
 */

require.alias = function(from, to) {
  if (!require.modules.hasOwnProperty(from)) {
    throw new Error('Failed to alias "' + from + '", it does not exist');
  }
  require.aliases[to] = from;
};
/**
 * Return a require function relative to the `parent` path.
 * This is the `require` each module body receives; it closes over the
 * parent id so relative ids and "deps" lookups resolve correctly.
 *
 * @param {String} parent
 * @return {Function}
 * @api private
 */

require.relative = function(parent) {
  // directory of the parent module
  var p = require.normalize(parent, '..');

  /**
   * lastIndexOf helper (ES3-safe; no Array#lastIndexOf assumed).
   */

  function lastIndexOf(arr, obj) {
    var i = arr.length;
    while (i--) {
      if (arr[i] === obj) return i;
    }
    return -1;
  }

  /**
   * The relative require() itself.
   */

  function localRequire(path) {
    var resolved = localRequire.resolve(path);
    return require(resolved, parent, path);
  }

  /**
   * Resolve relative to the parent.
   */

  localRequire.resolve = function(path) {
    var c = path.charAt(0);
    if ('/' == c) return path.slice(1);
    if ('.' == c) return require.normalize(p, path);

    // resolve deps by returning
    // the dep in the nearest "deps"
    // directory
    var segs = parent.split('/');
    var i = lastIndexOf(segs, 'deps') + 1;
    // when 'deps' is absent, lastIndexOf yields -1 so i is already 0;
    // this guard is effectively a no-op kept from upstream
    if (!i) i = 0;
    path = segs.slice(0, i + 1).join('/') + '/deps/' + path;
    return path;
  };

  /**
   * Check if module is defined at `path`.
   */

  localRequire.exists = function(path) {
    return require.modules.hasOwnProperty(localRequire.resolve(path));
  };

  return localRequire;
};
require.register("avetisk-defaults/index.js", function(exports, require, module){
'use strict';

/**
 * Merge default values.
 *
 * Copies properties of `src` into `dest` only where `dest` lacks them.
 * (NOTE: upstream JSDoc names the second param "defaults"; the actual
 * signature is (dest, src, recursive).)
 *
 * @param {Object} dest mutated in place and returned
 * @param {Object} src source of default values
 * @param {Boolean} [recursive] when truthy, nested plain objects are
 *   merged recursively instead of being skipped wholesale
 * @return {Object} dest
 * @api public
 */
var defaults = function (dest, src, recursive) {
  for (var prop in src) {
    if (recursive && dest[prop] instanceof Object && src[prop] instanceof Object) {
      dest[prop] = defaults(dest[prop], src[prop], true);
    } else if (! (prop in dest)) {
      dest[prop] = src[prop];
    }
  }
  return dest;
};

/**
 * Expose `defaults`.
 */
module.exports = defaults;

});
require.register("component-clone/index.js", function(exports, require, module){
/**
 * Module dependencies.
 */

var type;

// `type` may be registered as 'type' or 'type-component' depending on how
// the bundle was built; try both.
try {
  type = require('type');
} catch(e){
  type = require('type-component');
}

/**
 * Module exports.
 */

module.exports = clone;

/**
 * Deep-clone `obj`.
 *
 * Objects, arrays, regexps and dates are copied; everything else
 * (string, number, boolean, function, null, ...) is returned as-is.
 * NOTE: no cycle detection — a circular structure recurses forever.
 *
 * @param {Mixed} any object
 * @api public
 */

function clone(obj){
  switch (type(obj)) {
    case 'object':
      var copy = {};
      for (var key in obj) {
        if (obj.hasOwnProperty(key)) {
          copy[key] = clone(obj[key]);
        }
      }
      return copy;

    case 'array':
      var copy = new Array(obj.length);
      for (var i = 0, l = obj.length; i < l; i++) {
        copy[i] = clone(obj[i]);
      }
      return copy;

    case 'regexp':
      // from millermedeiros/amd-utils - MIT
      // rebuild flags from the individual boolean accessors
      var flags = '';
      flags += obj.multiline ? 'm' : '';
      flags += obj.global ? 'g' : '';
      flags += obj.ignoreCase ? 'i' : '';
      return new RegExp(obj.source, flags);

    case 'date':
      return new Date(obj.getTime());

    default: // string, number, boolean, …
      return obj;
  }
}

});
require.register("component-cookie/index.js", function(exports, require, module){
/**
 * Encode.
 */

var encode = encodeURIComponent;

/**
 * Decode.
 */

var decode = decodeURIComponent;

/**
 * Set or get cookie `name` with `value` and `options` object.
 * Dispatch is on arity: 2-3 args = set, 1 arg = get, 0 args = all cookies.
 *
 * @param {String} name
 * @param {String} value pass null to delete the cookie
 * @param {Object} options {maxage (ms), path, domain, expires, secure}
 * @return {Mixed}
 * @api public
 */

module.exports = function(name, value, options){
  switch (arguments.length) {
    case 3:
    case 2:
      return set(name, value, options);
    case 1:
      return get(name);
    default:
      return all();
  }
};

/**
 * Set cookie `name` to `value`.
 *
 * @param {String} name
 * @param {String} value
 * @param {Object} options NOTE: mutated in place (maxage -> expires)
 * @api private
 */

function set(name, value, options) {
  options = options || {};
  var str = encode(name) + '=' + encode(value);

  // a null value means "delete": force immediate expiry
  if (null == value) options.maxage = -1;

  if (options.maxage) {
    // maxage is in milliseconds from now
    options.expires = new Date(+new Date + options.maxage);
  }

  if (options.path) str += '; path=' + options.path;
  if (options.domain) str += '; domain=' + options.domain;
  if (options.expires) str += '; expires=' + options.expires.toUTCString();
  if (options.secure) str += '; secure';

  document.cookie = str;
}

/**
 * Return all cookies as a decoded name -> value map.
 *
 * @return {Object}
 * @api private
 */

function all() {
  return parse(document.cookie);
}

/**
 * Get cookie `name`.
 *
 * @param {String} name
 * @return {String} undefined when not set
 * @api private
 */

function get(name) {
  return all()[name];
}

/**
 * Parse cookie `str` ("a=1; b=2") into an object, URI-decoding
 * both names and values.
 *
 * @param {String} str
 * @return {Object}
 * @api private
 */

function parse(str) {
  var obj = {};
  var pairs = str.split(/ *; */);
  var pair;
  // empty cookie string -> empty object
  if ('' == pairs[0]) return obj;
  for (var i = 0; i < pairs.length; ++i) {
    pair = pairs[i].split('=');
    obj[decode(pair[0])] = decode(pair[1]);
  }
  return obj;
}

});
require.register("component-each/index.js", function(exports, require, module){

/**
 * Module dependencies.
 */

var type = require('type');

/**
 * HOP reference.
 */

var has = Object.prototype.hasOwnProperty;

/**
 * Iterate the given `obj` and invoke `fn(val, i)`.
 *
 * NOTE: callback argument order is NOT uniform — arrays/strings invoke
 * fn(value, index) but plain objects invoke fn(key, value); see object()
 * below. Array-likes (objects with a numeric .length) take the array path.
 *
 * @param {String|Array|Object} obj
 * @param {Function} fn
 * @api public
 */

module.exports = function(obj, fn){
  switch (type(obj)) {
    case 'array':
      return array(obj, fn);
    case 'object':
      if ('number' == typeof obj.length) return array(obj, fn);
      return object(obj, fn);
    case 'string':
      return string(obj, fn);
  }
};

/**
 * Iterate string chars: fn(char, index).
 *
 * @param {String} obj
 * @param {Function} fn
 * @api private
 */

function string(obj, fn) {
  for (var i = 0; i < obj.length; ++i) {
    fn(obj.charAt(i), i);
  }
}

/**
 * Iterate own object keys: fn(key, value).
 *
 * @param {Object} obj
 * @param {Function} fn
 * @api private
 */

function object(obj, fn) {
  for (var key in obj) {
    if (has.call(obj, key)) {
      fn(key, obj[key]);
    }
  }
}

/**
 * Iterate array-ish: fn(value, index).
 *
 * @param {Array|Object} obj
 * @param {Function} fn
 * @api private
 */

function array(obj, fn) {
  for (var i = 0; i < obj.length; ++i) {
    fn(obj[i], i);
  }
}

});
require.register("component-event/index.js", function(exports, require, module){

/**
 * Bind `el` event `type` to `fn`.
 * Uses addEventListener when present, otherwise legacy IE attachEvent
 * (NOTE: the attachEvent path has no capture support — `capture` is
 * silently ignored there).
 *
 * @param {Element} el
 * @param {String} type
 * @param {Function} fn
 * @param {Boolean} capture
 * @return {Function} fn, so callers can keep the handle for unbind()
 * @api public
 */

exports.bind = function(el, type, fn, capture){
  if (el.addEventListener) {
    el.addEventListener(type, fn, capture || false);
  } else {
    el.attachEvent('on' + type, fn);
  }
  return fn;
};

/**
 * Unbind `el` event `type`'s callback `fn`.
 * Must be passed the same fn/capture combination used in bind().
 *
 * @param {Element} el
 * @param {String} type
 * @param {Function} fn
 * @param {Boolean} capture
 * @return {Function} fn
 * @api public
 */

exports.unbind = function(el, type, fn, capture){
  if (el.removeEventListener) {
    el.removeEventListener(type, fn, capture || false);
  } else {
    el.detachEvent('on' + type, fn);
  }
  return fn;
};

});
require.register("component-inherit/index.js", function(exports, require, module){
module.exports = function(a, b){
var fn = function(){};
fn.prototype = b.prototype;
a.prototype = new fn;
a.prototype.constructor = a;
};
});
require.register("component-object/index.js", function(exports, require, module){

/**
 * HOP ref.
 */

var has = Object.prototype.hasOwnProperty;

/**
 * Return own keys in `obj` (native Object.keys when available,
 * ES3 fallback otherwise).
 *
 * @param {Object} obj
 * @return {Array}
 * @api public
 */

exports.keys = Object.keys || function(obj){
  var keys = [];
  for (var key in obj) {
    if (has.call(obj, key)) {
      keys.push(key);
    }
  }
  return keys;
};

/**
 * Return own values in `obj`.
 *
 * @param {Object} obj
 * @return {Array}
 * @api public
 */

exports.values = function(obj){
  var vals = [];
  for (var key in obj) {
    if (has.call(obj, key)) {
      vals.push(obj[key]);
    }
  }
  return vals;
};

/**
 * Merge `b` into `a` (shallow; own properties only; mutates `a`).
 *
 * @param {Object} a
 * @param {Object} b
 * @return {Object} a
 * @api public
 */

exports.merge = function(a, b){
  for (var key in b) {
    if (has.call(b, key)) {
      a[key] = b[key];
    }
  }
  return a;
};

/**
 * Return number of own keys in `obj`.
 *
 * @param {Object} obj
 * @return {Number}
 * @api public
 */

exports.length = function(obj){
  return exports.keys(obj).length;
};

/**
 * Check if `obj` has no own keys.
 *
 * @param {Object} obj
 * @return {Boolean}
 * @api public
 */

exports.isEmpty = function(obj){
  return 0 == exports.length(obj);
};

});
require.register("component-trim/index.js", function(exports, require, module){

// Strip leading and trailing whitespace (ES3-safe String#trim stand-in),
// expressed as left-trim followed by right-trim.
function trim(str){
  return trim.right(trim.left(str));
}

// Strip leading whitespace only.
trim.left = function(str){
  return str.replace(/^\s*/, '');
};

// Strip trailing whitespace only.
trim.right = function(str){
  return str.replace(/\s*$/, '');
};

exports = module.exports = trim;

});
require.register("component-querystring/index.js", function(exports, require, module){

/**
 * Module dependencies.
 */

var trim = require('trim');

/**
 * Parse the given query `str` ("a=1&b=2", no leading '?') into an object.
 * Values are URI-decoded; a pair with no '=' gets the empty string.
 * Non-string or blank input yields {}.
 *
 * @param {String} str
 * @return {Object}
 * @api public
 */

exports.parse = function(str){
  if ('string' != typeof str) return {};

  str = trim(str);
  if ('' == str) return {};

  var obj = {};
  var pairs = str.split('&');
  for (var i = 0; i < pairs.length; i++) {
    var parts = pairs[i].split('=');
    obj[parts[0]] = null == parts[1]
      ? ''
      : decodeURIComponent(parts[1]);
  }
  return obj;
};

/**
 * Stringify the given `obj` into a query string.
 * Keys and values are URI-encoded; no hasOwnProperty guard, so inherited
 * enumerable properties are included too.
 *
 * @param {Object} obj
 * @return {String} '' for falsy input
 * @api public
 */

exports.stringify = function(obj){
  if (!obj) return '';
  var pairs = [];
  for (var key in obj) {
    pairs.push(encodeURIComponent(key) + '=' + encodeURIComponent(obj[key]));
  }
  return pairs.join('&');
};

});
require.register("component-type/index.js", function(exports, require, module){

/**
 * toString ref.
 */

var toString = Object.prototype.toString;

/**
 * Return the type of `val` as a lowercase string.
 *
 * Distinguishes 'function', 'date', 'regexp', 'arguments', 'array',
 * 'string', 'null', 'undefined', 'element' (nodeType 1), 'object';
 * everything else falls through to typeof (number, boolean, ...).
 *
 * @param {Mixed} val
 * @return {String}
 * @api public
 */

module.exports = function(val){
  switch (toString.call(val)) {
    case '[object Function]': return 'function';
    case '[object Date]': return 'date';
    case '[object RegExp]': return 'regexp';
    case '[object Arguments]': return 'arguments';
    case '[object Array]': return 'array';
    case '[object String]': return 'string';
  }

  if (val === null) return 'null';
  if (val === undefined) return 'undefined';
  if (val && val.nodeType === 1) return 'element';
  // Object(val) === val only for object-like values (boxes primitives)
  if (val === Object(val)) return 'object';

  return typeof val;
};

});
require.register("component-url/index.js", function(exports, require, module){

/**
 * Parse the given `url` by letting the browser resolve it through an
 * anchor element (DOM-dependent). Relative URLs are resolved against
 * the current document; missing pieces fall back to `location`.
 *
 * @param {String} str
 * @return {Object} {href, host, port, hash, hostname, pathname,
 *   protocol, search, query}
 * @api public
 */

exports.parse = function(url){
  var a = document.createElement('a');
  a.href = url;
  return {
    href: a.href,
    host: a.host || location.host,
    // '0' / '' port fallback — presumably normalizes old-IE default-port
    // reporting; verify against target browsers
    port: ('0' === a.port || '' === a.port) ? location.port : a.port,
    hash: a.hash,
    hostname: a.hostname || location.hostname,
    // some engines omit the leading slash on pathname; re-add it
    pathname: a.pathname.charAt(0) != '/' ? '/' + a.pathname : a.pathname,
    protocol: !a.protocol || ':' == a.protocol ? location.protocol : a.protocol,
    search: a.search,
    query: a.search.slice(1)
  };
};

/**
 * Check if `url` is absolute (protocol-relative '//...' or has '://').
 *
 * @param {String} url
 * @return {Boolean}
 * @api public
 */

exports.isAbsolute = function(url){
  return 0 == url.indexOf('//') || !!~url.indexOf('://');
};

/**
 * Check if `url` is relative.
 *
 * @param {String} url
 * @return {Boolean}
 * @api public
 */

exports.isRelative = function(url){
  return !exports.isAbsolute(url);
};

/**
 * Check if `url` is cross domain relative to the current `location`
 * (differs in hostname, port, or protocol after parsing).
 *
 * @param {String} url
 * @return {Boolean}
 * @api public
 */

exports.isCrossDomain = function(url){
  url = exports.parse(url);
  return url.hostname !== location.hostname
    || url.port !== location.port
    || url.protocol !== location.protocol;
};

});
require.register("segmentio-after/index.js", function(exports, require, module){

/**
 * Return a function that invokes `func` only on its `times`-th call
 * (earlier calls are no-ops).
 *
 * NOTE the quirk: when times <= 0 this does NOT return a wrapper — it
 * calls `func()` immediately and returns func's result instead.
 */
module.exports = function after (times, func) {
  // After 0, really?
  if (times <= 0) return func();

  // That's more like it.
  return function() {
    if (--times < 1) {
      return func.apply(this, arguments);
    }
  };
};

});
require.register("segmentio-alias/index.js", function(exports, require, module){
module.exports = function alias (object, aliases) {
// For each of our aliases, rename our object's keys.
for (var oldKey in aliases) {
var newKey = aliases[oldKey];
if (object[oldKey] !== undefined) {
object[newKey] = object[oldKey];
delete object[oldKey];
}
}
};
});
require.register("component-bind/index.js", function(exports, require, module){

/**
 * Slice reference.
 */

var slice = [].slice;

/**
 * Bind `obj` to `fn` (Function#bind stand-in with partial application).
 * A string `fn` is looked up as a method name on `obj`. Arguments after
 * the first two are prepended to each call's arguments.
 *
 * @param {Object} obj
 * @param {Function|String} fn or string
 * @return {Function}
 * @throws {Error} when fn does not resolve to a function
 * @api public
 */

module.exports = function(obj, fn){
  if ('string' == typeof fn) fn = obj[fn];
  if ('function' != typeof fn) throw new Error('bind() requires a function');
  // pre-bound (partially applied) arguments
  var args = [].slice.call(arguments, 2);
  return function(){
    return fn.apply(obj, args.concat(slice.call(arguments)));
  }
};

});
require.register("segmentio-bind-all/index.js", function(exports, require, module){

var bind = require('bind')
  , type = require('type');

/**
 * Bind every enumerable function-valued property of `obj` to `obj`
 * itself (in place), so its methods can be passed around as callbacks.
 * Returns the same `obj`.
 */
module.exports = function (obj) {
  for (var key in obj) {
    var val = obj[key];
    if (type(val) === 'function') obj[key] = bind(obj, obj[key]);
  }
  return obj;
};

});
require.register("segmentio-canonical/index.js", function(exports, require, module){

/**
 * Return the href of the page's <link rel="canonical"> tag, or
 * undefined when none exists. (The for-loop condition deliberately
 * uses assignment: it stops when tags[i] is undefined.)
 */
module.exports = function canonical () {
  var tags = document.getElementsByTagName('link');
  for (var i = 0, tag; tag = tags[i]; i++) {
    if ('canonical' == tag.getAttribute('rel')) return tag.getAttribute('href');
  }
};

});
require.register("segmentio-extend/index.js", function(exports, require, module){
module.exports = function extend (object) {
// Takes an unlimited number of extenders.
var args = Array.prototype.slice.call(arguments, 1);
// For each extender, copy their properties on our object.
for (var i = 0, source; source = args[i]; i++) {
if (!source) continue;
for (var property in source) {
object[property] = source[property];
}
}
return object;
};
});
require.register("segmentio-is-email/index.js", function(exports, require, module){
module.exports = function isEmail (string) {
return (/.+\@.+\..+/).test(string);
};
});
require.register("segmentio-is-meta/index.js", function(exports, require, module){

/**
 * Return true when mouse/keyboard event `e` carries a modifier key or
 * looks like a middle-button click (i.e. should not be treated as a
 * plain primary-button activation).
 */
module.exports = function isMeta (e) {
    if (e.metaKey || e.altKey || e.ctrlKey || e.shiftKey) return true;

    // Logic that handles checks for the middle mouse button, based
    // on [jQuery](https://github.com/jquery/jquery/blob/master/src/event.js#L466).
    // NOTE(review): `!button` is a boolean, so `(!button & 1)` etc. mix
    // logical negation with bitwise AND — for button===4 this evaluates to
    // 0 && ... and the branch returns falsy, which looks unintended versus
    // jQuery's original `button & 4` check. Left as-is (vendored); verify
    // against upstream before changing.
    var which = e.which, button = e.button;
    if (!which && button !== undefined) {
      return (!button & 1) && (!button & 2) && (button & 4);
    } else if (which === 2) {
      return true;
    }

    return false;
};

});
require.register("component-json-fallback/index.js", function(exports, require, module){
/*
json2.js
2011-10-19
Public Domain.
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
See http://www.JSON.org/js.html
This code should be minified before deployment.
See http://javascript.crockford.com/jsmin.html
USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
NOT CONTROL.
This file creates a global JSON object containing two methods: stringify
and parse.
JSON.stringify(value, replacer, space)
value any JavaScript value, usually an object or array.
replacer an optional parameter that determines how object
values are stringified for objects. It can be a
function or an array of strings.
space an optional parameter that specifies the indentation
of nested structures. If it is omitted, the text will
be packed without extra whitespace. If it is a number,
it will specify the number of spaces to indent at each
level. If it is a string (such as '\t' or '&nbsp;'),
it contains the characters used to indent at each level.
This method produces a JSON text from a JavaScript value.
When an object value is found, if the object contains a toJSON
method, its toJSON method will be called and the result will be
stringified. A toJSON method does not serialize: it returns the
value represented by the name/value pair that should be serialized,
or undefined if nothing should be serialized. The toJSON method
will be passed the key associated with the value, and this will be
bound to the value
For example, this would serialize Dates as ISO strings.
Date.prototype.toJSON = function (key) {
function f(n) {
// Format integers to have at least two digits.
return n < 10 ? '0' + n : n;
}
return this.getUTCFullYear() + '-' +
f(this.getUTCMonth() + 1) + '-' +
f(this.getUTCDate()) + 'T' +
f(this.getUTCHours()) + ':' +
f(this.getUTCMinutes()) + ':' +
f(this.getUTCSeconds()) + 'Z';
};
You can provide an optional replacer method. It will be passed the
key and value of each member, with this bound to the containing
object. The value that is returned from your method will be
serialized. If your method returns undefined, then the member will
be excluded from the serialization.
If the replacer parameter is an array of strings, then it will be
used to select the members to be serialized. It filters the results
such that only members with keys listed in the replacer array are
stringified.
Values that do not have JSON representations, such as undefined or
functions, will not be serialized. Such values in objects will be
dropped; in arrays they will be replaced with null. You can use
a replacer function to replace those with JSON values.
JSON.stringify(undefined) returns undefined.
The optional space parameter produces a stringification of the
value that is filled with line breaks and indentation to make it
easier to read.
If the space parameter is a non-empty string, then that string will
be used for indentation. If the space parameter is a number, then
the indentation will be that many spaces.
Example:
text = JSON.stringify(['e', {pluribus: 'unum'}]);
// text is '["e",{"pluribus":"unum"}]'
text = JSON.stringify(['e', {pluribus: 'unum'}], null, '\t');
// text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
text = JSON.stringify([new Date()], function (key, value) {
return this[key] instanceof Date ?
'Date(' + this[key] + ')' : value;
});
// text is '["Date(---current time---)"]'
JSON.parse(text, reviver)
This method parses a JSON text to produce an object or array.
It can throw a SyntaxError exception.
The optional reviver parameter is a function that can filter and
transform the results. It receives each of the keys and values,
and its return value is used instead of the original value.
If it returns what it received, then the structure is not modified.
If it returns undefined then the member is deleted.
Example:
// Parse the text. Values that look like ISO date strings will
// be converted to Date objects.
myData = JSON.parse(text, function (key, value) {
var a;
if (typeof value === 'string') {
a =
/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
if (a) {
return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
+a[5], +a[6]));
}
}
return value;
});
myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
var d;
if (typeof value === 'string' &&
value.slice(0, 5) === 'Date(' &&
value.slice(-1) === ')') {
d = new Date(value.slice(5, -1));
if (d) {
return d;
}
}
return value;
});
This is a reference implementation. You are free to copy, modify, or
redistribute.
*/
/*jslint evil: true, regexp: true */
/*members "", "\b", "\t", "\n", "\f", "\r", "\"", JSON, "\\", apply,
call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
lastIndex, length, parse, prototype, push, replace, slice, stringify,
test, toJSON, toString, valueOf
*/
// Create a JSON object only if one does not already exist. We create the
// methods in a closure to avoid creating global variables.
var JSON = {};
(function () {
'use strict';
function f(n) {
// Format integers to have at least two digits.
return n < 10 ? '0' + n : n;
}
if (typeof Date.prototype.toJSON !== 'function') {
Date.prototype.toJSON = function (key) {
return isFinite(this.valueOf())
? this.getUTCFullYear() + '-' +
f(this.getUTCMonth() + 1) + '-' +
f(this.getUTCDate()) + 'T' +
f(this.getUTCHours()) + ':' +
f(this.getUTCMinutes()) + ':' +
f(this.getUTCSeconds()) + 'Z'
: null;
};
String.prototype.toJSON =
Number.prototype.toJSON =
Boolean.prototype.toJSON = function (key) {
return this.valueOf();
};
}
var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
gap,
indent,
meta = { // table of character substitutions
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'"' : '\\"',
'\\': '\\\\'
},
rep;
function quote(string) {
// If the string contains no control characters, no quote characters, and no
// backslash characters, then we can safely slap some quotes around it.
// Otherwise we must also replace the offending characters with safe escape
// sequences.
escapable.lastIndex = 0;
return escapable.test(string) ? '"' + string.replace(escapable, function (a) {
var c = meta[a];
return typeof c === 'string'
? c
: '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
}) + '"' : '"' + string + '"';
}
function str(key, holder) {
// Produce a string from holder[key].
var i, // The loop counter.
k, // The member key.
v, // The member value.
length,
mind = gap,
partial,
value = holder[key];
// If the value has a toJSON method, call it to obtain a replacement value.
if (value && typeof value === 'object' &&
typeof value.toJSON === 'function') {
value = value.toJSON(key);
}
// If we were called with a replacer function, then call the replacer to
// obtain a replacement value.
if (typeof rep === 'function') {
value = rep.call(holder, key, value);
}
// What happens next depends on the value's type.
switch (typeof value) {
case 'string':
return quote(value);
case 'number':
// JSON numbers must be finite. Encode non-finite numbers as null.
return isFinite(value) ? String(value) : 'null';
case 'boolean':
case 'null':
// If the value is a boolean or null, convert it to a string. Note:
// typeof null does not produce 'null'. The case is included here in
// the remote chance that this gets fixed someday.
return String(value);
// If the type is 'object', we might be dealing with an object or an array or
// null.
case 'object':
// Due to a specification blunder in ECMAScript, typeof null is 'object',
// so watch out for that case.
if (!value) {
return 'null';
}
// Make an array to hold the partial results of stringifying this object value.
gap += indent;
partial = [];
// Is the value an array?
if (Object.prototype.toString.apply(value) === '[object Array]') {
// The value is an array. Stringify every element. Use null as a placeholder
// for non-JSON values.
length = value.length;
for (i = 0; i < length; i += 1) {
partial[i] = str(i, value) || 'null';
}
// Join all of the elements together, separated with commas, and wrap them in
// brackets.
v = partial.length === 0
? '[]'
: gap
? '[\n' + gap + partial.join(',\n' + gap) + '\n' + mind + ']'
: '[' + partial.join(',') + ']';
gap = mind;
return v;
}
// If the replacer is an array, use it to select the members to be stringified.
if (rep && typeof rep === 'object') {
length = rep.length;
for (i = 0; i < length; i += 1) {
if (typeof rep[i] === 'string') {
k = rep[i];
v = str(k, value);
if (v) {
partial.push(quote(k) + (gap ? ': ' : ':') + v);
}
}
}
} else {
// Otherwise, iterate through all of the keys in the object.
for (k in value) {
if (Object.prototype.hasOwnProperty.call(value, k)) {
v = str(k, value);
if (v) {
partial.push(quote(k) + (gap ? ': ' : ':') + v);
}
}
}
}
// Join all of the member texts together, separated with commas,
// and wrap them in braces.
v = partial.length === 0
? '{}'
: gap
? '{\n' + gap + partial.join(',\n' + gap) + '\n' + mind + '}'
: '{' + partial.join(',') + '}';
gap = mind;
return v;
}
}
// If the JSON object does not yet have a stringify method, give it one.
if (typeof JSON.stringify !== 'function') {
JSON.stringify = function (value, replacer, space) {
// The stringify method takes a value and an optional replacer, and an optional
// space parameter, and returns a JSON text. The replacer can be a function
// that can replace values, or an array of strings that will select the keys.
// A default replacer method can be provided. Use of the space parameter can
// produce text that is more easily readable.
var i;
gap = '';
indent = '';
// If the space parameter is a number, make an indent string containing that
// many spaces.
if (typeof space === 'number') {
for (i = 0; i < space; i += 1) {
indent += ' ';
}
// If the space parameter is a string, it will be used as the indent string.
} else if (typeof space === 'string') {
indent = space;
}
// If there is a replacer, it must be a function or an array.
// Otherwise, throw an error.
rep = replacer;
if (replacer && typeof replacer !== 'function' &&
(typeof replacer !== 'object' ||
typeof replacer.length !== 'number')) {
throw new Error('JSON.stringify');
}
// Make a fake root object containing our value under the key of ''.
// Return the result of stringifying the value.
return str('', {'': value});
};
}
// If the JSON object does not yet have a parse method, give it one.
if (typeof JSON.parse !== 'function') {
JSON.parse = function (text, reviver) {
// The parse method takes a text and an optional reviver function, and returns
// a JavaScript value if the text is a valid JSON text.
var j;
function walk(holder, key) {
// The walk method is used to recursively walk the resulting structure so
// that modifications can be made.
var k, v, value = holder[key];
if (value && typeof value === 'object') {
for (k in value) {
if (Object.prototype.hasOwnProperty.call(value, k)) {
v = walk(value, k);
if (v !== undefined) {
value[k] = v;
} else {
delete value[k];
}
}
}
}
return reviver.call(holder, key, value);
}
// Parsing happens in four stages. In the first stage, we replace certain
// Unicode characters with escape sequences. JavaScript handles many characters
// incorrectly, either silently deleting them, or treating them as line endings.
text = String(text);
cx.lastIndex = 0;
if (cx.test(text)) {
text = text.replace(cx, function (a) {
return '\\u' +
('0000' + a.charCodeAt(0).toString(16)).slice(-4);
});
}
// In the second stage, we run the text against regular expressions that look
// for non-JSON patterns. We are especially concerned with '()' and 'new'
// because they can cause invocation, and '=' because it can cause mutation.
// But just to be safe, we want to reject all unexpected forms.
// We split the second stage into 4 regexp operations in order to work around
// crippling inefficiencies in IE's and Safari's regexp engines. First we
// replace the JSON backslash pairs with '@' (a non-JSON character). Second, we
// replace all simple value tokens with ']' characters. Third, we delete all
// open brackets that follow a colon or comma or that begin the text. Finally,
// we look to see that the remaining characters are only whitespace or ']' or
// ',' or ':' or '{' or '}'. If that is so, then the text is safe for eval.
if (/^[\],:{}\s]*$/
.test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, '@')
.replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']')
.replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) {
// In the third stage we use the eval function to compile the text into a
// JavaScript structure. The '{' operator is subject to a syntactic ambiguity
// in JavaScript: it can begin a block or an object literal. We wrap the text
// in parens to eliminate the ambiguity.
j = eval('(' + text + ')');
// In the optional fourth stage, we recursively walk the new structure, passing
// each name/value pair to a reviver function for possible transformation.
return typeof reviver === 'function'
? walk({'': j}, '')
: j;
}
// If the text is not JSON parseable, then a SyntaxError is thrown.
throw new SyntaxError('JSON.parse');
};
}
}());
module.exports = JSON
});
require.register("segmentio-json/index.js", function(exports, require, module){
  // Prefer the browser's native JSON object; only ancient browsers
  // without one get the bundled json-fallback implementation.
  if (typeof JSON == 'undefined') {
    module.exports = require('json-fallback');
  } else {
    module.exports = JSON;
  }
});
require.register("segmentio-load-date/index.js", function(exports, require, module){
  /*
   * Load date: export the best guess at when the page's HTML arrived.
   *
   * For reference: http://www.html5rocks.com/en/tutorials/webperformance/basics/
   */
  var loadDate = new Date();
  // Prefer the Navigation Timing API's `responseEnd` when available,
  // since `new Date()` here only measures script-execution time.
  var timing = window.performance && window.performance.timing;
  if (timing && timing.responseEnd) loadDate = new Date(timing.responseEnd);
  module.exports = loadDate;
});
require.register("segmentio-load-script/index.js", function(exports, require, module){
var type = require('type');
/**
 * Asynchronously load a script by inserting a `<script>` tag, with
 * protocol fix-ups and optional load-`callback` support (old IE
 * included). Returns the created `<script>` element.
 *
 * @param {Object|String} options - a `src` string, or a dictionary with
 * `src` and optional protocol-specific `http`/`https` URLs.
 * @param {Function} callback (optional) - invoked once the script loads.
 */
module.exports = function loadScript (options, callback) {
if (!options) throw new Error('Cant load nothing...');
// Allow for the simplest case, just passing a `src` string.
if (type(options) === 'string') options = { src : options };
var https = document.location.protocol === 'https:';
// If you use protocol relative URLs, third-party scripts like Google
// Analytics break when testing with `file:` so this fixes that.
if (options.src && options.src.indexOf('//') === 0) {
options.src = https ? 'https:' + options.src : 'http:' + options.src;
}
// Allow them to pass in different URLs depending on the protocol.
if (https && options.https) options.src = options.https;
else if (!https && options.http) options.src = options.http;
// Make the `<script>` element and insert it before the first script on the
// page, which is guaranteed to exist since this Javascript is running.
var script = document.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.src = options.src;
var firstScript = document.getElementsByTagName('script')[0];
firstScript.parentNode.insertBefore(script, firstScript);
// If we have a callback, attach event handlers, even in IE. Based off of
// the Third-Party Javascript script loading example:
// https://github.com/thirdpartyjs/thirdpartyjs-code/blob/master/examples/templates/02/loading-files/index.html
if (callback && type(callback) === 'function') {
if (script.addEventListener) {
script.addEventListener('load', callback, false);
} else if (script.attachEvent) {
// Old IE has no `load` event on scripts; poll readyState instead.
script.attachEvent('onreadystatechange', function () {
if (/complete|loaded/.test(script.readyState)) callback();
});
}
}
// Return the script element in case they want to do anything special, like
// give it an ID or attributes.
return script;
};
});
require.register("segmentio-new-date/index.js", function(exports, require, module){
var type = require('type');
/**
* Returns a new Javascript Date object, allowing a variety of extra input types
* over the native one.
*
* @param {Date|String|Number} input
*/
module.exports = function newDate (input) {
// Convert input from seconds to milliseconds.
input = toMilliseconds(input);
// By default, delegate to Date, which will return `Invalid Date`s if wrong.
var date = new Date(input);
// If we have a string that the Date constructor couldn't parse, convert it.
if (isNaN(date.getTime()) && 'string' === type(input)) {
var milliseconds = toMilliseconds(parseInt(input, 10));
date = new Date(milliseconds);
}
return date;
};
/**
* If the number passed in is seconds from the epoch, turn it into milliseconds.
* Milliseconds would be greater than 31557600000 (December 31, 1970).
*
* @param seconds
*/
function toMilliseconds (seconds) {
if ('number' === type(seconds) && seconds < 31557600000) return seconds * 1000;
return seconds;
}
});
require.register("segmentio-on-body/index.js", function(exports, require, module){
var each = require('each');
/**
* Cache whether `<body>` exists.
*/
var body = false;
/**
* Callbacks to call when the body exists.
*/
var callbacks = [];
/**
* Export a way to add handlers to be invoked once the body exists.
*
* @param {Function} callback A function to call when the body exists.
*/
module.exports = function onBody (callback) {
if (body) {
call(callback);
} else {
callbacks.push(callback);
}
};
/**
* Set an interval to check for `document.body`.
*/
var interval = setInterval(function () {
if (!document.body) return;
body = true;
each(callbacks, call);
clearInterval(interval);
}, 5);
/**
* Call a callback, passing it the body.
*
* @param {Function} callback The callback to call.
*/
function call (callback) {
callback(document.body);
}
});
require.register("segmentio-store.js/store.js", function(exports, require, module){
// store.js: cross-browser persistent key/value storage. Backed by
// localStorage where available, with an old-IE `#userData` fallback.
// The API starts as no-ops and is filled in below per backend.
var json = require('json')
, store = {}
, win = window
, doc = win.document
, localStorageName = 'localStorage'
, namespace = '__storejs__'
, storage;
store.disabled = false
// Default no-op API, replaced below when a storage backend exists.
store.set = function(key, value) {}
store.get = function(key) {}
store.remove = function(key) {}
store.clear = function() {}
// Read-modify-write helper: load `key`, apply `transactionFn`, save.
// `defaultVal` is optional; calling transact(key, fn) also works.
store.transact = function(key, defaultVal, transactionFn) {
var val = store.get(key)
if (transactionFn == null) {
transactionFn = defaultVal
defaultVal = null
}
if (typeof val == 'undefined') { val = defaultVal || {} }
transactionFn(val)
store.set(key, val)
}
store.getAll = function() {}
// Values are stored JSON-serialized; deserialize falls back to the raw
// string for values that were never valid JSON.
store.serialize = function(value) {
return json.stringify(value)
}
store.deserialize = function(value) {
if (typeof value != 'string') { return undefined }
try { return json.parse(value) }
catch(e) { return value || undefined }
}
// Functions to encapsulate questionable FireFox 3.6.13 behavior
// when about.config::dom.storage.enabled === false
// See https://github.com/marcuswestin/store.js/issues#issue/13
function isLocalStorageNameSupported() {
try { return (localStorageName in win && win[localStorageName]) }
catch(err) { return false }
}
if (isLocalStorageNameSupported()) {
// Modern browsers: back the API with window.localStorage.
storage = win[localStorageName]
store.set = function(key, val) {
if (val === undefined) { return store.remove(key) }
storage.setItem(key, store.serialize(val))
return val
}
store.get = function(key) { return store.deserialize(storage.getItem(key)) }
store.remove = function(key) { storage.removeItem(key) }
store.clear = function() { storage.clear() }
store.getAll = function() {
var ret = {}
for (var i=0; i<storage.length; ++i) {
var key = storage.key(i)
ret[key] = store.get(key)
}
return ret
}
} else if (doc.documentElement.addBehavior) {
// Legacy IE: emulate storage with the proprietary #userData behavior.
var storageOwner,
storageContainer
// Since #userData storage applies only to specific paths, we need to
// somehow link our data to a specific path. We choose /favicon.ico
// as a pretty safe option, since all browsers already make a request to
// this URL anyway and being a 404 will not hurt us here. We wrap an
// iframe pointing to the favicon in an ActiveXObject(htmlfile) object
// (see: http://msdn.microsoft.com/en-us/library/aa752574(v=VS.85).aspx)
// since the iframe access rules appear to allow direct access and
// manipulation of the document element, even for a 404 page. This
// document can be used instead of the current document (which would
// have been limited to the current path) to perform #userData storage.
try {
storageContainer = new ActiveXObject('htmlfile')
storageContainer.open()
storageContainer.write('<s' + 'cript>document.w=window</s' + 'cript><iframe src="/favicon.ico"></iframe>')
storageContainer.close()
storageOwner = storageContainer.w.frames[0].document
storage = storageOwner.createElement('div')
} catch(e) {
// somehow ActiveXObject instantiation failed (perhaps some special
// security settings or otherwse), fall back to per-path storage
storage = doc.createElement('div')
storageOwner = doc.body
}
// Wraps a store function so the #userData element is attached, loaded,
// used, and detached around every single call.
function withIEStorage(storeFunction) {
return function() {
var args = Array.prototype.slice.call(arguments, 0)
args.unshift(storage)
// See http://msdn.microsoft.com/en-us/library/ms531081(v=VS.85).aspx
// and http://msdn.microsoft.com/en-us/library/ms531424(v=VS.85).aspx
storageOwner.appendChild(storage)
storage.addBehavior('#default#userData')
storage.load(localStorageName)
var result = storeFunction.apply(store, args)
storageOwner.removeChild(storage)
return result
}
}
// In IE7, keys may not contain special chars. See all of https://github.com/marcuswestin/store.js/issues/40
var forbiddenCharsRegex = new RegExp("[!\"#$%&'()*+,/\\\\:;<=>?@[\\]^`{|}~]", "g")
function ieKeyFix(key) {
return key.replace(forbiddenCharsRegex, '___')
}
store.set = withIEStorage(function(storage, key, val) {
key = ieKeyFix(key)
if (val === undefined) { return store.remove(key) }
storage.setAttribute(key, store.serialize(val))
storage.save(localStorageName)
return val
})
store.get = withIEStorage(function(storage, key) {
key = ieKeyFix(key)
return store.deserialize(storage.getAttribute(key))
})
store.remove = withIEStorage(function(storage, key) {
key = ieKeyFix(key)
storage.removeAttribute(key)
storage.save(localStorageName)
})
store.clear = withIEStorage(function(storage) {
var attributes = storage.XMLDocument.documentElement.attributes
storage.load(localStorageName)
for (var i=0, attr; attr=attributes[i]; i++) {
storage.removeAttribute(attr.name)
}
storage.save(localStorageName)
})
store.getAll = withIEStorage(function(storage) {
var attributes = storage.XMLDocument.documentElement.attributes
var ret = {}
for (var i=0, attr; attr=attributes[i]; ++i) {
var key = ieKeyFix(attr.name)
ret[attr.name] = store.deserialize(storage.getAttribute(key))
}
return ret
})
}
// Self-test: verify a round-trip works; otherwise flag the store as
// disabled (e.g. Safari private browsing throws on setItem).
try {
store.set(namespace, namespace)
if (store.get(namespace) != namespace) { store.disabled = true }
store.remove(namespace)
} catch(e) {
store.disabled = true
}
store.enabled = !store.disabled
module.exports = store;
});
require.register("segmentio-top-domain/index.js", function(exports, require, module){
var url = require('url');
// Official Grammar: http://tools.ietf.org/html/rfc883#page-56
// Look for tlds with up to 2-6 characters.
module.exports = function (urlStr) {
var host = url.parse(urlStr).hostname
, topLevel = host.match(/[a-z0-9][a-z0-9\-]*[a-z0-9]\.[a-z\.]{2,6}$/i);
return topLevel ? topLevel[0] : host;
};
});
require.register("timoxley-next-tick/index.js", function(exports, require, module){
"use strict"
// Export the fastest available "run this function asynchronously, as
// soon as possible" primitive for the current environment.
if (typeof setImmediate == 'function') {
module.exports = function(f){ setImmediate(f) }
}
// legacy node.js
else if (typeof process != 'undefined' && typeof process.nextTick == 'function') {
module.exports = process.nextTick
}
// fallback for other environments / postMessage behaves badly on IE8
else if (typeof window == 'undefined' || window.ActiveXObject || !window.postMessage) {
module.exports = function(f){ setTimeout(f) };
} else {
// Modern browsers: drain a queue of callbacks on a self-posted
// `message` event, which fires sooner than a timer would.
var q = [];
window.addEventListener('message', function(){
var i = 0;
while (i < q.length) {
try { q[i++](); }
catch (e) {
// A throwing callback must not starve the rest of the queue:
// keep only the not-yet-run tail, schedule another drain, and
// rethrow so the error still surfaces.
q = q.slice(i);
window.postMessage('tic!', '*');
throw e;
}
}
q.length = 0;
}, true);
module.exports = function(fn){
// Only post a message for the first callback of a batch; the drain
// above empties the whole queue in one event.
if (!q.length) window.postMessage('tic!', '*');
q.push(fn);
}
}
});
require.register("yields-prevent/index.js", function(exports, require, module){
/**
* prevent default on the given `e`.
*
* examples:
*
* anchor.onclick = prevent;
* anchor.onclick = function(e){
* if (something) return prevent(e);
* };
*
* @param {Event} e
*/
module.exports = function(e){
e = e || window.event
return e.preventDefault
? e.preventDefault()
: e.returnValue = false;
};
});
require.register("analytics/src/index.js", function(exports, require, module){
// Analytics.js
//
// (c) 2013 Segment.io Inc.
// Analytics.js may be freely distributed under the MIT license.
var Analytics = require('./analytics')
, providers = require('./providers');
// Export a singleton Analytics instance pre-registered with every
// bundled provider class.
module.exports = new Analytics(providers);
});
require.register("analytics/src/analytics.js", function(exports, require, module){
var after = require('after')
, bind = require('event').bind
, clone = require('clone')
, cookie = require('./cookie')
, each = require('each')
, extend = require('extend')
, isEmail = require('is-email')
, isMeta = require('is-meta')
, localStore = require('./localStore')
, newDate = require('new-date')
, size = require('object').length
, preventDefault = require('prevent')
, Provider = require('./provider')
, providers = require('./providers')
, querystring = require('querystring')
, type = require('type')
, url = require('url')
, user = require('./user')
, utils = require('./utils');
module.exports = Analytics;

/**
 * Analytics.
 *
 * Registers each provider class and tracks whether the page's `onload`
 * has fired (used by providers that must wait for load).
 *
 * @param {Object} Providers - Provider classes that the user can initialize.
 */
function Analytics (Providers) {
  var self = this;
  this.VERSION = '0.11.9';
  // Register every provider class we were handed.
  each(Providers, function (P) {
    self.addProvider(P);
  });
  // Chain any pre-existing `onload` handler while caching the page's
  // loaded state for ourselves.
  var previousOnload = window.onload;
  window.onload = function () {
    self.loaded = true;
    if ('function' === type(previousOnload)) previousOnload();
  };
}
/**
* Extend the Analytics prototype.
*/
// Default state and the provider registry live on the prototype; the
// library is used as a singleton, and `initialize` re-assigns the
// per-run fields (`providers`, `initialized`, `readied`).
extend(Analytics.prototype, {
// Whether `onload` has fired.
loaded : false,
// Whether `analytics` has been initialized.
initialized : false,
// Whether all of our analytics providers are ready to accept calls. Give it a
// real jank name since we already use `analytics.ready` for the method.
readied : false,
// A queue for ready callbacks to run when our `readied` state becomes `true`.
callbacks : [],
// Milliseconds to wait for requests to clear before leaving the current page.
timeout : 300,
// A reference to the current user object.
user : user,
// The default Provider.
Provider : Provider,
// Providers that can be initialized. Add using `this.addProvider`.
_providers : {},
// The currently initialized providers.
providers : [],
/**
 * Add a provider to `_providers` to be initialized later.
 *
 * The provider is keyed by its prototype's `name`, which is how
 * `initialize` looks it up from the settings dictionary.
 *
 * @param {String} name - The name of the provider.
 * @param {Function} Provider - The provider's class.
 */
addProvider : function (Provider) {
this._providers[Provider.prototype.name] = Provider;
},
/**
* Initialize
*
* Call `initialize` to setup analytics.js before identifying or
* tracking any users or events. For example:
*
* analytics.initialize({
* 'Google Analytics' : 'UA-XXXXXXX-X',
* 'Segment.io' : 'XXXXXXXXXXX',
* 'KISSmetrics' : 'XXXXXXXXXXX'
* });
*
* @param {Object} providers - a dictionary of the providers you want to
* enable. The keys are the names of the providers and their values are either
* an api key, or dictionary of extra settings (including the api key).
*
* @param {Object} options (optional) - extra settings to initialize with.
*/
initialize : function (providers, options) {
options || (options = {});
var self = this;
// Reset our state.
this.providers = [];
this.initialized = false;
this.readied = false;
// Set the storage options
cookie.options(options.cookie);
localStore.options(options.localStorage);
// Set the options for loading and saving the user
user.options(options.user);
user.load();
// Create a ready method that will call all of our ready callbacks after all
// of our providers have been initialized and loaded. We'll pass the
// function into each provider's initialize method, so they can callback
// after they've loaded successfully.
var ready = after(size(providers), function () {
self.readied = true;
var callback;
while(callback = self.callbacks.shift()) {
callback();
}
});
// Initialize a new instance of each provider with their `options`, and
// copy the provider into `this.providers`.
each(providers, function (key, options) {
var Provider = self._providers[key];
if (!Provider) return;
self.providers.push(new Provider(options, ready, self));
});
// Identify and track any `ajs_uid` and `ajs_event` parameters in the URL.
var query = url.parse(window.location.href).query;
var queries = querystring.parse(query);
if (queries.ajs_uid) this.identify(queries.ajs_uid);
if (queries.ajs_event) this.track(queries.ajs_event);
// Update the initialized state that other methods rely on.
this.initialized = true;
},
/**
* Ready
*
* Add a callback that will get called when all of the analytics services you
* initialize are ready to be called. It's like jQuery's `ready` except for
* analytics instead of the DOM.
*
* If we're already ready, it will callback immediately.
*
* @param {Function} callback - The callback to attach.
*/
ready : function (callback) {
if (type(callback) !== 'function') return;
if (this.readied) return callback();
this.callbacks.push(callback);
},
/**
* Identify
*
* Identifying a user ties all of their actions to an ID you recognize
* and records properties about a user. For example:
*
* analytics.identify('4d3ed089fb60ab534684b7e0', {
* name : 'Achilles',
* email : 'achilles@segment.io',
* age : 23
* });
*
* @param {String} userId (optional) - The ID you recognize the user by.
* Ideally this isn't an email, because that might change in the future.
*
* @param {Object} traits (optional) - A dictionary of traits you know about
* the user. Things like `name`, `age`, etc.
*
* @param {Object} options (optional) - Settings for the identify call.
*
* @param {Function} callback (optional) - A function to call after a small
* timeout, giving the identify call time to make requests.
*/
// Tie the user's actions to an ID and record traits about them. All
// arguments are optional; omitted leading arguments shift the rest left
// (e.g. identify(traits), identify(traits, callback)).
identify : function (userId, traits, options, callback) {
if (!this.initialized) return;
// Allow for optional arguments.
if (type(options) === 'function') {
callback = options;
options = undefined;
}
if (type(traits) === 'function') {
callback = traits;
traits = undefined;
}
if (type(userId) === 'object') {
if (traits && type(traits) === 'function') callback = traits;
traits = userId;
userId = undefined;
}
// Use our cookied ID if they didn't provide one.
// NOTE(review): `user === null` compares the required `user` module
// singleton, which this file always assigns - so an explicitly-null
// `userId` does NOT fall back to the cookied id. Possibly meant
// `userId === null`; confirm before changing.
if (userId === undefined || user === null) userId = user.id();
// Update the cookie with the new userId and traits.
// (`alias` is returned by user.update but the auto-alias call below
// is commented out, so it's currently unused.)
var alias = user.update(userId, traits);
// Clone `traits` before we manipulate it, so we don't do anything uncouth
// and take the user.traits() so anonymous users carry over traits.
traits = cleanTraits(userId, clone(user.traits()));
// Call `identify` on all of our enabled providers that support it.
each(this.providers, function (provider) {
if (provider.identify && isEnabled(provider, options)) {
var args = [userId, clone(traits), clone(options)];
// Queue the call on providers that haven't finished loading.
if (provider.ready) {
provider.identify.apply(provider, args);
} else {
provider.enqueue('identify', args);
}
}
});
// If we should alias, go ahead and do it.
// if (alias) this.alias(userId);
// Give the identify requests time to fire before the callback.
if (callback && type(callback) === 'function') {
setTimeout(callback, this.timeout);
}
},
/**
* Group
*
* Groups multiple users together under one "account" or "team" or "company".
* Acts on the currently identified user, so you need to call identify before
* calling group. For example:
*
* analytics.identify('4d3ed089fb60ab534684b7e0', {
* name : 'Achilles',
* email : 'achilles@segment.io',
* age : 23
* });
*
* analytics.group('5we93je3889fb60a937dk033', {
* name : 'Acme Co.',
* numberOfEmployees : 42,
* location : 'San Francisco'
* });
*
* @param {String} groupId - The ID you recognize the group by.
*
* @param {Object} properties (optional) - A dictionary of properties you know
* about the group. Things like `numberOfEmployees`, `location`, etc.
*
* @param {Object} options (optional) - Settings for the group call.
*
* @param {Function} callback (optional) - A function to call after a small
* timeout, giving the group call time to make requests.
*/
// Associate the currently-identified user with an account/team/company.
// `properties` and `options` are optional; function arguments shift
// into the `callback` slot.
group : function (groupId, properties, options, callback) {
if (!this.initialized) return;
// Allow for optional arguments.
if (type(options) === 'function') {
callback = options;
options = undefined;
}
if (type(properties) === 'function') {
callback = properties;
properties = undefined;
}
// Clone `properties` before we manipulate it, so we don't do anything bad,
// and back it by an empty object so that providers can assume it exists.
properties = clone(properties) || {};
// Convert dates from more types of input into Date objects.
if (properties.created) properties.created = newDate(properties.created);
// Call `group` on all of our enabled providers that support it.
each(this.providers, function (provider) {
if (provider.group && isEnabled(provider, options)) {
var args = [groupId, clone(properties), clone(options)];
// Queue the call on providers that haven't finished loading.
if (provider.ready) {
provider.group.apply(provider, args);
} else {
provider.enqueue('group', args);
}
}
});
// If we have a callback, call it after a small timeout.
if (callback && type(callback) === 'function') {
setTimeout(callback, this.timeout);
}
},
/**
* Track
*
* Record an event (or action) that your user has triggered. For example:
*
* analytics.track('Added a Friend', {
* level : 'hard',
* volume : 11
* });
*
* @param {String} event - The name of your event.
*
* @param {Object} properties (optional) - A dictionary of properties of the
* event. `properties` are all camelCase (we'll automatically conver them to
* the proper case each provider needs).
*
* @param {Object} options (optional) - Settings for the track call.
*
* @param {Function} callback - A function to call after a small
* timeout, giving the identify time to make requests.
*/
track : function (event, properties, options, callback) {
if (!this.initialized) return;
// Allow for optional arguments.
if (type(options) === 'function') {
callback = options;
options = undefined;
}
if (type(properties) === 'function') {
callback = properties;
properties = undefined;
}
// Call `track` on all of our enabled providers that support it.
each(this.providers, function (provider) {
if (provider.track && isEnabled(provider, options)) {
var args = [event, clone(properties), clone(options)];
if (provider.ready) {
provider.track.apply(provider, args);
} else {
provider.enqueue('track', args);
}
}
});
if (callback && type(callback) === 'function') {
setTimeout(callback, this.timeout);
}
},
/**
* Track Link
*
* A helper for tracking outbound links that would normally navigate away from
* the page before the track requests were made. It works by wrapping the
* calls in a short timeout, giving the requests time to fire.
*
* @param {Element|Array} links - The link element or array of link elements
* to bind to. (Allowing arrays makes it easy to pass in jQuery objects.)
*
* @param {String|Function} event - Passed directly to `track`. Or in the case
* that it's a function, it will be called with the link element as the first
* argument.
*
* @param {Object|Function} properties (optional) - Passed directly to
* `track`. Or in the case that it's a function, it will be called with the
* link element as the first argument.
*/
// Track clicks on outbound links without losing the event to page
// navigation: the default navigation is deferred by `this.timeout`
// milliseconds so the track requests can fire first.
trackLink : function (links, event, properties) {
if (!links) return;
// Turn a single link into an array so that we're always handling
// arrays, which allows for passing jQuery objects.
if ('element' === type(links)) links = [links];
var self = this
, eventFunction = 'function' === type(event)
, propertiesFunction = 'function' === type(properties);
each(links, function (el) {
bind(el, 'click', function (e) {
// Allow for `event` or `properties` to be a function. And pass it the
// link element that was clicked.
var newEvent = eventFunction ? event(el) : event;
var newProperties = propertiesFunction ? properties(el) : properties;
self.track(newEvent, newProperties);
// To justify us preventing the default behavior we must:
//
// * Have an `href` to use.
// * Not have a `target="_blank"` attribute.
// * Not have any special keys pressed, because they might be trying to
// open in a new tab, or window, or download.
//
// This might not cover all cases, but we'd rather throw out an event
// than miss a case that breaks the user experience.
if (el.href && el.target !== '_blank' && !isMeta(e)) {
preventDefault(e);
// Navigate to the url after just enough of a timeout.
setTimeout(function () {
window.location.href = el.href;
}, self.timeout);
}
});
});
},
/**
* Track Form
*
* Similar to `trackClick`, this is a helper for tracking form submissions
* that would normally navigate away from the page before a track request can
* be sent. It works by preventing the default submit event, sending our
* track requests, and then submitting the form programmatically.
*
* @param {Element|Array} forms - The form element or array of form elements
* to bind to. (Allowing arrays makes it easy to pass in jQuery objects.)
*
* @param {String|Function} event - Passed directly to `track`. Or in the case
* that it's a function, it will be called with the form element as the first
* argument.
*
* @param {Object|Function} properties (optional) - Passed directly to
* `track`. Or in the case that it's a function, it will be called with the
* form element as the first argument.
*/
// Track form submissions without losing the event to navigation: the
// default submit is prevented, the track call fires, and the form is
// submitted programmatically after `this.timeout` milliseconds.
trackForm : function (form, event, properties) {
if (!form) return;
// Turn a single element into an array so that we're always handling arrays,
// which allows for passing jQuery objects.
if ('element' === type(form)) form = [form];
var self = this
, eventFunction = 'function' === type(event)
, propertiesFunction = 'function' === type(properties);
each(form, function (el) {
var handler = function (e) {
// Allow for `event` or `properties` to be a function. And pass it the
// form element that was submitted.
var newEvent = eventFunction ? event(el) : event;
var newProperties = propertiesFunction ? properties(el) : properties;
self.track(newEvent, newProperties);
preventDefault(e);
// Submit the form after a timeout, giving the event time to fire.
setTimeout(function () {
el.submit();
}, self.timeout);
};
// Support the form being submitted via jQuery instead of for real. This
// doesn't happen automatically because `el.submit()` doesn't actually
// fire submit handlers, which is what jQuery uses internally. >_<
var dom = window.jQuery || window.Zepto;
if (dom) {
dom(el).submit(handler);
} else {
bind(el, 'submit', handler);
}
});
},
/**
* Pageview
*
* Simulate a pageview in single-page applications, where real pageviews don't
* occur. This isn't support by all providers.
*
* @param {String} url (optional) - The path of the page (eg. '/login'). Most
* providers will default to the current pages URL, so you don't need this.
*
* @param {Object} options (optional) - Settings for the pageview call.
*
*/
pageview : function (url,options) {
if (!this.initialized) return;
// Call `pageview` on all of our enabled providers that support it.
each(this.providers, function (provider) {
if (provider.pageview && isEnabled(provider, options)) {
var args = [url];
if (provider.ready) {
provider.pageview.apply(provider, args);
} else {
provider.enqueue('pageview', args);
}
}
});
},
/**
* Alias
*
* Merges two previously unassociate user identities. This comes in handy if
* the same user visits from two different devices and you want to combine
* their analytics history.
*
* Some providers don't support merging users.
*
* @param {String} newId - The new ID you want to recognize the user by.
*
* @param {String} originalId (optional) - The original ID that the user was
* recognized by. This defaults to the current identified user's ID if there
* is one. In most cases you don't need to pass in the `originalId`.
*/
alias : function (newId, originalId, options) {
if (!this.initialized) return;
if (type(originalId) === 'object') {
options = originalId;
originalId = undefined;
}
// Call `alias` on all of our enabled providers that support it.
each(this.providers, function (provider) {
if (provider.alias && isEnabled(provider, options)) {
var args = [newId, originalId];
if (provider.ready) {
provider.alias.apply(provider, args);
} else {
provider.enqueue('alias', args);
}
}
});
},
/**
* Log
*
* Log an error to analytics providers that support it, like Sentry.
*
* @param {Error|String} error - The error or string to log.
* @param {Object} properties - Properties about the error.
* @param {Object} options (optional) - Settings for the log call.
*/
log : function (error, properties, options) {
  if (!this.initialized) return;
  // Forward the error to every enabled provider that implements `log`
  // (e.g. Sentry), queueing on providers that haven't loaded yet.
  each(this.providers, function (provider) {
    if (!provider.log || !isEnabled(provider, options)) return;
    var args = [error, properties, options];
    if (provider.ready) {
      provider.log.apply(provider, args);
    } else {
      provider.enqueue('log', args);
    }
  });
}
});
/**
 * Backwards compatibility.
 */
// Alias `trackClick` and `trackSubmit`.
// Older integrations used these names; keep them pointing at the
// renamed `trackLink`/`trackForm` implementations.
Analytics.prototype.trackClick = Analytics.prototype.trackLink;
Analytics.prototype.trackSubmit = Analytics.prototype.trackForm;
/**
* Determine whether a provider is enabled or not based on the options object.
*
* @param {Object} provider - the current provider.
* @param {Object} options - the current call's options.
*
* @return {Boolean} - wether the provider is enabled.
*/
var isEnabled = function (provider, options) {
  // With no options, or no `providers` map, everything is enabled.
  if (!options || !options.providers) return true;
  var map = options.providers;
  var enabled = true;
  // Apply the blanket 'all'/'All' settings first...
  if (map.all !== undefined) enabled = map.all;
  if (map.All !== undefined) enabled = map.All;
  // ...then let this provider's specific setting override them.
  if (map[provider.name] !== undefined) enabled = map[provider.name];
  return enabled;
};
/**
* Clean up traits, default some useful things both so the user doesn't have to
* and so we don't have to do it on a provider-basis.
*
* @param {Object} traits The traits object.
* @return {Object} The new traits object.
*/
var cleanTraits = function (userId, traits) {
  // Default the `email` trait from the userId when the id is an email.
  if (isEmail(userId) && !traits.email) traits.email = userId;
  // Compose `name` from `firstName` + `lastName` when it's absent.
  if (!traits.name && traits.firstName && traits.lastName) {
    traits.name = traits.firstName + ' ' + traits.lastName;
  }
  // Coerce any `created` values into real Date objects.
  if (traits.created) traits.created = newDate(traits.created);
  if (traits.company && traits.company.created) {
    traits.company.created = newDate(traits.company.created);
  }
  return traits;
};
});
require.register("analytics/src/cookie.js", function(exports, require, module){
var bindAll = require('bind-all')
  , cookie = require('cookie')
  , clone = require('clone')
  , defaults = require('defaults')
  , json = require('json')
  , topDomain = require('top-domain');

/**
 * A thin wrapper around the `cookie` component that JSON-encodes
 * values and shares a single set of cookie options across calls.
 */
function Cookie (options) {
  this.options(options);
}

/**
 * Get or set the cookie options
 *
 * @param {Object} options
 *   @field {Number} maxage (1 year)
 *   @field {String} domain
 *   @field {String} path
 *   @field {Boolean} secure
 */
Cookie.prototype.options = function (options) {
  if (arguments.length === 0) return this._options;
  options || (options = {});
  var domain = '.' + topDomain(window.location.href);
  // localhost cookies are special: http://curl.haxx.se/rfc/cookie_spec.html
  if ('.localhost' === domain) domain = '';
  defaults(options, {
    maxage : 31536000000, // default to a year
    path : '/',
    domain : domain
  });
  this._options = options;
};

/**
 * Set a JSON-serializable value in our cookie.
 *
 * @param {String} key
 * @param {Object} value
 * @return {Boolean} whether the value was saved
 */
Cookie.prototype.set = function (key, value) {
  try {
    cookie(key, json.stringify(value), clone(this._options));
    return true;
  } catch (e) {
    return false;
  }
};

/**
 * Get a previously stored value, or `null` when missing or unparseable.
 *
 * @param {String} key
 * @return {Object} value
 */
Cookie.prototype.get = function (key) {
  try {
    var raw = cookie(key);
    return raw ? json.parse(raw) : null;
  } catch (e) {
    return null;
  }
};

/**
 * Remove a value from the cookie.
 *
 * @param {String} key
 * @return {Boolean} whether the value was removed
 */
Cookie.prototype.remove = function (key) {
  try {
    cookie(key, null, clone(this._options));
    return true;
  } catch (e) {
    return false;
  }
};

/**
 * Export a singleton with bound methods, plus the constructor itself.
 */
module.exports = bindAll(new Cookie());
module.exports.Cookie = Cookie;
});
require.register("analytics/src/localStore.js", function(exports, require, module){
var bindAll = require('bind-all')
  , defaults = require('defaults')
  , store = require('store');

/**
 * A wrapper around the `store` component that can be disabled through
 * its options (and is disabled automatically when the underlying store
 * is unavailable).
 */
function Store (options) {
  this.options(options);
}

/**
 * Sets the options for the store
 *
 * @param {Object} options
 *   @field {Boolean} enabled (true)
 */
Store.prototype.options = function (options) {
  if (arguments.length === 0) return this._options;
  options || (options = {});
  defaults(options, { enabled : true });
  // Enabled only when the caller allows it AND the backend works.
  this.enabled = options.enabled && store.enabled;
  this._options = options;
};

/**
 * Sets a value in local storage; no-op returning `false` when disabled.
 *
 * @param {String} key
 * @param {Object} value
 */
Store.prototype.set = function (key, value) {
  return this.enabled ? store.set(key, value) : false;
};

/**
 * Gets a value from local storage; `null` when disabled.
 *
 * @param {String} key
 * @return {Object}
 */
Store.prototype.get = function (key) {
  return this.enabled ? store.get(key) : null;
};

/**
 * Removes a value from local storage; no-op returning `false` when
 * disabled.
 *
 * @param {String} key
 */
Store.prototype.remove = function (key) {
  return this.enabled ? store.remove(key) : false;
};

/**
 * Singleton exports
 */
module.exports = bindAll(new Store());
});
require.register("analytics/src/provider.js", function(exports, require, module){
// Base class that every analytics provider extends. It normalizes the
// passed-in options and queues any analytics calls made before the
// provider's third-party library has finished loading.
var each = require('each')
  , extend = require('extend')
  , type = require('type');
module.exports = Provider;
/**
 * Provider
 *
 * @param {Object} options - settings to initialize the Provider with. This will
 * be merged with the Provider's own defaults.
 *
 * @param {Function} ready - a ready callback, to be called when the provider is
 * ready to handle analytics calls.
 */
function Provider (options, ready, analytics) {
  var self = this;
  // Store the reference to the global `analytics` object.
  this.analytics = analytics;
  // Make a queue of `{ method : 'identify', args : [] }` to unload once ready.
  this.queue = [];
  this.ready = false;
  // Allow for `options` to only be a string if the provider has specified
  // a default `key`, in which case convert `options` into a dictionary. Also
  // allow for it to be `true`, like in Optimizely's case where there is no need
  // for any default key.
  if (type(options) !== 'object') {
    if (options === true) {
      options = {};
    } else if (this.key) {
      var key = options;
      options = {};
      options[this.key] = key;
    } else {
      throw new Error('Couldnt resolve options.');
    }
  }
  // Extend the passed-in options with our defaults.
  this.options = extend({}, this.defaults, options);
  // Wrap our ready function, so that it first replays the calls buffered in
  // our internal queue, and then marks us as ready.
  var dequeue = function () {
    each(self.queue, function (call) {
      var method = call.method
        , args = call.args;
      self[method].apply(self, args);
    });
    self.ready = true;
    self.queue = [];
    ready();
  };
  // Call our initialize method.
  this.initialize.call(this, this.options, dequeue);
}
/**
 * Inheritance helper.
 *
 * Modeled after Backbone's `extend` method:
 * https://github.com/documentcloud/backbone/blob/master/backbone.js#L1464
 */
Provider.extend = function (properties) {
  var parent = this;
  var child = function () { return parent.apply(this, arguments); };
  // Surrogate avoids invoking the parent constructor when building the
  // prototype chain.
  var Surrogate = function () { this.constructor = child; };
  Surrogate.prototype = parent.prototype;
  child.prototype = new Surrogate();
  extend(child.prototype, properties);
  return child;
};
/**
 * Augment Provider's prototype.
 */
extend(Provider.prototype, {
  /**
   * Default settings for the provider.
   */
  options : {},
  /**
   * The single required API key for the provider. This lets us support a terse
   * initialization syntax:
   *
   *     analytics.initialize({
   *       'Provider' : 'XXXXXXX'
   *     });
   *
   * Only add this if the provider has a _single_ required key.
   */
  key : undefined,
  /**
   * Initialize our provider.
   *
   * @param {Object} options - the settings for the provider.
   * @param {Function} ready - a ready callback to call when we're ready to
   * start accept analytics method calls.
   */
  initialize : function (options, ready) {
    ready();
  },
  /**
   * Adds an item to the our internal pre-ready queue.
   *
   * @param {String} method - the analytics method to call (eg. 'track').
   * @param {Object} args - the arguments to pass to the method.
   */
  enqueue : function (method, args) {
    this.queue.push({
      method : method,
      args : args
    });
  }
});
});
require.register("analytics/src/user.js", function(exports, require, module){
// Holds the current user's id and traits, persisting the id to a cookie
// and the traits to localStorage (when `persist` is enabled).
var bindAll = require('bind-all')
  , clone = require('clone')
  , cookie = require('./cookie')
  , defaults = require('defaults')
  , extend = require('extend')
  , localStore = require('./localStore');
function User (options) {
  this._id = null;
  this._traits = {};
  this.options(options);
}
/**
 * Sets the options for the user
 *
 * @param {Object} options
 * @field {Object} cookie
 * @field {Object} localStorage
 * @field {Boolean} persist (true)
 */
User.prototype.options = function (options) {
  options || (options = {});
  defaults(options, {
    persist : true
  });
  this.cookie(options.cookie);
  this.localStorage(options.localStorage);
  this.persist = options.persist;
};
/**
 * Get or set cookie options
 *
 * @param {Object} options
 */
User.prototype.cookie = function (options) {
  if (arguments.length === 0) return this.cookieOptions;
  options || (options = {});
  defaults(options, {
    key : 'ajs_user_id',
    // `oldKey` is the legacy cookie name, migrated away from in
    // `loadOldCookie` below.
    oldKey : 'ajs_user'
  });
  this.cookieOptions = options;
};
/**
 * Get or set local storage options
 *
 * @param {Object} options
 */
User.prototype.localStorage = function (options) {
  if (arguments.length === 0) return this.localStorageOptions;
  options || (options = {});
  defaults(options, {
    key : 'ajs_user_traits'
  });
  this.localStorageOptions = options;
};
/**
 * Get or set the user id
 *
 * @param {String} id
 */
User.prototype.id = function (id) {
  if (arguments.length === 0) return this._id;
  this._id = id;
};
/**
 * Get or set the user traits
 *
 * @param {Object} traits
 */
User.prototype.traits = function (traits) {
  // Return a clone so callers can't mutate our internal traits object.
  if (arguments.length === 0) return clone(this._traits);
  traits || (traits = {});
  this._traits = traits;
};
/**
 * Updates the current stored user with id and traits.
 *
 * @param {String} userId - the new user ID.
 * @param {Object} traits - any new traits.
 * @return {Boolean} whether alias should be called.
 */
User.prototype.update = function (userId, traits) {
  // Make an alias call if there was no previous userId, there is one
  // now, and we are using a cookie between page loads.
  var alias = !this.id() && userId && this.persist;
  traits || (traits = {});
  // If there is a current user and the new user isn't the same,
  // we want to just replace their traits. Otherwise extend.
  if (this.id() && userId && this.id() !== userId) this.traits(traits);
  else this.traits(extend(this.traits(), traits));
  if (userId) this.id(userId);
  this.save();
  return alias;
};
/**
 * Save the user to localstorage and cookie
 *
 * @return {Boolean} saved
 */
User.prototype.save = function () {
  if (!this.persist) return false;
  cookie.set(this.cookie().key, this.id());
  localStore.set(this.localStorage().key, this.traits());
  return true;
};
/**
 * Loads a saved user, and set its information
 *
 * @return {Object} user
 */
User.prototype.load = function () {
  // Migrate from the legacy single-cookie format first, if present.
  if (this.loadOldCookie()) return this.toJSON();
  var id = cookie.get(this.cookie().key)
    , traits = localStore.get(this.localStorage().key);
  this.id(id);
  this.traits(traits);
  return this.toJSON();
};
/**
 * Clears the user, and removes the stored version
 *
 */
User.prototype.clear = function () {
  cookie.remove(this.cookie().key);
  localStore.remove(this.localStorage().key);
  this.id(null);
  this.traits({});
};
/**
 * Load the old user from the cookie. Should be phased
 * out at some point
 *
 * @return {Boolean} loaded
 */
User.prototype.loadOldCookie = function () {
  var user = cookie.get(this.cookie().oldKey);
  if (!user) return false;
  this.id(user.id);
  this.traits(user.traits);
  // Delete the legacy cookie so the migration only happens once.
  cookie.remove(this.cookie().oldKey);
  return true;
};
/**
 * Get the user info
 *
 * @return {Object}
 */
User.prototype.toJSON = function () {
  return {
    id : this.id(),
    traits : this.traits()
  };
};
/**
 * Export the new user as a singleton.
 */
module.exports = bindAll(new User());
});
require.register("analytics/src/utils.js", function(exports, require, module){
// A helper to track events based on the 'anjs' url parameter
exports.getUrlParameter = function (urlSearchParameter, paramKey) {
var params = urlSearchParameter.replace('?', '').split('&');
for (var i = 0; i < params.length; i += 1) {
var param = params[i].split('=');
if (param.length === 2 && param[0] === paramKey) {
return decodeURIComponent(param[1]);
}
}
};
});
require.register("analytics/src/providers/adroll.js", function(exports, require, module){
// https://www.adroll.com/dashboard
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'AdRoll',
  defaults : {
    // Adroll requires two options: `advId` and `pixId`.
    advId : null,
    pixId : null
  },
  initialize : function (options, ready) {
    // AdRoll's script reads its configuration from these globals, so they
    // must be set before the script is loaded.
    window.adroll_adv_id = options.advId;
    window.adroll_pix_id = options.pixId;
    // NOTE(review): flag read by AdRoll's script — presumably marks the
    // loader as already run; confirm against AdRoll's snippet docs.
    window.__adroll_loaded = true;
    load({
      http : 'http://a.adroll.com/j/roundtrip.js',
      https : 'https://s.adroll.com/j/roundtrip.js'
    }, ready);
  }
});
});
require.register("analytics/src/providers/amplitude.js", function(exports, require, module){
// https://github.com/amplitude/Amplitude-Javascript
var Provider = require('../provider')
  , alias = require('alias')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'Amplitude',
  key : 'apiKey',
  defaults : {
    // Amplitude's required API key.
    apiKey : null,
    // Whether to track pageviews to Amplitude.
    pageview : false
  },
  initialize : function (options, ready) {
    // Create the Amplitude global and queuer methods.
    // (This is Amplitude's official minified stub: each listed method
    // pushes its arguments onto `amplitude._q` until the real library
    // loads and drains the queue.)
    (function(e,t){var r=e.amplitude||{};
    r._q=[];function i(e){r[e]=function(){r._q.push([e].concat(Array.prototype.slice.call(arguments,0)))}}
    var s=["init","logEvent","setUserId","setGlobalUserProperties","setVersionName"];
    for(var c=0;c<s.length;c++){i(s[c])}e.amplitude=r})(window,document);
    // Load the Amplitude script and initialize with the API key.
    load('https://d24n15hnbwhuhn.cloudfront.net/libs/amplitude-1.0-min.js');
    window.amplitude.init(options.apiKey);
    // Amplitude creates a queue, so it's ready immediately.
    ready();
  },
  identify : function (userId, traits) {
    if (userId) window.amplitude.setUserId(userId);
    if (traits) window.amplitude.setGlobalUserProperties(traits);
  },
  track : function (event, properties) {
    window.amplitude.logEvent(event, properties);
  },
  // Only tracks pageviews when the `pageview` option is enabled.
  pageview : function (url) {
    if (!this.options.pageview) return;
    var properties = {
      url  : url || document.location.href,
      name : document.title
    };
    this.track('Loaded a Page', properties);
  }
});
});
require.register("analytics/src/providers/bitdeli.js", function(exports, require, module){
// https://bitdeli.com/docs
// https://bitdeli.com/docs/javascript-api.html
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'Bitdeli',
  defaults : {
    // BitDeli requires two options: `inputId` and `authToken`.
    inputId : null,
    authToken : null,
    // Whether or not to track an initial pageview when the page first
    // loads. You might not want this if you're using a single-page app.
    initialPageview : true
  },
  initialize : function (options, ready) {
    // `_bdq` is Bitdeli's command queue; calls buffered here are consumed
    // once their script loads.
    window._bdq = window._bdq || [];
    window._bdq.push(["setAccount", options.inputId, options.authToken]);
    if (options.initialPageview) this.pageview();
    load('//d2flrkr957qc5j.cloudfront.net/bitdeli.min.js');
    // Bitdeli just uses a queue, so it's ready right away.
    ready();
  },
  // Bitdeli uses two separate methods: `identify` for storing the `userId`
  // and `set` for storing `traits`.
  identify : function (userId, traits) {
    if (userId) window._bdq.push(['identify', userId]);
    if (traits) window._bdq.push(['set', traits]);
  },
  track : function (event, properties) {
    window._bdq.push(['track', event, properties]);
  },
  // If `url` is undefined, Bitdeli uses the current page URL instead.
  pageview : function (url) {
    window._bdq.push(['trackPageview', url]);
  }
});
});
require.register("analytics/src/providers/bugherd.js", function(exports, require, module){
// http://support.bugherd.com/home
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'BugHerd',
  key : 'apiKey',
  defaults : {
    apiKey : null,
    // Optionally hide the feedback tab if you want to build your own.
    // http://support.bugherd.com/entries/21497629-Create-your-own-Send-Feedback-tab
    showFeedbackTab : true
  },
  initialize : function (options, ready) {
    // The config global must exist before BugHerd's script loads, since
    // the script reads it on startup.
    if (!options.showFeedbackTab) {
      window.BugHerdConfig = { "feedback" : { "hide" : true } };
    }
    load('//www.bugherd.com/sidebarv2.js?apikey=' + options.apiKey, ready);
  }
});
});
require.register("analytics/src/providers/chartbeat.js", function(exports, require, module){
// http://chartbeat.com/docs/adding_the_code/
// http://chartbeat.com/docs/configuration_variables/
// http://chartbeat.com/docs/handling_virtual_page_changes/
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'Chartbeat',
  defaults : {
    // Chartbeat requires two options: `domain` and `uid`. All other
    // configuration options are passed straight in!
    domain : null,
    uid : null
  },
  initialize : function (options, ready) {
    // Since all the custom options just get passed through, update the
    // Chartbeat `_sf_async_config` variable with options.
    window._sf_async_config = options;
    // Chartbeat's javascript should only load after the body
    // is available, see https://github.com/segmentio/analytics.js/issues/107
    var loadChartbeat = function () {
      // We loop until the body is available.
      if (!document.body) return setTimeout(loadChartbeat, 5);
      // Use the stored date from when chartbeat was loaded.
      window._sf_endpt = (new Date()).getTime();
      // Load the Chartbeat javascript.
      load({
        https : 'https://a248.e.akamai.net/chartbeat.download.akamai.com/102508/js/chartbeat.js',
        http : 'http://static.chartbeat.com/js/chartbeat.js'
      }, ready);
    };
    loadChartbeat();
  },
  pageview : function (url) {
    // In case the Chartbeat library hasn't loaded yet.
    if (!window.pSUPERFLY) return;
    // Requires a path, so default to the current one.
    window.pSUPERFLY.virtualPage(url || window.location.pathname);
  }
});
});
require.register("analytics/src/providers/clicktale.js", function(exports, require, module){
// http://wiki.clicktale.com/Article/JavaScript_API
var date = require('load-date')
  , Provider = require('../provider')
  , load = require('load-script')
  , onBody = require('on-body');
module.exports = Provider.extend({
  name : 'ClickTale',
  key : 'projectId',
  defaults : {
    // If you sign up for a free account, this is the default http (non-ssl) CDN URL
    // that you get. If you sign up for a premium account, you get a different
    // custom CDN URL, so we have to leave it as an option.
    httpCdnUrl     : 'http://s.clicktale.net/WRe0.js',
    // SSL support is only for premium accounts. Each premium account seems to have
    // a different custom secure CDN URL, so we have to leave it as an option.
    httpsCdnUrl    : null,
    // The Project ID is loaded in after the ClickTale CDN javascript has loaded.
    projectId      : null,
    // The recording ratio specifies what fraction of people to screen-record.
    // ClickTale has a special calculator in their setup flow that tells you
    // what number to set for this.
    recordingRatio : 0.01,
    // The Partition ID determines where ClickTale stores the data according to
    // http://wiki.clicktale.com/Article/JavaScript_API
    partitionId    : null
  },
  initialize : function (options, ready) {
    // If we're on https:// but don't have a secure library, return early.
    // NOTE(review): this early return never calls `ready()`, so on https
    // without an `httpsCdnUrl` the provider stays queued forever.
    if (document.location.protocol === 'https:' && !options.httpsCdnUrl) return;
    // ClickTale wants this at the "top" of the page. The analytics.js snippet
    // sets this date synchronously now, and makes it available via load-date.
    window.WRInitTime = date.getTime();
    // Add the required ClickTale div to the body.
    onBody(function (body) {
      var div = document.createElement('div');
      div.setAttribute('id', 'ClickTaleDiv');
      div.setAttribute('style', 'display: none;');
      body.appendChild(div);
    });
    // Once the CDN script is loaded, start ClickTale with our settings.
    var onloaded = function () {
      window.ClickTale(
        options.projectId,
        options.recordingRatio,
        options.partitionId
      );
      ready();
    };
    // If no SSL library is provided and we're on SSL then we can't load
    // anything (always true for non-premium accounts).
    load({
      http : options.httpCdnUrl,
      https : options.httpsCdnUrl
    }, onloaded);
  },
  identify : function (userId, traits) {
    // We set the userId as the ClickTale UID.
    if (window.ClickTaleSetUID) window.ClickTaleSetUID(userId);
    // We iterate over all the traits and set them as key-value field pairs.
    if (window.ClickTaleField) {
      for (var traitKey in traits) {
        window.ClickTaleField(traitKey, traits[traitKey]);
      }
    }
  },
  track : function (event, properties) {
    // ClickTaleEvent is an alias for ClickTaleTag
    if (window.ClickTaleEvent) window.ClickTaleEvent(event);
  }
});
});
require.register("analytics/src/providers/clicky.js", function(exports, require, module){
// http://clicky.com/help/customization/manual?new-domain
// http://clicky.com/help/customization/manual?new-domain#/help/customization#session
var Provider = require('../provider')
  , user = require('../user')
  , extend = require('extend')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'Clicky',
  key : 'siteId',
  defaults : {
    siteId : null
  },
  initialize : function (options, ready) {
    window.clicky_site_ids = window.clicky_site_ids || [];
    window.clicky_site_ids.push(options.siteId);
    // Seed Clicky's custom session data with any user info we already have.
    var userId = user.id()
      , traits = user.traits()
      , session = {};
    if (userId) session.id = userId;
    extend(session, traits);
    window.clicky_custom = { session : session };
    load('//static.getclicky.com/js', ready);
  },
  track : function (event, properties) {
    // Clicky's log call takes (url, title); the current page URL is used
    // as the first argument and the event name as the second.
    window.clicky.log(window.location.href, event);
  }
});
});
require.register("analytics/src/providers/comscore.js", function(exports, require, module){
// http://direct.comscore.com/clients/help/FAQ.aspx#faqTagging
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'comScore',
  key : 'c2',
  defaults : {
    // `c1` is comScore's tag type; `c2` is the client id.
    c1 : '2',
    c2 : null
  },
  // Pass the entire options object directly into comScore.
  initialize : function (options, ready) {
    window._comscore = window._comscore || [];
    window._comscore.push(options);
    load({
      http : 'http://b.scorecardresearch.com/beacon.js',
      https : 'https://sb.scorecardresearch.com/beacon.js'
    }, ready);
  }
});
});
require.register("analytics/src/providers/crazyegg.js", function(exports, require, module){
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'CrazyEgg',
  key : 'accountNumber',
  defaults : {
    accountNumber : null
  },
  initialize : function (options, ready) {
    // CrazyEgg's script path splits the account number after the first
    // four digits (e.g. '12345678' -> '1234/5678').
    var accountPath = options.accountNumber.slice(0,4) + '/' + options.accountNumber.slice(4);
    // The trailing query string is an hourly cache-buster
    // (milliseconds since epoch divided by one hour).
    load('//dnn506yrbagrg.cloudfront.net/pages/scripts/'+accountPath+'.js?'+Math.floor(new Date().getTime()/3600000), ready);
  }
});
});
require.register("analytics/src/providers/customerio.js", function(exports, require, module){
// http://customer.io/docs/api/javascript.html
var Provider = require('../provider')
, isEmail = require('is-email')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Customer.io',
key : 'siteId',
defaults : {
siteId : null
},
initialize : function (options, ready) {
var _cio = window._cio = window._cio || [];
(function() {
var a,b,c;
a = function (f) {
return function () {
_cio.push([f].concat(Array.prototype.slice.call(arguments,0)));
};
};
b = ['identify', 'track'];
for (c = 0; c < b.length; c++) {
_cio[b[c]] = a(b[c]);
}
})();
// Load the Customer.io script and add the required `id` and `data-site-id`.
var script = load('https://assets.customer.io/assets/track.js');
script.id = 'cio-tracker';
script.setAttribute('data-site-id', options.siteId);
// Since Customer.io creates their required methods in their snippet, we
// don't need to wait to be ready.
ready();
},
identify : function (userId, traits) {
// Don't do anything if we just have traits, because Customer.io
// requires a `userId`.
if (!userId) return;
// Customer.io takes the `userId` as part of the traits object.
traits.id = userId;
// Swap the `created` trait to the `created_at` that Customer.io needs
// and convert it from milliseconds to seconds.
if (traits.created) {
traits.created_at = Math.floor(traits.created/1000);
delete traits.created;
}
window._cio.identify(traits);
},
track : function (event, properties) {
window._cio.track(event, properties);
}
});
});
require.register("analytics/src/providers/errorception.js", function(exports, require, module){
// http://errorception.com/
var Provider = require('../provider')
, extend = require('extend')
, load = require('load-script')
, type = require('type');
module.exports = Provider.extend({
name : 'Errorception',
key : 'projectId',
defaults : {
projectId : null,
// Whether to store metadata about the user on `identify` calls, using
// the [Errorception `meta` API](http://blog.errorception.com/2012/11/capture-custom-data-with-your-errors.html).
meta : true
},
initialize : function (options, ready) {
window._errs = window._errs || [options.projectId];
load('//d15qhc0lu1ghnk.cloudfront.net/beacon.js');
// Attach the window `onerror` event.
var oldOnError = window.onerror;
window.onerror = function () {
window._errs.push(arguments);
// Chain the old onerror handler after we finish our work.
if ('function' === type(oldOnError)) {
oldOnError.apply(this, arguments);
}
};
// Errorception makes a queue, so it's ready immediately.
ready();
},
// Add the traits to the Errorception meta object.
identify : function (userId, traits) {
if (!this.options.meta) return;
// If the custom metadata object hasn't ever been made, make it.
window._errs.meta || (window._errs.meta = {});
// Add `userId` to traits.
traits.id = userId;
// Add all of the traits as metadata.
extend(window._errs.meta, traits);
}
});
});
require.register("analytics/src/providers/foxmetrics.js", function(exports, require, module){
// http://foxmetrics.com/documentation/apijavascript
var Provider = require('../provider')
, load = require('load-script');
module.exports = Provider.extend({
name : 'FoxMetrics',
key : 'appId',
defaults : {
appId : null
},
initialize : function (options, ready) {
var _fxm = window._fxm || {};
window._fxm = _fxm.events || [];
load('//d35tca7vmefkrc.cloudfront.net/scripts/' + options.appId + '.js');
// FoxMetrics makes a queue, so it's ready immediately.
ready();
},
identify : function (userId, traits) {
// A `userId` is required for profile updates.
if (!userId) return;
// FoxMetrics needs the first and last name seperately. Fallback to
// splitting the `name` trait if we don't have what we need.
var firstName = traits.firstName
, lastName = traits.lastName;
if (!firstName && traits.name) firstName = traits.name.split(' ')[0];
if (!lastName && traits.name) lastName = traits.name.split(' ')[1];
window._fxm.push([
'_fxm.visitor.profile',
userId, // user id
firstName, // first name
lastName, // last name
traits.email, // email
traits.address, // address
undefined, // social
undefined, // partners
traits // attributes
]);
},
track : function (event, properties) {
window._fxm.push([
event, // event name
properties.category, // category
properties // properties
]);
},
pageview : function (url) {
window._fxm.push([
'_fxm.pages.view',
undefined, // title
undefined, // name
undefined, // category
url, // url
undefined // referrer
]);
}
});
});
require.register("analytics/src/providers/gauges.js", function(exports, require, module){
// http://get.gaug.es/documentation/tracking/
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'Gauges',
  key : 'siteId',
  defaults : {
    siteId : null
  },
  initialize : function (options, ready) {
    window._gauges = window._gauges || [];
    var script = load('//secure.gaug.es/track.js');
    // Gauges needs a few attributes on its script element.
    script.id = 'gauges-tracker';
    script.setAttribute('data-site-id', options.siteId);
    // Gauges make a queue so it's ready immediately.
    ready();
  },
  pageview : function (url) {
    // The `url` argument is intentionally unused: Gauges' `track` command
    // takes no arguments and records the current page.
    window._gauges.push(['track']);
  }
});
});
require.register("analytics/src/providers/get-satisfaction.js", function(exports, require, module){
// You have to be signed in to access the snippet code:
// https://console.getsatisfaction.com/start/101022?signup=true#engage
var Provider = require('../provider')
  , load = require('load-script')
  , onBody = require('on-body');
module.exports = Provider.extend({
  name : 'Get Satisfaction',
  key : 'widgetId',
  defaults : {
    widgetId : null
  },
  initialize : function (options, ready) {
    // Get Satisfaction requires a div that will become their widget tab. Append
    // it once `document.body` exists.
    var div = document.createElement('div');
    var id = div.id = 'getsat-widget-' + options.widgetId;
    onBody(function (body) {
      body.appendChild(div);
    });
    // Usually they load their snippet synchronously, so we need to wait for it
    // to come back before initializing the tab.
    load('https://loader.engage.gsfn.us/loader.js', function () {
      // Guard in case the loader script didn't define the GSFN global.
      if (window.GSFN !== undefined) {
        window.GSFN.loadWidget(options.widgetId, { containerId : id });
      }
      ready();
    });
  }
});
});
require.register("analytics/src/providers/google-analytics.js", function(exports, require, module){
// https://developers.google.com/analytics/devguides/collection/gajs/
var Provider = require('../provider')
  , load = require('load-script')
  , type = require('type')
  , url = require('url')
  , canonical = require('canonical');
module.exports = Provider.extend({
  name : 'Google Analytics',
  key : 'trackingId',
  defaults : {
    // Whether to anonymize the IP address collected for the user.
    anonymizeIp : false,
    // An optional domain setting, to restrict where events can originate from.
    domain : null,
    // Whether to enable Google's DoubleClick remarketing feature.
    doubleClick : false,
    // Whether to use Google Analytics's Enhanced Link Attribution feature:
    // http://support.google.com/analytics/bin/answer.py?hl=en&answer=2558867
    enhancedLinkAttribution : false,
    // A domain to ignore for referrers. Maps to _addIgnoredRef
    ignoreReferrer : null,
    // Whether or not to track an initial pageview when initialized.
    initialPageview : true,
    // The setting to use for Google Analytics's Site Speed Sample Rate feature:
    // https://developers.google.com/analytics/devguides/collection/gajs/methods/gaJSApiBasicConfiguration#_gat.GA_Tracker_._setSiteSpeedSampleRate
    siteSpeedSampleRate : null,
    // Your Google Analytics Tracking ID.
    trackingId : null,
    // Whether you're using the new Universal Analytics or not.
    universalClient: false
  },
  // Dispatch to the classic (ga.js) or universal (analytics.js) setup.
  initialize : function (options, ready) {
    if (options.universalClient) this.initializeUniversal(options, ready);
    else this.initializeClassic(options, ready);
  },
  initializeClassic: function (options, ready) {
    window._gaq = window._gaq || [];
    window._gaq.push(['_setAccount', options.trackingId]);
    // Apply a bunch of optional settings.
    if (options.domain) {
      window._gaq.push(['_setDomainName', options.domain]);
    }
    if (options.enhancedLinkAttribution) {
      var protocol = 'https:' === document.location.protocol ? 'https:' : 'http:';
      var pluginUrl = protocol + '//www.google-analytics.com/plugins/ga/inpage_linkid.js';
      window._gaq.push(['_require', 'inpage_linkid', pluginUrl]);
    }
    if (type(options.siteSpeedSampleRate) === 'number') {
      window._gaq.push(['_setSiteSpeedSampleRate', options.siteSpeedSampleRate]);
    }
    if (options.anonymizeIp) {
      window._gaq.push(['_gat._anonymizeIp']);
    }
    if (options.ignoreReferrer) {
      window._gaq.push(['_addIgnoredRef', options.ignoreReferrer]);
    }
    if (options.initialPageview) {
      // Prefer the canonical URL's path when the page declares one.
      var path, canon = canonical();
      if (canon) path = url.parse(canon).pathname;
      this.pageview(path);
    }
    // URLs change if DoubleClick is on. Even though Google Analytics makes a
    // queue, the `_gat` object isn't available until the library loads.
    if (options.doubleClick) {
      load('//stats.g.doubleclick.net/dc.js', ready);
    } else {
      load({
        http : 'http://www.google-analytics.com/ga.js',
        https : 'https://ssl.google-analytics.com/ga.js'
      }, ready);
    }
  },
  initializeUniversal: function (options, ready) {
    // GA-universal lets you set your own queue name
    var global = this.global = 'ga';
    // and needs to know about this queue name in this special object
    // so that future plugins can also operate on the object
    window['GoogleAnalyticsObject'] = global;
    // setup the global variable
    window[global] = window[global] || function () {
      (window[global].q = window[global].q || []).push(arguments);
    };
    // GA also needs to know the current time (all from their snippet)
    window[global].l = 1 * new Date();
    var createOpts = {};
    // Apply a bunch of optional settings.
    if (options.domain)
      createOpts.cookieDomain = options.domain || 'none';
    if (type(options.siteSpeedSampleRate) === 'number')
      createOpts.siteSpeedSampleRate = options.siteSpeedSampleRate;
    // NOTE(review): the bare `ga(...)` calls below only work because the
    // queue name above is literally 'ga'; elsewhere this method uses
    // `window[global]`.
    if (options.anonymizeIp)
      ga('set', 'anonymizeIp', true);
    ga('create', options.trackingId, createOpts);
    if (options.initialPageview) {
      var path, canon = canonical();
      if (canon) path = url.parse(canon).pathname;
      this.pageview(path);
    }
    load('//www.google-analytics.com/analytics.js');
    // Google makes a queue so it's ready immediately.
    ready();
  },
  track : function (event, properties) {
    properties || (properties = {});
    var value;
    // Since value is a common property name, ensure it is a number and Google
    // requires that it be an integer.
    if (type(properties.value) === 'number') value = Math.round(properties.value);
    // Try to check for a `category` and `label`. A `category` is required,
    // so if it's not there we use `'All'` as a default. We can safely push
    // undefined if the special properties don't exist. Try using revenue
    // first, but fall back to a generic `value` as well.
    if (this.options.universalClient) {
      var opts = {};
      if (properties.noninteraction) opts.nonInteraction = properties.noninteraction;
      window[this.global](
        'send',
        'event',
        properties.category || 'All',
        event,
        properties.label,
        Math.round(properties.revenue) || value,
        opts
      );
    } else {
      window._gaq.push([
        '_trackEvent',
        properties.category || 'All',
        event,
        properties.label,
        Math.round(properties.revenue) || value,
        properties.noninteraction
      ]);
    }
  },
  pageview : function (url) {
    if (this.options.universalClient) {
      window[this.global]('send', 'pageview', url);
    } else {
      window._gaq.push(['_trackPageview', url]);
    }
  }
});
});
require.register("analytics/src/providers/gosquared.js", function(exports, require, module){
// http://www.gosquared.com/support
// https://www.gosquared.com/customer/portal/articles/612063-tracker-functions
var Provider = require('../provider')
  , user = require('../user')
  , load = require('load-script')
  , onBody = require('on-body');
module.exports = Provider.extend({
  name : 'GoSquared',
  key : 'siteToken',
  defaults : {
    siteToken : null
  },
  initialize : function (options, ready) {
    // GoSquared assumes a body in their script, so we need this wrapper.
    onBody(function () {
      var GoSquared = window.GoSquared = {};
      GoSquared.acct = options.siteToken;
      GoSquared.q = [];
      // Note: `=+` here is `= +(new Date())`, i.e. a numeric timestamp —
      // not `+=`.
      window._gstc_lt =+ (new Date());
      // NOTE(review): `initialize` sets `VisitorName` while `identify`
      // below sets `UserName` for the same value — one of the two property
      // names is likely wrong; confirm against GoSquared's tracker docs.
      GoSquared.VisitorName = user.id();
      GoSquared.Visitor = user.traits();
      load('//d1l6p2sc9645hc.cloudfront.net/tracker.js');
      // GoSquared makes a queue, so it's ready immediately.
      ready();
    });
  },
  identify : function (userId, traits) {
    // TODO figure out if this will actually work. Seems like GoSquared will
    // never know these values are updated.
    if (userId) window.GoSquared.UserName = userId;
    if (traits) window.GoSquared.Visitor = traits;
  },
  track : function (event, properties) {
    // GoSquared sets a `gs_evt_name` property with a value of the event
    // name, so it relies on properties being an object.
    window.GoSquared.q.push(['TrackEvent', event, properties || {}]);
  },
  pageview : function (url) {
    window.GoSquared.q.push(['TrackView', url]);
  }
});
});
require.register("analytics/src/providers/heap.js", function(exports, require, module){
// https://heapanalytics.com/docs
var Provider = require('../provider')
  , load = require('load-script');
module.exports = Provider.extend({
  name : 'Heap',
  key : 'apiKey',
  defaults : {
    apiKey : null
  },
  initialize : function (options, ready) {
    // Heap's official minified stub: creates `window.heap` with a `load`
    // method that injects their script and shims `identify`/`track` to
    // push onto the queue until the real library arrives.
    window.heap=window.heap||[];window.heap.load=function(a){window._heapid=a;var b=document.createElement("script");b.type="text/javascript",b.async=!0,b.src=("https:"===document.location.protocol?"https:":"http:")+"//d36lvucg9kzous.cloudfront.net";var c=document.getElementsByTagName("script")[0];c.parentNode.insertBefore(b,c);var d=function(a){return function(){heap.push([a].concat(Array.prototype.slice.call(arguments,0)))}},e=["identify","track"];for(var f=0;f<e.length;f++)heap[e[f]]=d(e[f])};
    window.heap.load(options.apiKey);
    // heap creates its own queue, so we're ready right away
    ready();
  },
  identify : function (userId, traits) {
    // Note: `userId` is dropped here — only the traits object is
    // forwarded to Heap.
    window.heap.identify(traits);
  },
  track : function (event, properties) {
    window.heap.track(event, properties);
  }
});
});
require.register("analytics/src/providers/hittail.js", function(exports, require, module){
// http://www.hittail.com
var Provider = require('../provider')
, load = require('load-script');
module.exports = Provider.extend({
name : 'HitTail',
key : 'siteId',
defaults : {
siteId : null
},
initialize : function (options, ready) {
load('//' + options.siteId + '.hittail.com/mlt.js', ready);
}
});
});
require.register("analytics/src/providers/hubspot.js", function(exports, require, module){
// http://hubspot.clarify-it.com/d/4m62hl
var Provider = require('../provider')
, isEmail = require('is-email')
, load = require('load-script');
module.exports = Provider.extend({
name : 'HubSpot',
key : 'portalId',
defaults : {
portalId : null
},
initialize : function (options, ready) {
// HubSpot checks in their snippet to make sure another script with
// `hs-analytics` isn't already in the DOM. Seems excessive, but who knows
// if there's weird deprecation going on :p
if (!document.getElementById('hs-analytics')) {
window._hsq = window._hsq || [];
var script = load('https://js.hubspot.com/analytics/' + (Math.ceil(new Date()/300000)*300000) + '/' + options.portalId + '.js');
script.id = 'hs-analytics';
}
// HubSpot makes a queue, so it's ready immediately.
ready();
},
// HubSpot does not use a userId, but the email address is required on
// the traits object.
identify : function (userId, traits) {
window._hsq.push(["identify", traits]);
},
// Event Tracking is available to HubSpot Enterprise customers only. In
// addition to adding any unique event name, you can also use the id of an
// existing custom event as the event variable.
track : function (event, properties) {
window._hsq.push(["trackEvent", event, properties]);
},
// HubSpot doesn't support passing in a custom URL.
pageview : function (url) {
window._hsq.push(['_trackPageview']);
}
});
});
require.register("analytics/src/providers/index.js", function(exports, require, module){
// Registry of every provider module bundled into this build, exported as a
// single array for the analytics core to consume.
module.exports = [
  require('./adroll'),
  require('./amplitude'),
  require('./bitdeli'),
  require('./bugherd'),
  require('./chartbeat'),
  require('./clicktale'),
  require('./clicky'),
  require('./comscore'),
  require('./crazyegg'),
  require('./customerio'),
  require('./errorception'),
  require('./foxmetrics'),
  require('./gauges'),
  require('./get-satisfaction'),
  require('./google-analytics'),
  require('./gosquared'),
  require('./heap'),
  require('./hittail'),
  require('./hubspot'),
  require('./improvely'),
  require('./intercom'),
  require('./keen-io'),
  require('./kissmetrics'),
  require('./klaviyo'),
  require('./livechat'),
  require('./lytics'),
  require('./mixpanel'),
  require('./olark'),
  require('./optimizely'),
  require('./perfect-audience'),
  require('./pingdom'),
  require('./preact'),
  require('./qualaroo'),
  require('./quantcast'),
  require('./sentry'),
  require('./snapengage'),
  require('./usercycle'),
  require('./userfox'),
  require('./uservoice'),
  require('./vero'),
  require('./visual-website-optimizer'),
  require('./woopra')
];
});
require.register("analytics/src/providers/improvely.js", function(exports, require, module){
// http://www.improvely.com/docs/landing-page-code
// http://www.improvely.com/docs/conversion-code
// http://www.improvely.com/docs/labeling-visitors
var Provider = require('../provider')
, alias = require('alias')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Improvely',
defaults : {
// Improvely requires two options: `domain` and `projectId`.
domain : null,
projectId : null
},
initialize : function (options, ready) {
window._improvely = window._improvely || [];
window.improvely = window.improvely || {
init : function (e, t) { window._improvely.push(["init", e, t]); },
goal : function (e) { window._improvely.push(["goal", e]); },
label : function (e) { window._improvely.push(["label", e]); }
};
load('//' + options.domain + '.iljmp.com/improvely.js');
window.improvely.init(options.domain, options.projectId);
// Improvely creates a queue, so it's ready immediately.
ready();
},
identify : function (userId, traits) {
if (userId) window.improvely.label(userId);
},
track : function (event, properties) {
// Improvely calls `revenue` `amount`, and puts the `event` in properties as
// the `type`.
properties || (properties = {});
properties.type = event;
alias(properties, { 'revenue' : 'amount' });
window.improvely.goal(properties);
}
});
});
require.register("analytics/src/providers/intercom.js", function(exports, require, module){
// http://docs.intercom.io/
// http://docs.intercom.io/#IntercomJS
var Provider = require('../provider')
  , extend = require('extend')
  , load = require('load-script')
  , isEmail = require('is-email');

// Intercom provider: boots Intercom on the first identify that carries a
// `userId`, then sends trait updates on subsequent identifies.
module.exports = Provider.extend({

  name : 'Intercom',

  // Whether Intercom has already been booted or not. Intercom becomes booted
  // after Intercom('boot', ...) has been called on the first identify.
  booted : false,

  key : 'appId',

  defaults : {
    // Intercom's required key.
    appId : null,
    // An optional setting to display the Intercom inbox widget.
    activator : null,
    // Whether to show the count of messages for the inbox widget.
    counter : true
  },

  initialize : function (options, ready) {
    load('https://static.intercomcdn.com/intercom.v1.js', ready);
  },

  // Boot or update Intercom with the user's id and traits. The traits object
  // is rewritten in place to match Intercom's expected field names.
  identify : function (userId, traits, options) {
    // Don't do anything if we just have traits the first time.
    if (!this.booted && !userId) return;

    // Intercom specific settings. BACKWARDS COMPATIBILITY: we need to check for
    // the lowercase variant as well.
    options || (options = {});
    var Intercom = options.Intercom || options.intercom || {};
    traits.increments = Intercom.increments;
    traits.user_hash = Intercom.userHash || Intercom.user_hash;

    // They need `created_at` as a Unix timestamp (seconds).
    if (traits.created) {
      traits.created_at = Math.floor(traits.created/1000);
      delete traits.created;
    }

    // Convert a `company`'s `created` date.
    if (traits.company && traits.company.created) {
      traits.company.created_at = Math.floor(traits.company.created/1000);
      delete traits.company.created;
    }

    // Optionally add the inbox widget.
    if (this.options.activator) {
      traits.widget = {
        activator : this.options.activator,
        use_counter : this.options.counter
      };
    }

    // If this is the first time we've identified, `boot` instead of `update`
    // and add our one-time boot settings.
    if (this.booted) {
      window.Intercom('update', traits);
    } else {
      extend(traits, {
        app_id : this.options.appId,
        user_id : userId
      });
      window.Intercom('boot', traits);
    }

    // Set the booted state, so that we know to call 'update' next time.
    this.booted = true;
  },

  // Intercom doesn't have a separate `group` method, but they take a
  // `companies` trait for the user.
  group : function (groupId, properties, options) {
    properties.id = groupId;
    window.Intercom('update', { company : properties });
  }

});
});
require.register("analytics/src/providers/keen-io.js", function(exports, require, module){
// https://keen.io/docs/
var Provider = require('../provider')
  , load = require('load-script');

module.exports = Provider.extend({

  name : 'Keen IO',

  defaults : {
    // The Project ID is **required**.
    projectId : null,
    // The Write Key is **required** to send events.
    writeKey : null,
    // The Read Key is optional, only if you want to "do analysis".
    readKey : null,
    // Whether or not to pass pageviews on to Keen IO.
    pageview : true,
    // Whether or not to track an initial pageview on `initialize`.
    initialPageview : true
  },

  initialize : function (options, ready) {
    // Verbatim Keen IO stub: queues `addEvent`/`onChartsReady` calls and
    // stores config/global-properties until the real library loads.
    window.Keen = window.Keen||{configure:function(e){this._cf=e},addEvent:function(e,t,n,i){this._eq=this._eq||[],this._eq.push([e,t,n,i])},setGlobalProperties:function(e){this._gp=e},onChartsReady:function(e){this._ocrq=this._ocrq||[],this._ocrq.push(e)}};
    window.Keen.configure({
      projectId : options.projectId,
      writeKey : options.writeKey,
      readKey : options.readKey
    });
    load('//dc8na2hxrj29i.cloudfront.net/code/keen-2.1.0-min.js');
    if (options.initialPageview) this.pageview();
    // Keen IO defines all their functions in the snippet, so they're ready.
    ready();
  },

  identify : function (userId, traits) {
    // Use Keen IO global properties to include `userId` and `traits` on
    // every event sent to Keen IO.
    var globalUserProps = {};
    if (userId) globalUserProps.userId = userId;
    if (traits) globalUserProps.traits = traits;
    if (userId || traits) {
      window.Keen.setGlobalProperties(function(eventCollection) {
        return { user: globalUserProps };
      });
    }
  },

  track : function (event, properties) {
    window.Keen.addEvent(event, properties);
  },

  // Pageviews are recorded as a regular 'Loaded a Page' event, and can be
  // disabled entirely via the `pageview` option.
  pageview : function (url) {
    if (!this.options.pageview) return;
    var properties = {
      url : url || document.location.href,
      name : document.title
    };
    this.track('Loaded a Page', properties);
  }

});
});
require.register("analytics/src/providers/kissmetrics.js", function(exports, require, module){
// http://support.kissmetrics.com/apis/javascript
var Provider = require('../provider')
, alias = require('alias')
, load = require('load-script');
module.exports = Provider.extend({
name : 'KISSmetrics',
key : 'apiKey',
defaults : {
apiKey : null
},
initialize : function (options, ready) {
window._kmq = window._kmq || [];
load('//i.kissmetrics.com/i.js');
load('//doug1izaerwt3.cloudfront.net/' + options.apiKey + '.1.js');
// KISSmetrics creates a queue, so it's ready immediately.
ready();
},
// KISSmetrics uses two separate methods: `identify` for storing the
// `userId`, and `set` for storing `traits`.
identify : function (userId, traits) {
if (userId) window._kmq.push(['identify', userId]);
if (traits) window._kmq.push(['set', traits]);
},
track : function (event, properties) {
// KISSmetrics handles revenue with the `'Billing Amount'` property by
// default, although it's changeable in the interface.
if (properties) {
alias(properties, {
'revenue' : 'Billing Amount'
});
}
window._kmq.push(['record', event, properties]);
},
// Although undocumented, KISSmetrics actually supports not passing a second
// ID, in which case it uses the currenty identified user's ID.
alias : function (newId, originalId) {
window._kmq.push(['alias', newId, originalId]);
}
});
});
require.register("analytics/src/providers/klaviyo.js", function(exports, require, module){
// https://www.klaviyo.com/docs
var Provider = require('../provider')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Klaviyo',
key : 'apiKey',
defaults : {
apiKey : null
},
initialize : function (options, ready) {
window._learnq = window._learnq || [];
window._learnq.push(['account', options.apiKey]);
load('//a.klaviyo.com/media/js/learnmarklet.js');
// Klaviyo creats a queue, so it's ready immediately.
ready();
},
identify : function (userId, traits) {
// Klaviyo requires a `userId` and takes the it on the traits object itself.
if (!userId) return;
traits.$id = userId;
window._learnq.push(['identify', traits]);
},
track : function (event, properties) {
window._learnq.push(['track', event, properties]);
}
});
});
require.register("analytics/src/providers/livechat.js", function(exports, require, module){
// http://www.livechatinc.com/api/javascript-api
var Provider = require('../provider')
, each = require('each')
, load = require('load-script');
module.exports = Provider.extend({
name : 'LiveChat',
key : 'license',
defaults : {
license : null
},
initialize : function (options, ready) {
window.__lc = { license : options.license };
load('//cdn.livechatinc.com/tracking.js', ready);
},
// LiveChat isn't an analytics service, but we can use the `userId` and
// `traits` to tag the user with their real name in the chat console.
identify : function (userId, traits) {
// In case the LiveChat library hasn't loaded yet.
if (!window.LC_API) return;
// LiveChat takes them in an array format.
var variables = [];
if (userId) variables.push({ name: 'User ID', value: userId });
if (traits) {
each(traits, function (key, value) {
variables.push({
name : key,
value : value
});
});
}
window.LC_API.set_custom_variables(variables);
}
});
});
require.register("analytics/src/providers/lytics.js", function(exports, require, module){
// Lytics
// --------
// [Documentation](http://developer.lytics.io/doc#jstag),
var Provider = require('../provider')
  , load = require('load-script');

module.exports = Provider.extend({

  name : 'Lytics',

  key : 'cid',

  defaults : {
    cid: null
  },

  initialize : function (options, ready) {
    // Lytics' `jstag` stub: buffers `send` calls on an internal queue until
    // the real library loads and replays them.
    window.jstag = (function () {
      var t={_q:[],_c:{cid:options.cid,url:'//c.lytics.io'},ts:(new Date()).getTime()};
      t.send=function(){
        this._q.push(["ready","send",Array.prototype.slice.call(arguments)]);
        return this;
      };
      return t;
    })();
    load('//c.lytics.io/static/io.min.js');
    // The stub queue exists, so we're ready immediately.
    ready();
  },

  identify: function (userId, traits) {
    // Lytics reads the user id off the payload as `_uid`.
    traits._uid = userId;
    window.jstag.send(traits);
  },

  track: function (event, properties) {
    // The event name rides along on the payload as `_e`.
    properties._e = event;
    window.jstag.send(properties);
  },

  pageview: function (url) {
    // An empty `send` records a plain pageview; the `url` argument is unused.
    window.jstag.send();
  }

});
});
require.register("analytics/src/providers/mixpanel.js", function(exports, require, module){
// https://mixpanel.com/docs/integration-libraries/javascript
// https://mixpanel.com/docs/people-analytics/javascript
// https://mixpanel.com/docs/integration-libraries/javascript-full-api
var Provider = require('../provider')
  , alias = require('alias')
  , isEmail = require('is-email')
  , load = require('load-script');

module.exports = Provider.extend({

  name : 'Mixpanel',

  key : 'token',

  defaults : {
    // Whether to call `mixpanel.nameTag` on `identify`.
    nameTag : true,
    // Whether to use Mixpanel's People API.
    people : false,
    // The Mixpanel API token for your account.
    token : null,
    // Whether to track pageviews to Mixpanel.
    pageview : false,
    // Whether to track an initial pageview on initialize.
    initialPageview : false
  },

  initialize : function (options, ready) {
    // Mixpanel's official stub snippet (lightly reformatted): builds a
    // queueing `window.mixpanel` object whose methods buffer calls until the
    // real library loads.
    (function (c, a) {
      window.mixpanel = a;
      var b, d, h, e;
      a._i = [];
      a.init = function (b, c, f) {
        // Creates a queueing stub for a (possibly dotted, e.g. `people.set`)
        // method name on the given queue object.
        function d(a, b) {
          var c = b.split('.');
          2 == c.length && (a = a[c[0]], b = c[1]);
          a[b] = function () {
            a.push([b].concat(Array.prototype.slice.call(arguments, 0)));
          };
        }
        var g = a;
        'undefined' !== typeof f ? g = a[f] = [] : f = 'mixpanel';
        g.people = g.people || [];
        h = ['disable', 'track', 'track_pageview', 'track_links', 'track_forms', 'register', 'register_once', 'unregister', 'identify', 'alias', 'name_tag', 'set_config', 'people.set', 'people.increment', 'people.track_charge', 'people.append'];
        for (e = 0; e < h.length; e++) d(g, h[e]);
        a._i.push([b, c, f]);
      };
      a.__SV = 1.2;
      // Modification to the snippet: call ready whenever the library has
      // fully loaded.
      load('//cdn.mxpnl.com/libs/mixpanel-2.2.min.js', ready);
    })(document, window.mixpanel || []);
    // Pass options directly to `init` as the second argument.
    window.mixpanel.init(options.token, options);
    if (options.initialPageview) this.pageview();
  },

  identify : function (userId, traits) {
    // Alias the traits' keys with dollar signs for Mixpanel's API.
    alias(traits, {
      'created' : '$created',
      'email' : '$email',
      'firstName' : '$first_name',
      'lastName' : '$last_name',
      'lastSeen' : '$last_seen',
      'name' : '$name',
      'username' : '$username',
      'phone' : '$phone'
    });
    // Finally, call all of the identify equivalents. Verify certain calls
    // against options to make sure they're enabled.
    if (userId) {
      window.mixpanel.identify(userId);
      if (this.options.nameTag) window.mixpanel.name_tag(traits && traits.$email || userId);
    }
    if (traits) {
      window.mixpanel.register(traits);
      if (this.options.people) window.mixpanel.people.set(traits);
    }
  },

  track : function (event, properties) {
    window.mixpanel.track(event, properties);
    // Mixpanel handles revenue with a `transaction` call in their People
    // feature. So if we're using people, record a transcation.
    if (properties && properties.revenue && this.options.people) {
      window.mixpanel.people.track_charge(properties.revenue);
    }
  },

  // Mixpanel doesn't actually track the pageviews, but they do show up in the
  // Mixpanel stream.
  pageview : function (url) {
    window.mixpanel.track_pageview(url);
    // If they don't want pageviews tracked, leave now.
    if (!this.options.pageview) return;
    var properties = {
      url : url || document.location.href,
      name : document.title
    };
    this.track('Loaded a Page', properties);
  },

  // Although undocumented, Mixpanel actually supports the `originalId`. It
  // just usually defaults to the current user's `distinct_id`.
  alias : function (newId, originalId) {
    // Skip if the user is already identified as `newId`.
    if(window.mixpanel.get_distinct_id &&
       window.mixpanel.get_distinct_id() === newId) return;
    // HACK: internal mixpanel API to ensure we don't overwrite.
    if(window.mixpanel.get_property &&
       window.mixpanel.get_property('$people_distinct_id') === newId) return;
    window.mixpanel.alias(newId, originalId);
  }

});
});
require.register("analytics/src/providers/olark.js", function(exports, require, module){
// http://www.olark.com/documentation
var Provider = require('../provider')
  , isEmail = require('is-email');

module.exports = Provider.extend({

  name : 'Olark',

  key : 'siteId',

  // Tracks whether the chat box is currently expanded; only then are track
  // and pageview notifications forwarded to the operator.
  chatting : false,

  defaults : {
    siteId : null,
    // Whether to use the user's name or email in the Olark chat console.
    identify : true,
    // Whether to log pageviews to the Olark chat console.
    track : false,
    // Whether to log pageviews to the Olark chat console.
    pageview : true
  },

  initialize : function (options, ready) {
    // Verbatim minified Olark loader snippet: defines `window.olark` and
    // injects their script via a hidden iframe.
    window.olark||(function(c){var f=window,d=document,l=f.location.protocol=="https:"?"https:":"http:",z=c.name,r="load";var nt=function(){f[z]=function(){(a.s=a.s||[]).push(arguments)};var a=f[z]._={},q=c.methods.length;while(q--){(function(n){f[z][n]=function(){f[z]("call",n,arguments)}})(c.methods[q])}a.l=c.loader;a.i=nt;a.p={0:+new Date};a.P=function(u){a.p[u]=new Date-a.p[0]};function s(){a.P(r);f[z](r)}f.addEventListener?f.addEventListener(r,s,false):f.attachEvent("on"+r,s);var ld=function(){function p(hd){hd="head";return["<",hd,"></",hd,"><",i,' onl' + 'oad="var d=',g,";d.getElementsByTagName('head')[0].",j,"(d.",h,"('script')).",k,"='",l,"//",a.l,"'",'"',"></",i,">"].join("")}var i="body",m=d[i];if(!m){return setTimeout(ld,100)}a.P(1);var j="appendChild",h="createElement",k="src",n=d[h]("div"),v=n[j](d[h](z)),b=d[h]("iframe"),g="document",e="domain",o;n.style.display="none";m.insertBefore(n,m.firstChild).id=z;b.frameBorder="0";b.id=z+"-loader";if(/MSIE[ ]+6/.test(navigator.userAgent)){b.src="javascript:false"}b.allowTransparency="true";v[j](b);try{b.contentWindow[g].open()}catch(w){c[e]=d[e];o="javascript:var d="+g+".open();d.domain='"+d.domain+"';";b[k]=o+"void(0);"}try{var t=b.contentWindow[g];t.write(p());t.close()}catch(x){b[k]=o+'d.write("'+p().replace(/"/g,String.fromCharCode(92)+'"')+'");d.close();'}a.P(2)};ld()};nt()})({loader: "static.olark.com/jsclient/loader0.js",name:"olark",methods:["configure","extend","declare","identify"]});
    window.olark.identify(options.siteId);
    // Set up event handlers for chat box open and close so that
    // we know whether a conversation is active. If it is active,
    // then we'll send track and pageview information.
    var self = this;
    window.olark('api.box.onExpand', function () { self.chatting = true; });
    window.olark('api.box.onShrink', function () { self.chatting = false; });
    // Olark creates it's method in the snippet, so it's ready immediately.
    ready();
  },

  // Update traits about the user in Olark to make the operator's life easier.
  identify : function (userId, traits) {
    if (!this.options.identify) return;
    var email = traits.email
      , name = traits.name || traits.firstName
      , phone = traits.phone
      , nickname = name || email || userId;
    // If we have a name and an email, add the email too to be more helpful.
    if (name && email) nickname += ' ('+email+')';
    // Call all of Olark's settings APIs.
    window.olark('api.visitor.updateCustomFields', traits);
    if (email) window.olark('api.visitor.updateEmailAddress', { emailAddress : email });
    if (name) window.olark('api.visitor.updateFullName', { fullName : name });
    if (phone) window.olark('api.visitor.updatePhoneNumber', { phoneNumber : phone });
    if (nickname) window.olark('api.chat.updateVisitorNickname', { snippet : nickname });
  },

  // Log events the user triggers to the chat console, if you so desire it.
  track : function (event, properties) {
    if (!this.options.track || !this.chatting) return;
    // To stay consistent with olark's default messages, it's all lowercase.
    window.olark('api.chat.sendNotificationToOperator', {
      body : 'visitor triggered "'+event+'"'
    });
  },

  // Mimic the functionality Olark has for normal pageviews with pseudo-
  // pageviews, telling the operator when a visitor changes pages.
  pageview : function (url) {
    if (!this.options.pageview || !this.chatting) return;
    // To stay consistent with olark's default messages, it's all lowercase.
    window.olark('api.chat.sendNotificationToOperator', {
      body : 'looking at ' + window.location.href
    });
  }

});
});
require.register("analytics/src/providers/optimizely.js", function(exports, require, module){
// https://www.optimizely.com/docs/api
var each = require('each')
, nextTick = require('next-tick')
, Provider = require('../provider');
module.exports = Provider.extend({
name : 'Optimizely',
defaults : {
// Whether to replay variations into other enabled integrations as traits.
variations : true
},
initialize : function (options, ready, analytics) {
// Create the `optimizely` object in case it doesn't exist already.
// https://www.optimizely.com/docs/api#function-calls
window.optimizely = window.optimizely || [];
// If the `variations` option is true, replay our variations on the next
// tick to wait for the entire library to be ready for replays.
if (options.variations) {
var self = this;
nextTick(function () { self.replay(); });
}
// Optimizely should be on the page already, so it's always ready.
ready();
},
track : function (event, properties) {
// Optimizely takes revenue as cents, not dollars.
if (properties && properties.revenue) properties.revenue = properties.revenue * 100;
window.optimizely.push(['trackEvent', event, properties]);
},
replay : function () {
// Make sure we have access to Optimizely's `data` dictionary.
var data = window.optimizely.data;
if (!data) return;
// Grab a few pieces of data we'll need for replaying.
var experiments = data.experiments
, variationNamesMap = data.state.variationNamesMap;
// Create our traits object to add variations to.
var traits = {};
// Loop through all the experiement the user has been assigned a variation
// for and add them to our traits.
each(variationNamesMap, function (experimentId, variation) {
traits['Experiment: ' + experiments[experimentId].name] = variation;
});
this.analytics.identify(traits);
}
});
});
require.register("analytics/src/providers/perfect-audience.js", function(exports, require, module){
// https://www.perfectaudience.com/docs#javascript_api_autoopen
var Provider = require('../provider')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Perfect Audience',
key : 'siteId',
defaults : {
siteId : null
},
initialize : function (options, ready) {
window._pa || (window._pa = {});
load('//tag.perfectaudience.com/serve/' + options.siteId + '.js', ready);
},
track : function (event, properties) {
window._pa.track(event, properties);
}
});
});
require.register("analytics/src/providers/pingdom.js", function(exports, require, module){
var date = require('load-date')
, Provider = require('../provider')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Pingdom',
key : 'id',
defaults : {
id : null
},
initialize : function (options, ready) {
window._prum = [
['id', options.id],
['mark', 'firstbyte', date.getTime()]
];
// We've replaced the original snippet loader with our own load method.
load('//rum-static.pingdom.net/prum.min.js', ready);
}
});
});
require.register("analytics/src/providers/preact.js", function(exports, require, module){
// http://www.preact.io/api/javascript
var Provider = require('../provider')
, isEmail = require('is-email')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Preact',
key : 'projectCode',
defaults : {
projectCode : null
},
initialize : function (options, ready) {
var _lnq = window._lnq = window._lnq || [];
_lnq.push(["_setCode", options.projectCode]);
load('//d2bbvl6dq48fa6.cloudfront.net/js/ln-2.4.min.js');
ready();
},
identify : function (userId, traits) {
// Don't do anything if we just have traits. Preact requires a `userId`.
if (!userId) return;
// Swap the `created` trait to the `created_at` that Preact needs
// and convert it from milliseconds to seconds.
if (traits.created) {
traits.created_at = Math.floor(traits.created/1000);
delete traits.created;
}
window._lnq.push(['_setPersonData', {
name : traits.name,
email : traits.email,
uid : userId,
properties : traits
}]);
},
group : function (groupId, properties) {
if (!groupId) return;
properties.id = groupId;
window._lnq.push(['_setAccount', properties]);
},
track : function (event, properties) {
properties || (properties = {});
// Preact takes a few special properties, and the rest in `extras`. So first
// convert and remove the special ones from `properties`.
var special = { name : event };
// They take `revenue` in cents.
if (properties.revenue) {
special.revenue = properties.revenue * 100;
delete properties.revenue;
}
if (properties.note) {
special.note = properties.note;
delete properties.note;
}
window._lnq.push(['_logEvent', special, properties]);
}
});
});
require.register("analytics/src/providers/qualaroo.js", function(exports, require, module){
// http://help.qualaroo.com/customer/portal/articles/731085-identify-survey-nudge-takers
// http://help.qualaroo.com/customer/portal/articles/731091-set-additional-user-properties
var Provider = require('../provider')
, isEmail = require('is-email')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Qualaroo',
defaults : {
// Qualaroo has two required options.
customerId : null,
siteToken : null,
// Whether to record traits when a user triggers an event. This can be
// useful for sending targetted questionnaries.
track : false
},
// Qualaroo's script has two options in its URL.
initialize : function (options, ready) {
window._kiq = window._kiq || [];
load('//s3.amazonaws.com/ki.js/' + options.customerId + '/' + options.siteToken + '.js');
// Qualaroo creates a queue, so it's ready immediately.
ready();
},
// Qualaroo uses two separate methods: `identify` for storing the `userId`,
// and `set` for storing `traits`.
identify : function (userId, traits) {
var identity = traits.email || userId;
if (identity) window._kiq.push(['identify', identity]);
if (traits) window._kiq.push(['set', traits]);
},
// Qualaroo doesn't have `track` method yet, but to allow the users to do
// targetted questionnaires we can set name-value pairs on the user properties
// that apply to the current visit.
track : function (event, properties) {
if (!this.options.track) return;
// Create a name-value pair that will be pretty unique. For an event like
// 'Loaded a Page' this will make it 'Triggered: Loaded a Page'.
var traits = {};
traits['Triggered: ' + event] = true;
// Fire a normal identify, with traits only.
this.identify(null, traits);
}
});
});
require.register("analytics/src/providers/quantcast.js", function(exports, require, module){
// https://www.quantcast.com/learning-center/guides/using-the-quantcast-asynchronous-tag/
var Provider = require('../provider')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Quantcast',
key : 'pCode',
defaults : {
pCode : null
},
initialize : function (options, ready) {
window._qevents = window._qevents || [];
window._qevents.push({ qacct: options.pCode });
load({
http : 'http://edge.quantserve.com/quant.js',
https : 'https://secure.quantserve.com/quant.js'
}, ready);
}
});
});
require.register("analytics/src/providers/sentry.js", function(exports, require, module){
// http://raven-js.readthedocs.org/en/latest/config/index.html
var Provider = require('../provider')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Sentry',
key : 'config',
defaults : {
config : null
},
initialize : function (options, ready) {
load('//d3nslu0hdya83q.cloudfront.net/dist/1.0/raven.min.js', function () {
// For now, Raven basically requires `install` to be called.
// https://github.com/getsentry/raven-js/blob/master/src/raven.js#L87
window.Raven.config(options.config).install();
ready();
});
},
identify : function (userId, traits) {
traits.id = userId;
window.Raven.setUser(traits);
},
// Raven will automatically use `captureMessage` if the error is a string.
log : function (error, properties) {
window.Raven.captureException(error, properties);
}
});
});
require.register("analytics/src/providers/snapengage.js", function(exports, require, module){
// http://help.snapengage.com/installation-guide-getting-started-in-a-snap/
var Provider = require('../provider')
, isEmail = require('is-email')
, load = require('load-script');
module.exports = Provider.extend({
name : 'SnapEngage',
key : 'apiKey',
defaults : {
apiKey : null
},
initialize : function (options, ready) {
load('//commondatastorage.googleapis.com/code.snapengage.com/js/' + options.apiKey + '.js', ready);
},
// Set the email in the chat window if we have it.
identify : function (userId, traits, options) {
if (!traits.email) return;
window.SnapABug.setUserEmail(traits.email);
}
});
});
require.register("analytics/src/providers/usercycle.js", function(exports, require, module){
// http://docs.usercycle.com/javascript_api
var Provider = require('../provider')
, load = require('load-script')
, user = require('../user');
module.exports = Provider.extend({
name : 'USERcycle',
key : 'key',
defaults : {
key : null
},
initialize : function (options, ready) {
window._uc = window._uc || [];
window._uc.push(['_key', options.key]);
load('//api.usercycle.com/javascripts/track.js');
// USERcycle makes a queue, so it's ready immediately.
ready();
},
identify : function (userId, traits) {
if (userId) window._uc.push(['uid', userId]);
// USERcycle has a special "hidden" event that is used just for retention measurement.
// Lukas suggested on 6/4/2013 that we send traits on that event, since they use the
// the latest value of every event property as a "trait"
window._uc.push(['action', 'came_back', traits]);
},
track : function (event, properties) {
window._uc.push(['action', event, properties]);
}
});
});
require.register("analytics/src/providers/userfox.js", function(exports, require, module){
// https://www.userfox.com/docs/
var Provider = require('../provider')
, extend = require('extend')
, load = require('load-script')
, isEmail = require('is-email');
module.exports = Provider.extend({
name : 'userfox',
key : 'clientId',
defaults : {
// userfox's required key.
clientId : null
},
initialize : function (options, ready) {
window._ufq = window._ufq || [];
load('//d2y71mjhnajxcg.cloudfront.net/js/userfox-stable.js');
// userfox creates its own queue, so we're ready right away.
ready();
},
identify : function (userId, traits) {
if (!traits.email) return;
// Initialize the library with the email now that we have it.
window._ufq.push(['init', {
clientId : this.options.clientId,
email : traits.email
}]);
// Record traits to "track" if we have the required signup date `created`.
// userfox takes `signup_date` as a string of seconds since the epoch.
if (traits.created) {
traits.signup_date = (traits.created.getTime() / 1000).toString();
delete traits.created;
window._ufq.push(['track', traits]);
}
}
});
});
require.register("analytics/src/providers/uservoice.js", function(exports, require, module){
// http://feedback.uservoice.com/knowledgebase/articles/225-how-do-i-pass-custom-data-through-the-widget-and-i
// UserVoice provider module (vendored analytics.js bundle — keep code in sync
// with upstream). Commands are queued on the global `window.UserVoice` array.
var Provider = require('../provider')
, load = require('load-script')
, alias = require('alias')
, clone = require('clone');
module.exports = Provider.extend({
name : 'UserVoice',
defaults : {
// These first two options are required.
widgetId : null,
forumId : null,
// Should we show the tab automatically?
showTab : true,
// There's tons of options for the tab.
mode : 'full',
primaryColor : '#cc6d00',
linkColor : '#007dbf',
defaultMode : 'support',
tabLabel : 'Feedback & Support',
tabColor : '#cc6d00',
tabPosition : 'middle-right',
tabInverted : false
},
// Load the per-widget UserVoice script and expose the classic widget.
initialize : function (options, ready) {
window.UserVoice = window.UserVoice || [];
load('//widget.uservoice.com/' + options.widgetId + '.js', ready);
// Clone before renaming keys below so the caller's options object is
// not mutated. UserVoice expects snake_case keys; ours are camelCase.
var optionsClone = clone(options);
alias(optionsClone, {
'forumId' : 'forum_id',
'primaryColor' : 'primary_color',
'linkColor' : 'link_color',
'defaultMode' : 'default_mode',
'tabLabel' : 'tab_label',
'tabColor' : 'tab_color',
'tabPosition' : 'tab_position',
'tabInverted' : 'tab_inverted'
});
// If we don't automatically show the tab, let them show it via
// javascript. This is the default name for the function in their snippet.
window.showClassicWidget = function (showWhat) {
window.UserVoice.push([showWhat || 'showLightbox', 'classic_widget', optionsClone]);
};
// If we *do* automatically show the tab, get on with it!
if (options.showTab) {
window.showClassicWidget('showTab');
}
},
// Forward traits to UserVoice as custom fields.
// NOTE(review): mutates the caller's `traits`, and there is no guard on a
// missing userId — `traits.id` may be set to undefined.
identify : function (userId, traits) {
// Pull the ID into traits.
traits.id = userId;
window.UserVoice.push(['setCustomFields', traits]);
}
});
});
require.register("analytics/src/providers/vero.js", function(exports, require, module){
// https://github.com/getvero/vero-api/blob/master/sections/js.md
var Provider = require('../provider')
, isEmail = require('is-email')
, load = require('load-script');
module.exports = Provider.extend({
name : 'Vero',
key : 'apiKey',
defaults : {
apiKey : null
},
initialize : function (options, ready) {
window._veroq = window._veroq || [];
window._veroq.push(['init', { api_key: options.apiKey }]);
load('//d3qxef4rp70elm.cloudfront.net/m.js');
// Vero creates a queue, so it's ready immediately.
ready();
},
identify : function (userId, traits) {
// Don't do anything if we just have traits, because Vero
// requires a `userId`.
if (!userId || !traits.email) return;
// Vero takes the `userId` as part of the traits object.
traits.id = userId;
window._veroq.push(['user', traits]);
},
track : function (event, properties) {
window._veroq.push(['track', event, properties]);
}
});
});
require.register("analytics/src/providers/visual-website-optimizer.js", function(exports, require, module){
// http://v2.visualwebsiteoptimizer.com/tools/get_tracking_code.php
// http://visualwebsiteoptimizer.com/knowledge/integration-of-vwo-with-kissmetrics/
// Visual Website Optimizer provider (vendored analytics.js bundle — keep code
// in sync with upstream). Unlike most providers this one loads no script of
// its own: it reads the experiment data that the separately-installed VWO
// snippet exposes on `window._vwo_exp` / `window._vwo_exp_ids`, and can
// replay that data into the other integrations as identify traits.
var each = require('each')
, inherit = require('inherit')
, nextTick = require('next-tick')
, Provider = require('../provider');
/**
 * Expose `VWO`.
 */
module.exports = VWO;
/**
 * `VWO` inherits from the generic `Provider`.
 */
function VWO () {
Provider.apply(this, arguments);
}
inherit(VWO, Provider);
/**
 * Name.
 */
VWO.prototype.name = 'Visual Website Optimizer';
/**
 * Default options.
 */
VWO.prototype.defaults = {
// Whether to replay variations into other integrations as traits.
replay : true
};
/**
 * Initialize. There is no script to load, so we are ready immediately.
 */
VWO.prototype.initialize = function (options, ready) {
if (options.replay) this.replay();
ready();
};
/**
 * Replay the experiments the user has seen as traits to all other integrations.
 * Wait for the next tick to replay so that the `analytics` object and all of
 * the integrations are fully initialized.
 */
VWO.prototype.replay = function () {
var analytics = this.analytics;
nextTick(function () {
experiments(function (err, traits) {
// `traits` is undefined when no experiment data is present; skip then.
if (traits) analytics.identify(traits);
});
});
};
/**
 * Get dictionary of experiment keys and variations. Results are delivered
 * asynchronously via `callback`; there is no return value. The work is
 * queued on `window._vis_opt_queue` via `enqueue` below.
 * http://visualwebsiteoptimizer.com/knowledge/integration-of-vwo-with-kissmetrics/
 *
 * @param {Function} callback Called with `err, experiments`.
 */
function experiments (callback) {
enqueue(function () {
var data = {};
var ids = window._vwo_exp_ids;
if (!ids) return callback();
each(ids, function (id) {
var name = variation(id);
// Trait keys look like "Experiment: <id>", values are variation names.
if (name) data['Experiment: ' + id] = name;
});
callback(null, data);
});
}
/**
 * Add a function to the VWO queue, creating one if it doesn't exist.
 *
 * @param {Function} fn Function to enqueue.
 */
function enqueue (fn) {
window._vis_opt_queue || (window._vis_opt_queue = []);
window._vis_opt_queue.push(fn);
}
/**
 * Get the chosen variation's name from an experiment `id`.
 * http://visualwebsiteoptimizer.com/knowledge/integration-of-vwo-with-kissmetrics/
 *
 * @param {String} id ID of the experiment to read.
 * @return {String} Variation name, or null when no experiment data exists or
 *   no variation has been chosen for this experiment.
 */
function variation (id) {
var experiments = window._vwo_exp;
if (!experiments) return null;
var experiment = experiments[id];
var variationId = experiment.combination_chosen;
return variationId ? experiment.comb_n[variationId] : null;
}
});
require.register("analytics/src/providers/woopra.js", function(exports, require, module){
// http://www.woopra.com/docs/setup/javascript-tracking/
// Woopra provider module (vendored analytics.js bundle — keep code in sync
// with upstream). Woopra hands us a tracker object through a global
// `window.woopraReady` callback rather than a command queue, so identify()
// and track() are no-ops until `window.woopraTracker` exists.
var Provider = require('../provider')
, each = require('each')
// NOTE(review): `extend` and `isEmail` are required but never used in this
// module body.
, extend = require('extend')
, isEmail = require('is-email')
, load = require('load-script')
, type = require('type')
, user = require('../user');
module.exports = Provider.extend({
name : 'Woopra',
// `key` names the single required option for this provider.
key : 'domain',
defaults : {
domain : null
},
initialize : function (options, ready) {
// Woopra gives us a nice ready callback.
var self = this;
// Assigned before the script is loaded; Woopra calls it with its
// tracker object.
window.woopraReady = function (tracker) {
tracker.setDomain(self.options.domain);
// Idle timeout is in milliseconds (300000 ms = 5 minutes).
tracker.setIdleTimeout(300000);
// Seed the tracker with any already-known user id/traits, then
// record the initial pageview.
var userId = user.id()
, traits = user.traits();
addTraits(userId, traits, tracker);
tracker.track();
ready();
return false;
};
load('//static.woopra.com/js/woopra.js');
},
identify : function (userId, traits) {
// We aren't guaranteed a tracker.
if (!window.woopraTracker) return;
addTraits(userId, traits, window.woopraTracker);
},
track : function (event, properties) {
// We aren't guaranteed a tracker.
if (!window.woopraTracker) return;
// Woopra takes its `event` as the `name` key.
// NOTE(review): mutates the caller's `properties` object in place.
properties || (properties = {});
properties.name = event;
window.woopraTracker.pushEvent(properties);
}
});
/**
 * Convenience function for updating the userId and traits.
 * NOTE(review): mutates `traits` (moves `userId` onto it as `id`) and only
 * forwards string-valued traits — non-string values are silently dropped.
 *
 * @param {String} userId The user's ID.
 * @param {Object} traits The user's traits.
 * @param {Tracker} tracker The Woopra tracker object.
 */
function addTraits (userId, traits, tracker) {
// Move a `userId` into `traits`.
if (userId) traits.id = userId;
each(traits, function (key, value) {
// Woopra seems to only support strings as trait values.
if ('string' === type(value)) tracker.addVisitorProperty(key, value);
});
}
});
require.alias("avetisk-defaults/index.js", "analytics/deps/defaults/index.js");
require.alias("avetisk-defaults/index.js", "defaults/index.js");
require.alias("component-clone/index.js", "analytics/deps/clone/index.js");
require.alias("component-clone/index.js", "clone/index.js");
require.alias("component-type/index.js", "component-clone/deps/type/index.js");
require.alias("component-cookie/index.js", "analytics/deps/cookie/index.js");
require.alias("component-cookie/index.js", "cookie/index.js");
require.alias("component-each/index.js", "analytics/deps/each/index.js");
require.alias("component-each/index.js", "each/index.js");
require.alias("component-type/index.js", "component-each/deps/type/index.js");
require.alias("component-event/index.js", "analytics/deps/event/index.js");
require.alias("component-event/index.js", "event/index.js");
require.alias("component-inherit/index.js", "analytics/deps/inherit/index.js");
require.alias("component-inherit/index.js", "inherit/index.js");
require.alias("component-object/index.js", "analytics/deps/object/index.js");
require.alias("component-object/index.js", "object/index.js");
require.alias("component-querystring/index.js", "analytics/deps/querystring/index.js");
require.alias("component-querystring/index.js", "querystring/index.js");
require.alias("component-trim/index.js", "component-querystring/deps/trim/index.js");
require.alias("component-type/index.js", "analytics/deps/type/index.js");
require.alias("component-type/index.js", "type/index.js");
require.alias("component-url/index.js", "analytics/deps/url/index.js");
require.alias("component-url/index.js", "url/index.js");
require.alias("segmentio-after/index.js", "analytics/deps/after/index.js");
require.alias("segmentio-after/index.js", "after/index.js");
require.alias("segmentio-alias/index.js", "analytics/deps/alias/index.js");
require.alias("segmentio-alias/index.js", "alias/index.js");
require.alias("segmentio-bind-all/index.js", "analytics/deps/bind-all/index.js");
require.alias("segmentio-bind-all/index.js", "analytics/deps/bind-all/index.js");
require.alias("segmentio-bind-all/index.js", "bind-all/index.js");
require.alias("component-bind/index.js", "segmentio-bind-all/deps/bind/index.js");
require.alias("component-type/index.js", "segmentio-bind-all/deps/type/index.js");
require.alias("segmentio-bind-all/index.js", "segmentio-bind-all/index.js");
require.alias("segmentio-canonical/index.js", "analytics/deps/canonical/index.js");
require.alias("segmentio-canonical/index.js", "canonical/index.js");
require.alias("segmentio-extend/index.js", "analytics/deps/extend/index.js");
require.alias("segmentio-extend/index.js", "extend/index.js");
require.alias("segmentio-is-email/index.js", "analytics/deps/is-email/index.js");
require.alias("segmentio-is-email/index.js", "is-email/index.js");
require.alias("segmentio-is-meta/index.js", "analytics/deps/is-meta/index.js");
require.alias("segmentio-is-meta/index.js", "is-meta/index.js");
require.alias("segmentio-json/index.js", "analytics/deps/json/index.js");
require.alias("segmentio-json/index.js", "json/index.js");
require.alias("component-json-fallback/index.js", "segmentio-json/deps/json-fallback/index.js");
require.alias("segmentio-load-date/index.js", "analytics/deps/load-date/index.js");
require.alias("segmentio-load-date/index.js", "load-date/index.js");
require.alias("segmentio-load-script/index.js", "analytics/deps/load-script/index.js");
require.alias("segmentio-load-script/index.js", "load-script/index.js");
require.alias("component-type/index.js", "segmentio-load-script/deps/type/index.js");
require.alias("segmentio-new-date/index.js", "analytics/deps/new-date/index.js");
require.alias("segmentio-new-date/index.js", "new-date/index.js");
require.alias("component-type/index.js", "segmentio-new-date/deps/type/index.js");
require.alias("segmentio-on-body/index.js", "analytics/deps/on-body/index.js");
require.alias("segmentio-on-body/index.js", "on-body/index.js");
require.alias("component-each/index.js", "segmentio-on-body/deps/each/index.js");
require.alias("component-type/index.js", "component-each/deps/type/index.js");
require.alias("segmentio-store.js/store.js", "analytics/deps/store/store.js");
require.alias("segmentio-store.js/store.js", "analytics/deps/store/index.js");
require.alias("segmentio-store.js/store.js", "store/index.js");
require.alias("segmentio-json/index.js", "segmentio-store.js/deps/json/index.js");
require.alias("component-json-fallback/index.js", "segmentio-json/deps/json-fallback/index.js");
require.alias("segmentio-store.js/store.js", "segmentio-store.js/index.js");
require.alias("segmentio-top-domain/index.js", "analytics/deps/top-domain/index.js");
require.alias("segmentio-top-domain/index.js", "analytics/deps/top-domain/index.js");
require.alias("segmentio-top-domain/index.js", "top-domain/index.js");
require.alias("component-url/index.js", "segmentio-top-domain/deps/url/index.js");
require.alias("segmentio-top-domain/index.js", "segmentio-top-domain/index.js");
require.alias("timoxley-next-tick/index.js", "analytics/deps/next-tick/index.js");
require.alias("timoxley-next-tick/index.js", "next-tick/index.js");
require.alias("yields-prevent/index.js", "analytics/deps/prevent/index.js");
require.alias("yields-prevent/index.js", "prevent/index.js");
require.alias("analytics/src/index.js", "analytics/index.js");
if (typeof exports == "object") {
module.exports = require("analytics");
} else if (typeof define == "function" && define.amd) {
define(function(){ return require("analytics"); });
} else {
this["analytics"] = require("analytics");
}})();

File diff suppressed because one or more lines are too long

View File

@@ -26,6 +26,8 @@
<script type="text/javascript" src="<%= common_js_root %>/vendor/tiny_mce/tiny_mce.js"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/mathjax-MathJax-c9db6ac/MathJax.js?config=default"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/jquery.timeago.js"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/sinon-1.7.1.js"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/analytics.js"></script>
<script type="text/javascript">
AjaxPrefix.addAjaxPrefix(jQuery, function() {
return "";

View File

@@ -1,5 +1,5 @@
#pylint: disable=C0111
#pylint: disable=W0621
# pylint: disable=C0111
# pylint: disable=W0621
from __future__ import absolute_import

View File

@@ -112,12 +112,12 @@ def assert_problem_has_answer(step, problem_type, answer_class):
@step(u'I reset the problem')
def reset_problem(step):
def reset_problem(_step):
world.css_click('input.reset')
@step(u'I press the button with the label "([^"]*)"$')
def press_the_button_with_label(step, buttonname):
def press_the_button_with_label(_step, buttonname):
button_css = 'button span.show-label'
elem = world.css_find(button_css).first
assert_equal(elem.text, buttonname)
@@ -125,7 +125,7 @@ def press_the_button_with_label(step, buttonname):
@step(u'The "([^"]*)" button does( not)? appear')
def action_button_present(step, buttonname, doesnt_appear):
def action_button_present(_step, buttonname, doesnt_appear):
button_css = 'section.action input[value*="%s"]' % buttonname
if doesnt_appear:
assert world.is_css_not_present(button_css)

View File

@@ -1,5 +1,5 @@
#pylint: disable=C0111
#pylint: disable=W0621
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from lettuce.django import django_url
@@ -7,7 +7,7 @@ from common import TEST_COURSE_ORG, TEST_COURSE_NAME
@step('I register for the course "([^"]*)"$')
def i_register_for_the_course(step, course):
def i_register_for_the_course(_step, course):
cleaned_name = TEST_COURSE_NAME.replace(' ', '_')
url = django_url('courses/%s/%s/%s/about' % (TEST_COURSE_ORG, course, cleaned_name))
world.browser.visit(url)
@@ -20,13 +20,13 @@ def i_register_for_the_course(step, course):
@step(u'I should see an empty dashboard message')
def i_should_see_empty_dashboard(step):
def i_should_see_empty_dashboard(_step):
empty_dash_css = 'section.empty-dashboard-message'
assert world.is_css_present(empty_dash_css)
@step(u'I should( NOT)? see the course numbered "([^"]*)" in my dashboard$')
def i_should_see_that_course_in_my_dashboard(step, doesnt_appear, course):
def i_should_see_that_course_in_my_dashboard(_step, doesnt_appear, course):
course_link_css = 'section.my-courses a[href*="%s"]' % course
if doesnt_appear:
assert world.is_css_not_present(course_link_css)
@@ -35,7 +35,7 @@ def i_should_see_that_course_in_my_dashboard(step, doesnt_appear, course):
@step(u'I unregister for the course numbered "([^"]*)"')
def i_unregister_for_that_course(step, course):
def i_unregister_for_that_course(_step, course):
unregister_css = 'section.info a[href*="#unenroll-modal"][data-course-number*="%s"]' % course
world.css_click(unregister_css)
button_css = 'section#unenroll-modal input[value="Unregister"]'

View File

@@ -8,12 +8,12 @@ from common import TEST_COURSE_NAME, TEST_SECTION_NAME, i_am_registered_for_the_
@step('when I view the video it has autoplay enabled')
def does_autoplay(step):
def does_autoplay(_step):
assert(world.css_find('.video')[0]['data-autoplay'] == 'True')
@step('the course has a Video component')
def view_video(step):
def view_video(_step):
coursename = TEST_COURSE_NAME.replace(' ', '_')
i_am_registered_for_the_course(step, coursename)

View File

@@ -4,9 +4,9 @@ WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the mitx dir
1. Go to the edx-platform dir
2. ./manage.py schemamigration courseware --auto description_of_your_change
3. Add the migration file created in mitx/courseware/migrations/
3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
@@ -17,6 +17,7 @@ from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class StudentModule(models.Model):
"""
Keeps student state for a particular module in a particular course.

View File

@@ -121,7 +121,7 @@ def toc_for_course(user, request, course, active_chapter, active_section, model_
def get_module(user, request, location, model_data_cache, course_id,
position=None, not_found_ok = False, wrap_xmodule_display=True,
position=None, not_found_ok=False, wrap_xmodule_display=True,
grade_bucket_type=None, depth=0):
"""
Get an instance of the xmodule class identified by location,
@@ -161,16 +161,49 @@ def get_module(user, request, location, model_data_cache, course_id,
return None
def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
position=None, wrap_xmodule_display=True, grade_bucket_type=None):
"""
Actually implement get_module. See docstring there for details.
def get_xqueue_callback_url_prefix(request):
"""
Calculates default prefix based on request, but allows override via settings
This is separated from get_module_for_descriptor so that it can be called
by the LMS before submitting background tasks to run. The xqueue callbacks
should go back to the LMS, not to the worker.
"""
prefix = '{proto}://{host}'.format(
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),
host=request.get_host()
)
return settings.XQUEUE_INTERFACE.get('callback_url', prefix)
def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
position=None, wrap_xmodule_display=True, grade_bucket_type=None):
"""
Implements get_module, extracting out the request-specific functionality.
See get_module() docstring for further details.
"""
# allow course staff to masquerade as student
if has_access(user, descriptor, 'staff', course_id):
setup_masquerade(request, True)
track_function = make_track_function(request)
xqueue_callback_url_prefix = get_xqueue_callback_url_prefix(request)
return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, xqueue_callback_url_prefix,
position, wrap_xmodule_display, grade_bucket_type)
def get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, xqueue_callback_url_prefix,
position=None, wrap_xmodule_display=True, grade_bucket_type=None):
"""
Actually implement get_module, without requiring a request.
See get_module() docstring for further details.
"""
# Short circuit--if the user shouldn't have access, bail without doing any work
if not has_access(user, descriptor, 'load', course_id):
return None
@@ -186,19 +219,13 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
def make_xqueue_callback(dispatch='score_update'):
# Fully qualified callback URL for external queueing system
xqueue_callback_url = '{proto}://{host}'.format(
host=request.get_host(),
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http')
)
xqueue_callback_url = settings.XQUEUE_INTERFACE.get('callback_url',xqueue_callback_url) # allow override
xqueue_callback_url += reverse('xqueue_callback',
kwargs=dict(course_id=course_id,
userid=str(user.id),
id=descriptor.location.url(),
dispatch=dispatch),
)
return xqueue_callback_url
relative_xqueue_callback_url = reverse('xqueue_callback',
kwargs=dict(course_id=course_id,
userid=str(user.id),
id=descriptor.location.url(),
dispatch=dispatch),
)
return xqueue_callback_url_prefix + relative_xqueue_callback_url
# Default queuename is course-specific and is derived from the course that
# contains the current module.
@@ -211,20 +238,20 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
}
#This is a hacky way to pass settings to the combined open ended xmodule
#It needs an S3 interface to upload images to S3
#It needs the open ended grading interface in order to get peer grading to be done
#this first checks to see if the descriptor is the correct one, and only sends settings if it is
# This is a hacky way to pass settings to the combined open ended xmodule
# It needs an S3 interface to upload images to S3
# It needs the open ended grading interface in order to get peer grading to be done
# this first checks to see if the descriptor is the correct one, and only sends settings if it is
#Get descriptor metadata fields indicating needs for various settings
# Get descriptor metadata fields indicating needs for various settings
needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False)
needs_s3_interface = getattr(descriptor, "needs_s3_interface", False)
#Initialize interfaces to None
# Initialize interfaces to None
open_ended_grading_interface = None
s3_interface = None
#Create interfaces if needed
# Create interfaces if needed
if needs_open_ended_interface:
open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE
open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING
@@ -238,10 +265,15 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
def inner_get_module(descriptor):
"""
Delegate to get_module. It does an access check, so may return None
Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
Because it does an access check, it may return None.
"""
return get_module_for_descriptor(user, request, descriptor,
model_data_cache, course_id, position)
# TODO: fix this so that make_xqueue_callback uses the descriptor passed into
# inner_get_module, not the parent's callback. Add it as an argument....
return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, make_xqueue_callback,
position, wrap_xmodule_display, grade_bucket_type)
def xblock_model_data(descriptor):
return DbModel(
@@ -266,7 +298,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
student_module.max_grade = event.get('max_value')
student_module.save()
#Bin score into range and increment stats
# Bin score into range and increment stats
score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)
org, course_num, run = course_id.split("/")
@@ -291,7 +323,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
# TODO (cpennington): When modules are shared between courses, the static
# prefix is going to have to be specific to the module, not the directory
# that the xml was loaded from
system = ModuleSystem(track_function=make_track_function(request),
system = ModuleSystem(track_function=track_function,
render_template=render_to_string,
ajax_url=ajax_url,
xqueue=xqueue,
@@ -440,13 +472,13 @@ def modx_dispatch(request, dispatch, location, course_id):
inputfiles = request.FILES.getlist(fileinput_id)
if len(inputfiles) > settings.MAX_FILEUPLOADS_PER_INPUT:
too_many_files_msg = 'Submission aborted! Maximum %d files may be submitted at once' %\
too_many_files_msg = 'Submission aborted! Maximum %d files may be submitted at once' % \
settings.MAX_FILEUPLOADS_PER_INPUT
return HttpResponse(json.dumps({'success': too_many_files_msg}))
for inputfile in inputfiles:
if inputfile.size > settings.STUDENT_FILEUPLOAD_MAX_SIZE: # Bytes
file_too_big_msg = 'Submission aborted! Your file "%s" is too large (max size: %d MB)' %\
if inputfile.size > settings.STUDENT_FILEUPLOAD_MAX_SIZE: # Bytes
file_too_big_msg = 'Submission aborted! Your file "%s" is too large (max size: %d MB)' % \
(inputfile.name, settings.STUDENT_FILEUPLOAD_MAX_SIZE / (1000 ** 2))
return HttpResponse(json.dumps({'success': file_too_big_msg}))
p[fileinput_id] = inputfiles

View File

@@ -11,21 +11,22 @@ from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class ProgressTestCase(TestCase):
def setUp(self):
def setUp(self):
self.mockuser1 = MagicMock()
self.mockuser0 = MagicMock()
self.course = MagicMock()
self.mockuser1.is_authenticated.return_value = True
self.mockuser0.is_authenticated.return_value = False
self.course.id = 'edX/full/6.002_Spring_2012'
self.tab = {'name': 'same'}
self.active_page1 = 'progress'
self.active_page0 = 'stagnation'
self.mockuser1 = MagicMock()
self.mockuser0 = MagicMock()
self.course = MagicMock()
self.mockuser1.is_authenticated.return_value = True
self.mockuser0.is_authenticated.return_value = False
self.course.id = 'edX/full/6.002_Spring_2012'
self.tab = {'name': 'same'}
self.active_page1 = 'progress'
self.active_page0 = 'stagnation'
def test_progress(self):
def test_progress(self):
self.assertEqual(tabs._progress(self.tab, self.mockuser0, self.course,
self.active_page0), [])
@@ -34,8 +35,8 @@ class ProgressTestCase(TestCase):
self.active_page1)[0].name, 'same')
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
self.active_page1)[0].link,
reverse('progress', args = [self.course.id]))
self.active_page1)[0].link,
reverse('progress', args=[self.course.id]))
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
self.active_page0)[0].is_active, False)
@@ -63,15 +64,15 @@ class WikiTestCase(TestCase):
'same')
self.assertEqual(tabs._wiki(self.tab, self.user,
self.course, self.active_page1)[0].link,
self.course, self.active_page1)[0].link,
reverse('course_wiki', args=[self.course.id]))
self.assertEqual(tabs._wiki(self.tab, self.user,
self.course, self.active_page1)[0].is_active,
self.course, self.active_page1)[0].is_active,
True)
self.assertEqual(tabs._wiki(self.tab, self.user,
self.course, self.active_page0)[0].is_active,
self.course, self.active_page0)[0].is_active,
False)
@override_settings(WIKI_ENABLED=False)
@@ -129,14 +130,13 @@ class StaticTabTestCase(TestCase):
self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page1)[0].link,
reverse('static_tab', args = [self.course.id,
self.tabby['url_slug']]))
reverse('static_tab', args=[self.course.id,
self.tabby['url_slug']]))
self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page1)[0].is_active,
True)
self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page0)[0].is_active,
False)
@@ -183,7 +183,7 @@ class TextbooksTestCase(TestCase):
self.assertEqual(tabs._textbooks(self.tab, self.mockuser1,
self.course, self.active_page1)[1].name,
'Topology')
'Topology')
self.assertEqual(tabs._textbooks(self.tab, self.mockuser1,
self.course, self.active_page1)[1].link,
@@ -206,6 +206,7 @@ class TextbooksTestCase(TestCase):
self.assertEqual(tabs._textbooks(self.tab, self.mockuser0,
self.course, self.active_pageX), [])
class KeyCheckerTestCase(TestCase):
def setUp(self):
@@ -223,39 +224,36 @@ class KeyCheckerTestCase(TestCase):
class NullValidatorTestCase(TestCase):
def setUp(self):
def setUp(self):
self.d = {}
self.dummy = {}
def test_null_validator(self):
self.assertIsNone(tabs.null_validator(self.d))
def test_null_validator(self):
self.assertIsNone(tabs.null_validator(self.dummy))
class ValidateTabsTestCase(TestCase):
def setUp(self):
self.courses = [MagicMock() for i in range(0,5)]
self.courses = [MagicMock() for i in range(0, 5)]
self.courses[0].tabs = None
self.courses[1].tabs = [{'type':'courseware'}, {'type': 'fax'}]
self.courses[1].tabs = [{'type': 'courseware'}, {'type': 'fax'}]
self.courses[2].tabs = [{'type':'shadow'}, {'type': 'course_info'}]
self.courses[2].tabs = [{'type': 'shadow'}, {'type': 'course_info'}]
self.courses[3].tabs = [{'type':'courseware'},{'type':'course_info', 'name': 'alice'},
{'type': 'wiki', 'name':'alice'}, {'type':'discussion', 'name': 'alice'},
{'type':'external_link', 'name': 'alice', 'link':'blink'},
{'type':'textbooks'}, {'type':'progress', 'name': 'alice'},
{'type':'static_tab', 'name':'alice', 'url_slug':'schlug'},
{'type': 'staff_grading'}]
self.courses[4].tabs = [{'type':'courseware'},{'type': 'course_info'}, {'type': 'flying'}]
self.courses[3].tabs = [{'type': 'courseware'}, {'type': 'course_info', 'name': 'alice'},
{'type': 'wiki', 'name': 'alice'}, {'type': 'discussion', 'name': 'alice'},
{'type': 'external_link', 'name': 'alice', 'link': 'blink'},
{'type': 'textbooks'}, {'type': 'progress', 'name': 'alice'},
{'type': 'static_tab', 'name': 'alice', 'url_slug': 'schlug'},
{'type': 'staff_grading'}]
self.courses[4].tabs = [{'type': 'courseware'}, {'type': 'course_info'}, {'type': 'flying'}]
def test_validate_tabs(self):
self.assertIsNone(tabs.validate_tabs(self.courses[0]))
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[1])
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[2])
@@ -268,15 +266,15 @@ class DiscussionLinkTestCase(ModuleStoreTestCase):
def setUp(self):
self.tabs_with_discussion = [
{'type':'courseware'},
{'type':'course_info'},
{'type':'discussion'},
{'type':'textbooks'},
{'type': 'courseware'},
{'type': 'course_info'},
{'type': 'discussion'},
{'type': 'textbooks'},
]
self.tabs_without_discussion = [
{'type':'courseware'},
{'type':'course_info'},
{'type':'textbooks'},
{'type': 'courseware'},
{'type': 'course_info'},
{'type': 'textbooks'},
]
@staticmethod

View File

@@ -10,7 +10,6 @@ import os
import re
import requests
from requests.status_codes import codes
import urllib
from collections import OrderedDict
from StringIO import StringIO
@@ -20,8 +19,10 @@ from django.contrib.auth.models import User, Group
from django.http import HttpResponse
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from mitxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
import xmodule.graders as xmgraders
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware import grades
from courseware.access import (has_access, get_access_group_name,
@@ -33,13 +34,18 @@ from django_comment_common.models import (Role,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA)
from django_comment_client.utils import has_forum_access
from instructor.offline_gradecalc import student_grades, offline_grades_available
from instructor_task.api import (get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students)
from instructor_task.views import get_task_completion_info
from mitxmako.shortcuts import render_to_response
from psychometrics import psychoanalyze
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from xmodule.modulestore.django import modulestore
import xmodule.graders as xmgraders
import track.views
from .offline_gradecalc import student_grades, offline_grades_available
log = logging.getLogger(__name__)
@@ -68,6 +74,7 @@ def instructor_dashboard(request, course_id):
msg = ''
problems = []
plots = []
datatable = {}
# the instructor dashboard page is modal: grades, psychometrics, admin
# keep that state in request.session (defaults to grades mode)
@@ -78,26 +85,29 @@ def instructor_dashboard(request, course_id):
idash_mode = request.session.get('idash_mode', 'Grades')
# assemble some course statistics for output to instructor
datatable = {'header': ['Statistic', 'Value'],
'title': 'Course Statistics At A Glance',
}
data = [['# Enrolled', CourseEnrollment.objects.filter(course_id=course_id).count()]]
data += compute_course_stats(course).items()
if request.user.is_staff:
for field in course.fields:
if getattr(field.scope, 'user', False):
continue
data.append([field.name, json.dumps(field.read_json(course))])
for namespace in course.namespaces:
for field in getattr(course, namespace).fields:
def get_course_stats_table():
datatable = {'header': ['Statistic', 'Value'],
'title': 'Course Statistics At A Glance',
}
data = [['# Enrolled', CourseEnrollment.objects.filter(course_id=course_id).count()]]
data += compute_course_stats(course).items()
if request.user.is_staff:
for field in course.fields:
if getattr(field.scope, 'user', False):
continue
data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))])
datatable['data'] = data
data.append([field.name, json.dumps(field.read_json(course))])
for namespace in course.namespaces:
for field in getattr(course, namespace).fields:
if getattr(field.scope, 'user', False):
continue
data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))])
datatable['data'] = data
return datatable
def return_csv(fn, datatable, fp=None):
"""Outputs a CSV file from the contents of a datatable."""
if fp is None:
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(fn)
@@ -111,12 +121,15 @@ def instructor_dashboard(request, course_id):
return response
def get_staff_group(course):
    """Return the course's 'staff' access group, creating it if necessary."""
    role = 'staff'
    return get_group(course, role)
def get_instructor_group(course):
    """Return the course's 'instructor' access group, creating it if necessary."""
    role = 'instructor'
    return get_group(course, role)
def get_group(course, groupname):
"""Get or create an access group"""
grpname = get_access_group_name(course, groupname)
try:
group = Group.objects.get(name=grpname)
@@ -136,6 +149,39 @@ def instructor_dashboard(request, course_id):
(group, _) = Group.objects.get_or_create(name=name)
return group
def get_module_url(urlname):
    """
    Build the full i4x URL for a module in the current course from its urlname.

    Accepts either "urlname" or "modulename/urlname"; when no modulename is
    given, "problem" is assumed.  A trailing ".xml" suffix is tolerated and
    stripped.
    """
    # strip an optional XML suffix
    if urlname.endswith(".xml"):
        urlname = urlname[:-4]
    # apply the default module name when none was supplied
    if "/" not in urlname:
        urlname = "problem/" + urlname
    # fill in the course-specific parts from the enclosing course_id
    org, course_name, _ = course_id.split("/")
    return "i4x://{0}/{1}/{2}".format(org, course_name, urlname)
def get_student_from_identifier(unique_student_identifier):
    """
    Look up a student User by email address or username.

    An identifier containing "@" is treated as an email address; anything
    else as a username.  Returns (message, student), where student is None
    and message holds an error string when no match is found.
    """
    message = ""
    field = 'email' if "@" in unique_student_identifier else 'username'
    try:
        student = User.objects.get(**{field: unique_student_identifier})
        message += "Found a single student. "
    except User.DoesNotExist:
        student = None
        message += "<font color='red'>Couldn't find student with that email or username. </font>"
    return message, student
# process actions from form POST
action = request.POST.get('action', '')
use_offline = request.POST.get('use_offline_grades', False)
@@ -205,88 +251,138 @@ def instructor_dashboard(request, course_id):
track.views.server_track(request, action, {}, page='idashboard')
msg += dump_grading_context(course)
elif "Reset student's attempts" in action or "Delete student state for problem" in action:
elif "Rescore ALL students' problem submissions" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname)
try:
instructor_task = submit_rescore_problem_for_all_students(request, course_id, problem_url)
if instructor_task is None:
msg += '<font color="red">Failed to create a background task for rescoring "{0}".</font>'.format(problem_url)
else:
track_msg = 'rescore problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except ItemNotFoundError as e:
msg += '<font color="red">Failed to create a background task for rescoring "{0}": problem not found.</font>'.format(problem_url)
except Exception as e:
log.error("Encountered exception from rescore: {0}".format(e))
msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(problem_url, e.message)
elif "Reset ALL students' attempts" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname)
try:
instructor_task = submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
if instructor_task is None:
msg += '<font color="red">Failed to create a background task for resetting "{0}".</font>'.format(problem_url)
else:
track_msg = 'reset problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except ItemNotFoundError as e:
log.error('Failure to reset: unknown problem "{0}"'.format(e))
msg += '<font color="red">Failed to create a background task for resetting "{0}": problem not found.</font>'.format(problem_url)
except Exception as e:
log.error("Encountered exception from reset: {0}".format(e))
msg += '<font color="red">Failed to create a background task for resetting "{0}": {1}.</font>'.format(problem_url, e.message)
elif "Show Background Task History for Student" in action:
# put this before the non-student case, since the use of "in" will cause this to be missed
unique_student_identifier = request.POST.get('unique_student_identifier', '')
message, student = get_student_from_identifier(unique_student_identifier)
if student is None:
msg += message
else:
problem_urlname = request.POST.get('problem_for_student', '')
problem_url = get_module_url(problem_urlname)
message, datatable = get_background_task_table(course_id, problem_url, student)
msg += message
elif "Show Background Task History" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname)
message, datatable = get_background_task_table(course_id, problem_url)
msg += message
elif ("Reset student's attempts" in action or
"Delete student state for module" in action or
"Rescore student's problem submission" in action):
# get the form data
unique_student_identifier = request.POST.get('unique_student_identifier', '')
problem_to_reset = request.POST.get('problem_to_reset', '')
if problem_to_reset[-4:] == ".xml":
problem_to_reset = problem_to_reset[:-4]
problem_urlname = request.POST.get('problem_for_student', '')
module_state_key = get_module_url(problem_urlname)
# try to uniquely id student by email address or username
try:
if "@" in unique_student_identifier:
student_to_reset = User.objects.get(email=unique_student_identifier)
else:
student_to_reset = User.objects.get(username=unique_student_identifier)
msg += "Found a single student to reset. "
except:
student_to_reset = None
msg += "<font color='red'>Couldn't find student with that email or username. </font>"
if student_to_reset is not None:
message, student = get_student_from_identifier(unique_student_identifier)
msg += message
student_module = None
if student is not None:
# find the module in question
if '/' not in problem_to_reset: # allow state of modules other than problem to be reset
problem_to_reset = "problem/" + problem_to_reset # but problem is the default
try:
(org, course_name, _) = course_id.split("/")
module_state_key = "i4x://" + org + "/" + course_name + "/" + problem_to_reset
module_to_reset = StudentModule.objects.get(student_id=student_to_reset.id,
course_id=course_id,
module_state_key=module_state_key)
msg += "Found module to reset. "
except Exception:
student_module = StudentModule.objects.get(student_id=student.id,
course_id=course_id,
module_state_key=module_state_key)
msg += "Found module. "
except StudentModule.DoesNotExist:
msg += "<font color='red'>Couldn't find module with that urlname. </font>"
if "Delete student state for problem" in action:
# delete the state
try:
module_to_reset.delete()
msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key
except:
msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_to_reset)
else:
# modify the problem's state
try:
# load the state json
problem_state = json.loads(module_to_reset.state)
old_number_of_attempts = problem_state["attempts"]
problem_state["attempts"] = 0
if student_module is not None:
if "Delete student state for module" in action:
# delete the state
try:
student_module.delete()
msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key
track_format = 'delete student module state for problem {problem} for student {student} in {course}'
track_msg = track_format.format(problem=problem_url, student=unique_student_identifier, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except:
msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_urlname)
elif "Reset student's attempts" in action:
# modify the problem's state
try:
# load the state json
problem_state = json.loads(student_module.state)
old_number_of_attempts = problem_state["attempts"]
problem_state["attempts"] = 0
# save
module_to_reset.state = json.dumps(problem_state)
module_to_reset.save()
track.views.server_track(request,
'{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'.format(
old_attempts=old_number_of_attempts,
student=student_to_reset,
problem=module_to_reset.module_state_key,
instructor=request.user,
course=course_id),
{},
page='idashboard')
msg += "<font color='green'>Module state successfully reset!</font>"
except:
msg += "<font color='red'>Couldn't reset module state. </font>"
# save
student_module.state = json.dumps(problem_state)
student_module.save()
track_format = '{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'
track_msg = track_format.format(old_attempts=old_number_of_attempts,
student=student,
problem=student_module.module_state_key,
instructor=request.user,
course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
msg += "<font color='green'>Module state successfully reset!</font>"
except:
msg += "<font color='red'>Couldn't reset module state. </font>"
else:
# "Rescore student's problem submission" case
try:
instructor_task = submit_rescore_problem_for_student(request, course_id, module_state_key, student)
if instructor_task is None:
msg += '<font color="red">Failed to create a background task for rescoring "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
else:
track_msg = 'rescore problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except Exception as e:
log.exception("Encountered exception from rescore: {0}")
msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(module_state_key, e.message)
elif "Get link to student's progress page" in action:
unique_student_identifier = request.POST.get('unique_student_identifier', '')
try:
if "@" in unique_student_identifier:
student_to_reset = User.objects.get(email=unique_student_identifier)
else:
student_to_reset = User.objects.get(username=unique_student_identifier)
progress_url = reverse('student_progress', kwargs={'course_id': course_id, 'student_id': student_to_reset.id})
# try to uniquely id student by email address or username
message, student = get_student_from_identifier(unique_student_identifier)
msg += message
if student is not None:
progress_url = reverse('student_progress', kwargs={'course_id': course_id, 'student_id': student.id})
track.views.server_track(request,
'{instructor} requested progress page for {student} in {course}'.format(
student=student_to_reset,
student=student,
instructor=request.user,
course=course_id),
{},
page='idashboard')
msg += "<a href='{0}' target='_blank'> Progress page for username: {1} with email address: {2}</a>.".format(progress_url, student_to_reset.username, student_to_reset.email)
except:
msg += "<font color='red'>Couldn't find student with that username. </font>"
msg += "<a href='{0}' target='_blank'> Progress page for username: {1} with email address: {2}</a>.".format(progress_url, student.username, student.email)
#----------------------------------------
# export grades to remote gradebook
@@ -427,7 +523,7 @@ def instructor_dashboard(request, course_id):
if problem_to_dump[-4:] == ".xml":
problem_to_dump = problem_to_dump[:-4]
try:
(org, course_name, run) = course_id.split("/")
(org, course_name, _) = course_id.split("/")
module_state_key = "i4x://" + org + "/" + course_name + "/problem/" + problem_to_dump
smdat = StudentModule.objects.filter(course_id=course_id,
module_state_key=module_state_key)
@@ -625,6 +721,16 @@ def instructor_dashboard(request, course_id):
if use_offline:
msg += "<br/><font color='orange'>Grades from %s</font>" % offline_grades_available(course_id)
# generate list of pending background tasks
if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
instructor_tasks = get_running_instructor_tasks(course_id)
else:
instructor_tasks = None
# display course stats only if there is no other table to display:
course_stats = None
if not datatable:
course_stats = get_course_stats_table()
#----------------------------------------
# context for rendering
@@ -634,12 +740,13 @@ def instructor_dashboard(request, course_id):
'instructor_access': instructor_access,
'forum_admin_access': forum_admin_access,
'datatable': datatable,
'course_stats': course_stats,
'msg': msg,
'modeflag': {idash_mode: 'selectedmode'},
'problems': problems, # psychometrics
'plots': plots, # psychometrics
'course_errors': modulestore().get_item_errors(course.location),
'instructor_tasks': instructor_tasks,
'djangopid': os.getpid(),
'mitx_version': getattr(settings, 'MITX_VERSION_STRING', ''),
'offline_grade_log': offline_grades_available(course_id),
@@ -1030,7 +1137,7 @@ def _do_unenroll_students(course_id, students):
"""Do the actual work of un-enrolling multiple students, presented as a string
of emails separated by commas or returns"""
old_students, old_students_lc = get_and_clean_student_list(students)
old_students, _ = get_and_clean_student_list(students)
status = dict([x, 'unprocessed'] for x in old_students)
for student in old_students:
@@ -1054,7 +1161,7 @@ def _do_unenroll_students(course_id, students):
try:
ce[0].delete()
status[student] = "un-enrolled"
except Exception as err:
except Exception:
if not isok:
status[student] = "Error! Failed to un-enroll"
@@ -1113,11 +1220,11 @@ def get_answers_distribution(request, course_id):
def compute_course_stats(course):
'''
"""
Compute course statistics, including number of problems, videos, html.
course is a CourseDescriptor from the xmodule system.
'''
"""
# walk the course by using get_children() until we come to the leaves; count the
# number of different leaf types
@@ -1137,10 +1244,10 @@ def compute_course_stats(course):
def dump_grading_context(course):
'''
"""
Dump information about course grading context (eg which problems are graded in what assignments)
Very useful for debugging grading_policy.json and policy.json
'''
"""
msg = "-----------------------------------------------------------------------------\n"
msg += "Course grader:\n"
@@ -1164,10 +1271,10 @@ def dump_grading_context(course):
msg += "--> Section %s:\n" % (gs)
for sec in gsvals:
s = sec['section_descriptor']
format = getattr(s.lms, 'format', None)
grade_format = getattr(s.lms, 'grade_format', None)
aname = ''
if format in graders:
g = graders[format]
if grade_format in graders:
g = graders[grade_format]
aname = '%s %02d' % (g.short_label, g.index)
g.index += 1
elif s.display_name in graders:
@@ -1176,8 +1283,73 @@ def dump_grading_context(course):
notes = ''
if getattr(s, 'score_by_attempt', False):
notes = ', score by attempt!'
msg += " %s (format=%s, Assignment=%s%s)\n" % (s.display_name, format, aname, notes)
msg += " %s (grade_format=%s, Assignment=%s%s)\n" % (s.display_name, grade_format, aname, notes)
msg += "all descriptors:\n"
msg += "length=%d\n" % len(gc['all_descriptors'])
msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')
return msg
def get_background_task_table(course_id, problem_url, student=None):
    """
    Build the "datatable" structure describing background-task history.

    History is restricted to the given course and problem, and further to a
    single student when one is supplied.  Returns a (msg, datatable) pair:
    msg carries any error text, datatable the structure used for display
    (left empty when nothing matched).
    """
    history_entries = get_instructor_task_history(course_id, problem_url, student)
    datatable = {}
    msg = ""
    # Nothing matched?  (Bad arguments need no separate validation here:
    # they simply match no history entries.)
    if history_entries.count() == 0:
        if student is not None:
            template = '<font color="red">Failed to find any background tasks for course "{course}", module "{problem}" and student "{student}".</font>'
            msg += template.format(course=course_id, problem=problem_url, student=student.username)
        else:
            msg += '<font color="red">Failed to find any background tasks for course "{course}" and module "{problem}".</font>'.format(course=course_id, problem=problem_url)
        return msg, datatable

    datatable['header'] = ["Task Type",
                           "Task Id",
                           "Requester",
                           "Submitted",
                           "Duration (sec)",
                           "Task State",
                           "Task Status",
                           "Task Output"]

    rows = []
    for instructor_task in history_entries:
        # duration, when the task recorded one in its output:
        duration_sec = 'unknown'
        if hasattr(instructor_task, 'task_output') and instructor_task.task_output is not None:
            task_output = json.loads(instructor_task.task_output)
            if 'duration_ms' in task_output:
                duration_sec = int(task_output['duration_ms'] / 1000.0)
        # human-readable completion status and message:
        success, task_message = get_task_completion_info(instructor_task)
        status = "Complete" if success else "Incomplete"
        rows.append([str(instructor_task.task_type),
                     str(instructor_task.task_id),
                     str(instructor_task.requester),
                     instructor_task.created.isoformat(' '),
                     duration_sec,
                     str(instructor_task.task_state),
                     status,
                     task_message])
    datatable['data'] = rows

    if student is not None:
        datatable['title'] = "{course_id} > {location} > {student}".format(course_id=course_id,
                                                                           location=problem_url,
                                                                           student=student.username)
    else:
        datatable['title'] = "{course_id} > {location}".format(course_id=course_id, location=problem_url)
    return msg, datatable

View File

@@ -0,0 +1,164 @@
"""
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask
from instructor_task.tasks import (rescore_problem,
reset_problem_attempts,
delete_problem_state)
from instructor_task.api_helper import (check_arguments_for_rescoring,
encode_problem_and_student_input,
submit_task)
def get_running_instructor_tasks(course_id):
    """
    Return a queryset of InstructorTask entries still running for a course.

    Used to populate the running-task list shown on the instructor dashboard.
    """
    tasks = InstructorTask.objects.filter(course_id=course_id)
    # Drop every finished ("ready") state — success, failure, revoked —
    # leaving only tasks that are still in flight.
    for finished_state in READY_STATES:
        tasks = tasks.exclude(task_state=finished_state)
    return tasks.order_by('-id')
def get_instructor_task_history(course_id, problem_url, student=None):
    """
    Return a queryset of historical InstructorTask entries for a course,
    restricted to a particular problem and, optionally, a particular student.
    """
    # The task_key encodes problem (and student, when given), so filtering
    # on it performs the problem/student match in one step.
    _, task_key = encode_problem_and_student_input(problem_url, student)
    matching = InstructorTask.objects.filter(course_id=course_id, task_key=task_key)
    return matching.order_by('-id')
def submit_rescore_problem_for_student(request, course_id, problem_url, student):
    """
    Submit a background task rescoring one problem for a single student.

    `student` is a User object; `problem_url` is the i4x-style location of
    the problem within `course_id`.

    Raises ItemNotFoundException when the problem does not exist,
    AlreadyRunningError when this problem is already being rescored for this
    student, and NotImplementedError when the problem does not support
    rescoring.

    The InstructorTask entry is committed immediately: when called from a
    view wrapped by TransactionMiddleware (a "commit-on-success"
    transaction), an autocommit in the submission path commits any pending
    transaction, and later database work runs in a separate transaction.
    """
    # Validate the arguments up front; any exception propagates to the caller.
    check_arguments_for_rescoring(course_id, problem_url)

    task_input, task_key = encode_problem_and_student_input(problem_url, student)
    return submit_task(request, 'rescore_problem', rescore_problem, course_id, task_input, task_key)
def submit_rescore_problem_for_all_students(request, course_id, problem_url):
    """
    Submit a background task rescoring one problem for every student.

    Rescoring covers all students in `course_id` who have accessed the
    problem at `problem_url` (i4x-style location) and provided and checked
    an answer.

    Raises ItemNotFoundException when the problem does not exist,
    AlreadyRunningError when the problem is already being rescored, and
    NotImplementedError when the problem does not support rescoring.

    The InstructorTask entry is committed immediately: when called from a
    view wrapped by TransactionMiddleware (a "commit-on-success"
    transaction), an autocommit in the submission path commits any pending
    transaction, and later database work runs in a separate transaction.
    """
    # Validate the arguments up front; any exception propagates to the caller.
    check_arguments_for_rescoring(course_id, problem_url)

    # Encoding with no student yields the "all students" task key, which is
    # also what reserves the task against duplicate submission.
    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, 'rescore_problem', rescore_problem, course_id, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, course_id, problem_url):
    """
    Submit a background task resetting attempts on one problem for every student.

    Attempts are reset for all students in `course_id` who have accessed the
    problem at `problem_url` (i4x-style location).

    Raises ItemNotFoundException when the problem does not exist and
    AlreadyRunningError when a reset of this problem is already underway.

    The InstructorTask entry is committed immediately: when called from a
    view wrapped by TransactionMiddleware (a "commit-on-success"
    transaction), an autocommit in the submission path commits any pending
    transaction, and later database work runs in a separate transaction.
    """
    # The problem_url is typed in by the instructor, so confirm a module
    # descriptor exists for it; get_instance raises if it does not, and that
    # exception passes up to the caller.
    modulestore().get_instance(course_id, problem_url)

    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, 'reset_problem_attempts', reset_problem_attempts, course_id, task_input, task_key)
def submit_delete_problem_state_for_all_students(request, course_id, problem_url):
    """
    Submit a background task deleting state on one problem for every student.

    State is deleted for all students in `course_id` who have accessed the
    problem at `problem_url` (i4x-style location).

    Raises ItemNotFoundException when the problem does not exist and
    AlreadyRunningError when this problem's state is already being deleted.

    The InstructorTask entry is committed immediately: when called from a
    view wrapped by TransactionMiddleware (a "commit-on-success"
    transaction), an autocommit in the submission path commits any pending
    transaction, and later database work runs in a separate transaction.
    """
    # The problem_url is typed in by the instructor, so confirm a module
    # descriptor exists for it; get_instance raises if it does not, and that
    # exception passes up to the caller.
    modulestore().get_instance(course_id, problem_url)

    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, 'delete_problem_state', delete_problem_state, course_id, task_input, task_key)

View File

@@ -0,0 +1,266 @@
import hashlib
import json
import logging
from django.db import transaction
from celery.result import AsyncResult
from celery.states import READY_STATES, SUCCESS, FAILURE, REVOKED
from courseware.module_render import get_xqueue_callback_url_prefix
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask, PROGRESS
log = logging.getLogger(__name__)
class AlreadyRunningError(Exception):
    """Raised when an equivalent background task is already in progress."""
def _task_is_running(course_id, task_type, task_key):
    """
    Return True if a matching task is currently running.

    A task matches when its course_id, task_type, and task_key all agree,
    and it counts as running when its state is not one of celery's
    READY_STATES (e.g. success, failure, revoked).
    """
    running_tasks = InstructorTask.objects.filter(course_id=course_id, task_type=task_type, task_key=task_key)
    # exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
    for state in READY_STATES:
        running_tasks = running_tasks.exclude(task_state=state)
    # exists() issues a cheap EXISTS query; len() would fetch and
    # materialize every matching row just to test emptiness.
    return running_tasks.exists()
@transaction.autocommit
def _reserve_task(course_id, task_type, task_key, task_input, requester):
    """
    Record that a task is in progress by creating its InstructorTask entry.

    Raises AlreadyRunningError when an equivalent task is already running.
    The created entry carries an arbitrary task_id, to be handed to celery
    when the task is submitted.

    The autocommit decorator guarantees the row is committed right away:
    when called from a view wrapped by TransactionMiddleware (i.e. inside a
    "commit-on-success" transaction), a successful save here commits any
    pending transaction, and later database work happens in a separate one.

    There is a small race window here: two users submitting the same task at
    nearly the same moment can both pass the running-check before either row
    is created, letting both tasks run.  That risk is deemed small enough
    not to warrant further safeguards.
    """
    if not _task_is_running(course_id, task_type, task_key):
        # Create the log entry now, so that future requests see it as running.
        return InstructorTask.create(course_id, task_type, task_key, task_input, requester)
    raise AlreadyRunningError("requested task is already running")
def _get_xmodule_instance_args(request):
    """
    Extract from `request` the arguments needed to instantiate xmodules in a task.

    The `request_info` dict feeds the tracking log so the origin of the task
    request is recorded; `xqueue_callback_url_prefix` lets old-style xqueue
    callbacks reach the appropriate module in the LMS.
    """
    request_info = {
        'username': request.user.username,
        'ip': request.META['REMOTE_ADDR'],
        'agent': request.META.get('HTTP_USER_AGENT', ''),
        'host': request.META['SERVER_NAME'],
    }
    return {
        'xqueue_callback_url_prefix': get_xqueue_callback_url_prefix(request),
        'request_info': request_info,
    }
def _update_instructor_task(instructor_task, task_result):
    """
    Refresh an InstructorTask entry from a celery task Result.

    The `instructor_task` is updated in place but usually not saved:
    finished tasks update their own entries, and running tasks are left
    alone.  The one exception is the "revoked" state, which the worker may
    never get a chance to record, so it is persisted here.

    The entry's task_state is updated, and its "task_output" field receives
    json describing the outcome: the task's result on success, an
    "exception"/"message"/"traceback" structure on failure, or a simple
    "message" for a revoked task.
    """
    # Snapshot the result object's fields as close together as possible:
    # state and result can drift if read at different times, so read the
    # state first and treat the result as possibly out of sync with it.
    task_id = task_result.task_id
    state = task_result.state
    result = task_result.result
    traceback_text = task_result.traceback

    # Assume no save is needed unless the revoked branch says otherwise.
    needs_save = False
    output = None
    if state in [PROGRESS, SUCCESS]:
        # build the status json straight from the task result's result;
        # it travels back with the entry passed in.
        log.info("background task (%s), state %s: result: %s", task_id, state, result)
        output = InstructorTask.create_output_for_success(result)
    elif state == FAILURE:
        # on failure, the result holds the exception that caused it
        shown_traceback = traceback_text if traceback_text is not None else ''
        log.warning("background task (%s) failed: %s %s", task_id, result, shown_traceback)
        output = InstructorTask.create_output_for_failure(result, traceback_text)
    elif state == REVOKED:
        # on revocation the result carries nothing, and the worker thread
        # cannot be relied on to record the status, so persist it here.
        needs_save = True
        log.warning("background task (%s) revoked.", task_id)
        output = InstructorTask.create_output_for_revoked()

    # Always push state/output into the in-memory entry, saved or not:
    # under celery's "ALWAYS_EAGER" mode, progress must travel back with
    # the entry passed in.
    instructor_task.task_state = state
    if output is not None:
        instructor_task.task_output = output
    if needs_save:
        instructor_task.save()
def get_updated_instructor_task(task_id):
    """
    Fetch the InstructorTask entry for `task_id`, refreshing its state if needed.

    Returns None (and logs a warning) when no entry matches the given `task_id`.
    When the stored task_state is not yet a terminal ("ready") state, the
    underlying celery result is queried and the entry is updated in place with
    the latest state and output before being returned.
    """
    try:
        instructor_task = InstructorTask.objects.get(task_id=task_id)
    except InstructorTask.DoesNotExist:
        log.warning("query for InstructorTask status failed: task_id=(%s) not found", task_id)
        return None

    if instructor_task.task_state not in READY_STATES:
        # The task may still be running: pull current status from celery's
        # result store and fold it into the entry.
        _update_instructor_task(instructor_task, AsyncResult(task_id))
    return instructor_task
def get_status_from_instructor_task(instructor_task):
    """
    Build a status dict describing the given InstructorTask entry.

    Returns an empty dict when `instructor_task` is None.  Otherwise the dict
    contains:

      'task_id': id assigned by LMS and used by celery.
      'task_state': state of task as stored in celery's result store.
      'in_progress': boolean indicating if task is still running.
      'task_progress': dict decoded from the entry's task_output JSON, present
          only when task_output is non-None.  This includes:
          'attempted': number of attempts made
          'updated': number of attempts that "succeeded"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
          'duration_ms': how long the task has (or had) been running.
          'exception': name of exception class raised in failed tasks.
          'message': returned for failed and revoked tasks.
          'traceback': optional, returned if task failed and produced a traceback.
    """
    if instructor_task is None:
        return {}

    status = {
        'task_id': instructor_task.task_id,
        'task_state': instructor_task.task_state,
        'in_progress': instructor_task.task_state not in READY_STATES,
    }
    if instructor_task.task_output is not None:
        status['task_progress'] = json.loads(instructor_task.task_output)
    return status
def check_arguments_for_rescoring(course_id, problem_url):
    """
    Do simple checks on the descriptor to confirm that it supports rescoring.

    Confirms first that the problem_url is defined (since that's currently typed
    in).  An ItemNotFoundException is raised if the corresponding module
    descriptor doesn't exist.  NotImplementedError is raised if the
    corresponding module doesn't support rescoring calls.
    """
    # Looking up the descriptor raises ItemNotFoundException for a bad problem_url.
    descriptor = modulestore().get_instance(course_id, problem_url)
    supports_rescore = (hasattr(descriptor, 'module_class')
                        and hasattr(descriptor.module_class, 'rescore_problem'))
    if not supports_rescore:
        raise NotImplementedError("Specified module does not support rescoring.")
def encode_problem_and_student_input(problem_url, student=None):
    """
    Encode problem_url and optional student into task_key and task_input values.

    `problem_url` is full URL of the problem.
    `student` is the user object of the student (only its `username` and `id`
        attributes are used here).

    Returns a tuple (task_input, task_key):
      `task_input` is a dict suitable for JSON-serialization into an
          InstructorTask entry's task_input column;
      `task_key` is an MD5 hex digest identifying the (student, problem)
          combination, used to check whether an equivalent task is already running.
    """
    if student is not None:
        task_input = {'problem_url': problem_url, 'student': student.username}
        task_key_stub = "{student}_{problem}".format(student=student.id, problem=problem_url)
    else:
        task_input = {'problem_url': problem_url}
        task_key_stub = "_{problem}".format(problem=problem_url)

    # Create the key value by using an MD5 hash.  Encode explicitly to UTF-8
    # first: md5 requires a byte string, and hashing the text value directly
    # fails when problem_url contains non-ASCII characters.
    task_key = hashlib.md5(task_key_stub.encode('utf-8')).hexdigest()
    return task_input, task_key
def submit_task(request, task_type, task_class, course_id, task_input, task_key):
    """
    Reserve and launch a background task, returning its InstructorTask entry.

    First reserves the task (based on the `course_id`, `task_type`, and
    `task_key`), which raises `AlreadyRunningError` if an equivalent task is
    already running.  The `task_input` is recorded in the resulting
    InstructorTask entry, and per-request information is extracted from the
    `request` provided by the originating server request.  The task is then
    submitted to run asynchronously via `task_class`, using the task_id that
    was generated for the reservation.

    Note: the _reserve_task method makes sure the InstructorTask entry is
    committed.  When called from any view that is wrapped by
    TransactionMiddleware, and thus in a "commit-on-success" transaction, an
    autocommit buried within here will cause any pending transaction to be
    committed by a successful save here.  Any future database operations will
    take place in a separate transaction.
    """
    # Reserve the task first; this raises if an equivalent task is already running.
    entry = _reserve_task(course_id, task_type, task_key, task_input, request.user)
    # Hand the work off to celery, reusing the task_id created at reservation time.
    task_class.apply_async([entry.id, _get_xmodule_instance_args(request)], task_id=entry.task_id)
    return entry

View File

@@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration that creates the instructor_task_instructortask table."""

    def forwards(self, orm):
        # Adding model 'InstructorTask'
        db.create_table('instructor_task_instructortask', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
            ('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
            ('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('instructor_task', ['InstructorTask'])

    def backwards(self, orm):
        # Deleting model 'InstructorTask'
        db.delete_table('instructor_task_instructortask')

    # Frozen ORM snapshot used by South while running this migration
    # (auto-generated; do not edit by hand).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'instructor_task.instructortask': {
            'Meta': {'object_name': 'InstructorTask'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
            'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
            'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['instructor_task']

View File

@@ -0,0 +1,156 @@
"""
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from uuid import uuid4
import json
from django.contrib.auth.models import User
from django.db import models, transaction
# Define custom states used by InstructorTask (stored in task_state alongside
# the states reported by celery):
QUEUING = 'QUEUING'    # entry created and task submitted, but no worker has reported on it yet
PROGRESS = 'PROGRESS'  # a worker has picked up the task and is reporting progress metadata
class InstructorTask(models.Model):
    """
    Stores information about background tasks that have been submitted to
    perform work by an instructor (or course staff).
    Examples include grading and rescoring.

    `task_type` identifies the kind of task being performed, e.g. rescoring.
    `course_id` uses the course run's unique id to identify the course.
    `task_key` stores relevant input arguments encoded into key value for testing to see
       if the task is already running (together with task_type and course_id).
    `task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
        Examples include url of problem being rescored, id of student if only one student being rescored.
    `task_id` stores the id used by celery for the background task.
    `task_state` stores the last known state of the celery task
    `task_output` stores the output of the celery task.
        Format is a JSON-serialized dict.  Content varies by task_type and task_state.
    `requester` stores id of user who submitted the task
    `created` stores date that entry was first created
    `updated` stores date that entry was last modified
    """
    task_type = models.CharField(max_length=50, db_index=True)
    course_id = models.CharField(max_length=255, db_index=True)
    task_key = models.CharField(max_length=255, db_index=True)
    # JSON-serialized dict of task-specific input; length-checked in create()
    task_input = models.CharField(max_length=255)
    task_id = models.CharField(max_length=255, db_index=True)  # max_length from celery_taskmeta
    task_state = models.CharField(max_length=50, null=True, db_index=True)  # max_length from celery_taskmeta
    # JSON-serialized dict of task results; length-limited by the create_output_* helpers below
    task_output = models.CharField(max_length=1024, null=True)
    requester = models.ForeignKey(User, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True)
    updated = models.DateTimeField(auto_now=True)

    def __repr__(self):
        """Unambiguous representation showing the task-related fields as a dict."""
        return 'InstructorTask<%r>' % ({
            'task_type': self.task_type,
            'course_id': self.course_id,
            'task_input': self.task_input,
            'task_id': self.task_id,
            'task_state': self.task_state,
            'task_output': self.task_output,
        },)

    def __unicode__(self):
        """Human-readable form; delegates to __repr__."""
        return unicode(repr(self))

    @classmethod
    def create(cls, course_id, task_type, task_key, task_input, requester):
        """
        Create an InstructorTask entry and commit it immediately.

        The celery task_id is generated here (rather than by celery) so it can
        be stored on the entry and passed to celery at submission time.
        Raises ValueError if the JSON-serialized `task_input` will not fit in
        the 255-character task_input column.
        """
        # create the task_id here, and pass it into celery:
        task_id = str(uuid4())

        json_task_input = json.dumps(task_input)

        # check length of task_input, and return an exception if it's too long:
        if len(json_task_input) > 255:
            fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
            msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
            raise ValueError(msg)

        # create the task, then save it:
        instructor_task = cls(course_id=course_id,
                              task_type=task_type,
                              task_id=task_id,
                              task_key=task_key,
                              task_input=json_task_input,
                              task_state=QUEUING,
                              requester=requester)
        instructor_task.save_now()

        return instructor_task

    @transaction.autocommit
    def save_now(self):
        """Writes InstructorTask immediately, ensuring the transaction is committed."""
        self.save()

    @staticmethod
    def create_output_for_success(returned_result):
        """
        Converts successful result to output format.

        Raises a ValueError exception if the output is too long.
        """
        # In future, there should be a check here that the resulting JSON
        # will fit in the column.  In the meantime, just return an exception.
        json_output = json.dumps(returned_result)
        if len(json_output) > 1023:
            raise ValueError("Length of task output is too long: {0}".format(json_output))
        return json_output

    @staticmethod
    def create_output_for_failure(exception, traceback_string):
        """
        Converts failed result information to output format.

        Traceback information is truncated or not included if it would result in an output string
        that would not fit in the database.  If the output is still too long, then the
        exception message is also truncated.

        Truncation is indicated by adding "..." to the end of the value.
        """
        tag = '...'
        # NOTE: `exception.message` is the Python-2 exception attribute.
        task_progress = {'exception': type(exception).__name__, 'message': str(exception.message)}
        if traceback_string is not None:
            # truncate any traceback that goes into the InstructorTask model:
            task_progress['traceback'] = traceback_string
        json_output = json.dumps(task_progress)
        # if the resulting output is too long, then first shorten the
        # traceback, and then the message, until it fits.
        too_long = len(json_output) - 1023
        if too_long > 0:
            if traceback_string is not None:
                if too_long >= len(traceback_string) - len(tag):
                    # remove the traceback entry entirely (so no key or value)
                    del task_progress['traceback']
                    # NOTE(review): this estimate counts only the raw key and value
                    # lengths, not the JSON punctuation (quotes, colon, comma)
                    # removed with the entry, nor any change from JSON escaping —
                    # so the message below may be trimmed slightly more or less
                    # than strictly necessary.  Confirm if exact fit matters.
                    too_long -= (len(traceback_string) + len('traceback'))
                else:
                    # truncate the traceback:
                    task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
                    too_long = 0
            if too_long > 0:
                # we need to shorten the message:
                task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
            json_output = json.dumps(task_progress)
        return json_output

    @staticmethod
    def create_output_for_revoked():
        """Creates standard message to store in output format for revoked tasks."""
        return json.dumps({'message': 'Task revoked before running'})

View File

@@ -0,0 +1,97 @@
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
At present, these tasks all operate on StudentModule objects in one way or another,
so they share a visitor architecture. Each task defines an "update function" that
takes a module_descriptor, a particular StudentModule object, and xmodule_instance_args.
A task may optionally specify a "filter function" that takes a query for StudentModule
objects, and adds additional filter clauses.
A task also passes through "xmodule_instance_args", that are used to provide
information to our code that instantiates xmodule instances.
The task definition then calls the traversal function, passing in the three arguments
above, along with the id value for an InstructorTask object. The InstructorTask
object contains a 'task_input' row which is a JSON-encoded dict containing
a problem URL and optionally a student. These are used to set up the initial value
of the query for traversing StudentModule objects.
"""
from celery import task
from instructor_task.tasks_helper import (update_problem_module_state,
rescore_problem_module_state,
reset_attempts_module_state,
delete_problem_module_state)
@task
def rescore_problem(entry_id, xmodule_instance_args):
    """Rescores a problem in a course, for all students or one specific student.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem to be rescored. (required)

      'student': the identifier (username or email) of a particular user whose
          problem submission should be rescored.  If not specified, all problem
          submissions for the problem will be rescored.

    `xmodule_instance_args` provides information needed by _get_module_instance_for_task()
    to instantiate an xmodule instance.
    """
    action_name = 'rescored'
    update_fcn = rescore_problem_module_state
    # Only modules whose state is marked "done" can be rescored.  Note: the
    # previous `lambda(modules_to_update):` form is Python-2-only syntax
    # (a parenthesized lambda parameter) and is a SyntaxError on Python 3.
    filter_fcn = lambda modules_to_update: modules_to_update.filter(state__contains='"done": true')
    return update_problem_module_state(entry_id,
                                       update_fcn, action_name, filter_fcn=filter_fcn,
                                       xmodule_instance_args=xmodule_instance_args)
@task
def reset_problem_attempts(entry_id, xmodule_instance_args):
    """Resets problem attempts to zero for a particular problem for all students in a course.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem whose attempts are reset. (required)

    `xmodule_instance_args` provides information needed by _get_module_instance_for_task()
    to instantiate an xmodule instance.
    """
    # No filtering is applied: every matching StudentModule has its attempts reset.
    return update_problem_module_state(entry_id,
                                       reset_attempts_module_state,
                                       'reset',
                                       filter_fcn=None,
                                       xmodule_instance_args=xmodule_instance_args)
@task
def delete_problem_state(entry_id, xmodule_instance_args):
    """Deletes problem state entirely for all students on a particular problem in a course.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem whose state is deleted. (required)

    `xmodule_instance_args` provides information needed by _get_module_instance_for_task()
    to instantiate an xmodule instance.
    """
    # No filtering is applied: every matching StudentModule is deleted.
    return update_problem_module_state(entry_id,
                                       delete_problem_module_state,
                                       'deleted',
                                       filter_fcn=None,
                                       xmodule_instance_args=xmodule_instance_args)

View File

@@ -0,0 +1,388 @@
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from time import time
from sys import exc_info
from traceback import format_exc
from celery import current_task
from celery.utils.log import get_task_logger
from celery.signals import worker_process_init
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.db import transaction
from dogapi import dog_stats_api
from xmodule.modulestore.django import modulestore
import mitxmako.middleware as middleware
from track.views import task_track
from courseware.models import StudentModule
from courseware.model_data import ModelDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_task.models import InstructorTask, PROGRESS
# define different loggers for use within tasks and on client side
TASK_LOG = get_task_logger(__name__)
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
def initialize_mako(sender=None, conf=None, **kwargs):
    """
    Get mako templates to work on celery worker server's worker thread.

    The initialization of Mako templating is usually done when Django is
    initializing middleware packages as part of processing a server request.
    When this is run on a celery worker server, no such initialization is
    called.

    To make sure that we don't load this twice (just in case), we look for the
    result: the defining of the lookup paths for templates.
    """
    if 'main' in middleware.lookup:
        # Already initialized (template lookup paths are defined); nothing to do.
        return
    TASK_LOG.info("Initializing Mako middleware explicitly")
    middleware.MakoMiddleware()


# Register the hook so the initialization runs when each worker process starts:
worker_process_init.connect(initialize_mako)
class UpdateProblemModuleStateError(Exception):
    """
    Error signaling a fatal condition while updating problem modules.

    Used when the current module cannot be processed and no more
    modules should be attempted.
    """
    pass
def _get_current_task():
    """
    Return celery's current_task proxy.

    Stub to make it easier to test without actually running Celery:
    tests can replace this function rather than celery's own global.
    """
    return current_task
def _perform_module_state_update(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn,
                                 xmodule_instance_args):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.

    If a `filter_fcn` is not None, it is applied to the query that has been constructed.  It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed three arguments:  the module_descriptor for the module pointed to by the
    module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
    passed through.  If the value returned by the update function evaluates to a boolean True,
    the update is successful; False indicates the update on the particular student module failed.
    A raised exception indicates a fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

          'attempted': number of attempts made
          'updated': number of attempts that "succeeded"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Because this is run internal to a task, it does not catch exceptions.  These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.
    """
    # get start time for task:
    start_time = time()

    # find the problem descriptor:
    module_descriptor = modulestore().get_instance(course_id, module_state_key)

    # find the modules in question:
    modules_to_update = StudentModule.objects.filter(course_id=course_id,
                                                     module_state_key=module_state_key)

    # give the option of updating an individual student. If not specified,
    # then updates all students who have responded to a problem so far
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student,
        # and let it throw an exception if none is found.
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        else:
            # Fix: this branch was previously guarded by a redundant
            # `elif student_identifier is not None`, which is always true here.
            student = User.objects.get(username=student_identifier)

    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    # perform the main loop
    num_updated = 0
    num_attempted = 0
    num_total = modules_to_update.count()

    def get_task_progress():
        """Return a dict containing info about current task"""
        current_time = time()
        progress = {'action_name': action_name,
                    'attempted': num_attempted,
                    'updated': num_updated,
                    'total': num_total,
                    'duration_ms': int((current_time - start_time) * 1000),
                    }
        return progress

    task_progress = get_task_progress()
    _get_current_task().update_state(state=PROGRESS, meta=task_progress)
    for module_to_update in modules_to_update:
        num_attempted += 1
        # There is no try here:  if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=['action:{name}'.format(name=action_name)]):
            if update_fcn(module_descriptor, module_to_update, xmodule_instance_args):
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                num_updated += 1

        # update task status:
        task_progress = get_task_progress()
        _get_current_task().update_state(state=PROGRESS, meta=task_progress)

    return task_progress
def update_problem_module_state(entry_id, update_fcn, action_name, filter_fcn,
                                xmodule_instance_args):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    The `entry_id` is the primary key for the InstructorTask entry representing the task.  This function
    updates the entry on success and failure of the _perform_module_state_update function it
    wraps.  It is setting the entry's value for task_state based on what Celery would set it to once
    the task returns to Celery:  FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to _perform_module_state_update, and documented there.

    If no exceptions are raised, a dict containing the task's result is returned, with the following keys:

          'attempted': number of attempts made
          'updated': number of attempts that "succeeded"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Before returning, this is also JSON-serialized and stored in the task_output column of the InstructorTask entry.

    If an exception is raised internally, it is caught and recorded in the InstructorTask entry.
    This is also a JSON-serialized dict, stored in the task_output column, containing the following keys:

          'exception':  type of exception object
          'message': error message from exception object
          'traceback': traceback information (truncated if necessary)

    Once the exception is caught, it is raised again and allowed to pass up to the
    task-running level, so that it can also set the failure modes and capture the error trace in the
    result object that Celery creates.
    """
    # Get the InstructorTask to be updated.  If this fails, then let the exception return to Celery.
    # There's no point in catching it here.
    entry = InstructorTask.objects.get(pk=entry_id)

    # Get inputs to use in this task from the entry:
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)
    module_state_key = task_input.get('problem_url')
    # 'student' is an optional entry in task_input:
    student_ident = task_input.get('student')

    fmt = 'Starting to update problem modules as task "{task_id}": course "{course_id}" problem "{state_key}": nothing {action} yet'
    TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, action=action_name))

    # add task_id to xmodule_instance_args, so that it can be output with tracking info:
    if xmodule_instance_args is not None:
        xmodule_instance_args['task_id'] = task_id

    # Now that we have an entry we can try to catch failures:
    task_progress = None
    try:
        # Check that the task_id submitted in the InstructorTask matches the current task
        # that is running.
        request_task_id = _get_current_task().request.id
        if task_id != request_task_id:
            fmt = 'Requested task "{task_id}" did not match actual task "{actual_id}"'
            message = fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, actual_id=request_task_id)
            TASK_LOG.error(message)
            raise UpdateProblemModuleStateError(message)

        # Now do the work:
        with dog_stats_api.timer('instructor_tasks.module.time.overall', tags=['action:{name}'.format(name=action_name)]):
            task_progress = _perform_module_state_update(course_id, module_state_key, student_ident, update_fcn,
                                                         action_name, filter_fcn, xmodule_instance_args)
        # If we get here, we assume we've succeeded, so update the InstructorTask entry in anticipation.
        # But we do this within the try, in case creating the task_output causes an exception to be
        # raised.
        entry.task_output = InstructorTask.create_output_for_success(task_progress)
        entry.task_state = SUCCESS
        entry.save_now()

    except Exception:
        # Try to write out the failure to the entry before failing.
        _, exception, traceback = exc_info()
        # Fix: format_exc() takes a `limit` count, not a traceback object, so
        # `format_exc(traceback)` was a misuse.  Called with no arguments it
        # formats the exception currently being handled, which is what we want.
        traceback_string = format_exc() if traceback is not None else ''
        TASK_LOG.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string)
        entry.task_output = InstructorTask.create_output_for_failure(exception, traceback_string)
        entry.task_state = FAILURE
        entry.save_now()
        raise

    # Log and exit, returning task_progress info as task result:
    fmt = 'Finishing task "{task_id}": course "{course_id}" problem "{state_key}": final: {progress}'
    TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, progress=task_progress))
    return task_progress
def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None):
    """
    Fetches a module instance for a given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an
    XQueue callback.  These are passed, along with `grade_bucket_type`, to
    get_module_for_descriptor_internal, which sidesteps the need for a Request object when
    instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    model_data_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)

    # Pull request-related tracking information out of the args passthrough,
    # and supplement it with task-specific information:
    if xmodule_instance_args is None:
        request_info = {}
        xqueue_callback_url_prefix = ''
    else:
        request_info = xmodule_instance_args.get('request_info', {})
        xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '')
    task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}

    def track_function(event_type, event):
        '''
        Tracking function that logs what happened, for insertion into ModuleSystem.

        Used by CapaModule, which provides the event_type (as string) and
        event (as dict) as arguments.  The request_info and task_info
        (and page) are supplied here.
        '''
        return task_track(request_info, task_info, event_type, event, page='x_module_task')

    return get_module_for_descriptor_internal(student, module_descriptor, model_data_cache, course_id,
                                              track_function, xqueue_callback_url_prefix,
                                              grade_bucket_type=grade_bucket_type)
@transaction.autocommit
def rescore_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns True if problem was successfully rescored for the given student, and False
    if problem encountered some kind of error in rescoring.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    module_state_key = student_module.module_state_key

    # reconstitute a live module instance so rescore_problem() can be called on it:
    instance = _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args, grade_bucket_type='rescore')
    if instance is None:
        # Either permissions just changed, or someone is trying to be clever
        # and load something they shouldn't have access to.
        msg = "No module {loc} for student {student}--access denied?".format(loc=module_state_key,
                                                                             student=student)
        TASK_LOG.debug(msg)
        raise UpdateProblemModuleStateError(msg)

    if not hasattr(instance, 'rescore_problem'):
        # This should also not happen, since it should be already checked in the caller,
        # but check here to be sure.
        msg = "Specified problem does not support rescoring."
        raise UpdateProblemModuleStateError(msg)

    result = instance.rescore_problem()
    if 'success' not in result:
        # don't consider these fatal, but false means that the individual call didn't complete:
        TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
                         "unexpected response {msg}".format(msg=result, course=course_id, loc=module_state_key, student=student))
        return False
    elif result['success'] not in ['correct', 'incorrect']:
        # 'success' was present but did not report a completed rescore (e.g. an input error):
        TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
                         "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
        return False
    else:
        TASK_LOG.debug(u"successfully processed rescore call for course {course}, problem {loc} and student {student}: "
                       "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
        return True
@transaction.autocommit
def reset_attempts_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
    """
    Resets problem attempts to zero for specified `student_module`.

    `_module_descriptor` is unused here (note the leading underscore).

    Always returns true, indicating success, if it doesn't raise an exception due to database error.
    """
    # state is stored as a JSON blob on the StudentModule row; missing/empty state means no attempts
    problem_state = json.loads(student_module.state) if student_module.state else {}
    if 'attempts' in problem_state:
        old_number_of_attempts = problem_state["attempts"]
        if old_number_of_attempts > 0:
            problem_state["attempts"] = 0
            # convert back to json and save
            student_module.state = json.dumps(problem_state)
            student_module.save()
            # get request-related tracking information from args passthrough,
            # and supplement with task-specific information:
            request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
            task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
            event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
            # a tracking event is emitted only when an actual reset was performed
            task_track(request_info, task_info, 'problem_reset_attempts', event_info, page='x_module_task')
    # consider the reset to be successful, even if no update was performed.  (It's just "optimized".)
    return True
@transaction.autocommit
def delete_problem_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
    """
    Delete the StudentModule entry.

    `_module_descriptor` is unused here (note the leading underscore).

    Always returns true, indicating success, if it doesn't raise an exception due to database error.
    """
    student_module.delete()
    # Emit a tracking event recording the deletion, combining request-level
    # information from the args passthrough with task-specific identification.
    if xmodule_instance_args is not None:
        request_info = xmodule_instance_args.get('request_info', {})
    else:
        request_info = {}
    task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
    task_track(request_info, task_info, 'problem_delete_state', {}, page='x_module_task')
    return True

View File

@@ -0,0 +1,19 @@
import json
from factory import DjangoModelFactory, SubFactory
from student.tests.factories import UserFactory as StudentUserFactory
from instructor_task.models import InstructorTask
from celery.states import PENDING
class InstructorTaskFactory(DjangoModelFactory):
    """Factory producing InstructorTask rows, defaulting to a PENDING rescore task."""
    FACTORY_FOR = InstructorTask

    task_type = 'rescore_problem'
    course_id = "MITx/999/Robot_Super_Course"
    # task_input holds JSON-encoded task arguments; default is an empty dict
    task_input = json.dumps({})
    # task_key, task_id, and task_output default to None; callers supply them when relevant
    task_key = None
    task_id = None
    task_state = PENDING
    task_output = None
    # each task record is created on behalf of a factory-built user
    requester = SubFactory(StudentUserFactory)

View File

@@ -0,0 +1,138 @@
"""
Test for LMS instructor background task queue management
"""
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.tests.factories import UserFactory
from instructor_task.api import (get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
from instructor_task.api_helper import AlreadyRunningError
from instructor_task.models import InstructorTask, PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
InstructorTaskModuleTestCase,
TEST_COURSE_ID)
class InstructorTaskReportTest(InstructorTaskTestCase):
    """
    Tests API and view methods that involve the reporting of status for background tasks.
    """

    def test_get_running_instructor_tasks(self):
        # when fetching running tasks, we get all running tasks, and only running tasks
        # (create four failed and four successful entries that should NOT be returned)
        for _ in range(1, 5):
            self._create_failure_entry()
            self._create_success_entry()
        progress_task_ids = [self._create_progress_entry().task_id for _ in range(1, 5)]
        task_ids = [instructor_task.task_id for instructor_task in get_running_instructor_tasks(TEST_COURSE_ID)]
        self.assertEquals(set(task_ids), set(progress_task_ids))

    def test_get_instructor_task_history(self):
        # when fetching historical tasks, we get all tasks, including running tasks
        expected_ids = []
        for _ in range(1, 5):
            expected_ids.append(self._create_failure_entry().task_id)
            expected_ids.append(self._create_success_entry().task_id)
            expected_ids.append(self._create_progress_entry().task_id)
        task_ids = [instructor_task.task_id for instructor_task
                    in get_instructor_task_history(TEST_COURSE_ID, self.problem_url)]
        self.assertEquals(set(task_ids), set(expected_ids))
class InstructorTaskSubmitTest(InstructorTaskModuleTestCase):
    """Tests API methods that involve the submission of background tasks."""

    def setUp(self):
        # builds the course/chapter/section scaffolding in the modulestore
        self.initialize_course()
        self.student = UserFactory.create(username="student", email="student@edx.org")
        self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")

    def test_submit_nonexistent_modules(self):
        # confirm that a rescore of a non-existent module returns an exception
        problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
        course_id = self.course.id
        request = None
        with self.assertRaises(ItemNotFoundError):
            submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
        with self.assertRaises(ItemNotFoundError):
            submit_rescore_problem_for_all_students(request, course_id, problem_url)
        with self.assertRaises(ItemNotFoundError):
            submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
        with self.assertRaises(ItemNotFoundError):
            submit_delete_problem_state_for_all_students(request, course_id, problem_url)

    def test_submit_nonrescorable_modules(self):
        # confirm that a rescore of an existent but unscorable module returns an exception
        # (Note that it is easier to test a scoreable but non-rescorable module in test_tasks,
        # where we are creating real modules.)
        problem_url = self.problem_section.location.url()
        course_id = self.course.id
        request = None
        with self.assertRaises(NotImplementedError):
            submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
        with self.assertRaises(NotImplementedError):
            submit_rescore_problem_for_all_students(request, course_id, problem_url)

    def _test_submit_with_long_url(self, task_function, student=None):
        # a location built from a 255-character name overflows the db field, so
        # submission should be rejected with ValueError
        problem_url_name = 'x' * 255
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        with self.assertRaises(ValueError):
            if student is not None:
                task_function(self.create_task_request(self.instructor), self.course.id, location, student)
            else:
                task_function(self.create_task_request(self.instructor), self.course.id, location)

    def test_submit_rescore_all_with_long_url(self):
        self._test_submit_with_long_url(submit_rescore_problem_for_all_students)

    def test_submit_rescore_student_with_long_url(self):
        self._test_submit_with_long_url(submit_rescore_problem_for_student, self.student)

    def test_submit_reset_all_with_long_url(self):
        self._test_submit_with_long_url(submit_reset_problem_attempts_for_all_students)

    def test_submit_delete_all_with_long_url(self):
        self._test_submit_with_long_url(submit_delete_problem_state_for_all_students)

    def _test_submit_task(self, task_function, student=None):
        # tests submit, and then tests a second identical submission.
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        if student is not None:
            instructor_task = task_function(self.create_task_request(self.instructor),
                                            self.course.id, location, student)
        else:
            instructor_task = task_function(self.create_task_request(self.instructor),
                                            self.course.id, location)
        # test resubmitting, by updating the existing record:
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        instructor_task.task_state = PROGRESS
        instructor_task.save()
        # an identical submission while the first is still in progress must be refused:
        with self.assertRaises(AlreadyRunningError):
            if student is not None:
                task_function(self.create_task_request(self.instructor), self.course.id, location, student)
            else:
                task_function(self.create_task_request(self.instructor), self.course.id, location)

    def test_submit_rescore_all(self):
        self._test_submit_task(submit_rescore_problem_for_all_students)

    def test_submit_rescore_student(self):
        self._test_submit_task(submit_rescore_problem_for_student, self.student)

    def test_submit_reset_all(self):
        self._test_submit_task(submit_reset_problem_attempts_for_all_students)

    def test_submit_delete_all(self):
        self._test_submit_task(submit_delete_problem_state_for_all_students)

View File

@@ -0,0 +1,211 @@
"""
Base test classes for LMS instructor-initiated background tasks
"""
import json
from uuid import uuid4
from mock import Mock
from celery.states import SUCCESS, FAILURE
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from courseware.model_data import StudentModule
from courseware.tests.tests import LoginEnrollmentTestCase, TEST_DATA_MONGO_MODULESTORE
from instructor_task.api_helper import encode_problem_and_student_input
from instructor_task.models import PROGRESS, QUEUING
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.views import instructor_task_status
# Identifying data for the course created in InstructorTaskModuleTestCase.initialize_course():
TEST_COURSE_ORG = 'edx'
TEST_COURSE_NAME = 'Test Course'
TEST_COURSE_NUMBER = '1.23x'
TEST_SECTION_NAME = "Problem"
TEST_COURSE_ID = 'edx/1.23x/test_course'

# Values stored in the task_output of failure entries created by tests:
TEST_FAILURE_MESSAGE = 'task failed horribly'
TEST_FAILURE_EXCEPTION = 'RandomCauseError'

# Answer options used by the option-response problems defined in tests:
OPTION_1 = 'Option 1'
OPTION_2 = 'Option 2'
class InstructorTaskTestCase(TestCase):
    """
    Tests API and view methods that involve the reporting of status for background tasks.

    Provides helpers for creating InstructorTask database entries in various states.
    """

    def setUp(self):
        self.student = UserFactory.create(username="student", email="student@edx.org")
        self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
        self.problem_url = InstructorTaskTestCase.problem_location("test_urlname")

    @staticmethod
    def problem_location(problem_url_name):
        """
        Create an internal location for a test problem.
        """
        return "i4x://{org}/{number}/problem/{problem_url_name}".format(org='edx',
                                                                        number='1.23x',
                                                                        problem_url_name=problem_url_name)

    def _create_entry(self, task_state=QUEUING, task_output=None, student=None):
        """Creates a InstructorTask entry for testing."""
        task_id = str(uuid4())
        # task_output is stored as JSON text (or NULL when not provided)
        progress_json = json.dumps(task_output) if task_output is not None else None
        task_input, task_key = encode_problem_and_student_input(self.problem_url, student)
        instructor_task = InstructorTaskFactory.create(course_id=TEST_COURSE_ID,
                                                       requester=self.instructor,
                                                       task_input=json.dumps(task_input),
                                                       task_key=task_key,
                                                       task_id=task_id,
                                                       task_state=task_state,
                                                       task_output=progress_json)
        return instructor_task

    def _create_failure_entry(self):
        """Creates a InstructorTask entry representing a failed task."""
        # view task entry for task failure
        progress = {'message': TEST_FAILURE_MESSAGE,
                    'exception': TEST_FAILURE_EXCEPTION,
                    }
        return self._create_entry(task_state=FAILURE, task_output=progress)

    def _create_success_entry(self, student=None):
        """Creates a InstructorTask entry representing a successful task."""
        return self._create_progress_entry(student, task_state=SUCCESS)

    def _create_progress_entry(self, student=None, task_state=PROGRESS):
        """Creates a InstructorTask entry representing a task in progress."""
        progress = {'attempted': 3,
                    'updated': 2,
                    'total': 5,
                    'action_name': 'rescored',
                    }
        return self._create_entry(task_state=task_state, task_output=progress, student=student)
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class InstructorTaskModuleTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    Base test class for InstructorTask-related tests that require
    the setup of a course and problem in order to access StudentModule state.
    """
    # set by initialize_course(); current_user tracks who is logged in via login_username()
    course = None
    current_user = None

    def initialize_course(self):
        """Create a course in the store, with a chapter and section."""
        self.module_store = modulestore()

        # Create the course
        self.course = CourseFactory.create(org=TEST_COURSE_ORG,
                                           number=TEST_COURSE_NUMBER,
                                           display_name=TEST_COURSE_NAME)

        # Add a chapter to the course
        chapter = ItemFactory.create(parent_location=self.course.location,
                                     display_name=TEST_SECTION_NAME)

        # add a sequence to the course to which the problems can be added
        self.problem_section = ItemFactory.create(parent_location=chapter.location,
                                                  template='i4x://edx/templates/sequential/Empty',
                                                  display_name=TEST_SECTION_NAME)

    @staticmethod
    def get_user_email(username):
        """Generate email address based on username"""
        return '{0}@test.com'.format(username)

    def login_username(self, username):
        """Login the user, given the `username`."""
        # avoid re-logging-in when the requested user is already the active one
        if self.current_user != username:
            self.login(InstructorTaskModuleTestCase.get_user_email(username), "test")
            self.current_user = username

    def _create_user(self, username, is_staff=False):
        """Creates a user and enrolls them in the test course."""
        email = InstructorTaskModuleTestCase.get_user_email(username)
        thisuser = UserFactory.create(username=username, email=email, is_staff=is_staff)
        CourseEnrollmentFactory.create(user=thisuser, course_id=self.course.id)
        return thisuser

    def create_instructor(self, username):
        """Creates an instructor for the test course."""
        return self._create_user(username, is_staff=True)

    def create_student(self, username):
        """Creates a student for the test course."""
        return self._create_user(username, is_staff=False)

    @staticmethod
    def problem_location(problem_url_name):
        """
        Create an internal location for a test problem.

        A name that already contains "i4x:" is assumed to be a full location
        and is returned unchanged.
        """
        if "i4x:" in problem_url_name:
            return problem_url_name
        else:
            return "i4x://{org}/{number}/problem/{problem_url_name}".format(org=TEST_COURSE_ORG,
                                                                            number=TEST_COURSE_NUMBER,
                                                                            problem_url_name=problem_url_name)

    def define_option_problem(self, problem_url_name):
        """Create the problem definition so the answer is Option 1"""
        factory = OptionResponseXMLFactory()
        factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_1),
                        'options': [OPTION_1, OPTION_2],
                        'correct_option': OPTION_1,
                        'num_responses': 2}
        problem_xml = factory.build_xml(**factory_args)
        ItemFactory.create(parent_location=self.problem_section.location,
                           template="i4x://edx/templates/problem/Blank_Common_Problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def redefine_option_problem(self, problem_url_name):
        """Change the problem definition so the answer is Option 2"""
        factory = OptionResponseXMLFactory()
        factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_2),
                        'options': [OPTION_1, OPTION_2],
                        'correct_option': OPTION_2,
                        'num_responses': 2}
        problem_xml = factory.build_xml(**factory_args)
        # Use this class's own location helper.  (Previously this called
        # InstructorTaskTestCase.problem_location, which hard-codes the same
        # org/number values but would silently diverge if the TEST_COURSE_*
        # constants were ever changed.)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.module_store.update_item(location, problem_xml)

    def get_student_module(self, username, descriptor):
        """Get StudentModule object for test course, given the `username` and the problem's `descriptor`."""
        return StudentModule.objects.get(course_id=self.course.id,
                                         student=User.objects.get(username=username),
                                         module_type=descriptor.location.category,
                                         module_state_key=descriptor.location.url(),
                                         )

    @staticmethod
    def get_task_status(task_id):
        """Use api method to fetch task status, using mock request."""
        mock_request = Mock()
        mock_request.REQUEST = {'task_id': task_id}
        response = instructor_task_status(mock_request)
        status = json.loads(response.content)
        return status

    def create_task_request(self, requester_username):
        """Generate request that can be used for submitting tasks"""
        request = Mock()
        request.user = User.objects.get(username=requester_username)
        request.get_host = Mock(return_value="testhost")
        request.META = {'REMOTE_ADDR': '0:0:0:0', 'SERVER_NAME': 'testhost'}
        request.is_secure = Mock(return_value=False)
        return request

View File

@@ -0,0 +1,475 @@
"""
Integration Tests for LMS instructor-initiated background tasks
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import logging
import json
from mock import patch
import textwrap
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
CustomResponseXMLFactory)
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.model_data import StudentModule
from instructor_task.api import (submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TEST_COURSE_ORG, TEST_COURSE_NUMBER,
OPTION_1, OPTION_2)
from capa.responsetypes import StudentInputError
log = logging.getLogger(__name__)
class TestIntegrationTask(InstructorTaskModuleTestCase):
    """
    Base class to provide general methods used for "integration" testing of particular tasks.
    """

    def submit_student_answer(self, username, problem_url_name, responses):
        """
        Use ajax interface to submit a student answer.

        Assumes the input list of responses has two values.
        """
        def get_input_id(response_id):
            """Creates input id using information about the test course and the current problem."""
            # Note that this is a capa-specific convention.  The form is a version of the problem's
            # URL, modified so that it can be easily stored in html, prepended with "input-" and
            # appended with a sequence identifier for the particular response the input goes to.
            return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(),
                                                              TEST_COURSE_NUMBER.replace('.', '_'),
                                                              problem_url_name, response_id)

        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('modx_dispatch',
                           kwargs={'course_id': self.course.id,
                                   'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                   'dispatch': 'problem_check', })
        # we assume we have two responses, so assign them the correct identifiers.
        resp = self.client.post(modx_url, {
            get_input_id('2_1'): responses[0],
            get_input_id('3_1'): responses[1],
        })
        return resp

    def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
        """Confirm that expected values are stored in InstructorTask on task failure."""
        instructor_task = InstructorTask.objects.get(id=entry_id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, task_type)
        task_input = json.loads(instructor_task.task_input)
        # an all-students task should not carry a 'student' key in its input:
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'ZeroDivisionError')
        self.assertEqual(status['message'], expected_message)
        # check status returned:
        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], expected_message)
class TestRescoringTask(TestIntegrationTask):
    """
    Integration-style tests for rescoring problems in a background task.
    Exercises real problems with a minimum of patching.
    """

    def setUp(self):
        self.initialize_course()
        self.create_instructor('instructor')
        self.create_student('u1')
        self.create_student('u2')
        self.create_student('u3')
        self.create_student('u4')
        self.logout()

    def render_problem(self, username, problem_url_name):
        """
        Use ajax interface to request html for a problem.
        """
        # make sure that the requested user is logged in, so that the ajax call works
        # on the right problem:
        self.login_username(username)
        # make ajax call:
        modx_url = reverse('modx_dispatch',
                           kwargs={'course_id': self.course.id,
                                   'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                   'dispatch': 'problem_get', })
        resp = self.client.post(modx_url, {})
        return resp

    def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):
        """
        Check that the StudentModule state contains the expected values.

        The student module is found for the test course, given the `username` and problem `descriptor`.

        Values checked include the number of attempts, the score, and the max score for a problem.
        """
        module = self.get_student_module(username, descriptor)
        self.assertEqual(module.grade, expected_score)
        self.assertEqual(module.max_grade, expected_max_score)
        state = json.loads(module.state)
        attempts = state['attempts']
        self.assertEqual(attempts, expected_attempts)
        if attempts > 0:
            # once a problem has been attempted, grading details should be present:
            self.assertTrue('correct_map' in state)
            self.assertTrue('student_answers' in state)
            self.assertGreater(len(state['correct_map']), 0)
            self.assertGreater(len(state['student_answers']), 0)

    def submit_rescore_all_student_answers(self, instructor, problem_url_name):
        """Submits the particular problem for rescoring"""
        return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id,
                                                       InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
        """Submits the particular problem for rescoring for a particular student"""
        return submit_rescore_problem_for_student(self.create_task_request(instructor), self.course.id,
                                                  InstructorTaskModuleTestCase.problem_location(problem_url_name),
                                                  student)

    def test_rescoring_option_problem(self):
        """Run rescore scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])
        # scores reflect how many of the two responses matched OPTION_1:
        self.check_state('u1', descriptor, 2, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)
        # update the data in the problem definition
        self.redefine_option_problem(problem_url_name)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade:
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 2, 2, 1)
        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 0, 2, 1)
        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self.check_state('u1', descriptor, 0, 2, 1)
        self.check_state('u2', descriptor, 1, 2, 1)
        self.check_state('u3', descriptor, 1, 2, 1)
        self.check_state('u4', descriptor, 2, 2, 1)

    def test_rescoring_failure(self):
        """Simulate a failure in rescoring a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        expected_message = "bad things happened"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)

    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode"""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt). That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- doesn't matter really what problem it is, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        # check instructor_task returned
        # (input errors are not fatal: the task succeeds, but the submission is not updated)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, 'SUCCESS')
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertFalse('student' in task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['updated'], 0)
        self.assertEqual(status['total'], 1)

    def define_code_response_problem(self, problem_url_name):
        """
        Define an arbitrary code-response problem.

        We'll end up mocking its evaluation later.
        """
        factory = CodeResponseXMLFactory()
        grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
        problem_xml = factory.build_xml(initial_display="def square(x):",
                                        answer_display="answer",
                                        grader_payload=grader_payload,
                                        num_responses=2)
        ItemFactory.create(parent_location=self.problem_section.location,
                           template="i4x://edx/templates/problem/Blank_Common_Problem",
                           display_name=str(problem_url_name),
                           data=problem_xml)

    def test_rescoring_code_problem(self):
        """Run rescore scenario on problem with code submission"""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])
        instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'], "Problem's definition does not support rescoring")
        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], "Problem's definition does not support rescoring")

    def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        Generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, then change the definition of correctness (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
        if redefine:
            self.module_store.update_item(InstructorTaskModuleTestCase.problem_location(problem_url_name), problem_xml)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               template="i4x://edx/templates/problem/Blank_Common_Problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})

    def test_rescoring_randomized_problem(self):
        """Run rescore scenario on custom problem that uses randomize"""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        # run with more than one user
        userlist = ['u1', 'u2', 'u3', 'u4']
        for username in userlist:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(username, problem_url_name, [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(username, descriptor, 0, 1, 1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
            answer = correct_map.values()[0]['msg']
            self.submit_student_answer(username, problem_url_name, [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(username, descriptor, 1, 1, 2)
        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state('u1', descriptor, 1, 1, 2)
        # rescore the problem for only one student -- only that student's grade should change
        # (and none of the attempts):
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        for username in userlist:
            self.check_state(username, descriptor, 0 if username == 'u1' else 1, 1, 2)
        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)
        # all grades should change to being wrong (with no change in attempts)
        for username in userlist:
            self.check_state(username, descriptor, 0, 1, 2)
class TestResetAttemptsTask(TestIntegrationTask):
    """
    Integration-style tests for resetting problem attempts in a background task.
    Exercises real problems with a minimum of patching.
    """
    # usernames of students created (and submitting answers) in each test:
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        # Build the course, one instructor, and the test students; end logged out
        # so each test controls who is logged in.
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def get_num_attempts(self, username, descriptor):
        """returns number of attempts stored for `username` on problem `descriptor` for test course"""
        module = self.get_student_module(username, descriptor)
        state = json.loads(module.state)
        return state['attempts']

    def reset_problem_attempts(self, instructor, problem_url_name):
        """Submits the current problem for resetting"""
        return submit_reset_problem_attempts_for_all_students(self.create_task_request(instructor), self.course.id,
                                                              InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def test_reset_attempts_on_problem(self):
        """Run reset-attempts scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        num_attempts = 3
        # first store answers for each of the separate users:
        for _ in range(num_attempts):
            for username in self.userlist:
                self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), num_attempts)
        # the background task should zero out everyone's attempt count:
        self.reset_problem_attempts('instructor', problem_url_name)
        for username in self.userlist:
            self.assertEquals(self.get_num_attempts(username, descriptor), 0)

    def test_reset_failure(self):
        """Simulate a failure in resetting attempts on a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        # force the state save inside the task to blow up, so the task fails:
        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.save') as mock_save:
            mock_save.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)

    def test_reset_non_problem(self):
        """confirm that a non-problem can still be successfully reset"""
        # use the section (not a problem) as the target location:
        problem_url_name = self.problem_section.location.url()
        instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)
class TestDeleteProblemTask(TestIntegrationTask):
    """
    Integration-style tests for deleting problem state in a background task.
    Exercises real problems with a minimum of patching.
    """
    # usernames of students created (and submitting answers) in each test:
    userlist = ['u1', 'u2', 'u3', 'u4']

    def setUp(self):
        # Build the course, one instructor, and the test students; end logged out
        # so each test controls who is logged in.
        self.initialize_course()
        self.create_instructor('instructor')
        for username in self.userlist:
            self.create_student(username)
        self.logout()

    def delete_problem_state(self, instructor, problem_url_name):
        """Submits the current problem for deletion"""
        return submit_delete_problem_state_for_all_students(self.create_task_request(instructor), self.course.id,
                                                            InstructorTaskModuleTestCase.problem_location(problem_url_name))

    def test_delete_problem_state(self):
        """Run delete-state scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_instance(self.course.id, location)
        # first store answers for each of the separate users:
        for username in self.userlist:
            self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
        # confirm that state exists:
        for username in self.userlist:
            self.assertTrue(self.get_student_module(username, descriptor) is not None)
        # run delete task:
        self.delete_problem_state('instructor', problem_url_name)
        # confirm that no state can be found:
        for username in self.userlist:
            with self.assertRaises(StudentModule.DoesNotExist):
                self.get_student_module(username, descriptor)

    def test_delete_failure(self):
        """Simulate a failure in deleting state of a problem"""
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        # force the delete inside the task to blow up, so the task fails:
        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.delete') as mock_delete:
            mock_delete.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.delete_problem_state('instructor', problem_url_name)
        self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)

    def test_delete_non_problem(self):
        """confirm that a non-problem can still be successfully deleted"""
        # use the section (not a problem) as the target location:
        problem_url_name = self.problem_section.location.url()
        instructor_task = self.delete_problem_state('instructor', problem_url_name)
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, SUCCESS)

View File

@@ -0,0 +1,332 @@
"""
Unit tests for LMS instructor-initiated background tasks,
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import json
from uuid import uuid4
from mock import Mock, patch
from celery.states import SUCCESS, FAILURE
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.model_data import StudentModule
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import InstructorTaskModuleTestCase, TEST_COURSE_ORG, TEST_COURSE_NUMBER
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.tasks import rescore_problem, reset_problem_attempts, delete_problem_state
from instructor_task.tasks_helper import UpdateProblemModuleStateError, update_problem_module_state
# url_name used for the problem defined (via define_option_problem) in these tests.
PROBLEM_URL_NAME = "test_urlname"
class TestTaskFailure(Exception):
    """Dummy exception raised deliberately by tests to simulate a failure inside a task."""
    pass
class TestInstructorTasks(InstructorTaskModuleTestCase):
    """
    Unit tests for the instructor task functions themselves (rescore_problem,
    reset_problem_attempts, delete_problem_state), run against a mocked-out
    Celery "current task" instead of a real worker.
    """
    def setUp(self):
        # BUGFIX: was `super(InstructorTaskModuleTestCase, self).setUp()`, which
        # names the *parent* class and therefore skips the parent's own setUp.
        # Naming this class runs the full inherited setUp chain.
        super(TestInstructorTasks, self).setUp()
        self.initialize_course()
        self.instructor = self.create_instructor('instructor')
        self.problem_url = InstructorTaskModuleTestCase.problem_location(PROBLEM_URL_NAME)

    def _create_input_entry(self, student_ident=None):
        """Creates a InstructorTask entry for testing, optionally scoped to one student."""
        task_id = str(uuid4())
        task_input = {'problem_url': self.problem_url}
        if student_ident is not None:
            task_input['student'] = student_ident
        instructor_task = InstructorTaskFactory.create(course_id=self.course.id,
                                                       requester=self.instructor,
                                                       task_input=json.dumps(task_input),
                                                       task_key='dummy value',
                                                       task_id=task_id)
        return instructor_task

    def _get_xmodule_instance_args(self):
        """
        Calculate dummy values for parameters needed for instantiating xmodule instances.
        """
        return {'xqueue_callback_url_prefix': 'dummy_value',
                'request_info': {},
                }

    def _run_task_with_mock_celery(self, task_function, entry_id, task_id, expected_failure_message=None):
        """
        Run `task_function` with Celery's current task replaced by a Mock.

        If `expected_failure_message` is provided, the mock's update_state
        raises TestTaskFailure with that message, simulating a failure
        occurring partway through the task.
        """
        self.current_task = Mock()
        self.current_task.request = Mock()
        self.current_task.request.id = task_id
        self.current_task.update_state = Mock()
        if expected_failure_message is not None:
            self.current_task.update_state.side_effect = TestTaskFailure(expected_failure_message)
        with patch('instructor_task.tasks_helper._get_current_task') as mock_get_task:
            mock_get_task.return_value = self.current_task
            return task_function(entry_id, self._get_xmodule_instance_args())

    def _test_missing_current_task(self, task_function):
        """Ensure each task fails cleanly when run without (mock) Celery running."""
        task_entry = self._create_input_entry()
        with self.assertRaises(UpdateProblemModuleStateError):
            task_function(task_entry.id, self._get_xmodule_instance_args())

    def test_rescore_missing_current_task(self):
        self._test_missing_current_task(rescore_problem)

    def test_reset_missing_current_task(self):
        self._test_missing_current_task(reset_problem_attempts)

    def test_delete_missing_current_task(self):
        self._test_missing_current_task(delete_problem_state)

    def _test_undefined_problem(self, task_function):
        """Run with celery, but with no problem defined in the course."""
        task_entry = self._create_input_entry()
        with self.assertRaises(ItemNotFoundError):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)

    def test_rescore_undefined_problem(self):
        self._test_undefined_problem(rescore_problem)

    def test_reset_undefined_problem(self):
        self._test_undefined_problem(reset_problem_attempts)

    def test_delete_undefined_problem(self):
        self._test_undefined_problem(delete_problem_state)

    def _test_run_with_task(self, task_function, action_name, expected_num_updated):
        """Run a task and check both its returned status and the stored InstructorTask row."""
        task_entry = self._create_input_entry()
        status = self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
        # check return value
        self.assertEquals(status.get('attempted'), expected_num_updated)
        self.assertEquals(status.get('updated'), expected_num_updated)
        self.assertEquals(status.get('total'), expected_num_updated)
        self.assertEquals(status.get('action_name'), action_name)
        # BUGFIX: was `self.assertGreater('duration_ms', 0)`, which compared the
        # literal string to 0 (vacuously true in Python 2).  Check the value
        # actually reported in the status dict; it may legitimately be 0 ms for
        # a task with nothing to do, so require only non-negative.
        self.assertGreaterEqual(status.get('duration_ms'), 0)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(json.loads(entry.task_output), status)
        self.assertEquals(entry.task_state, SUCCESS)

    def _test_run_with_no_state(self, task_function, action_name):
        """Run with no StudentModules defined for the problem; nothing should be updated."""
        self.define_option_problem(PROBLEM_URL_NAME)
        self._test_run_with_task(task_function, action_name, 0)

    def test_rescore_with_no_state(self):
        self._test_run_with_no_state(rescore_problem, 'rescored')

    def test_reset_with_no_state(self):
        self._test_run_with_no_state(reset_problem_attempts, 'reset')

    def test_delete_with_no_state(self):
        self._test_run_with_no_state(delete_problem_state, 'deleted')

    def _create_students_with_state(self, num_students, state=None):
        """Create students, a problem, and StudentModule objects for testing"""
        self.define_option_problem(PROBLEM_URL_NAME)
        students = [
            UserFactory.create(username='robot%d' % i, email='robot+test+%d@edx.org' % i)
            for i in xrange(num_students)
        ]
        for student in students:
            StudentModuleFactory.create(course_id=self.course.id,
                                        module_state_key=self.problem_url,
                                        student=student,
                                        state=state)
        return students

    def _assert_num_attempts(self, students, num_attempts):
        """Check the number attempts for all students is the same"""
        for student in students:
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.problem_url)
            state = json.loads(module.state)
            self.assertEquals(state['attempts'], num_attempts)

    def test_reset_with_some_state(self):
        initial_attempts = 3
        input_state = json.dumps({'attempts': initial_attempts})
        num_students = 10
        students = self._create_students_with_state(num_students, input_state)
        # check that entries were set correctly
        self._assert_num_attempts(students, initial_attempts)
        # run the task
        self._test_run_with_task(reset_problem_attempts, 'reset', num_students)
        # check that entries were reset
        self._assert_num_attempts(students, 0)

    def test_delete_with_some_state(self):
        # This will create StudentModule entries -- we don't have to worry about
        # the state inside them.
        num_students = 10
        students = self._create_students_with_state(num_students)
        # check that entries were created correctly
        for student in students:
            StudentModule.objects.get(course_id=self.course.id,
                                      student=student,
                                      module_state_key=self.problem_url)
        self._test_run_with_task(delete_problem_state, 'deleted', num_students)
        # confirm that no state can be found anymore:
        for student in students:
            with self.assertRaises(StudentModule.DoesNotExist):
                StudentModule.objects.get(course_id=self.course.id,
                                          student=student,
                                          module_state_key=self.problem_url)

    def _test_reset_with_student(self, use_email):
        """Run reset task for a single student, identified by email or username."""
        num_students = 10
        initial_attempts = 3
        input_state = json.dumps({'attempts': initial_attempts})
        students = self._create_students_with_state(num_students, input_state)
        # check that entries were set correctly
        for student in students:
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.problem_url)
            state = json.loads(module.state)
            self.assertEquals(state['attempts'], initial_attempts)
        if use_email:
            student_ident = students[3].email
        else:
            student_ident = students[3].username
        task_entry = self._create_input_entry(student_ident)
        status = self._run_task_with_mock_celery(reset_problem_attempts, task_entry.id, task_entry.task_id)
        # check return value
        self.assertEquals(status.get('attempted'), 1)
        self.assertEquals(status.get('updated'), 1)
        self.assertEquals(status.get('total'), 1)
        self.assertEquals(status.get('action_name'), 'reset')
        # BUGFIX: was `self.assertGreater('duration_ms', 0)` -- see
        # _test_run_with_task for details.
        self.assertGreaterEqual(status.get('duration_ms'), 0)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(json.loads(entry.task_output), status)
        self.assertEquals(entry.task_state, SUCCESS)
        # check that the correct entry was reset
        for index, student in enumerate(students):
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.problem_url)
            state = json.loads(module.state)
            if index == 3:
                self.assertEquals(state['attempts'], 0)
            else:
                self.assertEquals(state['attempts'], initial_attempts)

    def test_reset_with_student_username(self):
        self._test_reset_with_student(False)

    def test_reset_with_student_email(self):
        self._test_reset_with_student(True)

    def _test_run_with_failure(self, task_function, expected_message):
        """Simulate a failure inside the task and check the stored failure output."""
        # run with no StudentModules for the problem,
        # because we will fail before entering the loop.
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'TestTaskFailure')
        self.assertEquals(output['message'], expected_message)

    def test_rescore_with_failure(self):
        self._test_run_with_failure(rescore_problem, 'We expected this to fail')

    def test_reset_with_failure(self):
        self._test_run_with_failure(reset_problem_attempts, 'We expected this to fail')

    def test_delete_with_failure(self):
        self._test_run_with_failure(delete_problem_state, 'We expected this to fail')

    def _test_run_with_long_error_msg(self, task_function):
        """
        Run with an error message that is so long it will require
        truncation (as well as the jettisoning of the traceback).
        """
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        expected_message = "x" * 1500
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        # stored output must fit the column; message truncated with "...":
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'TestTaskFailure')
        self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + "...")
        self.assertTrue('traceback' not in output)

    def test_rescore_with_long_error_msg(self):
        self._test_run_with_long_error_msg(rescore_problem)

    def test_reset_with_long_error_msg(self):
        self._test_run_with_long_error_msg(reset_problem_attempts)

    def test_delete_with_long_error_msg(self):
        self._test_run_with_long_error_msg(delete_problem_state)

    def _test_run_with_short_error_msg(self, task_function):
        """
        Run with an error message that is short enough to fit
        in the output, but long enough that the traceback won't.
        Confirm that the traceback is truncated.
        """
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        expected_message = "x" * 900
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'TestTaskFailure')
        self.assertEquals(output['message'], expected_message)
        self.assertEquals(output['traceback'][-3:], "...")

    def test_rescore_with_short_error_msg(self):
        self._test_run_with_short_error_msg(rescore_problem)

    def test_reset_with_short_error_msg(self):
        self._test_run_with_short_error_msg(reset_problem_attempts)

    def test_delete_with_short_error_msg(self):
        self._test_run_with_short_error_msg(delete_problem_state)

    def test_successful_result_too_long(self):
        # while we don't expect the existing tasks to generate output that is too
        # long, we can test the framework will handle such an occurrence.
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        action_name = 'x' * 1000
        # BUGFIX: was `lambda(_a, _b, _c): True` -- Python 2 tuple-unpacking
        # syntax that defines a lambda taking ONE tuple argument.  The update
        # function is expected to accept three positional arguments.
        update_fcn = lambda _module_descriptor, _student_module, _xmodule_instance_args: True
        task_function = (lambda entry_id, xmodule_instance_args:
                         update_problem_module_state(entry_id,
                                                     update_fcn, action_name, filter_fcn=None,
                                                     xmodule_instance_args=None))
        with self.assertRaises(ValueError):
            self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEquals(entry.task_state, FAILURE)
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEquals(output['exception'], 'ValueError')
        self.assertTrue("Length of task output is too long" in output['message'])
        self.assertTrue('traceback' not in output)

View File

@@ -0,0 +1,266 @@
"""
Test for LMS instructor background task queue management
"""
import json
from celery.states import SUCCESS, FAILURE, REVOKED, PENDING
from mock import Mock, patch
from django.utils.datastructures import MultiValueDict
from instructor_task.models import PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
TEST_FAILURE_MESSAGE,
TEST_FAILURE_EXCEPTION)
from instructor_task.views import instructor_task_status, get_task_completion_info
class InstructorTaskReportTest(InstructorTaskTestCase):
    """
    Tests API and view methods that involve the reporting of status for background tasks.
    """

    def _get_instructor_task_status(self, task_id):
        """Returns status corresponding to task_id via api method."""
        request = Mock()
        request.REQUEST = {'task_id': task_id}
        return instructor_task_status(request)

    def test_instructor_task_status(self):
        """The status view echoes back the task_id of an existing failed entry."""
        instructor_task = self._create_failure_entry()
        task_id = instructor_task.task_id
        request = Mock()
        request.REQUEST = {'task_id': task_id}
        response = instructor_task_status(request)
        output = json.loads(response.content)
        self.assertEquals(output['task_id'], task_id)

    def test_missing_instructor_task_status(self):
        """An unrecognized task_id produces an empty status dict."""
        task_id = "missing_id"
        request = Mock()
        request.REQUEST = {'task_id': task_id}
        response = instructor_task_status(request)
        output = json.loads(response.content)
        self.assertEquals(output, {})

    def test_instructor_task_status_list(self):
        """Status can be fetched for multiple task_ids in one request."""
        # Fetch status for existing tasks by arg list, as if called from ajax.
        # Note that ajax does something funny with the marshalling of
        # list data, so the key value has "[]" appended to it.
        task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 5)]
        request = Mock()
        request.REQUEST = MultiValueDict({'task_ids[]': task_ids})
        response = instructor_task_status(request)
        output = json.loads(response.content)
        self.assertEquals(len(output), len(task_ids))
        for task_id in task_ids:
            self.assertEquals(output[task_id]['task_id'], task_id)

    def test_get_status_from_failure(self):
        """Status of an already-failed task reports failure details."""
        # get status for a task that has already failed
        instructor_task = self._create_failure_entry()
        task_id = instructor_task.task_id
        response = self._get_instructor_task_status(task_id)
        output = json.loads(response.content)
        self.assertEquals(output['message'], TEST_FAILURE_MESSAGE)
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_id'], task_id)
        self.assertEquals(output['task_state'], FAILURE)
        self.assertFalse(output['in_progress'])
        expected_progress = {'exception': TEST_FAILURE_EXCEPTION,
                             'message': TEST_FAILURE_MESSAGE}
        self.assertEquals(output['task_progress'], expected_progress)

    def test_get_status_from_success(self):
        """Status of a completed task reports progress counts and message."""
        # get status for a task that has already succeeded
        instructor_task = self._create_success_entry()
        task_id = instructor_task.task_id
        response = self._get_instructor_task_status(task_id)
        output = json.loads(response.content)
        self.assertEquals(output['message'], "Problem rescored for 2 of 3 students (out of 5)")
        # 'succeeded' is False because not all attempted updates succeeded,
        # even though the task itself completed (task_state is SUCCESS):
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_id'], task_id)
        self.assertEquals(output['task_state'], SUCCESS)
        self.assertFalse(output['in_progress'])
        expected_progress = {'attempted': 3,
                             'updated': 2,
                             'total': 5,
                             'action_name': 'rescored'}
        self.assertEquals(output['task_progress'], expected_progress)

    def _test_get_status_from_result(self, task_id, mock_result):
        """
        Provides mock result to caller of instructor_task_status, and returns resulting output.
        """
        with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
            mock_result_ctor.return_value = mock_result
            response = self._get_instructor_task_status(task_id)
        output = json.loads(response.content)
        self.assertEquals(output['task_id'], task_id)
        return output

    def test_get_status_to_pending(self):
        """A task that hasn't started yet reports PENDING and in_progress."""
        # get status for a task that hasn't begun to run yet
        instructor_task = self._create_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = PENDING
        output = self._test_get_status_from_result(task_id, mock_result)
        for key in ['message', 'succeeded', 'task_progress']:
            self.assertTrue(key not in output)
        self.assertEquals(output['task_state'], 'PENDING')
        self.assertTrue(output['in_progress'])

    def test_update_progress_to_progress(self):
        """A running task reports interim progress counts."""
        # view task entry for task in progress
        instructor_task = self._create_progress_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = PROGRESS
        mock_result.result = {'attempted': 5,
                              'updated': 4,
                              'total': 10,
                              'action_name': 'rescored'}
        output = self._test_get_status_from_result(task_id, mock_result)
        self.assertEquals(output['message'], "Progress: rescored 4 of 5 so far (out of 10)")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], PROGRESS)
        self.assertTrue(output['in_progress'])
        self.assertEquals(output['task_progress'], mock_result.result)

    def test_update_progress_to_failure(self):
        """A task that was in progress and then failed reports the exception."""
        # view task entry for task in progress that later fails
        instructor_task = self._create_progress_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = FAILURE
        mock_result.result = NotImplementedError("This task later failed.")
        mock_result.traceback = "random traceback"
        output = self._test_get_status_from_result(task_id, mock_result)
        self.assertEquals(output['message'], "This task later failed.")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], FAILURE)
        self.assertFalse(output['in_progress'])
        expected_progress = {'exception': 'NotImplementedError',
                             'message': "This task later failed.",
                             'traceback': "random traceback"}
        self.assertEquals(output['task_progress'], expected_progress)

    def test_update_progress_to_revoked(self):
        """A task that was in progress and then revoked reports revocation."""
        # view task entry for task in progress that later is revoked
        instructor_task = self._create_progress_entry()
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = REVOKED
        output = self._test_get_status_from_result(task_id, mock_result)
        self.assertEquals(output['message'], "Task revoked before running")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], REVOKED)
        self.assertFalse(output['in_progress'])
        expected_progress = {'message': "Task revoked before running"}
        self.assertEquals(output['task_progress'], expected_progress)

    def _get_output_for_task_success(self, attempted, updated, total, student=None):
        """returns the task_id and the result returned by instructor_task_status()."""
        # view task entry for task in progress
        instructor_task = self._create_progress_entry(student)
        task_id = instructor_task.task_id
        mock_result = Mock()
        mock_result.task_id = task_id
        mock_result.state = SUCCESS
        mock_result.result = {'attempted': attempted,
                              'updated': updated,
                              'total': total,
                              'action_name': 'rescored'}
        output = self._test_get_status_from_result(task_id, mock_result)
        return output

    def test_update_progress_to_success(self):
        """A completed task reports final progress counts."""
        output = self._get_output_for_task_success(10, 8, 10)
        self.assertEquals(output['message'], "Problem rescored for 8 of 10 students")
        self.assertEquals(output['succeeded'], False)
        self.assertEquals(output['task_state'], SUCCESS)
        self.assertFalse(output['in_progress'])
        expected_progress = {'attempted': 10,
                             'updated': 8,
                             'total': 10,
                             'action_name': 'rescored'}
        self.assertEquals(output['task_progress'], expected_progress)

    def test_success_messages(self):
        """Check the user-visible message for each combination of progress counts."""
        # no students attempted:
        output = self._get_output_for_task_success(0, 0, 10)
        self.assertEqual(output['message'], "Unable to find any students with submissions to be rescored (out of 10)")
        self.assertFalse(output['succeeded'])
        # attempted but none updated:
        output = self._get_output_for_task_success(10, 0, 10)
        self.assertEqual(output['message'], "Problem failed to be rescored for any of 10 students")
        self.assertFalse(output['succeeded'])
        # some updated out of all attempted:
        output = self._get_output_for_task_success(10, 8, 10)
        self.assertEqual(output['message'], "Problem rescored for 8 of 10 students")
        self.assertFalse(output['succeeded'])
        # fewer attempted than total:
        output = self._get_output_for_task_success(9, 8, 10)
        self.assertEqual(output['message'], "Problem rescored for 8 of 9 students (out of 10)")
        self.assertFalse(output['succeeded'])
        # all updated -- the only case where 'succeeded' is True:
        output = self._get_output_for_task_success(10, 10, 10)
        self.assertEqual(output['message'], "Problem successfully rescored for 10 students")
        self.assertTrue(output['succeeded'])
        # single-student variants:
        output = self._get_output_for_task_success(0, 0, 1, student=self.student)
        self.assertTrue("Unable to find submission to be rescored for student" in output['message'])
        self.assertFalse(output['succeeded'])
        output = self._get_output_for_task_success(1, 0, 1, student=self.student)
        self.assertTrue("Problem failed to be rescored for student" in output['message'])
        self.assertFalse(output['succeeded'])
        output = self._get_output_for_task_success(1, 1, 1, student=self.student)
        self.assertTrue("Problem successfully rescored for student" in output['message'])
        self.assertTrue(output['succeeded'])

    def test_get_info_for_queuing_task(self):
        """A still-queued task has no status information available."""
        # get status for a task that is still running:
        instructor_task = self._create_entry()
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No status information available")

    def test_get_info_for_missing_output(self):
        """A task entry with no task_output has no status information."""
        # check for missing task_output
        instructor_task = self._create_success_entry()
        instructor_task.task_output = None
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No status information available")

    def test_get_info_for_broken_output(self):
        """Non-JSON task_output is reported as unparsable."""
        # check for non-JSON task_output
        instructor_task = self._create_success_entry()
        instructor_task.task_output = "{ bad"
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No parsable status information available")

    def test_get_info_for_empty_output(self):
        """JSON task_output missing the expected keys reports no progress info."""
        # check for JSON task_output with missing keys
        instructor_task = self._create_success_entry()
        instructor_task.task_output = "{}"
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "No progress status information available")

    def test_get_info_for_broken_input(self):
        """Non-JSON task_input is ignored when task_output is usable."""
        # check for non-JSON task_input, but then just ignore it
        instructor_task = self._create_success_entry()
        instructor_task.task_input = "{ bad"
        succeeded, message = get_task_completion_info(instructor_task)
        self.assertFalse(succeeded)
        self.assertEquals(message, "Problem rescored for 2 of 3 students (out of 5)")

View File

@@ -0,0 +1,172 @@
import json
import logging
from django.http import HttpResponse
from celery.states import FAILURE, REVOKED, READY_STATES
from instructor_task.api_helper import (get_status_from_instructor_task,
get_updated_instructor_task)
from instructor_task.models import PROGRESS
log = logging.getLogger(__name__)
# return status for completed tasks and tasks in progress
# (READY_STATES are Celery's terminal states; PROGRESS is the custom
# in-progress state defined in instructor_task.models.)
# Use list() instead of a copy-comprehension: READY_STATES is a frozenset,
# and we need a list to concatenate with [PROGRESS].
STATES_WITH_STATUS = list(READY_STATES) + [PROGRESS]
def _get_instructor_task_status(task_id):
    """
    Return the status dict for a single task.

    Written as an internal method here (rather than as a helper)
    so that get_task_completion_info() can be called without
    causing a circular dependency (since it's also called directly).
    """
    task = get_updated_instructor_task(task_id)
    status_dict = get_status_from_instructor_task(task)
    # Only completed or in-progress tasks carry a human-readable message.
    if task is None or task.task_state not in STATES_WITH_STATUS:
        return status_dict
    succeeded, message = get_task_completion_info(task)
    status_dict['message'] = message
    status_dict['succeeded'] = succeeded
    return status_dict
def instructor_task_status(request):
    """
    View method that returns the status of a course-related task or tasks.

    Status is returned as a JSON-serialized dict, wrapped as the content of a HTTPResponse.

    The task_id can be specified to this view in one of two ways:

    * by making a request containing 'task_id' as a parameter with a single value
      Returns a dict containing status information for the specified task_id

    * by making a request containing 'task_ids' as a parameter,
      with a list of task_id values.
      Returns a dict of dicts, with the task_id as key, and the corresponding
      dict containing status information for the specified task_id

      Task_id values that are unrecognized are skipped.

    The dict with status information for a task contains the following keys:
      'message': on complete tasks, status message reporting on final progress,
          or providing exception message if failed.  For tasks in progress,
          indicates the current progress.
      'succeeded': on complete tasks or tasks in progress, boolean value indicates if the
          task outcome was successful:  did it achieve what it set out to do.
          This is in contrast with a successful task_state, which indicates that the
          task merely completed.
      'task_id': id assigned by LMS and used by celery.
      'task_state': state of task as stored in celery's result store.
      'in_progress': boolean indicating if task is still running.
      'task_progress': dict containing progress information.  This includes:
          'attempted': number of attempts made
          'updated': number of attempts that "succeeded"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
          'duration_ms': how long the task has (or had) been running.
          'exception': name of exception class raised in failed tasks.
          'message': returned for failed and revoked tasks.
          'traceback': optional, returned if task failed and produced a traceback.
    """
    output = {}
    request_params = request.REQUEST
    if 'task_id' in request_params:
        output = _get_instructor_task_status(request_params['task_id'])
    elif 'task_ids[]' in request_params:
        # Note the "[]" suffix: this is how jQuery posts an array parameter.
        for requested_id in request_params.getlist('task_ids[]'):
            task_status = _get_instructor_task_status(requested_id)
            if task_status is not None:
                output[requested_id] = task_status
    return HttpResponse(json.dumps(output, indent=4))
def get_task_completion_info(instructor_task):
    """
    Construct progress message from progress information in InstructorTask entry.

    Returns (boolean, message string) duple, where the boolean indicates
    whether the task completed without incident.  (It is possible for a
    task to attempt many sub-tasks, such as rescoring many students' problem
    responses, and while the task runs to completion, some of the students'
    responses could not be rescored.)

    Used for providing messages to instructor_task_status(), as well as
    external calls for providing course task submission history information.
    """
    succeeded = False

    # Tasks that never completed (and are not reporting progress) carry no usable output:
    if instructor_task.task_state not in STATES_WITH_STATUS:
        return (succeeded, "No status information available")

    # we're more surprised if there is no output for a completed task, but just warn:
    if instructor_task.task_output is None:
        log.warning("No task_output information found for instructor_task {0}".format(instructor_task.task_id))
        return (succeeded, "No status information available")

    try:
        task_output = json.loads(instructor_task.task_output)
    except ValueError:
        fmt = "No parsable task_output information found for instructor_task {0}: {1}"
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
        return (succeeded, "No parsable status information available")

    # Failed and revoked tasks store their (error) message directly in task_output:
    if instructor_task.task_state in [FAILURE, REVOKED]:
        return (succeeded, task_output.get('message', 'No message provided'))

    # any() takes a generator directly -- no need to materialize a list first:
    if any(key not in task_output for key in ('action_name', 'attempted', 'updated', 'total')):
        fmt = "Invalid task_output information found for instructor_task {0}: {1}"
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
        return (succeeded, "No progress status information available")

    action_name = task_output['action_name']
    num_attempted = task_output['attempted']
    num_updated = task_output['updated']
    num_total = task_output['total']

    # task_input supplies optional context; a 'student' key means a single-student action.
    # A broken task_input is logged but otherwise ignored.
    student = None
    try:
        task_input = json.loads(instructor_task.task_input)
    except ValueError:
        fmt = "No parsable task_input information found for instructor_task {0}: {1}"
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input))
    else:
        student = task_input.get('student')

    if instructor_task.task_state == PROGRESS:
        # special message for providing progress updates:
        msg_format = "Progress: {action} {updated} of {attempted} so far"
    elif student is not None:
        if num_attempted == 0:
            msg_format = "Unable to find submission to be {action} for student '{student}'"
        elif num_updated == 0:
            msg_format = "Problem failed to be {action} for student '{student}'"
        else:
            succeeded = True
            msg_format = "Problem successfully {action} for student '{student}'"
    elif num_attempted == 0:
        msg_format = "Unable to find any students with submissions to be {action}"
    elif num_updated == 0:
        msg_format = "Problem failed to be {action} for any of {attempted} students"
    elif num_updated == num_attempted:
        succeeded = True
        msg_format = "Problem successfully {action} for {attempted} students"
    else:  # num_updated < num_attempted
        msg_format = "Problem {action} for {updated} of {attempted} students"

    # For course-wide actions, note when not every possible submission was attempted:
    if student is None and num_attempted != num_total:
        msg_format += " (out of {total})"

    # Update status in task result object itself:
    message = msg_format.format(action=action_name, updated=num_updated,
                                attempted=num_attempted, total=num_total,
                                student=student)
    return (succeeded, message)

View File

@@ -102,7 +102,7 @@ MITX_FEATURES = {
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# segment.io for LMS--need to explicitly turn it on on production.
# segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Enables the student notes API and UI.
@@ -122,7 +122,10 @@ MITX_FEATURES = {
'USE_CUSTOM_THEME': False,
# Do autoplay videos for students
'AUTOPLAY_VIDEOS': True
'AUTOPLAY_VIDEOS': True,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
}
# Used for A/B testing
@@ -691,6 +694,7 @@ INSTALLED_APPS = (
'util',
'certificates',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',

View File

@@ -0,0 +1,100 @@
// Define an InstructorTaskProgress object for updating a table on the instructor
// dashboard that shows the current background tasks that are currently running
// for the instructor's course. Any tasks that were running when the page is
// first displayed are passed in as instructor_tasks, and populate the "Pending Instructor
// Task" table. The InstructorTaskProgress is bound to this table, and periodically
// polls the LMS to see if any of the tasks has completed. Once a task is complete,
// it is not included in any further polling.
(function() {
    var __bind = function(fn, me){ return function(){ return fn.apply(me, arguments); }; };

    this.InstructorTaskProgress = (function() {

        // Bind the poller to the "Pending Instructor Task" table element and
        // schedule the first status refresh.
        function InstructorTaskProgress(element) {
            this.update_progress = __bind(this.update_progress, this);
            this.get_status = __bind(this.get_status, this);
            this.element = element;
            this.entries = $(element).find('.task-progress-entry');
            // Only one poller may be active at a time:
            if (window.queuePollerID) {
                window.clearTimeout(window.queuePollerID);
            }
            // Hardcode the initial delay before the first refresh to one second:
            window.queuePollerID = window.setTimeout(this.get_status, 1000);
        }

        InstructorTaskProgress.prototype.$ = function(selector) {
            return $(selector, this.element);
        };

        InstructorTaskProgress.prototype.update_progress = function(response) {
            var _this = this;
            // Response should be a dict with an entry for each requested task_id,
            // with a "task_state" and "in_progress" key and optionally a "message"
            // and a "task_progress.duration_ms" key.
            var something_in_progress = false;
            for (var task_id in response) {
                var task_dict = response[task_id];
                // find the corresponding entry, and update it:
                var entry = $(_this.element).find('[data-task-id="' + task_id + '"]');
                entry.find('.task-state').text(task_dict.task_state);
                var duration_value = (task_dict.task_progress && task_dict.task_progress.duration_ms
                    && Math.round(task_dict.task_progress.duration_ms / 1000)) || 'unknown';
                entry.find('.task-duration').text(duration_value);
                var progress_value = task_dict.message || '';
                entry.find('.task-progress').text(progress_value);
                // if the task is complete, then change the entry so it won't
                // be queried again.  Otherwise set a flag.
                if (task_dict.in_progress === true) {
                    something_in_progress = true;
                } else {
                    // Store boolean false (not the string "False") so the strict
                    // comparison in get_status excludes this entry from polling:
                    entry.data('inProgress', false);
                }
            }

            // if some entries are still incomplete, then repoll:
            // Hardcode the refresh interval to be every five seconds.
            // TODO: allow the refresh interval to be set.  (And if it is disabled,
            // then don't set the timeout at all.)
            if (something_in_progress) {
                window.queuePollerID = window.setTimeout(_this.get_status, 5000);
            } else {
                delete window.queuePollerID;
            }
        };

        InstructorTaskProgress.prototype.get_status = function() {
            var _this = this;
            var task_ids = [];
            // Construct the array of ids to get status for, by
            // including the subset of entries that are still in progress.
            // jQuery converts the data-in-progress="true" attribute to boolean true;
            // update_progress resets completed entries to boolean false.
            this.entries.each(function(idx, element) {
                var task_id = $(element).data('taskId');
                var in_progress = $(element).data('inProgress');
                // BUG FIX: this was `if (in_progress="True")` -- an assignment,
                // which is always truthy, so completed tasks were polled forever.
                if (in_progress === true) {
                    task_ids.push(task_id);
                }
            });
            // Make call to get status for these ids.
            // Note that the keyname here ends up with "[]" being appended
            // in the POST parameter that shows up on the Django server.
            // TODO: add error handler.
            var ajax_url = '/instructor_task_status/';
            var data = {'task_ids': task_ids };
            $.post(ajax_url, data).done(this.update_progress);
        };

        return InstructorTaskProgress;
    })();

}).call(this);
// once the page is rendered, create the progress object
// (bound to the "Pending Instructor Tasks" table wrapper in the dashboard template)
var instructorTaskProgress;
$(document).ready(function() {
    instructorTaskProgress = new InstructorTaskProgress($('#task-progress-wrapper'));
});

View File

@@ -9,7 +9,9 @@
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-1.1.1.min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-world-mill-en.js')}"></script>
<script type="text/javascript" src="${static.url('js/course_groups/cohorts.js')}"></script>
%if instructor_tasks is not None:
<script type="text/javascript" src="${static.url('js/pending_tasks.js')}"></script>
%endif
</%block>
<%include file="/courseware/course_navigation.html" args="active_page='instructor'" />
@@ -193,20 +195,78 @@ function goto( mode)
</ul>
<hr width="40%" style="align:left">
%endif
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<h2>Course-specific grade adjustment</h2>
<p>
Specify a particular problem in the course here by its url:
<input type="text" name="problem_for_all_students" size="60">
</p>
<p>
You may use just the "urlname" if the module is a problem, or "modulename/urlname" if it is not.
(For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
then just provide the <tt>problemname</tt>.
If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
provide <tt>notaproblem/someothername</tt>.)
</p>
<p>
Then select an action:
<input type="submit" name="action" value="Reset ALL students' attempts">
<input type="submit" name="action" value="Rescore ALL students' problem submissions">
</p>
<p>These actions run in the background, and status for active tasks will appear in a table below.
To see status for all tasks submitted for this problem, click on this button:
To see status for all tasks submitted for this problem, click on this button:
</p>
<p>
<input type="submit" name="action" value="Show Background Task History">
</p>
<hr width="40%" style="align:left">
%endif
<h2>Student-specific grade inspection and adjustment</h2>
<p>edX email address or their username: </p>
<p><input type="text" name="unique_student_identifier"> <input type="submit" name="action" value="Get link to student's progress page"></p>
<p>and, if you want to reset the number of attempts for a problem, the urlname of that problem
(e.g. if the location is <tt>i4x://university/course/problem/problemname</tt>, then the urlname is <tt>problemname</tt>).</p>
<p> <input type="text" name="problem_to_reset" size="60"> <input type="submit" name="action" value="Reset student's attempts"> </p>
<p>
Specify the edX email address or username of a student here:
<input type="text" name="unique_student_identifier">
</p>
<p>
Click this, and a link to student's progress page will appear below:
<input type="submit" name="action" value="Get link to student's progress page">
</p>
<p>
Specify a particular problem in the course here by its url:
<input type="text" name="problem_for_student" size="60">
</p>
<p>
You may use just the "urlname" if the module is a problem, or "modulename/urlname" if it is not.
(For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
then just provide the <tt>problemname</tt>.
If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
provide <tt>notaproblem/someothername</tt>.)
</p>
<p>
Then select an action:
<input type="submit" name="action" value="Reset student's attempts">
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<input type="submit" name="action" value="Rescore student's problem submission">
%endif
</p>
%if instructor_access:
<p> You may also delete the entire state of a student for a problem:
<input type="submit" name="action" value="Delete student state for problem"> </p>
<p>To delete the state of other XBlocks specify modulename/urlname, eg
<tt>combinedopenended/Humanities_SA_Peer</tt></p>
<p>
You may also delete the entire state of a student for the specified module:
<input type="submit" name="action" value="Delete student state for module">
</p>
%endif
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<p>Rescoring runs in the background, and status for active tasks will appear in a table below.
To see status for all tasks submitted for this course and student, click on this button:
</p>
<p>
<input type="submit" name="action" value="Show Background Task History for Student">
</p>
%endif
%endif
@@ -234,6 +294,7 @@ function goto( mode)
##-----------------------------------------------------------------------------
%if modeflag.get('Admin'):
%if instructor_access:
<hr width="40%" style="align:left">
<p>
@@ -373,6 +434,7 @@ function goto( mode)
%if msg:
<p></p><p>${msg}</p>
%endif
##-----------------------------------------------------------------------------
%if modeflag.get('Analytics'):
@@ -559,6 +621,69 @@ function goto( mode)
</p>
%endif
## Output tasks in progress
%if instructor_tasks is not None and len(instructor_tasks) > 0:
<hr width="100%">
<h2>Pending Instructor Tasks</h2>
<div id="task-progress-wrapper">
<table class="stat_table">
<tr>
<th>Task Type</th>
<th>Task inputs</th>
<th>Task Id</th>
<th>Requester</th>
<th>Submitted</th>
<th>Task State</th>
<th>Duration (sec)</th>
<th>Task Progress</th>
</tr>
%for tasknum, instructor_task in enumerate(instructor_tasks):
<tr id="task-progress-entry-${tasknum}" class="task-progress-entry"
data-task-id="${instructor_task.task_id}"
data-in-progress="true">
<td>${instructor_task.task_type}</td>
<td>${instructor_task.task_input}</td>
<td class="task-id">${instructor_task.task_id}</td>
<td>${instructor_task.requester}</td>
<td>${instructor_task.created}</td>
<td class="task-state">${instructor_task.task_state}</td>
<td class="task-duration">unknown</td>
<td class="task-progress">unknown</td>
</tr>
%endfor
</table>
</div>
<br/>
%endif
##-----------------------------------------------------------------------------
%if course_stats and modeflag.get('Psychometrics') is None:
<br/>
<br/>
<p>
<hr width="100%">
<h2>${course_stats['title'] | h}</h2>
<table class="stat_table">
<tr>
%for hname in course_stats['header']:
<th>${hname | h}</th>
%endfor
</tr>
%for row in course_stats['data']:
<tr>
%for value in row:
<td>${value | h}</td>
%endfor
</tr>
%endfor
</table>
</p>
%endif
##-----------------------------------------------------------------------------
%if modeflag.get('Psychometrics'):

View File

@@ -394,6 +394,11 @@ if settings.MITX_FEATURES.get('ENABLE_SERVICE_STATUS'):
url(r'^status/', include('service_status.urls')),
)
if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
urlpatterns += (
url(r'^instructor_task_status/$', 'instructor_task.views.instructor_task_status', name='instructor_task_status'),
)
# FoldIt views
urlpatterns += (
# The path is hardcoded into their app...

View File

@@ -3,11 +3,11 @@
# Third-party:
-e git://github.com/edx/django-staticfiles.git@6d2504e5c8#egg=django-staticfiles
-e git://github.com/edx/django-pipeline.git#egg=django-pipeline
-e git://github.com/edx/django-wiki.git@e2e84558#egg=django-wiki
-e git://github.com/edx/django-wiki.git@ac906abe#egg=django-wiki
-e git://github.com/dementrock/pystache_custom.git@776973740bdaad83a3b029f96e415a7d1e8bec2f#egg=pystache_custom-dev
-e git://github.com/eventbrite/zendesk.git@d53fe0e81b623f084e91776bcf6369f8b7b63879#egg=zendesk
# Our libraries:
-e git+https://github.com/edx/XBlock.git@4d8735e883#egg=XBlock
-e git+https://github.com/edx/codejail.git@0a1b468#egg=codejail
-e git+https://github.com/edx/diff-cover.git@v0.1.1#egg=diff_cover
-e git+https://github.com/edx/diff-cover.git@v0.1.2#egg=diff_cover