From 59e3cae4c9c6f2c95a816a8280b470ec6add9486 Mon Sep 17 00:00:00 2001
From: David Baumgold
Date: Wed, 14 May 2014 13:55:26 -0400
Subject: [PATCH 01/12] Revert pull request #3466
---
cms/envs/common.py | 2 +-
.../student/firebase_token_generator.py | 99 ++++++++++++++
.../student/tests/test_token_generator.py | 43 ++++++
common/djangoapps/student/tests/tests.py | 25 +++-
common/djangoapps/student/views.py | 24 ++++
common/lib/xmodule/xmodule/annotator_token.py | 32 -----
.../xmodule/tests/test_annotator_token.py | 20 ---
.../xmodule/tests/test_textannotation.py | 13 +-
.../xmodule/tests/test_videoannotation.py | 98 +++++++++++++-
.../xmodule/xmodule/textannotation_module.py | 27 ++--
.../xmodule/xmodule/videoannotation_module.py | 82 ++++++++++--
.../ova/annotator-full-firebase-auth.js | 22 ---
lms/djangoapps/notes/views.py | 4 +-
lms/envs/common.py | 1 -
lms/templates/notes.html | 6 +-
lms/templates/textannotation.html | 126 +++++++++---------
lms/templates/videoannotation.html | 14 +-
lms/urls.py | 1 +
requirements/edx/base.txt | 1 -
19 files changed, 464 insertions(+), 176 deletions(-)
create mode 100644 common/djangoapps/student/firebase_token_generator.py
create mode 100644 common/djangoapps/student/tests/test_token_generator.py
delete mode 100644 common/lib/xmodule/xmodule/annotator_token.py
delete mode 100644 common/lib/xmodule/xmodule/tests/test_annotator_token.py
delete mode 100644 common/static/js/vendor/ova/annotator-full-firebase-auth.js
diff --git a/cms/envs/common.py b/cms/envs/common.py
index 76e0f4d50f..f7195d2c54 100644
--- a/cms/envs/common.py
+++ b/cms/envs/common.py
@@ -318,7 +318,7 @@ PIPELINE_CSS = {
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
- 'js/vendor/markitup/sets/wiki/style.css',
+ 'js/vendor/markitup/sets/wiki/style.css'
],
'output_filename': 'css/cms-style-vendor.css',
},
diff --git a/common/djangoapps/student/firebase_token_generator.py b/common/djangoapps/student/firebase_token_generator.py
new file mode 100644
index 0000000000..f84a85277e
--- /dev/null
+++ b/common/djangoapps/student/firebase_token_generator.py
@@ -0,0 +1,99 @@
+'''
+ Firebase - library to generate a token
+ License: https://github.com/firebase/firebase-token-generator-python/blob/master/LICENSE
+ Tweaked and Edited by @danielcebrianr and @lduarte1991
+
+ This library will take either objects or strings and use python's built-in encoding
+ system as specified by RFC 3548. Thanks to the firebase team for their open-source
+ library. This was made specifically for speaking with the annotation_storage_url and
+ can be used and expanded, but not modified by anyone else needing such a process.
+'''
+from base64 import urlsafe_b64encode
+import hashlib
+import hmac
+import sys
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+__all__ = ['create_token']
+
+TOKEN_SEP = '.'
+
+
+def create_token(secret, data):
+ '''
+ Simply takes in the secret key and the data and
+ passes it to the local function _encode_token
+ '''
+ return _encode_token(secret, data)
+
+
+if sys.version_info < (2, 7):
+ def _encode(bytes_data):
+ '''
+ Takes a json object, string, or binary and
+ uses python's urlsafe_b64encode to encode data
+ and make it safe to pass along in a URL.
+ To make sure it does not conflict with variables
+ we make sure equal signs are removed.
+ More info: docs.python.org/2/library/base64.html
+ '''
+ encoded = urlsafe_b64encode(bytes(bytes_data))
+ return encoded.decode('utf-8').replace('=', '')
+else:
+ def _encode(bytes_info):
+ '''
+ Same as above function but for Python 2.7 or later
+ '''
+ encoded = urlsafe_b64encode(bytes_info)
+ return encoded.decode('utf-8').replace('=', '')
+
+
+def _encode_json(obj):
+ '''
+ Before a python dict object can be properly encoded,
+ it must be transformed into a JSON object and then
+ transformed into bytes to be encoded using the function
+ defined above.
+ '''
+ return _encode(bytearray(json.dumps(obj), 'utf-8'))
+
+
+def _sign(secret, to_sign):
+ '''
+ This function creates a sign that goes at the end of the
+ message that is specific to the secret and not the actual
+ content of the encoded body.
+ More info on hashing: http://docs.python.org/2/library/hmac.html
+ The function creates a hashed values of the secret and to_sign
+ and returns the digested values based on the secure hash
+ algorithm, 256
+ '''
+ def portable_bytes(string):
+ '''
+ Simply transforms a string into a bytes object,
+ which is a series of immutable integers 0 <= x < 256.
+ Always try to encode as utf-8, unless it is not
+ compliant.
+ '''
+ try:
+ return bytes(string, 'utf-8')
+ except TypeError:
+ return bytes(string)
+ return _encode(hmac.new(portable_bytes(secret), portable_bytes(to_sign), hashlib.sha256).digest()) # pylint: disable=E1101
+
+
+def _encode_token(secret, claims):
+ '''
+ This is the main function that takes the secret token and
+ the data to be transmitted. There is a header created for decoding
+ purposes. TOKEN_SEP means that a period/full stop separates the
+ header, data object/message, and signatures.
+ '''
+ encoded_header = _encode_json({'typ': 'JWT', 'alg': 'HS256'})
+ encoded_claims = _encode_json(claims)
+ secure_bits = '%s%s%s' % (encoded_header, TOKEN_SEP, encoded_claims)
+ sig = _sign(secret, secure_bits)
+ return '%s%s%s' % (secure_bits, TOKEN_SEP, sig)
diff --git a/common/djangoapps/student/tests/test_token_generator.py b/common/djangoapps/student/tests/test_token_generator.py
new file mode 100644
index 0000000000..1eb09c9173
--- /dev/null
+++ b/common/djangoapps/student/tests/test_token_generator.py
@@ -0,0 +1,43 @@
+"""
+This test will run for firebase_token_generator.py.
+"""
+
+from django.test import TestCase
+
+from student.firebase_token_generator import _encode, _encode_json, _encode_token, create_token
+
+
+class TokenGenerator(TestCase):
+ """
+ Tests for the file firebase_token_generator.py
+ """
+ def test_encode(self):
+ """
+ This test makes sure that no matter what version of python
+ you have, the _encode function still returns the appropriate result
+ for a string.
+ """
+ expected = "dGVzdDE"
+ result = _encode("test1")
+ self.assertEqual(expected, result)
+
+ def test_encode_json(self):
+ """
+ Same as above, but this one focuses on a python dict type
+ transformed into a json object and then encoded.
+ """
+ expected = "eyJ0d28iOiAidGVzdDIiLCAib25lIjogInRlc3QxIn0"
+ result = _encode_json({'one': 'test1', 'two': 'test2'})
+ self.assertEqual(expected, result)
+
+ def test_create_token(self):
+ """
+ Unlike its counterpart in student/views.py, this function
+ just checks for the encoding of a token. The other function
+ will test depending on time and user.
+ """
+ expected = "eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJ1c2VySWQiOiAidXNlcm5hbWUiLCAidHRsIjogODY0MDB9.-p1sr7uwCapidTQ0qB7DdU2dbF-hViKpPNN_5vD10t8"
+ result1 = _encode_token('4c7f4d1c-8ac4-4e9f-84c8-b271c57fcac4', {"userId": "username", "ttl": 86400})
+ result2 = create_token('4c7f4d1c-8ac4-4e9f-84c8-b271c57fcac4', {"userId": "username", "ttl": 86400})
+ self.assertEqual(expected, result1)
+ self.assertEqual(expected, result2)
diff --git a/common/djangoapps/student/tests/tests.py b/common/djangoapps/student/tests/tests.py
index 199a794bc4..c28a54afe8 100644
--- a/common/djangoapps/student/tests/tests.py
+++ b/common/djangoapps/student/tests/tests.py
@@ -26,7 +26,7 @@ from mock import Mock, patch
from student.models import anonymous_id_for_user, user_by_anonymous_id, CourseEnrollment, unique_id_for_user
from student.views import (process_survey_link, _cert_info,
- change_enrollment, complete_course_mode_info)
+ change_enrollment, complete_course_mode_info, token)
from student.tests.factories import UserFactory, CourseModeFactory
import shoppingcart
@@ -498,3 +498,26 @@ class AnonymousLookupTable(TestCase):
anonymous_id = anonymous_id_for_user(self.user, self.course.id)
real_user = user_by_anonymous_id(anonymous_id)
self.assertEqual(self.user, real_user)
+
+
+@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
+class Token(ModuleStoreTestCase):
+ """
+ Test for the token generator. This creates a random course and passes it through the token file which generates the
+ token that will be passed in to the annotation_storage_url.
+ """
+ request_factory = RequestFactory()
+ COURSE_SLUG = "100"
+ COURSE_NAME = "test_course"
+ COURSE_ORG = "edx"
+
+ def setUp(self):
+ self.course = CourseFactory.create(org=self.COURSE_ORG, display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
+ self.user = User.objects.create(username="username", email="username")
+ self.req = self.request_factory.post('/token?course_id=edx/100/test_course', {'user': self.user})
+ self.req.user = self.user
+
+ def test_token(self):
+ expected = HttpResponse("eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3N1ZWRBdCI6ICIyMDE0LTAxLTIzVDE5OjM1OjE3LjUyMjEwNC01OjAwIiwgImNvbnN1bWVyS2V5IjogInh4eHh4eHh4LXh4eHgteHh4eC14eHh4LXh4eHh4eHh4eHh4eCIsICJ1c2VySWQiOiAidXNlcm5hbWUiLCAidHRsIjogODY0MDB9.OjWz9mzqJnYuzX-f3uCBllqJUa8PVWJjcDy_McfxLvc", mimetype="text/plain")
+ response = token(self.req)
+ self.assertEqual(expected.content.split('.')[0], response.content.split('.')[0])
diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py
index b199196679..5cecaff5df 100644
--- a/common/djangoapps/student/views.py
+++ b/common/djangoapps/student/views.py
@@ -44,6 +44,7 @@ from student.models import (
create_comments_service_user, PasswordHistory
)
from student.forms import PasswordResetFormNoActive
+from student.firebase_token_generator import create_token
from verify_student.models import SoftwareSecurePhotoVerification, MidcourseReverificationWindow
from certificates.models import CertificateStatuses, certificate_status_for_student
@@ -1851,3 +1852,26 @@ def change_email_settings(request):
track.views.server_track(request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard')
return JsonResponse({"success": True})
+
+
+@login_required
+def token(request):
+ '''
+ Return a token for the backend of annotations.
+ It uses the course id to retrieve a variable that contains the secret
+ token found in inheritance.py. It also contains information of when
+ the token was issued. This will be stored with the user along with
+ the id for identification purposes in the backend.
+ '''
+ course_id = request.GET.get("course_id")
+ course = course_from_id(course_id)
+ dtnow = datetime.datetime.now()
+ dtutcnow = datetime.datetime.utcnow()
+ delta = dtnow - dtutcnow
+ newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
+ newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin)
+ secret = course.annotation_token_secret
+ custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": request.user.email, "ttl": 86400}
+ newtoken = create_token(secret, custom_data)
+ response = HttpResponse(newtoken, mimetype="text/plain")
+ return response
diff --git a/common/lib/xmodule/xmodule/annotator_token.py b/common/lib/xmodule/xmodule/annotator_token.py
deleted file mode 100644
index 6fa5695978..0000000000
--- a/common/lib/xmodule/xmodule/annotator_token.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-This file contains a function used to retrieve the token for the annotation backend
-without having to create a view, but just returning a string instead.
-
-It can be called from other files by using the following:
-from xmodule.annotator_token import retrieve_token
-"""
-import datetime
-from firebase_token_generator import create_token
-
-
-def retrieve_token(userid, secret):
- '''
- Return a token for the backend of annotations.
- It uses the course id to retrieve a variable that contains the secret
- token found in inheritance.py. It also contains information of when
- the token was issued. This will be stored with the user along with
- the id for identification purposes in the backend.
- '''
-
- # the following five lines of code allows you to include the default timezone in the iso format
- # for more information: http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
- dtnow = datetime.datetime.now()
- dtutcnow = datetime.datetime.utcnow()
- delta = dtnow - dtutcnow
- newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
- newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin)
- # uses the issued time (UTC plus timezone), the consumer key and the user's email to maintain a
- # federated system in the annotation backend server
- custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": userid, "ttl": 86400}
- newtoken = create_token(secret, custom_data)
- return newtoken
diff --git a/common/lib/xmodule/xmodule/tests/test_annotator_token.py b/common/lib/xmodule/xmodule/tests/test_annotator_token.py
deleted file mode 100644
index ae06808bba..0000000000
--- a/common/lib/xmodule/xmodule/tests/test_annotator_token.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-This test will run for annotator_token.py
-"""
-import unittest
-
-from xmodule.annotator_token import retrieve_token
-
-
-class TokenRetriever(unittest.TestCase):
- """
- Tests to make sure that when passed in a username and secret token, that it will be encoded correctly
- """
- def test_token(self):
- """
- Test for the token generator. Give an a random username and secret token, it should create the properly encoded string of text.
- """
- expected = "eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3N1ZWRBdCI6ICIyMDE0LTAyLTI3VDE3OjAwOjQyLjQwNjQ0MSswOjAwIiwgImNvbnN1bWVyS2V5IjogImZha2Vfc2VjcmV0IiwgInVzZXJJZCI6ICJ1c2VybmFtZSIsICJ0dGwiOiA4NjQwMH0.Dx1PoF-7mqBOOSGDMZ9R_s3oaaLRPnn6CJgGGF2A5CQ"
- response = retrieve_token("username", "fake_secret")
- self.assertEqual(expected.split('.')[0], response.split('.')[0])
- self.assertNotEqual(expected.split('.')[2], response.split('.')[2])
\ No newline at end of file
diff --git a/common/lib/xmodule/xmodule/tests/test_textannotation.py b/common/lib/xmodule/xmodule/tests/test_textannotation.py
index 907eb78780..397e3990ef 100644
--- a/common/lib/xmodule/xmodule/tests/test_textannotation.py
+++ b/common/lib/xmodule/xmodule/tests/test_textannotation.py
@@ -38,6 +38,17 @@ class TextAnnotationModuleTestCase(unittest.TestCase):
ScopeIds(None, None, None, None)
)
+ def test_render_content(self):
+ """
+ Tests to make sure the sample xml is rendered and that it forms a valid xmltree
+ that does not contain a display_name.
+ """
+ content = self.mod._render_content() # pylint: disable=W0212
+ self.assertIsNotNone(content)
+ element = etree.fromstring(content)
+ self.assertIsNotNone(element)
+ self.assertFalse('display_name' in element.attrib, "Display Name should have been deleted from Content")
+
def test_extract_instructions(self):
"""
Tests to make sure that the instructions are correctly pulled from the sample xml above.
@@ -59,5 +70,5 @@ class TextAnnotationModuleTestCase(unittest.TestCase):
Tests the function that passes in all the information in the context that will be used in templates/textannotation.html
"""
context = self.mod.get_html()
- for key in ['display_name', 'tag', 'source', 'instructions_html', 'content_html', 'annotation_storage', 'token']:
+ for key in ['display_name', 'tag', 'source', 'instructions_html', 'content_html', 'annotation_storage']:
self.assertIn(key, context)
diff --git a/common/lib/xmodule/xmodule/tests/test_videoannotation.py b/common/lib/xmodule/xmodule/tests/test_videoannotation.py
index 4a081803aa..cb63d05503 100644
--- a/common/lib/xmodule/xmodule/tests/test_videoannotation.py
+++ b/common/lib/xmodule/xmodule/tests/test_videoannotation.py
@@ -34,6 +34,100 @@ class VideoAnnotationModuleTestCase(unittest.TestCase):
ScopeIds(None, None, None, None)
)
+ def test_annotation_class_attr_default(self):
+ """
+ Makes sure that it can detect annotation values in text-form if user
+ decides to add text to the area below video, video functionality is completely
+ found in javascript.
+ """
+ xml = 'test'
+ element = etree.fromstring(xml)
+
+ expected_attr = {'class': {'value': 'annotatable-span highlight'}}
+ actual_attr = self.mod._get_annotation_class_attr(element) # pylint: disable=W0212
+
+ self.assertIsInstance(actual_attr, dict)
+ self.assertDictEqual(expected_attr, actual_attr)
+
+ def test_annotation_class_attr_with_valid_highlight(self):
+ """
+ Same as above but more specific to an area that is highlightable in the appropriate
+ color designated.
+ """
+ xml = 'test'
+
+ for color in self.mod.highlight_colors:
+ element = etree.fromstring(xml.format(highlight=color))
+ value = 'annotatable-span highlight highlight-{highlight}'.format(highlight=color)
+
+ expected_attr = {'class': {
+ 'value': value,
+ '_delete': 'highlight'}
+ }
+ actual_attr = self.mod._get_annotation_class_attr(element) # pylint: disable=W0212
+
+ self.assertIsInstance(actual_attr, dict)
+ self.assertDictEqual(expected_attr, actual_attr)
+
+ def test_annotation_class_attr_with_invalid_highlight(self):
+ """
+ Same as above, but checked with invalid colors.
+ """
+ xml = 'test'
+
+ for invalid_color in ['rainbow', 'blink', 'invisible', '', None]:
+ element = etree.fromstring(xml.format(highlight=invalid_color))
+ expected_attr = {'class': {
+ 'value': 'annotatable-span highlight',
+ '_delete': 'highlight'}
+ }
+ actual_attr = self.mod._get_annotation_class_attr(element) # pylint: disable=W0212
+
+ self.assertIsInstance(actual_attr, dict)
+ self.assertDictEqual(expected_attr, actual_attr)
+
+ def test_annotation_data_attr(self):
+ """
+ Test that each highlight contains the data information from the annotation itself.
+ """
+ element = etree.fromstring('test')
+
+ expected_attr = {
+ 'data-comment-body': {'value': 'foo', '_delete': 'body'},
+ 'data-comment-title': {'value': 'bar', '_delete': 'title'},
+ 'data-problem-id': {'value': '0', '_delete': 'problem'}
+ }
+
+ actual_attr = self.mod._get_annotation_data_attr(element) # pylint: disable=W0212
+
+ self.assertIsInstance(actual_attr, dict)
+ self.assertDictEqual(expected_attr, actual_attr)
+
+ def test_render_annotation(self):
+ """
+ Tests to make sure that the spans designating annotations actually visually render as annotations.
+ """
+ expected_html = 'z'
+ expected_el = etree.fromstring(expected_html)
+
+ actual_el = etree.fromstring('z')
+ self.mod._render_annotation(actual_el) # pylint: disable=W0212
+
+ self.assertEqual(expected_el.tag, actual_el.tag)
+ self.assertEqual(expected_el.text, actual_el.text)
+ self.assertDictEqual(dict(expected_el.attrib), dict(actual_el.attrib))
+
+ def test_render_content(self):
+ """
+ Like above, but using the entire text, it makes sure that display_name is removed and that there is only one
+ div encompassing the annotatable area.
+ """
+ content = self.mod._render_content() # pylint: disable=W0212
+ element = etree.fromstring(content)
+ self.assertIsNotNone(element)
+ self.assertEqual('div', element.tag, 'root tag is a div')
+ self.assertFalse('display_name' in element.attrib, "Display Name should have been deleted from Content")
+
def test_extract_instructions(self):
"""
This test ensures that if an instruction exists it is pulled and
@@ -66,6 +160,6 @@ class VideoAnnotationModuleTestCase(unittest.TestCase):
"""
Tests to make sure variables passed in truly exist within the html once it is all rendered.
"""
- context = self.mod.get_html() # pylint: disable=W0212
- for key in ['display_name', 'instructions_html', 'sourceUrl', 'typeSource', 'poster', 'annotation_storage']:
+ context = self.mod.get_html()
+ for key in ['display_name', 'content_html', 'instructions_html', 'sourceUrl', 'typeSource', 'poster', 'alert', 'annotation_storage']:
self.assertIn(key, context)
diff --git a/common/lib/xmodule/xmodule/textannotation_module.py b/common/lib/xmodule/xmodule/textannotation_module.py
index 4a673eb33e..1d732d8709 100644
--- a/common/lib/xmodule/xmodule/textannotation_module.py
+++ b/common/lib/xmodule/xmodule/textannotation_module.py
@@ -6,7 +6,6 @@ from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
-from xmodule.annotator_token import retrieve_token
import textwrap
@@ -31,7 +30,7 @@ class AnnotatableFields(object):
scope=Scope.settings,
default='Text Annotation',
)
- instructor_tags = String(
+ tags = String(
display_name="Tags for Assignments",
help="Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue",
scope=Scope.settings,
@@ -44,7 +43,6 @@ class AnnotatableFields(object):
default='None',
)
annotation_storage_url = String(help="Location of Annotation backend", scope=Scope.settings, default="http://your_annotation_storage.com", display_name="Url for Annotation Storage")
- annotation_token_secret = String(help="Secret string for annotation storage", scope=Scope.settings, default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", display_name="Secret Token String for Annotation")
class TextAnnotationModule(AnnotatableFields, XModule):
@@ -61,9 +59,15 @@ class TextAnnotationModule(AnnotatableFields, XModule):
self.instructions = self._extract_instructions(xmltree)
self.content = etree.tostring(xmltree, encoding='unicode')
- self.user_email = ""
- if self.runtime.get_real_user is not None:
- self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
+ self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green']
+
+ def _render_content(self):
+ """ Renders annotatable content with annotation spans and returns HTML. """
+ xmltree = etree.fromstring(self.content)
+ if 'display_name' in xmltree.attrib:
+ del xmltree.attrib['display_name']
+
+ return etree.tostring(xmltree, encoding='unicode')
def _extract_instructions(self, xmltree):
""" Removes from the xmltree and returns them as a string, otherwise None. """
@@ -78,13 +82,13 @@ class TextAnnotationModule(AnnotatableFields, XModule):
""" Renders parameters to template. """
context = {
'display_name': self.display_name_with_default,
- 'tag': self.instructor_tags,
+ 'tag': self.tags,
'source': self.source,
'instructions_html': self.instructions,
- 'content_html': self.content,
- 'annotation_storage': self.annotation_storage_url,
- 'token': retrieve_token(self.user_email, self.annotation_token_secret),
+ 'content_html': self._render_content(),
+ 'annotation_storage': self.annotation_storage_url
}
+
return self.system.render_template('textannotation.html', context)
@@ -97,7 +101,6 @@ class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
def non_editable_metadata_fields(self):
non_editable_fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
- TextAnnotationDescriptor.annotation_storage_url,
- TextAnnotationDescriptor.annotation_token_secret,
+ TextAnnotationDescriptor.annotation_storage_url
])
return non_editable_fields
diff --git a/common/lib/xmodule/xmodule/videoannotation_module.py b/common/lib/xmodule/xmodule/videoannotation_module.py
index 68e5b40413..5f31509d01 100644
--- a/common/lib/xmodule/xmodule/videoannotation_module.py
+++ b/common/lib/xmodule/xmodule/videoannotation_module.py
@@ -7,7 +7,6 @@ from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
-from xmodule.annotator_token import retrieve_token
import textwrap
@@ -32,7 +31,7 @@ class AnnotatableFields(object):
sourceurl = String(help="The external source URL for the video.", display_name="Source URL", scope=Scope.settings, default="http://video-js.zencoder.com/oceans-clip.mp4")
poster_url = String(help="Poster Image URL", display_name="Poster URL", scope=Scope.settings, default="")
annotation_storage_url = String(help="Location of Annotation backend", scope=Scope.settings, default="http://your_annotation_storage.com", display_name="Url for Annotation Storage")
- annotation_token_secret = String(help="Secret string for annotation storage", scope=Scope.settings, default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", display_name="Secret Token String for Annotation")
+
class VideoAnnotationModule(AnnotatableFields, XModule):
'''Video Annotation Module'''
@@ -56,9 +55,73 @@ class VideoAnnotationModule(AnnotatableFields, XModule):
self.instructions = self._extract_instructions(xmltree)
self.content = etree.tostring(xmltree, encoding='unicode')
- self.user_email = ""
- if self.runtime.get_real_user is not None:
- self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
+ self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green']
+
+ def _get_annotation_class_attr(self, element):
+ """ Returns a dict with the CSS class attribute to set on the annotation
+ and an XML key to delete from the element.
+ """
+
+ attr = {}
+ cls = ['annotatable-span', 'highlight']
+ highlight_key = 'highlight'
+ color = element.get(highlight_key)
+
+ if color is not None:
+ if color in self.highlight_colors:
+ cls.append('highlight-' + color)
+ attr['_delete'] = highlight_key
+ attr['value'] = ' '.join(cls)
+
+ return {'class': attr}
+
+ def _get_annotation_data_attr(self, element):
+ """ Returns a dict in which the keys are the HTML data attributes
+ to set on the annotation element. Each data attribute has a
+ corresponding 'value' and (optional) '_delete' key to specify
+ an XML attribute to delete.
+ """
+
+ data_attrs = {}
+ attrs_map = {
+ 'body': 'data-comment-body',
+ 'title': 'data-comment-title',
+ 'problem': 'data-problem-id'
+ }
+
+ for xml_key in attrs_map.keys():
+ if xml_key in element.attrib:
+ value = element.get(xml_key, '')
+ html_key = attrs_map[xml_key]
+ data_attrs[html_key] = {'value': value, '_delete': xml_key}
+
+ return data_attrs
+
+ def _render_annotation(self, element):
+ """ Renders an annotation element for HTML output. """
+ attr = {}
+ attr.update(self._get_annotation_class_attr(element))
+ attr.update(self._get_annotation_data_attr(element))
+
+ element.tag = 'span'
+
+ for key in attr.keys():
+ element.set(key, attr[key]['value'])
+ if '_delete' in attr[key] and attr[key]['_delete'] is not None:
+ delete_key = attr[key]['_delete']
+ del element.attrib[delete_key]
+
+ def _render_content(self):
+ """ Renders annotatable content with annotation spans and returns HTML. """
+ xmltree = etree.fromstring(self.content)
+ xmltree.tag = 'div'
+ if 'display_name' in xmltree.attrib:
+ del xmltree.attrib['display_name']
+
+ for element in xmltree.findall('.//annotation'):
+ self._render_annotation(element)
+
+ return etree.tostring(xmltree, encoding='unicode')
def _extract_instructions(self, xmltree):
""" Removes from the xmltree and returns them as a string, otherwise None. """
@@ -91,9 +154,9 @@ class VideoAnnotationModule(AnnotatableFields, XModule):
'sourceUrl': self.sourceurl,
'typeSource': extension,
'poster': self.poster_url,
- 'content_html': self.content,
- 'annotation_storage': self.annotation_storage_url,
- 'token': retrieve_token(self.user_email, self.annotation_token_secret),
+ 'alert': self,
+ 'content_html': self._render_content(),
+ 'annotation_storage': self.annotation_storage_url
}
return self.system.render_template('videoannotation.html', context)
@@ -108,7 +171,6 @@ class VideoAnnotationDescriptor(AnnotatableFields, RawDescriptor):
def non_editable_metadata_fields(self):
non_editable_fields = super(VideoAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
- VideoAnnotationDescriptor.annotation_storage_url,
- VideoAnnotationDescriptor.annotation_token_secret,
+ VideoAnnotationDescriptor.annotation_storage_url
])
return non_editable_fields
diff --git a/common/static/js/vendor/ova/annotator-full-firebase-auth.js b/common/static/js/vendor/ova/annotator-full-firebase-auth.js
deleted file mode 100644
index defc25fc95..0000000000
--- a/common/static/js/vendor/ova/annotator-full-firebase-auth.js
+++ /dev/null
@@ -1,22 +0,0 @@
-Annotator.Plugin.Auth.prototype.haveValidToken = function() {
- return (
- this._unsafeToken &&
- this._unsafeToken.d.issuedAt &&
- this._unsafeToken.d.ttl &&
- this._unsafeToken.d.consumerKey &&
- this.timeToExpiry() > 0
- );
-};
-
-Annotator.Plugin.Auth.prototype.timeToExpiry = function() {
- var expiry, issue, now, timeToExpiry;
- now = new Date().getTime() / 1000;
- issue = createDateFromISO8601(this._unsafeToken.d.issuedAt).getTime() / 1000;
- expiry = issue + this._unsafeToken.d.ttl;
- timeToExpiry = expiry - now;
- if (timeToExpiry > 0) {
- return timeToExpiry;
- } else {
- return 0;
- }
-};
\ No newline at end of file
diff --git a/lms/djangoapps/notes/views.py b/lms/djangoapps/notes/views.py
index 1e14fcaa25..b6670a7e09 100644
--- a/lms/djangoapps/notes/views.py
+++ b/lms/djangoapps/notes/views.py
@@ -4,7 +4,6 @@ from edxmako.shortcuts import render_to_response
from courseware.courses import get_course_with_access
from notes.models import Note
from notes.utils import notes_enabled_for_course
-from xmodule.annotator_token import retrieve_token
@login_required
@@ -23,8 +22,7 @@ def notes(request, course_id):
'course': course,
'notes': notes,
'student': student,
- 'storage': storage,
- 'token': retrieve_token(student.email, course.annotation_token_secret),
+ 'storage': storage
}
return render_to_response('notes.html', context)
diff --git a/lms/envs/common.py b/lms/envs/common.py
index 0c2b37b2d5..9340aecc54 100644
--- a/lms/envs/common.py
+++ b/lms/envs/common.py
@@ -828,7 +828,6 @@ main_vendor_js = [
'js/vendor/swfobject/swfobject.js',
'js/vendor/jquery.ba-bbq.min.js',
'js/vendor/ova/annotator-full.js',
- 'js/vendor/ova/annotator-full-firebase-auth.js',
'js/vendor/ova/video.dev.js',
'js/vendor/ova/vjs.youtube.js',
'js/vendor/ova/rangeslider.js',
diff --git a/lms/templates/notes.html b/lms/templates/notes.html
index e44a78b08e..d896725581 100644
--- a/lms/templates/notes.html
+++ b/lms/templates/notes.html
@@ -68,8 +68,10 @@
//Grab uri of the course
var parts = window.location.href.split("/"),
- uri = '';
+ uri = '',
+ courseid;
for (var index = 0; index <= 6; index += 1) uri += parts[index]+"/"; //Get the unit url
+ courseid = parts[4] + "/" + parts[5] + "/" + parts[6];
var pagination = 100,
is_staff = false,
options = {
@@ -128,7 +130,7 @@
},
},
auth: {
- token: "${token}"
+ tokenUrl: location.protocol+'//'+location.host+"/token?course_id="+courseid
},
store: {
// The endpoint of the store on your server.
diff --git a/lms/templates/textannotation.html b/lms/templates/textannotation.html
index f69cb7b68c..3532681051 100644
--- a/lms/templates/textannotation.html
+++ b/lms/templates/textannotation.html
@@ -1,63 +1,64 @@
<%! from django.utils.translation import ugettext as _ %>
-
- % if display_name is not UNDEFINED and display_name is not None:
-
${display_name}
- % endif
-
- % if instructions_html is not UNDEFINED and instructions_html is not None:
-
%endif
From 356723a634e7d1b5f30610d63de40dbf6f0ca3ab Mon Sep 17 00:00:00 2001
From: Alison Hodges
Date: Thu, 15 May 2014 09:23:05 -0400
Subject: [PATCH 05/12] Correcting "make html" errors
---
.../source/exercises_tools/problem_with_hint.rst | 2 +-
.../course_authors/source/exercises_tools/vitalsource.rst | 2 +-
.../source/releasing_course/beta_testing.rst | 8 ++++----
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/docs/en_us/course_authors/source/exercises_tools/problem_with_hint.rst b/docs/en_us/course_authors/source/exercises_tools/problem_with_hint.rst
index 102ecf2637..999adee40d 100644
--- a/docs/en_us/course_authors/source/exercises_tools/problem_with_hint.rst
+++ b/docs/en_us/course_authors/source/exercises_tools/problem_with_hint.rst
@@ -57,7 +57,7 @@ To create the above problem:
-.. _Drag and Drop Problem XML:
+.. _Problem with Adaptive Hint XML:
*********************************
Problem with Adaptive Hint XML
diff --git a/docs/en_us/course_authors/source/exercises_tools/vitalsource.rst b/docs/en_us/course_authors/source/exercises_tools/vitalsource.rst
index 5404b804ea..28dd2cebff 100644
--- a/docs/en_us/course_authors/source/exercises_tools/vitalsource.rst
+++ b/docs/en_us/course_authors/source/exercises_tools/vitalsource.rst
@@ -10,7 +10,7 @@ The VitalSource Bookshelf e-reader tool provides your students with easy access
:width: 500
:alt: VitalSource e-book with highlighted note
-For more information about Vital Source and its features, visit the `VitalSource Bookshelf support site `_.
+For more information about Vital Source and its features, visit the `VitalSource Bookshelf support site `_.
.. note:: Before you add a VitalSource Bookshelf e-reader to your course, you must work with Vital Source to make sure the content you need already exists in the Vital Source inventory. If the content is not yet available, Vital Source works with the publisher of the e-book to create an e-book that meets the VitalSource Bookshelf specifications. **This process can take up to four months.** The following steps assume that the e-book you want is already part of the Vital Source inventory.
diff --git a/docs/en_us/course_authors/source/releasing_course/beta_testing.rst b/docs/en_us/course_authors/source/releasing_course/beta_testing.rst
index a21470590d..2390c6180f 100644
--- a/docs/en_us/course_authors/source/releasing_course/beta_testing.rst
+++ b/docs/en_us/course_authors/source/releasing_course/beta_testing.rst
@@ -194,9 +194,9 @@ When you add beta testers, note the following.
.. _Add_Testers_Bulk:
---------------------------
+================================
Add Multiple Beta Testers
---------------------------
+================================
If you have a number of beta testers that you want to add, you can use the "batch
add" option to add them all at once, rather than individually. With this
@@ -229,9 +229,9 @@ testers**.
.. note:: The **Auto Enroll** option has no effect when you click **Remove beta testers**. The user's role as a beta tester is removed; course enrollment is not affected.
------------------------------
+================================
Add Beta Testers Individually
------------------------------
+================================
To add a single beta tester:
From 026e6e4a6d6c699e7c85a3d54fbd9c76d5a1c755 Mon Sep 17 00:00:00 2001
From: Jay Zoldak
Date: Thu, 15 May 2014 11:51:06 -0400
Subject: [PATCH 06/12] Disable failing cms acceptance tests
---
.../contentstore/features/subsection.feature | 40 ++++++++++---------
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/cms/djangoapps/contentstore/features/subsection.feature b/cms/djangoapps/contentstore/features/subsection.feature
index 7f0d7b85e4..9c4d4cecdb 100644
--- a/cms/djangoapps/contentstore/features/subsection.feature
+++ b/cms/djangoapps/contentstore/features/subsection.feature
@@ -38,13 +38,14 @@ Feature: CMS.Create Subsection
Then I see the subsection release date is 12/25/2011 03:00
And I see the subsection due date is 01/02/2012 04:00
- Scenario: Set release and due dates of subsection on enter
- Given I have opened a new subsection in Studio
- And I set the subsection release date on enter to 04/04/2014 03:00
- And I set the subsection due date on enter to 04/04/2014 04:00
- And I reload the page
- Then I see the subsection release date is 04/04/2014 03:00
- And I see the subsection due date is 04/04/2014 04:00
+# Disabling due to failure on master. JZ 05/14/2014 TODO: fix
+# Scenario: Set release and due dates of subsection on enter
+# Given I have opened a new subsection in Studio
+# And I set the subsection release date on enter to 04/04/2014 03:00
+# And I set the subsection due date on enter to 04/04/2014 04:00
+# And I reload the page
+# Then I see the subsection release date is 04/04/2014 03:00
+# And I see the subsection due date is 04/04/2014 04:00
Scenario: Delete a subsection
Given I have opened a new course section in Studio
@@ -55,15 +56,16 @@ Feature: CMS.Create Subsection
And I confirm the prompt
Then the subsection does not exist
- Scenario: Sync to Section
- Given I have opened a new course section in Studio
- And I click the Edit link for the release date
- And I set the section release date to 01/02/2103
- And I have added a new subsection
- And I click on the subsection
- And I set the subsection release date to 01/20/2103
- And I reload the page
- And I click the link to sync release date to section
- And I wait for "1" second
- And I reload the page
- Then I see the subsection release date is 01/02/2103
+# Disabling due to failure on master. JZ 05/14/2014 TODO: fix
+# Scenario: Sync to Section
+# Given I have opened a new course section in Studio
+# And I click the Edit link for the release date
+# And I set the section release date to 01/02/2103
+# And I have added a new subsection
+# And I click on the subsection
+# And I set the subsection release date to 01/20/2103
+# And I reload the page
+# And I click the link to sync release date to section
+# And I wait for "1" second
+# And I reload the page
+# Then I see the subsection release date is 01/02/2103
From 529d2fa33faccc9f19146daad7e094a01b14e2c9 Mon Sep 17 00:00:00 2001
From: Mark Hoeber
Date: Tue, 13 May 2014 13:54:23 -0400
Subject: [PATCH 07/12] Release Notes file setup
DOC-400
---
.../en_us/release_notes/source/05-15-2014.rst | 110 ++++++++++++++++++
docs/en_us/release_notes/source/index.rst | 1 +
docs/en_us/release_notes/source/links.rst | 11 +-
3 files changed, 121 insertions(+), 1 deletion(-)
create mode 100644 docs/en_us/release_notes/source/05-15-2014.rst
diff --git a/docs/en_us/release_notes/source/05-15-2014.rst b/docs/en_us/release_notes/source/05-15-2014.rst
new file mode 100644
index 0000000000..6ce8b975e2
--- /dev/null
+++ b/docs/en_us/release_notes/source/05-15-2014.rst
@@ -0,0 +1,110 @@
+###################################
+May 15, 2014
+###################################
+
+The following information reflects what is new in the edX Platform as of May 15, 2014. See previous pages in this document for a history of changes.
+
+**************************
+edX Documentation
+**************************
+
+You can access the `edX Status`_ page to get an up-to-date status for all
+services on edx.org and edX Edge. The page also includes the Twitter feed for
+@edXstatus, which the edX Operations team uses to post updates.
+
+You can access the public `edX roadmap`_ for
+details about the currently planned product direction.
+
+The following documentation is available:
+
+* `Building and Running an edX Course`_
+
+ You can also download the guide as a PDF from the edX Studio user interface.
+
+ Recent changes include:
+
+ * Updated the `Running Your Course`_ chapter to remove references to the “new
+ beta” Instructor Dashboard.
+
+ * Updated `Enrollment`_ section to reflect that usernames or email
+ addresses can be used to batch enroll students.
+
+ * Updated `Grade and Answer Data`_ section to include new features in
+ the problem **Staff Debug** viewer for rescoring, resetting attempts, and
+ deleting state for a specified student.
+
+ * Updated `Staffing`_ section to explain the labeling differences
+ between Studio and the LMS with respect to course team roles.
+
+ * Updated `Assign Discussion Administration Roles`_ section to include a note
+ about course staff requiring explicit granting of discussion administration
+ roles.
+
+ * Added the `VitalSource E-Reader Tool`_ section.
+
+ * Updated `Add Files to a Course`_ section to include warnings about
+ file size.
+
+ * Updated the `LTI Component`_ section to reflect new settings.
+
+
+* `edX Data Documentation`_
+
+ Recent changes include:
+
+ Updated `Tracking Logs`_ section to include events for course
+ enrollment activities: ``edx.course.enrollment.activated`` and
+ ``edx.course.enrollment.deactivated``.
+
+
+* `edX Platform Developer Documentation`_
+
+ Recent changes include:
+
+ Added an `Analytics`_ section for developers.
+
+
+* `edX XBlock Documentation`_
+
+
+
+*************
+edX Studio
+*************
+
+* A problem that prevented you from hiding the Wiki in the list of Pages when
+ using Firefox is resolved. (STUD-1581)
+
+* A problem that prevented you from importing a course created on edx.org into
+ edX Edge is resolved. (STUD-1599)
+
+* All text in the Video component UI has been updated for clarity. (DOC-206)
+
+***************************************
+edX Learning Management System
+***************************************
+
+* The Instructor Dashboard that appears to course teams by default in the
+ LMS has changed. The Instructor Dashboard that appears when you click
+ **Instructor** is now the "New Beta" dashboard. The "Standard" dashboard
+ remains available; a button click is required to access it. The two dashboard
+ versions are also relabeled in this release. The version that was previously
+ identified as the "New Beta Dashboard" is now labeled "Instructor Dashboard",
+ and the version previously identified as the "Standard Dashboard" is now
+ labeled "Legacy Dashboard". (LMS-1296)
+
+
+* Previously, when a student clicked **Run Code** for a MatLab problem, the
+ entire page was reloaded. This issue has been resolved so that now only the
+ MatLab problem elements are reloaded. (LMS-2505)
+
+
+****************
+edX Analytics
+****************
+
+* There is a new event tracking API for instrumenting events to capture user
+ actions and other point-in-time activities in Studio and the edX LMS. See
+ `Analytics`_ for more information.
+
+.. include:: links.rst
\ No newline at end of file
diff --git a/docs/en_us/release_notes/source/index.rst b/docs/en_us/release_notes/source/index.rst
index 471b78945f..f00cde5a41 100755
--- a/docs/en_us/release_notes/source/index.rst
+++ b/docs/en_us/release_notes/source/index.rst
@@ -19,6 +19,7 @@ There is a page in this document for each update to the edX system on `edx.org`_
:maxdepth: 1
read_me
+ 05-15-2014
05-12-2014
04-29-2014
04-23-2014
diff --git a/docs/en_us/release_notes/source/links.rst b/docs/en_us/release_notes/source/links.rst
index 67ceb17941..82ab96e16c 100644
--- a/docs/en_us/release_notes/source/links.rst
+++ b/docs/en_us/release_notes/source/links.rst
@@ -150,6 +150,13 @@
.. _Drag and Drop Problem: http://ca.readthedocs.org/en/latest/exercises_tools/drag_and_drop.html
+
+.. _Assign Discussion Administration Roles: http://edx.readthedocs.org/projects/ca/en/latest/running_course/discussions.html#assigning-discussion-roles
+
+.. _LTI Component: http://edx.readthedocs.org/projects/ca/en/latest/exercises_tools/lti_component.html
+
+.. _VitalSource E-Reader Tool: http://edx.readthedocs.org/projects/ca/en/latest/exercises_tools/vitalsource.html
+
.. DATA DOCUMENTATION
.. _Student Info and Progress Data: http://edx.readthedocs.org/projects/devdata/en/latest/internal_data_formats/sql_schema.html#student-info
@@ -172,4 +179,6 @@
.. _Contributing to Open edX: http://edx.readthedocs.org/projects/userdocs/en/latest/process/index.html
-.. _edX XBlock Documentation: http://edx.readthedocs.org/projects/xblock/en/latest/
\ No newline at end of file
+.. _edX XBlock Documentation: http://edx.readthedocs.org/projects/xblock/en/latest/
+
+.. _Analytics: http://edx.readthedocs.org/projects/userdocs/en/latest/analytics.html
\ No newline at end of file
From 9bc7a518ee22b5bbf11ff239d771ac6739f245ec Mon Sep 17 00:00:00 2001
From: David Adams
Date: Fri, 18 Apr 2014 12:16:10 -0700
Subject: [PATCH 08/12] Fixes issue with metrics tab click handlers Click
handlers were not getting attached to DOM elements in some cases on slow
running machines. Added logic to attach handlers when elements are ready.
Added 2 buttons on metrics tab:
Download Subsection Data for downloading to csv.
Download Problem Data for downloading to csv.
---
.../class_dashboard/dashboard_data.py | 137 +++++++---
.../tests/test_dashboard_data.py | 60 +++-
lms/djangoapps/class_dashboard/urls.py | 30 ++
.../instructor/views/instructor_dashboard.py | 2 +
.../sass/course/instructor/_instructor_2.scss | 22 +-
.../class_dashboard/all_section_metrics.js | 21 +-
.../class_dashboard/d3_stacked_bar_graph.js | 14 +-
.../courseware/instructor_dashboard.html | 4 +-
.../instructor_dashboard_2/metrics.html | 257 ++++++++++++------
lms/urls.py | 18 +-
10 files changed, 411 insertions(+), 154 deletions(-)
create mode 100644 lms/djangoapps/class_dashboard/urls.py
diff --git a/lms/djangoapps/class_dashboard/dashboard_data.py b/lms/djangoapps/class_dashboard/dashboard_data.py
index 209d647faf..aa7eb206ba 100644
--- a/lms/djangoapps/class_dashboard/dashboard_data.py
+++ b/lms/djangoapps/class_dashboard/dashboard_data.py
@@ -2,6 +2,7 @@
Computes the data to display on the Instructor Dashboard
"""
from util.json_request import JsonResponse
+import json
from courseware import models
from django.db.models import Count
@@ -21,9 +22,12 @@ def get_problem_grade_distribution(course_id):
`course_id` the course ID for the course interested in
- Output is a dict, where the key is the problem 'module_id' and the value is a dict with:
+ Output is 2 dicts:
+ 'prob-grade_distrib' where the key is the problem 'module_id' and the value is a dict with:
'max_grade' - max grade for this problem
'grade_distrib' - array of tuples (`grade`,`count`).
+ 'total_student_count' where the key is problem 'module_id' and the value is number of students
+ attempting the problem
"""
# Aggregate query on studentmodule table for grade data for all problems in course
@@ -34,6 +38,7 @@ def get_problem_grade_distribution(course_id):
).values('module_state_key', 'grade', 'max_grade').annotate(count_grade=Count('grade'))
prob_grade_distrib = {}
+ total_student_count = {}
# Loop through resultset building data for each problem
for row in db_query:
@@ -53,7 +58,10 @@ def get_problem_grade_distribution(course_id):
'grade_distrib': [(row['grade'], row['count_grade'])]
}
- return prob_grade_distrib
+ # Build set of total students attempting each problem
+ total_student_count[curr_problem] = total_student_count.get(curr_problem, 0) + row['count_grade']
+
+ return prob_grade_distrib, total_student_count
def get_sequential_open_distrib(course_id):
@@ -136,7 +144,7 @@ def get_d3_problem_grade_distrib(course_id):
'data' - data for the d3_stacked_bar_graph function of the grade distribution for that problem
"""
- prob_grade_distrib = get_problem_grade_distribution(course_id)
+ prob_grade_distrib, total_student_count = get_problem_grade_distribution(course_id)
d3_data = []
# Retrieve course object down to problems
@@ -178,19 +186,24 @@ def get_d3_problem_grade_distrib(course_id):
for (grade, count_grade) in problem_info['grade_distrib']:
percent = 0.0
if max_grade > 0:
- percent = (grade * 100.0) / max_grade
+ percent = round((grade * 100.0) / max_grade, 1)
- # Construct tooltip for problem in grade distibution view
- tooltip = _("{label} {problem_name} - {count_grade} {students} ({percent:.0f}%: {grade:.0f}/{max_grade:.0f} {questions})").format(
- label=label,
- problem_name=problem_name,
- count_grade=count_grade,
- students=_("students"),
- percent=percent,
- grade=grade,
- max_grade=max_grade,
- questions=_("questions"),
- )
+ # Compute percent of students with this grade
+ student_count_percent = 0
+ if total_student_count.get(child.location.url(), 0) > 0:
+ student_count_percent = count_grade * 100 / total_student_count[child.location.url()]
+
+ # Tooltip parameters for problem in grade distribution view
+ tooltip = {
+ 'type': 'problem',
+ 'label': label,
+ 'problem_name': problem_name,
+ 'count_grade': count_grade,
+ 'percent': percent,
+ 'grade': grade,
+ 'max_grade': max_grade,
+ 'student_count_percent': student_count_percent,
+ }
# Construct data to be sent to d3
stack_data.append({
@@ -246,11 +259,14 @@ def get_d3_sequential_open_distrib(course_id):
num_students = sequential_open_distrib[subsection.location.url()]
stack_data = []
- tooltip = _("{num_students} student(s) opened Subsection {subsection_num}: {subsection_name}").format(
- num_students=num_students,
- subsection_num=c_subsection,
- subsection_name=subsection_name,
- )
+
+ # Tooltip parameters for subsection in open_distribution view
+ tooltip = {
+ 'type': 'subsection',
+ 'num_students': num_students,
+ 'subsection_num': c_subsection,
+ 'subsection_name': subsection_name
+ }
stack_data.append({
'color': 0,
@@ -329,19 +345,18 @@ def get_d3_section_grade_distrib(course_id, section):
for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
percent = 0.0
if max_grade > 0:
- percent = (grade * 100.0) / max_grade
+ percent = round((grade * 100.0) / max_grade, 1)
# Construct tooltip for problem in grade distibution view
- tooltip = _("{problem_info_x} {problem_info_n} - {count_grade} {students} ({percent:.0f}%: {grade:.0f}/{max_grade:.0f} {questions})").format(
- problem_info_x=problem_info[problem]['x_value'],
- count_grade=count_grade,
- students=_("students"),
- percent=percent,
- problem_info_n=problem_info[problem]['display_name'],
- grade=grade,
- max_grade=max_grade,
- questions=_("questions"),
- )
+ tooltip = {
+ 'type': 'problem',
+ 'problem_info_x': problem_info[problem]['x_value'],
+ 'count_grade': count_grade,
+ 'percent': percent,
+ 'problem_info_n': problem_info[problem]['display_name'],
+ 'grade': grade,
+ 'max_grade': max_grade,
+ }
stack_data.append({
'color': percent,
@@ -415,6 +430,7 @@ def get_students_opened_subsection(request, csv=False):
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames for CSV download.
"""
+
module_id = request.GET.get('module_id')
csv = request.GET.get('csv')
@@ -447,9 +463,11 @@ def get_students_opened_subsection(request, csv=False):
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
- filename = sanitize_filename(tooltip[tooltip.index('S'):])
- header = ['Name', 'Username']
+ # Subsection name is everything after 3rd space in tooltip
+ filename = sanitize_filename(' '.join(tooltip.split(' ')[3:]))
+
+ header = [_("Name").encode('utf-8'), _("Username").encode('utf-8')]
for student in students:
results.append([student['student__profile__name'], student['student__username']])
@@ -507,7 +525,7 @@ def get_students_problem_grades(request, csv=False):
tooltip = request.GET.get('tooltip')
filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])
- header = ['Name', 'Username', 'Grade', 'Percent']
+ header = [_("Name").encode('utf-8'), _("Username").encode('utf-8'), _("Grade").encode('utf-8'), _("Percent").encode('utf-8')]
for student in students:
percent = 0
@@ -519,11 +537,60 @@ def get_students_problem_grades(request, csv=False):
return response
+def post_metrics_data_csv(request):
+ """
+ Generate a list of opened subsections or problems for the entire course for CSV download.
+ Returns a header array, and an array of arrays in the format:
+ section, subsection, count of students for subsections
+ or section, problem, name, count of students, percent of students, score for problems.
+ """
+
+ data = json.loads(request.POST['data'])
+ sections = json.loads(data['sections'])
+ tooltips = json.loads(data['tooltips'])
+ course_id = data['course_id']
+ data_type = data['data_type']
+
+ results = []
+ if data_type == 'subsection':
+ header = [_("Section").encode('utf-8'), _("Subsection").encode('utf-8'), _("Opened by this number of students").encode('utf-8')]
+ filename = sanitize_filename(_('subsections') + '_' + course_id)
+ elif data_type == 'problem':
+ header = [_("Section").encode('utf-8'), _("Problem").encode('utf-8'), _("Name").encode('utf-8'), _("Count of Students").encode('utf-8'), _("% of Students").encode('utf-8'), _("Score").encode('utf-8')]
+ filename = sanitize_filename(_('problems') + '_' + course_id)
+
+ for index, section in enumerate(sections):
+ results.append([section])
+
+ # tooltips array is array of dicts for subsections and
+ # array of array of dicts for problems.
+ if data_type == 'subsection':
+ for tooltip_dict in tooltips[index]:
+ num_students = tooltip_dict['num_students']
+ subsection = tooltip_dict['subsection_name']
+ # Append to results offsetting 1 column to the right.
+ results.append(['', subsection, num_students])
+
+ elif data_type == 'problem':
+ for tooltip in tooltips[index]:
+ for tooltip_dict in tooltip:
+ label = tooltip_dict['label']
+ problem_name = tooltip_dict['problem_name']
+ count_grade = tooltip_dict['count_grade']
+ student_count_percent = tooltip_dict['student_count_percent']
+ percent = tooltip_dict['percent']
+ # Append to results offsetting 1 column to the right.
+ results.append(['', label, problem_name, count_grade, student_count_percent, percent])
+
+ response = create_csv_response(filename, header, results)
+ return response
+
+
def sanitize_filename(filename):
"""
Utility function
"""
filename = filename.replace(" ", "_")
- filename = filename.encode('ascii')
+ filename = filename.encode('utf-8')
filename = filename[0:25] + '.csv'
return filename
diff --git a/lms/djangoapps/class_dashboard/tests/test_dashboard_data.py b/lms/djangoapps/class_dashboard/tests/test_dashboard_data.py
index a011ee6dce..5d20a8fa3c 100644
--- a/lms/djangoapps/class_dashboard/tests/test_dashboard_data.py
+++ b/lms/djangoapps/class_dashboard/tests/test_dashboard_data.py
@@ -95,12 +95,15 @@ class TestGetProblemGradeDistribution(ModuleStoreTestCase):
def test_get_problem_grade_distribution(self):
- prob_grade_distrib = get_problem_grade_distribution(self.course.id)
+ prob_grade_distrib, total_student_count = get_problem_grade_distribution(self.course.id)
for problem in prob_grade_distrib:
max_grade = prob_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
+ for val in total_student_count.values():
+ self.assertEquals(USER_COUNT, val)
+
def test_get_sequential_open_distibution(self):
sequential_open_distrib = get_sequential_open_distrib(self.course.id)
@@ -243,6 +246,61 @@ class TestGetProblemGradeDistribution(ModuleStoreTestCase):
# Check response contains 1 line for each user +1 for the header
self.assertEquals(USER_COUNT + 1, len(response.content.splitlines()))
+ def test_post_metrics_data_subsections_csv(self):
+
+ url = reverse('post_metrics_data_csv')
+
+ sections = json.dumps(["Introduction"])
+ tooltips = json.dumps([[{"subsection_name": "Pre-Course Survey", "subsection_num": 1, "type": "subsection", "num_students": 18963}]])
+ course_id = self.course.id
+ data_type = 'subsection'
+
+ data = json.dumps({'sections': sections,
+ 'tooltips': tooltips,
+ 'course_id': course_id,
+ 'data_type': data_type,
+ })
+
+ response = self.client.post(url, {'data': data})
+ # Check response contains 1 line for header, 1 line for Section and 1 line for Subsection
+ self.assertEquals(3, len(response.content.splitlines()))
+
+ def test_post_metrics_data_problems_csv(self):
+
+ url = reverse('post_metrics_data_csv')
+
+ sections = json.dumps(["Introduction"])
+ tooltips = json.dumps([[[
+ {'student_count_percent': 0,
+ 'problem_name': 'Q1',
+ 'grade': 0,
+ 'percent': 0,
+ 'label': 'P1.2.1',
+ 'max_grade': 1,
+ 'count_grade': 26,
+ 'type': u'problem'},
+ {'student_count_percent': 99,
+ 'problem_name': 'Q1',
+ 'grade': 1,
+ 'percent': 100,
+ 'label': 'P1.2.1',
+ 'max_grade': 1,
+ 'count_grade': 4763,
+ 'type': 'problem'},
+ ]]])
+ course_id = self.course.id
+ data_type = 'problem'
+
+ data = json.dumps({'sections': sections,
+ 'tooltips': tooltips,
+ 'course_id': course_id,
+ 'data_type': data_type,
+ })
+
+ response = self.client.post(url, {'data': data})
+ # Check response contains 1 line for header, 1 line for Sections and 2 lines for problems
+ self.assertEquals(4, len(response.content.splitlines()))
+
def test_get_section_display_name(self):
section_display_name = get_section_display_name(self.course.id)
diff --git a/lms/djangoapps/class_dashboard/urls.py b/lms/djangoapps/class_dashboard/urls.py
new file mode 100644
index 0000000000..24198260e3
--- /dev/null
+++ b/lms/djangoapps/class_dashboard/urls.py
@@ -0,0 +1,30 @@
+"""
+Class Dashboard API endpoint urls.
+"""
+
+from django.conf.urls import patterns, url
+
+urlpatterns = patterns('', # nopep8
+ # Json request data for metrics for entire course
+ url(r'^(?P[^/]+/[^/]+/[^/]+)/all_sequential_open_distrib$',
+ 'class_dashboard.views.all_sequential_open_distrib', name="all_sequential_open_distrib"),
+
+ url(r'^(?P[^/]+/[^/]+/[^/]+)/all_problem_grade_distribution$',
+ 'class_dashboard.views.all_problem_grade_distribution', name="all_problem_grade_distribution"),
+
+ # Json request data for metrics for particular section
+ url(r'^(?P[^/]+/[^/]+/[^/]+)/problem_grade_distribution/(?P\d+)$',
+ 'class_dashboard.views.section_problem_grade_distrib', name="section_problem_grade_distrib"),
+
+ # For listing students that opened a sub-section
+ url(r'^get_students_opened_subsection$',
+ 'class_dashboard.dashboard_data.get_students_opened_subsection', name="get_students_opened_subsection"),
+
+ # For listing of students' grade per problem
+ url(r'^get_students_problem_grades$',
+ 'class_dashboard.dashboard_data.get_students_problem_grades', name="get_students_problem_grades"),
+
+ # For generating metrics data as a csv
+ url(r'^post_metrics_data_csv_url',
+ 'class_dashboard.dashboard_data.post_metrics_data_csv', name="post_metrics_data_csv"),
+)
diff --git a/lms/djangoapps/instructor/views/instructor_dashboard.py b/lms/djangoapps/instructor/views/instructor_dashboard.py
index a7c7bee453..9b0eeab55b 100644
--- a/lms/djangoapps/instructor/views/instructor_dashboard.py
+++ b/lms/djangoapps/instructor/views/instructor_dashboard.py
@@ -250,10 +250,12 @@ def _section_metrics(course_id, access):
'section_key': 'metrics',
'section_display_name': ('Metrics'),
'access': access,
+ 'course_id': course_id,
'sub_section_display_name': get_section_display_name(course_id),
'section_has_problem': get_array_section_has_problem(course_id),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
+ 'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
diff --git a/lms/static/sass/course/instructor/_instructor_2.scss b/lms/static/sass/course/instructor/_instructor_2.scss
index 35a984c6e8..96959814f0 100644
--- a/lms/static/sass/course/instructor/_instructor_2.scss
+++ b/lms/static/sass/course/instructor/_instructor_2.scss
@@ -591,17 +591,16 @@ section.instructor-dashboard-content-2 {
.instructor-dashboard-wrapper-2 section.idash-section#metrics {
- .metrics-container {
+ .metrics-container, .metrics-header-container {
position: relative;
width: 100%;
float: left;
clear: both;
margin-top: 25px;
-
- .metrics-left {
+
+ .metrics-left, .metrics-left-header {
position: relative;
width: 30%;
- height: 640px;
float: left;
margin-right: 2.5%;
@@ -609,10 +608,13 @@ section.instructor-dashboard-content-2 {
width: 100%;
}
}
- .metrics-right {
+ .metrics-section.metrics-left {
+ height: 640px;
+ }
+
+ .metrics-right, .metrics-right-header {
position: relative;
width: 65%;
- height: 295px;
float: left;
margin-left: 2.5%;
margin-bottom: 25px;
@@ -622,6 +624,10 @@ section.instructor-dashboard-content-2 {
}
}
+ .metrics-section.metrics-right {
+ height: 295px;
+ }
+
svg {
.stacked-bar {
cursor: pointer;
@@ -718,10 +724,6 @@ section.instructor-dashboard-content-2 {
border-radius: 5px;
margin-top: 25px;
}
-
- input#graph_reload {
- display: none;
- }
}
}
diff --git a/lms/templates/class_dashboard/all_section_metrics.js b/lms/templates/class_dashboard/all_section_metrics.js
index fc417255c7..86651eaaf4 100644
--- a/lms/templates/class_dashboard/all_section_metrics.js
+++ b/lms/templates/class_dashboard/all_section_metrics.js
@@ -1,4 +1,4 @@
-<%page args="id_opened_prefix, id_grade_prefix, id_attempt_prefix, id_tooltip_prefix, course_id, **kwargs"/>
+<%page args="id_opened_prefix, id_grade_prefix, id_attempt_prefix, id_tooltip_prefix, course_id, allSubsectionTooltipArr, allProblemTooltipArr, **kwargs"/>
<%!
import json
from django.core.urlresolvers import reverse
@@ -30,6 +30,13 @@ $(function () {
margin: {left:0},
};
+ // Construct array of tooltips for all sections for the "Download Subsection Data" button.
+ var sectionTooltipArr = new Array();
+ paramOpened.data.forEach( function(element, index, array) {
+ sectionTooltipArr[index] = element.stackData[0].tooltip;
+ });
+ allSubsectionTooltipArr[i] = sectionTooltipArr;
+
barGraphOpened = edx_d3CreateStackedBarGraph(paramOpened, d3.select(curr_id).append("svg"),
d3.select("#${id_tooltip_prefix}"+i));
barGraphOpened.scale.stackColor.range(["#555555","#555555"]);
@@ -68,6 +75,17 @@ $(function () {
bVerticalXAxisLabel : true,
};
+ // Construct array of tooltips for all sections for the "Download Problem Data" button.
+ var sectionTooltipArr = new Array();
+ paramGrade.data.forEach( function(element, index, array) {
+ var stackDataArr = new Array();
+ for (var j = 0; j < element.stackData.length; j++) {
+ stackDataArr[j] = element.stackData[j].tooltip
+ }
+ sectionTooltipArr[index] = stackDataArr;
+ });
+ allProblemTooltipArr[i] = sectionTooltipArr;
+
barGraphGrade = edx_d3CreateStackedBarGraph(paramGrade, d3.select(curr_id).append("svg"),
d3.select("#${id_tooltip_prefix}"+i));
barGraphGrade.scale.stackColor.domain([0,50,100]).range(["#e13f29","#cccccc","#17a74d"]);
@@ -83,6 +101,7 @@ $(function () {
i+=1;
}
+
});
});
\ No newline at end of file
diff --git a/lms/templates/class_dashboard/d3_stacked_bar_graph.js b/lms/templates/class_dashboard/d3_stacked_bar_graph.js
index 8552b3f48e..fd1bdb0f33 100644
--- a/lms/templates/class_dashboard/d3_stacked_bar_graph.js
+++ b/lms/templates/class_dashboard/d3_stacked_bar_graph.js
@@ -349,8 +349,20 @@ edx_d3CreateStackedBarGraph = function(parameters, svg, divTooltip) {
var top = pos[1]-10;
var width = $('#'+graph.divTooltip.attr("id")).width();
+ // Construct the tooltip
+ if (d.tooltip['type'] == 'subsection') {
+ tooltip_str = d.tooltip['num_students'] + ' ' + gettext('student(s) opened Subsection') + ' ' \
+ + d.tooltip['subsection_num'] + ': ' + d.tooltip['subsection_name']
+ }else if (d.tooltip['type'] == 'problem') {
+ tooltip_str = d.tooltip['label'] + ' ' + d.tooltip['problem_name'] + ' - ' \
+ + d.tooltip['count_grade'] + ' ' + gettext('students') + ' (' \
+ + d.tooltip['student_count_percent'] + '%) (' + \
+ + d.tooltip['percent'] + '%: ' + \
+ + d.tooltip['grade'] +'/' + d.tooltip['max_grade'] + ' '
+ + gettext('questions') + ')'
+ }
graph.divTooltip.style("visibility", "visible")
- .text(d.tooltip);
+ .text(tooltip_str);
if ((left+width+30) > $("#"+graph.divTooltip.node().parentNode.id).width())
left -= (width+30);
diff --git a/lms/templates/courseware/instructor_dashboard.html b/lms/templates/courseware/instructor_dashboard.html
index 00f12011e8..9bf61b34f1 100644
--- a/lms/templates/courseware/instructor_dashboard.html
+++ b/lms/templates/courseware/instructor_dashboard.html
@@ -725,7 +725,9 @@ function goto( mode)
%endfor
%endif
diff --git a/lms/templates/instructor/instructor_dashboard_2/metrics.html b/lms/templates/instructor/instructor_dashboard_2/metrics.html
index fbd07e5a19..535750ba15 100644
--- a/lms/templates/instructor/instructor_dashboard_2/metrics.html
+++ b/lms/templates/instructor/instructor_dashboard_2/metrics.html
@@ -1,4 +1,4 @@
-<%! from django.utils.translation import ugettext as _ %>
+ <%! from django.utils.translation import ugettext as _ %>
<%page args="section_data"/>
@@ -11,19 +11,35 @@
%else:
<%namespace name="d3_stacked_bar_graph" file="/class_dashboard/d3_stacked_bar_graph.js"/>
<%namespace name="all_section_metrics" file="/class_dashboard/all_section_metrics.js"/>
-
-
${_("Loading the latest graphs for you; depending on your class size, this may take a few minutes.")}
-
+
+
${_("Use Reload Graphs to refresh the graphs.")}
+
+
+
+
+
${_("Subsection Data")}
+
${_("Each bar shows the number of students that opened the subsection.")}
+
${_("You can click on any of the bars to list the students that opened the subsection.")}
+
${_("You can also download this data as a CSV file.")}
+
+
+
+
${_("Grade Distribution Data")}
+
${_("Each bar shows the grade distribution for that problem.")}
+
${_("You can click on any of the bars to list the students that attempted the problem, along with the grades they received.")}
+
${_("You can also download this data as a CSV file.")}
+
+
+
%for i in range(0, len(section_data['sub_section_display_name'])):