+ % elif state == 'incomplete':
+
+ % endif
+ % if hidden:
+
+ % endif
+
+
+
+
+ % if state == 'unsubmitted':
+ unanswered
+ % elif state == 'correct':
+ correct
+ % elif state == 'incorrect':
+ incorrect
+ % elif state == 'incomplete':
+ incomplete
+ % endif
+
+
+
+
+ % if msg:
+
${msg|n}
+ % endif
+% if state in ['unsubmitted', 'correct', 'incorrect', 'incomplete'] or hidden:
+
+% endif
+
diff --git a/common/lib/capa/capa/templates/textbox.html b/common/lib/capa/capa/templates/textbox.html
index 91aa6d41c8..6e44712d9f 100644
--- a/common/lib/capa/capa/templates/textbox.html
+++ b/common/lib/capa/capa/templates/textbox.html
@@ -5,8 +5,6 @@
% endif
>${value|h}
-
-
% if state == 'unsubmitted':
Unanswered
@@ -26,6 +24,8 @@
${state}
+
+
${msg|n}
diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py
new file mode 100644
index 0000000000..c72d2a1538
--- /dev/null
+++ b/common/lib/capa/capa/tests/__init__.py
@@ -0,0 +1,21 @@
+import fs
+import fs.osfs
+import os
+
+from mock import Mock
+
+TEST_DIR = os.path.dirname(os.path.realpath(__file__))
+
+test_system = Mock(
+ ajax_url='courses/course_id/modx/a_location',
+ track_function=Mock(),
+ get_module=Mock(),
+ render_template=Mock(),
+ replace_urls=Mock(),
+ user=Mock(),
+ filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
+ debug=True,
+ xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10},
+ node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
+ anonymous_student_id = 'student'
+)
diff --git a/common/lib/xmodule/xmodule/tests/test_files/choiceresponse_checkbox.xml b/common/lib/capa/capa/tests/test_files/choiceresponse_checkbox.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/choiceresponse_checkbox.xml
rename to common/lib/capa/capa/tests/test_files/choiceresponse_checkbox.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/choiceresponse_radio.xml b/common/lib/capa/capa/tests/test_files/choiceresponse_radio.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/choiceresponse_radio.xml
rename to common/lib/capa/capa/tests/test_files/choiceresponse_radio.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/coderesponse.xml b/common/lib/capa/capa/tests/test_files/coderesponse.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/coderesponse.xml
rename to common/lib/capa/capa/tests/test_files/coderesponse.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/coderesponse_externalresponseformat.xml b/common/lib/capa/capa/tests/test_files/coderesponse_externalresponseformat.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/coderesponse_externalresponseformat.xml
rename to common/lib/capa/capa/tests/test_files/coderesponse_externalresponseformat.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/formularesponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/formularesponse_with_hint.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/formularesponse_with_hint.xml
rename to common/lib/capa/capa/tests/test_files/formularesponse_with_hint.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/imageresponse.xml b/common/lib/capa/capa/tests/test_files/imageresponse.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/imageresponse.xml
rename to common/lib/capa/capa/tests/test_files/imageresponse.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/javascriptresponse.xml b/common/lib/capa/capa/tests/test_files/javascriptresponse.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/javascriptresponse.xml
rename to common/lib/capa/capa/tests/test_files/javascriptresponse.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js b/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js
rename to common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/compiled/javascriptresponse.js b/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/compiled/javascriptresponse.js
rename to common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/mersenne-twister-min.js b/common/lib/capa/capa/tests/test_files/js/mersenne-twister-min.js
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/mersenne-twister-min.js
rename to common/lib/capa/capa/tests/test_files/js/mersenne-twister-min.js
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_display.coffee
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.coffee
rename to common/lib/capa/capa/tests/test_files/js/test_problem_display.coffee
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.js b/common/lib/capa/capa/tests/test_files/js/test_problem_display.js
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.js
rename to common/lib/capa/capa/tests/test_files/js/test_problem_display.js
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.coffee
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.coffee
rename to common/lib/capa/capa/tests/test_files/js/test_problem_generator.coffee
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.js b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.js
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.js
rename to common/lib/capa/capa/tests/test_files/js/test_problem_generator.js
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.coffee
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.coffee
rename to common/lib/capa/capa/tests/test_files/js/test_problem_grader.coffee
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.js b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.js
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.js
rename to common/lib/capa/capa/tests/test_files/js/test_problem_grader.js
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/xproblem.coffee b/common/lib/capa/capa/tests/test_files/js/xproblem.coffee
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/xproblem.coffee
rename to common/lib/capa/capa/tests/test_files/js/xproblem.coffee
diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/xproblem.js b/common/lib/capa/capa/tests/test_files/js/xproblem.js
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/js/xproblem.js
rename to common/lib/capa/capa/tests/test_files/js/xproblem.js
diff --git a/common/lib/xmodule/xmodule/tests/test_files/multi_bare.xml b/common/lib/capa/capa/tests/test_files/multi_bare.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/multi_bare.xml
rename to common/lib/capa/capa/tests/test_files/multi_bare.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/multichoice.xml b/common/lib/capa/capa/tests/test_files/multichoice.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/multichoice.xml
rename to common/lib/capa/capa/tests/test_files/multichoice.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/optionresponse.xml b/common/lib/capa/capa/tests/test_files/optionresponse.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/optionresponse.xml
rename to common/lib/capa/capa/tests/test_files/optionresponse.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/stringresponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/stringresponse_with_hint.xml
rename to common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/symbolicresponse.xml b/common/lib/capa/capa/tests/test_files/symbolicresponse.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/symbolicresponse.xml
rename to common/lib/capa/capa/tests/test_files/symbolicresponse.xml
diff --git a/common/lib/xmodule/xmodule/tests/test_files/truefalse.xml b/common/lib/capa/capa/tests/test_files/truefalse.xml
similarity index 100%
rename from common/lib/xmodule/xmodule/tests/test_files/truefalse.xml
rename to common/lib/capa/capa/tests/test_files/truefalse.xml
diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py
new file mode 100644
index 0000000000..9ef642d468
--- /dev/null
+++ b/common/lib/capa/capa/tests/test_inputtypes.py
@@ -0,0 +1,68 @@
+"""
+Tests of input types (and actually responsetypes too)
+"""
+
+from datetime import datetime
+import json
+from mock import Mock
+from nose.plugins.skip import SkipTest
+import os
+import unittest
+
+from . import test_system
+from capa import inputtypes
+
+from lxml import etree
+
+def tst_render_template(template, context):
+ """
+ A test version of render_template. Renders to the repr of the context, completely ignoring the template name.
+ """
+ return repr(context)
+
+
+system = Mock(render_template=tst_render_template)
+
+class OptionInputTest(unittest.TestCase):
+ '''
+ Make sure option inputs work
+ '''
+ def test_rendering_new(self):
+ xml = """
"""
+ element = etree.fromstring(xml)
+
+ value = 'Down'
+ status = 'answered'
+ context = inputtypes._optioninput(element, value, status, test_system.render_template)
+ print 'context: ', context
+
+ expected = {'value': 'Down',
+ 'options': [('Up', 'Up'), ('Down', 'Down')],
+ 'state': 'answered',
+ 'msg': '',
+ 'inline': '',
+ 'id': 'sky_input'}
+
+ self.assertEqual(context, expected)
+
+
+ def test_rendering(self):
+ xml_str = """
"""
+ element = etree.fromstring(xml_str)
+
+ state = {'value': 'Down',
+ 'id': 'sky_input',
+ 'status': 'answered'}
+ option_input = inputtypes.OptionInput(system, element, state)
+
+ context = option_input._get_render_context()
+
+ expected = {'value': 'Down',
+ 'options': [('Up', 'Up'), ('Down', 'Down')],
+ 'state': 'answered',
+ 'msg': '',
+ 'inline': '',
+ 'id': 'sky_input'}
+
+ self.assertEqual(context, expected)
+
diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py
new file mode 100644
index 0000000000..f2fa873080
--- /dev/null
+++ b/common/lib/capa/capa/tests/test_responsetypes.py
@@ -0,0 +1,384 @@
+"""
+Tests of responsetypes
+"""
+
+
+from datetime import datetime
+import json
+from nose.plugins.skip import SkipTest
+import os
+import unittest
+
+from . import test_system
+
+import capa.capa_problem as lcp
+from capa.correctmap import CorrectMap
+from capa.util import convert_files_to_filenames
+from capa.xqueue_interface import dateformat
+
+class MultiChoiceTest(unittest.TestCase):
+ def test_MC_grade(self):
+ multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml"
+ test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': 'choice_foil3'}
+ self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
+ false_answers = {'1_2_1': 'choice_foil2'}
+ self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
+
+ def test_MC_bare_grades(self):
+ multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml"
+ test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': 'choice_2'}
+ self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
+ false_answers = {'1_2_1': 'choice_1'}
+ self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
+
+ def test_TF_grade(self):
+ truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml"
+ test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']}
+ self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
+ false_answers = {'1_2_1': ['choice_foil1']}
+ self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
+ false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']}
+ self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
+ false_answers = {'1_2_1': ['choice_foil3']}
+ self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
+ false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']}
+ self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
+
+
+class ImageResponseTest(unittest.TestCase):
+ def test_ir_grade(self):
+ imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml"
+ test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': '(490,11)-(556,98)',
+ '1_2_2': '(242,202)-(296,276)'}
+ test_answers = {'1_2_1': '[500,20]',
+ '1_2_2': '[250,300]',
+ }
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
+
+
+class SymbolicResponseTest(unittest.TestCase):
+ def test_sr_grade(self):
+ raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test
+ symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml"
+ test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
+ '1_2_1_dynamath': '''
+
+
+
+ cos
+
+ (
+ θ
+ )
+
+
+ ⋅
+
+ [
+
+
+
+ 1
+
+
+ 0
+
+
+
+
+ 0
+
+
+ 1
+
+
+
+ ]
+
+ +
+ i
+ ⋅
+
+ sin
+
+ (
+ θ
+ )
+
+
+ ⋅
+
+ [
+
+
+
+ 0
+
+
+ 1
+
+
+
+
+ 1
+
+
+ 0
+
+
+
+ ]
+
+
+
+''',
+ }
+ wrong_answers = {'1_2_1': '2',
+ '1_2_1_dynamath': '''
+
+
+ 2
+
+ ''',
+ }
+ self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
+ self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
+
+
+class OptionResponseTest(unittest.TestCase):
+ '''
+ Run this with
+
+ rake test_common/lib/capa
+ '''
+ def test_or_grade(self):
+ optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml"
+ test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': 'True',
+ '1_2_2': 'False'}
+ test_answers = {'1_2_1': 'True',
+ '1_2_2': 'True',
+ }
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
+
+
+class FormulaResponseWithHintTest(unittest.TestCase):
+ '''
+ Test Formula response problem with a hint
+ This problem also uses calc.
+ '''
+ def test_or_grade(self):
+ problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml"
+ test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': '2.5*x-5.0'}
+ test_answers = {'1_2_1': '0.4*x-5.0'}
+ self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
+ cmap = test_lcp.grade_answers(test_answers)
+ self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect')
+ self.assertTrue('You have inverted' in cmap.get_hint('1_2_1'))
+
+
+class StringResponseWithHintTest(unittest.TestCase):
+ '''
+ Test String response problem with a hint
+ '''
+ def test_or_grade(self):
+ problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml"
+ test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': 'Michigan'}
+ test_answers = {'1_2_1': 'Minnesota'}
+ self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
+ cmap = test_lcp.grade_answers(test_answers)
+ self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect')
+ self.assertTrue('St. Paul' in cmap.get_hint('1_2_1'))
+
+
+class CodeResponseTest(unittest.TestCase):
+ '''
+ Test CodeResponse
+ TODO: Add tests for external grader messages
+ '''
+ @staticmethod
+ def make_queuestate(key, time):
+ timestr = datetime.strftime(time, dateformat)
+ return {'key': key, 'time': timestr}
+
+ def test_is_queued(self):
+ """
+ Simple test of whether LoncapaProblem knows when it's been queued
+ """
+ problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
+ with open(problem_file) as input_file:
+ test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system)
+
+ answer_ids = sorted(test_lcp.get_question_answers())
+
+ # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
+ cmap = CorrectMap()
+ for answer_id in answer_ids:
+ cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
+ test_lcp.correct_map.update(cmap)
+
+ self.assertEquals(test_lcp.is_queued(), False)
+
+ # Now we queue the LCP
+ cmap = CorrectMap()
+ for i, answer_id in enumerate(answer_ids):
+ queuestate = CodeResponseTest.make_queuestate(i, datetime.now())
+ cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
+ test_lcp.correct_map.update(cmap)
+
+ self.assertEquals(test_lcp.is_queued(), True)
+
+
+ def test_update_score(self):
+ '''
+ Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
+ '''
+ problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
+ with open(problem_file) as input_file:
+ test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system)
+
+ answer_ids = sorted(test_lcp.get_question_answers())
+
+ # CodeResponse requires internal CorrectMap state. Build it now in the queued state
+ old_cmap = CorrectMap()
+ for i, answer_id in enumerate(answer_ids):
+ queuekey = 1000 + i
+ queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now())
+ old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
+
+ # Message format common to external graders
+ grader_msg = '
MESSAGE ' # Must be valid XML
+ correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg})
+ incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg})
+
+ xserver_msgs = {'correct': correct_score_msg,
+ 'incorrect': incorrect_score_msg,}
+
+ # Incorrect queuekey, state should not be updated
+ for correctness in ['correct', 'incorrect']:
+ test_lcp.correct_map = CorrectMap()
+ test_lcp.correct_map.update(old_cmap) # Deep copy
+
+ test_lcp.update_score(xserver_msgs[correctness], queuekey=0)
+ self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison
+
+ for answer_id in answer_ids:
+ self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered
+
+ # Correct queuekey, state should be updated
+ for correctness in ['correct', 'incorrect']:
+ for i, answer_id in enumerate(answer_ids):
+ test_lcp.correct_map = CorrectMap()
+ test_lcp.correct_map.update(old_cmap)
+
+ new_cmap = CorrectMap()
+ new_cmap.update(old_cmap)
+ npoints = 1 if correctness=='correct' else 0
+ new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None)
+
+ test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i)
+ self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict())
+
+ for j, test_id in enumerate(answer_ids):
+ if j == i:
+ self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered
+ else:
+ self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered
+
+
+ def test_recentmost_queuetime(self):
+ '''
+ Test whether the LoncapaProblem knows about the time of queue requests
+ '''
+ problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
+ with open(problem_file) as input_file:
+ test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system)
+
+ answer_ids = sorted(test_lcp.get_question_answers())
+
+ # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
+ cmap = CorrectMap()
+ for answer_id in answer_ids:
+ cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
+ test_lcp.correct_map.update(cmap)
+
+ self.assertEquals(test_lcp.get_recentmost_queuetime(), None)
+
+ # CodeResponse requires internal CorrectMap state. Build it now in the queued state
+ cmap = CorrectMap()
+ for i, answer_id in enumerate(answer_ids):
+ queuekey = 1000 + i
+ latest_timestamp = datetime.now()
+ queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp)
+ cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
+ test_lcp.correct_map.update(cmap)
+
+ # Queue state only tracks up to second
+ latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat)
+
+ self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp)
+
+ def test_convert_files_to_filenames(self):
+ '''
+ Test whether file objects are converted to filenames without altering other structures
+ '''
+ problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
+ with open(problem_file) as fp:
+ answers_with_file = {'1_2_1': 'String-based answer',
+ '1_3_1': ['answer1', 'answer2', 'answer3'],
+ '1_4_1': [fp, fp]}
+ answers_converted = convert_files_to_filenames(answers_with_file)
+ self.assertEquals(answers_converted['1_2_1'], 'String-based answer')
+ self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
+ self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])
+
+
+class ChoiceResponseTest(unittest.TestCase):
+
+ def test_cr_rb_grade(self):
+ problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml"
+ test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': 'choice_2',
+ '1_3_1': ['choice_2', 'choice_3']}
+ test_answers = {'1_2_1': 'choice_2',
+ '1_3_1': 'choice_2',
+ }
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')
+
+ def test_cr_cb_grade(self):
+ problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml"
+ test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': 'choice_2',
+ '1_3_1': ['choice_2', 'choice_3'],
+ '1_4_1': ['choice_2', 'choice_3']}
+ test_answers = {'1_2_1': 'choice_2',
+ '1_3_1': 'choice_2',
+ '1_4_1': ['choice_2', 'choice_3'],
+ }
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')
+ self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct')
+
+class JavascriptResponseTest(unittest.TestCase):
+
+ def test_jr_grade(self):
+ problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml"
+ coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
+ os.system("coffee -c %s" % (coffee_file_path))
+ test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system)
+ correct_answers = {'1_2_1': json.dumps({0: 4})}
+ incorrect_answers = {'1_2_1': json.dumps({0: 5})}
+
+ self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect')
+ self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
+
diff --git a/common/lib/capa/capa/util.py b/common/lib/capa/capa/util.py
index 75acf657e8..75bd9fb5bc 100644
--- a/common/lib/capa/capa/util.py
+++ b/common/lib/capa/capa/util.py
@@ -11,7 +11,7 @@ def compare_with_tolerance(v1, v2, tol):
- v1 : student result (number)
- v2 : instructor result (number)
- - tol : tolerance (string or number)
+ - tol : tolerance (string representing a number)
'''
relative = tol.endswith('%')
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index 95eb2c996c..634c360550 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -133,6 +133,11 @@ class CapaModule(XModule):
if self.rerandomize == 'never':
self.seed = 1
elif self.rerandomize == "per_student" and hasattr(self.system, 'id'):
+ # TODO: This line is badly broken:
+ # (1) We're passing student ID to xmodule.
+ # (2) There aren't bins of students. -- we only want 10 or 20 randomizations, and want to assign students
+ # to these bins, and may not want cohorts. So e.g. hash(your-id, problem_id) % num_bins.
+ # - analytics really needs small number of bins.
self.seed = system.id
else:
self.seed = None
@@ -619,12 +624,14 @@ class CapaModule(XModule):
if self.closed():
event_info['failure'] = 'closed'
self.system.track_function('reset_problem_fail', event_info)
- return "Problem is closed"
+ return {'success': False,
+ 'error': "Problem is closed"}
if not self.lcp.done:
event_info['failure'] = 'not_done'
self.system.track_function('reset_problem_fail', event_info)
- return "Refresh the page and make an attempt before resetting."
+ return {'success': False,
+ 'error': "Refresh the page and make an attempt before resetting."}
self.lcp.do_reset()
if self.rerandomize in ["always", "onreset"]:
diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss
index aa3f96c2e7..fd67a3804e 100644
--- a/common/lib/xmodule/xmodule/css/capa/display.scss
+++ b/common/lib/xmodule/xmodule/css/capa/display.scss
@@ -572,7 +572,7 @@ section.problem {
}
}
- section {
+ > section {
padding: 9px;
}
}
diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
index 123f68145a..cc389c3fc9 100644
--- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
@@ -334,10 +334,13 @@ class @Problem
inputtypeShowAnswerMethods:
choicegroup: (element, display, answers) =>
element = $(element)
- for key, value of answers
- element.find('input').attr('disabled', 'disabled')
- for choice in value
- element.find("label[for='input_#{key}_#{choice}']").addClass 'choicegroup_correct'
+
+ element.find('input').attr('disabled', 'disabled')
+
+ input_id = element.attr('id').replace(/inputtype_/,'')
+ answer = answers[input_id]
+ for choice in answer
+ element.find("label[for='input_#{input_id}_#{choice}']").addClass 'choicegroup_correct'
javascriptinput: (element, display, answers) =>
answer_id = $(element).attr('id').split("_")[1...].join("_")
diff --git a/common/lib/xmodule/xmodule/js/src/collapsible.coffee b/common/lib/xmodule/xmodule/js/src/collapsible.coffee
index 314e7ca868..18a186e106 100644
--- a/common/lib/xmodule/xmodule/js/src/collapsible.coffee
+++ b/common/lib/xmodule/xmodule/js/src/collapsible.coffee
@@ -11,11 +11,12 @@ class @Collapsible
###
el.find('.longform').hide()
el.find('.shortform').append('
See full output ')
- el.find('.collapsible section').hide()
+ el.find('.collapsible header + section').hide()
el.find('.full').click @toggleFull
el.find('.collapsible header a').click @toggleHint
@toggleFull: (event) =>
+ event.preventDefault()
$(event.target).parent().siblings().slideToggle()
$(event.target).parent().parent().toggleClass('open')
if $(event.target).text() == 'See full output'
diff --git a/common/lib/xmodule/xmodule/js/src/javascript_loader.coffee b/common/lib/xmodule/xmodule/js/src/javascript_loader.coffee
index c3f536787e..a687da3538 100644
--- a/common/lib/xmodule/xmodule/js/src/javascript_loader.coffee
+++ b/common/lib/xmodule/xmodule/js/src/javascript_loader.coffee
@@ -41,20 +41,30 @@ class @JavascriptLoader
callbackCalled = true
callback() if callback?
+ # Keep a map of what sources we're loaded from, and don't do it twice.
+ loaded = {}
placeholders.each (index, placeholder) ->
# TODO: Check if the script already exists in DOM. If so, (1) copy it
# into memory; (2) delete the DOM script element; (3) reappend it.
# This would prevent memory bloat and save a network request.
- s = document.createElement('script')
- s.setAttribute('src', $(placeholder).attr("data-src"))
- s.setAttribute('type', "text/javascript")
+ src = $(placeholder).attr("data-src")
+ if src not of loaded
+ loaded[src] = true
+ s = document.createElement('script')
+ s.setAttribute('src', src)
+ s.setAttribute('type', "text/javascript")
+
+ s.onload = completionHandlerGenerator(index)
- s.onload = completionHandlerGenerator(index)
+ # s.onload does not fire in IE8; this does.
+ s.onreadystatechange = completionHandlerGeneratorIE(index)
- # s.onload does not fire in IE8; this does.
- s.onreadystatechange = completionHandlerGeneratorIE(index)
-
- # Need to use the DOM elements directly or the scripts won't execute
- # properly.
- $('head')[0].appendChild(s)
+ # Need to use the DOM elements directly or the scripts won't execute
+ # properly.
+ $('head')[0].appendChild(s)
+ else
+ # just call the completion callback directly, without reloading the file
+ completionHandlerGenerator(index)()
$(placeholder).remove()
+
+
diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py
index 654b6beb15..ed64c45118 100644
--- a/common/lib/xmodule/xmodule/tests/__init__.py
+++ b/common/lib/xmodule/xmodule/tests/__init__.py
@@ -1,31 +1,22 @@
-#
-# unittests for xmodule (and capa)
-#
-# Note: run this using a like like this:
-#
-# django-admin.py test --settings=lms.envs.test_ike --pythonpath=. common/lib/xmodule
+"""
+unittests for xmodule
+
+Run like this:
+
+ rake test_common/lib/xmodule
+
+"""
import unittest
import os
import fs
import fs.osfs
-import json
-import json
import numpy
-import xmodule
import capa.calc as calc
-import capa.capa_problem as lcp
-from capa.correctmap import CorrectMap
-from capa.util import convert_files_to_filenames
-from capa.xqueue_interface import dateformat
-from datetime import datetime
-from xmodule import graders, x_module
+import xmodule
from xmodule.x_module import ModuleSystem
-from xmodule.graders import Score, aggregate_scores
-from xmodule.progress import Progress
-from nose.plugins.skip import SkipTest
from mock import Mock
i4xs = ModuleSystem(
@@ -35,7 +26,7 @@ i4xs = ModuleSystem(
render_template=Mock(),
replace_urls=Mock(),
user=Mock(),
- filestore=fs.osfs.OSFS(os.path.dirname(os.path.realpath(__file__))+"/test_files"),
+ filestore=Mock(),
debug=True,
xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10},
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
@@ -94,719 +85,3 @@ class ModelsTest(unittest.TestCase):
exception_happened = True
self.assertTrue(exception_happened)
-#-----------------------------------------------------------------------------
-# tests of capa_problem inputtypes
-
-
-class MultiChoiceTest(unittest.TestCase):
- def test_MC_grade(self):
- multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml"
- test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': 'choice_foil3'}
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
- false_answers = {'1_2_1': 'choice_foil2'}
- self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
-
- def test_MC_bare_grades(self):
- multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml"
- test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': 'choice_2'}
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
- false_answers = {'1_2_1': 'choice_1'}
- self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
-
- def test_TF_grade(self):
- truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml"
- test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']}
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
- false_answers = {'1_2_1': ['choice_foil1']}
- self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
- false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']}
- self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
- false_answers = {'1_2_1': ['choice_foil3']}
- self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
- false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']}
- self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect')
-
-
-class ImageResponseTest(unittest.TestCase):
- def test_ir_grade(self):
- imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml"
- test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': '(490,11)-(556,98)',
- '1_2_2': '(242,202)-(296,276)'}
- test_answers = {'1_2_1': '[500,20]',
- '1_2_2': '[250,300]',
- }
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
-
-
-class SymbolicResponseTest(unittest.TestCase):
- def test_sr_grade(self):
- raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test
- symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml"
- test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]',
- '1_2_1_dynamath': '''
-
-
-
- cos
-
- (
- θ
- )
-
-
- ⋅
-
- [
-
-
-
- 1
-
-
- 0
-
-
-
-
- 0
-
-
- 1
-
-
-
- ]
-
- +
- i
- ⋅
-
- sin
-
- (
- θ
- )
-
-
- ⋅
-
- [
-
-
-
- 0
-
-
- 1
-
-
-
-
- 1
-
-
- 0
-
-
-
- ]
-
-
-
-''',
- }
- wrong_answers = {'1_2_1': '2',
- '1_2_1_dynamath': '''
-
-
- 2
-
- ''',
- }
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
- self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect')
-
-
-class OptionResponseTest(unittest.TestCase):
- '''
- Run this with
-
- python manage.py test courseware.OptionResponseTest
- '''
- def test_or_grade(self):
- optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml"
- test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': 'True',
- '1_2_2': 'False'}
- test_answers = {'1_2_1': 'True',
- '1_2_2': 'True',
- }
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect')
-
-
-class FormulaResponseWithHintTest(unittest.TestCase):
- '''
- Test Formula response problem with a hint
- This problem also uses calc.
- '''
- def test_or_grade(self):
- problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml"
- test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': '2.5*x-5.0'}
- test_answers = {'1_2_1': '0.4*x-5.0'}
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
- cmap = test_lcp.grade_answers(test_answers)
- self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect')
- self.assertTrue('You have inverted' in cmap.get_hint('1_2_1'))
-
-
-class StringResponseWithHintTest(unittest.TestCase):
- '''
- Test String response problem with a hint
- '''
- def test_or_grade(self):
- problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml"
- test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': 'Michigan'}
- test_answers = {'1_2_1': 'Minnesota'}
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
- cmap = test_lcp.grade_answers(test_answers)
- self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect')
- self.assertTrue('St. Paul' in cmap.get_hint('1_2_1'))
-
-
-class CodeResponseTest(unittest.TestCase):
- '''
- Test CodeResponse
- TODO: Add tests for external grader messages
- '''
- @staticmethod
- def make_queuestate(key, time):
- timestr = datetime.strftime(time, dateformat)
- return {'key': key, 'time': timestr}
-
- def test_is_queued(self):
- '''
- Simple test of whether LoncapaProblem knows when it's been queued
- '''
- problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
- with open(problem_file) as input_file:
- test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)
-
- answer_ids = sorted(test_lcp.get_question_answers())
-
- # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
- cmap = CorrectMap()
- for answer_id in answer_ids:
- cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
- test_lcp.correct_map.update(cmap)
-
- self.assertEquals(test_lcp.is_queued(), False)
-
- # Now we queue the LCP
- cmap = CorrectMap()
- for i, answer_id in enumerate(answer_ids):
- queuestate = CodeResponseTest.make_queuestate(i, datetime.now())
- cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
- test_lcp.correct_map.update(cmap)
-
- self.assertEquals(test_lcp.is_queued(), True)
-
-
- def test_update_score(self):
- '''
- Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
- '''
- problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
- with open(problem_file) as input_file:
- test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)
-
- answer_ids = sorted(test_lcp.get_question_answers())
-
- # CodeResponse requires internal CorrectMap state. Build it now in the queued state
- old_cmap = CorrectMap()
- for i, answer_id in enumerate(answer_ids):
- queuekey = 1000 + i
- queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now())
- old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
-
- # Message format common to external graders
- grader_msg = '
MESSAGE ' # Must be valid XML
- correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg})
- incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg})
-
- xserver_msgs = {'correct': correct_score_msg,
- 'incorrect': incorrect_score_msg,}
-
- # Incorrect queuekey, state should not be updated
- for correctness in ['correct', 'incorrect']:
- test_lcp.correct_map = CorrectMap()
- test_lcp.correct_map.update(old_cmap) # Deep copy
-
- test_lcp.update_score(xserver_msgs[correctness], queuekey=0)
- self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison
-
- for answer_id in answer_ids:
- self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered
-
- # Correct queuekey, state should be updated
- for correctness in ['correct', 'incorrect']:
- for i, answer_id in enumerate(answer_ids):
- test_lcp.correct_map = CorrectMap()
- test_lcp.correct_map.update(old_cmap)
-
- new_cmap = CorrectMap()
- new_cmap.update(old_cmap)
- npoints = 1 if correctness=='correct' else 0
- new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None)
-
- test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i)
- self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict())
-
- for j, test_id in enumerate(answer_ids):
- if j == i:
- self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered
- else:
- self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered
-
-
- def test_recentmost_queuetime(self):
- '''
- Test whether the LoncapaProblem knows about the time of queue requests
- '''
- problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
- with open(problem_file) as input_file:
- test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs)
-
- answer_ids = sorted(test_lcp.get_question_answers())
-
- # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
- cmap = CorrectMap()
- for answer_id in answer_ids:
- cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
- test_lcp.correct_map.update(cmap)
-
- self.assertEquals(test_lcp.get_recentmost_queuetime(), None)
-
- # CodeResponse requires internal CorrectMap state. Build it now in the queued state
- cmap = CorrectMap()
- for i, answer_id in enumerate(answer_ids):
- queuekey = 1000 + i
- latest_timestamp = datetime.now()
- queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp)
- cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
- test_lcp.correct_map.update(cmap)
-
- # Queue state only tracks up to second
- latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat)
-
- self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp)
-
- def test_convert_files_to_filenames(self):
- '''
- Test whether file objects are converted to filenames without altering other structures
- '''
- problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml")
- with open(problem_file) as fp:
- answers_with_file = {'1_2_1': 'String-based answer',
- '1_3_1': ['answer1', 'answer2', 'answer3'],
- '1_4_1': [fp, fp]}
- answers_converted = convert_files_to_filenames(answers_with_file)
- self.assertEquals(answers_converted['1_2_1'], 'String-based answer')
- self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
- self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])
-
-
-class ChoiceResponseTest(unittest.TestCase):
-
- def test_cr_rb_grade(self):
- problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml"
- test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': 'choice_2',
- '1_3_1': ['choice_2', 'choice_3']}
- test_answers = {'1_2_1': 'choice_2',
- '1_3_1': 'choice_2',
- }
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')
-
- def test_cr_cb_grade(self):
- problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml"
- test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': 'choice_2',
- '1_3_1': ['choice_2', 'choice_3'],
- '1_4_1': ['choice_2', 'choice_3']}
- test_answers = {'1_2_1': 'choice_2',
- '1_3_1': 'choice_2',
- '1_4_1': ['choice_2', 'choice_3'],
- }
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct')
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect')
- self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct')
-
-class JavascriptResponseTest(unittest.TestCase):
-
- def test_jr_grade(self):
- problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml"
- coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
- os.system("coffee -c %s" % (coffee_file_path))
- test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs)
- correct_answers = {'1_2_1': json.dumps({0: 4})}
- incorrect_answers = {'1_2_1': json.dumps({0: 5})}
-
- self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect')
- self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct')
-
-#-----------------------------------------------------------------------------
-# Grading tests
-
-
-class GradesheetTest(unittest.TestCase):
-
- def test_weighted_grading(self):
- scores = []
- Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
-
- all, graded = aggregate_scores(scores)
- self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary"))
- self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
-
- scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
- all, graded = aggregate_scores(scores)
- self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary"))
- self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
-
- scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
- all, graded = aggregate_scores(scores)
- self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary"))
- self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary"))
-
- scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
- all, graded = aggregate_scores(scores)
- self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary"))
- self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary"))
-
-
-class GraderTest(unittest.TestCase):
-
- empty_gradesheet = {
- }
-
- incomplete_gradesheet = {
- 'Homework': [],
- 'Lab': [],
- 'Midterm': [],
- }
-
- test_gradesheet = {
- 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
- Score(earned=16, possible=16.0, graded=True, section='hw2')],
- #The dropped scores should be from the assignments that don't exist yet
-
- 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped
- Score(earned=1, possible=1.0, graded=True, section='lab2'),
- Score(earned=1, possible=1.0, graded=True, section='lab3'),
- Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped
- Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped
- Score(earned=6, possible=7.0, graded=True, section='lab6'),
- Score(earned=5, possible=6.0, graded=True, section='lab7')],
-
- 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ],
- }
-
- def test_SingleSectionGrader(self):
- midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
- lab4Grader = graders.SingleSectionGrader("Lab", "lab4")
- badLabGrader = graders.SingleSectionGrader("Lab", "lab42")
-
- for graded in [midtermGrader.grade(self.empty_gradesheet),
- midtermGrader.grade(self.incomplete_gradesheet),
- badLabGrader.grade(self.test_gradesheet)]:
- self.assertEqual(len(graded['section_breakdown']), 1)
- self.assertEqual(graded['percent'], 0.0)
-
- graded = midtermGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.505)
- self.assertEqual(len(graded['section_breakdown']), 1)
-
- graded = lab4Grader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.2)
- self.assertEqual(len(graded['section_breakdown']), 1)
-
- def test_AssignmentFormatGrader(self):
- homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
- noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0)
- #Even though the minimum number is 3, this should grade correctly when 7 assignments are found
- overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2)
- labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
-
- #Test the grading of an empty gradesheet
- for graded in [homeworkGrader.grade(self.empty_gradesheet),
- noDropGrader.grade(self.empty_gradesheet),
- homeworkGrader.grade(self.incomplete_gradesheet),
- noDropGrader.grade(self.incomplete_gradesheet)]:
- self.assertAlmostEqual(graded['percent'], 0.0)
- #Make sure the breakdown includes 12 sections, plus one summary
- self.assertEqual(len(graded['section_breakdown']), 12 + 1)
-
- graded = homeworkGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments
- self.assertEqual(len(graded['section_breakdown']), 12 + 1)
-
- graded = noDropGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments
- self.assertEqual(len(graded['section_breakdown']), 12 + 1)
-
- graded = overflowGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments
- self.assertEqual(len(graded['section_breakdown']), 7 + 1)
-
- graded = labGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
- self.assertEqual(len(graded['section_breakdown']), 7 + 1)
-
- def test_WeightedSubsectionsGrader(self):
- #First, a few sub graders
- homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
- labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
- midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
-
- weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25), (labGrader, labGrader.category, 0.25),
- (midtermGrader, midtermGrader.category, 0.5)])
-
- overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5), (labGrader, labGrader.category, 0.5),
- (midtermGrader, midtermGrader.category, 0.5)])
-
- #The midterm should have all weight on this one
- zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
- (midtermGrader, midtermGrader.category, 0.5)])
-
- #This should always have a final percent of zero
- allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0),
- (midtermGrader, midtermGrader.category, 0.0)])
-
- emptyGrader = graders.WeightedSubsectionsGrader([])
-
- graded = weightedGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
- self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
- self.assertEqual(len(graded['grade_breakdown']), 3)
-
- graded = overOneWeightsGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
- self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
- self.assertEqual(len(graded['grade_breakdown']), 3)
-
- graded = zeroWeightsGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.2525)
- self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
- self.assertEqual(len(graded['grade_breakdown']), 3)
-
- graded = allZeroWeightsGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.0)
- self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
- self.assertEqual(len(graded['grade_breakdown']), 3)
-
- for graded in [weightedGrader.grade(self.empty_gradesheet),
- weightedGrader.grade(self.incomplete_gradesheet),
- zeroWeightsGrader.grade(self.empty_gradesheet),
- allZeroWeightsGrader.grade(self.empty_gradesheet)]:
- self.assertAlmostEqual(graded['percent'], 0.0)
- self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
- self.assertEqual(len(graded['grade_breakdown']), 3)
-
- graded = emptyGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.0)
- self.assertEqual(len(graded['section_breakdown']), 0)
- self.assertEqual(len(graded['grade_breakdown']), 0)
-
- def test_graderFromConf(self):
-
- #Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
- #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.
-
- weightedGrader = graders.grader_from_conf([
- {
- 'type': "Homework",
- 'min_count': 12,
- 'drop_count': 2,
- 'short_label': "HW",
- 'weight': 0.25,
- },
- {
- 'type': "Lab",
- 'min_count': 7,
- 'drop_count': 3,
- 'category': "Labs",
- 'weight': 0.25
- },
- {
- 'type': "Midterm",
- 'name': "Midterm Exam",
- 'short_label': "Midterm",
- 'weight': 0.5,
- },
- ])
-
- emptyGrader = graders.grader_from_conf([])
-
- graded = weightedGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
- self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
- self.assertEqual(len(graded['grade_breakdown']), 3)
-
- graded = emptyGrader.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.0)
- self.assertEqual(len(graded['section_breakdown']), 0)
- self.assertEqual(len(graded['grade_breakdown']), 0)
-
- #Test that graders can also be used instead of lists of dictionaries
- homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
- homeworkGrader2 = graders.grader_from_conf(homeworkGrader)
-
- graded = homeworkGrader2.grade(self.test_gradesheet)
- self.assertAlmostEqual(graded['percent'], 0.11)
- self.assertEqual(len(graded['section_breakdown']), 12 + 1)
-
- #TODO: How do we test failure cases? The parser only logs an error when it can't parse something. Maybe it should throw exceptions?
-
-# --------------------------------------------------------------------------
-# Module progress tests
-
-
-class ProgressTest(unittest.TestCase):
- ''' Test that basic Progress objects work. A Progress represents a
- fraction between 0 and 1.
- '''
- not_started = Progress(0, 17)
- part_done = Progress(2, 6)
- half_done = Progress(3, 6)
- also_half_done = Progress(1, 2)
- done = Progress(7, 7)
-
- def test_create_object(self):
- # These should work:
- p = Progress(0, 2)
- p = Progress(1, 2)
- p = Progress(2, 2)
-
- p = Progress(2.5, 5.0)
- p = Progress(3.7, 12.3333)
-
- # These shouldn't
- self.assertRaises(ValueError, Progress, 0, 0)
- self.assertRaises(ValueError, Progress, 2, 0)
- self.assertRaises(ValueError, Progress, 1, -2)
-
- self.assertRaises(TypeError, Progress, 0, "all")
- # check complex numbers just for the heck of it :)
- self.assertRaises(TypeError, Progress, 2j, 3)
-
- def test_clamp(self):
- self.assertEqual((2, 2), Progress(3, 2).frac())
- self.assertEqual((0, 2), Progress(-2, 2).frac())
-
- def test_frac(self):
- p = Progress(1, 2)
- (a, b) = p.frac()
- self.assertEqual(a, 1)
- self.assertEqual(b, 2)
-
- def test_percent(self):
- self.assertEqual(self.not_started.percent(), 0)
- self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333)
- self.assertEqual(self.half_done.percent(), 50)
- self.assertEqual(self.done.percent(), 100)
-
- self.assertEqual(self.half_done.percent(), self.also_half_done.percent())
-
- def test_started(self):
- self.assertFalse(self.not_started.started())
-
- self.assertTrue(self.part_done.started())
- self.assertTrue(self.half_done.started())
- self.assertTrue(self.done.started())
-
- def test_inprogress(self):
- # only true if working on it
- self.assertFalse(self.done.inprogress())
- self.assertFalse(self.not_started.inprogress())
-
- self.assertTrue(self.part_done.inprogress())
- self.assertTrue(self.half_done.inprogress())
-
- def test_done(self):
- self.assertTrue(self.done.done())
- self.assertFalse(self.half_done.done())
- self.assertFalse(self.not_started.done())
-
- def test_str(self):
- self.assertEqual(str(self.not_started), "0/17")
- self.assertEqual(str(self.part_done), "2/6")
- self.assertEqual(str(self.done), "7/7")
-
- def test_ternary_str(self):
- self.assertEqual(self.not_started.ternary_str(), "none")
- self.assertEqual(self.half_done.ternary_str(), "in_progress")
- self.assertEqual(self.done.ternary_str(), "done")
-
- def test_to_js_status(self):
- '''Test the Progress.to_js_status_str() method'''
-
- self.assertEqual(Progress.to_js_status_str(self.not_started), "none")
- self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress")
- self.assertEqual(Progress.to_js_status_str(self.done), "done")
- self.assertEqual(Progress.to_js_status_str(None), "NA")
-
- def test_to_js_detail_str(self):
- '''Test the Progress.to_js_detail_str() method'''
- f = Progress.to_js_detail_str
- for p in (self.not_started, self.half_done, self.done):
- self.assertEqual(f(p), str(p))
- # But None should be encoded as NA
- self.assertEqual(f(None), "NA")
-
- def test_add(self):
- '''Test the Progress.add_counts() method'''
- p = Progress(0, 2)
- p2 = Progress(1, 3)
- p3 = Progress(2, 5)
- pNone = None
- add = lambda a, b: Progress.add_counts(a, b).frac()
-
- self.assertEqual(add(p, p), (0, 4))
- self.assertEqual(add(p, p2), (1, 5))
- self.assertEqual(add(p2, p3), (3, 8))
-
- self.assertEqual(add(p2, pNone), p2.frac())
- self.assertEqual(add(pNone, p2), p2.frac())
-
- def test_equality(self):
- '''Test that comparing Progress objects for equality
- works correctly.'''
- p = Progress(1, 2)
- p2 = Progress(2, 4)
- p3 = Progress(1, 2)
- self.assertTrue(p == p3)
- self.assertFalse(p == p2)
-
- # Check != while we're at it
- self.assertTrue(p != p2)
- self.assertFalse(p != p3)
-
-
-class ModuleProgressTest(unittest.TestCase):
- ''' Test that get_progress() does the right thing for the different modules
- '''
- def test_xmodule_default(self):
- '''Make sure default get_progress exists, returns None'''
- xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {})
- p = xm.get_progress()
- self.assertEqual(p, None)
diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py b/common/lib/xmodule/xmodule/tests/test_graders.py
new file mode 100644
index 0000000000..fa0e94d2d5
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_graders.py
@@ -0,0 +1,220 @@
+"""Grading tests"""
+import unittest
+
+from xmodule import graders
+from xmodule.graders import Score, aggregate_scores
+
+class GradesheetTest(unittest.TestCase):
+
+ def test_weighted_grading(self):
+ scores = []
+ Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
+
+ all, graded = aggregate_scores(scores)
+ self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary"))
+ self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
+
+ scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
+ all, graded = aggregate_scores(scores)
+ self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary"))
+ self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary"))
+
+ scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
+ all, graded = aggregate_scores(scores)
+ self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary"))
+ self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary"))
+
+ scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
+ all, graded = aggregate_scores(scores)
+ self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary"))
+ self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary"))
+
+
+class GraderTest(unittest.TestCase):
+
+ empty_gradesheet = {
+ }
+
+ incomplete_gradesheet = {
+ 'Homework': [],
+ 'Lab': [],
+ 'Midterm': [],
+ }
+
+ test_gradesheet = {
+ 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
+ Score(earned=16, possible=16.0, graded=True, section='hw2')],
+ #The dropped scores should be from the assignments that don't exist yet
+
+ 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped
+ Score(earned=1, possible=1.0, graded=True, section='lab2'),
+ Score(earned=1, possible=1.0, graded=True, section='lab3'),
+ Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped
+ Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped
+ Score(earned=6, possible=7.0, graded=True, section='lab6'),
+ Score(earned=5, possible=6.0, graded=True, section='lab7')],
+
+ 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ],
+ }
+
+ def test_SingleSectionGrader(self):
+ midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
+ lab4Grader = graders.SingleSectionGrader("Lab", "lab4")
+ badLabGrader = graders.SingleSectionGrader("Lab", "lab42")
+
+ for graded in [midtermGrader.grade(self.empty_gradesheet),
+ midtermGrader.grade(self.incomplete_gradesheet),
+ badLabGrader.grade(self.test_gradesheet)]:
+ self.assertEqual(len(graded['section_breakdown']), 1)
+ self.assertEqual(graded['percent'], 0.0)
+
+ graded = midtermGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.505)
+ self.assertEqual(len(graded['section_breakdown']), 1)
+
+ graded = lab4Grader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.2)
+ self.assertEqual(len(graded['section_breakdown']), 1)
+
+ def test_AssignmentFormatGrader(self):
+ homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
+ noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0)
+ #Even though the minimum number is 3, this should grade correctly when 7 assignments are found
+ overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2)
+ labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
+
+ #Test the grading of an empty gradesheet
+ for graded in [homeworkGrader.grade(self.empty_gradesheet),
+ noDropGrader.grade(self.empty_gradesheet),
+ homeworkGrader.grade(self.incomplete_gradesheet),
+ noDropGrader.grade(self.incomplete_gradesheet)]:
+ self.assertAlmostEqual(graded['percent'], 0.0)
+ #Make sure the breakdown includes 12 sections, plus one summary
+ self.assertEqual(len(graded['section_breakdown']), 12 + 1)
+
+ graded = homeworkGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments
+ self.assertEqual(len(graded['section_breakdown']), 12 + 1)
+
+ graded = noDropGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments
+ self.assertEqual(len(graded['section_breakdown']), 12 + 1)
+
+ graded = overflowGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments
+ self.assertEqual(len(graded['section_breakdown']), 7 + 1)
+
+ graded = labGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
+ self.assertEqual(len(graded['section_breakdown']), 7 + 1)
+
+ def test_WeightedSubsectionsGrader(self):
+ #First, a few sub graders
+ homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
+ labGrader = graders.AssignmentFormatGrader("Lab", 7, 3)
+ midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
+
+ weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25),
+ (labGrader, labGrader.category, 0.25),
+ (midtermGrader, midtermGrader.category, 0.5)])
+
+ overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5),
+ (labGrader, labGrader.category, 0.5),
+ (midtermGrader, midtermGrader.category, 0.5)])
+
+ #The midterm should have all weight on this one
+ zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0),
+ (labGrader, labGrader.category, 0.0),
+ (midtermGrader, midtermGrader.category, 0.5)])
+
+ #This should always have a final percent of zero
+ allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0),
+ (labGrader, labGrader.category, 0.0),
+ (midtermGrader, midtermGrader.category, 0.0)])
+
+ emptyGrader = graders.WeightedSubsectionsGrader([])
+
+ graded = weightedGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
+ self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
+ self.assertEqual(len(graded['grade_breakdown']), 3)
+
+ graded = overOneWeightsGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
+ self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
+ self.assertEqual(len(graded['grade_breakdown']), 3)
+
+ graded = zeroWeightsGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.2525)
+ self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
+ self.assertEqual(len(graded['grade_breakdown']), 3)
+
+ graded = allZeroWeightsGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.0)
+ self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
+ self.assertEqual(len(graded['grade_breakdown']), 3)
+
+ for graded in [weightedGrader.grade(self.empty_gradesheet),
+ weightedGrader.grade(self.incomplete_gradesheet),
+ zeroWeightsGrader.grade(self.empty_gradesheet),
+ allZeroWeightsGrader.grade(self.empty_gradesheet)]:
+ self.assertAlmostEqual(graded['percent'], 0.0)
+ self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
+ self.assertEqual(len(graded['grade_breakdown']), 3)
+
+ graded = emptyGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.0)
+ self.assertEqual(len(graded['section_breakdown']), 0)
+ self.assertEqual(len(graded['grade_breakdown']), 0)
+
+ def test_graderFromConf(self):
+
+ #Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
+ #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.
+
+ weightedGrader = graders.grader_from_conf([
+ {
+ 'type': "Homework",
+ 'min_count': 12,
+ 'drop_count': 2,
+ 'short_label': "HW",
+ 'weight': 0.25,
+ },
+ {
+ 'type': "Lab",
+ 'min_count': 7,
+ 'drop_count': 3,
+ 'category': "Labs",
+ 'weight': 0.25
+ },
+ {
+ 'type': "Midterm",
+ 'name': "Midterm Exam",
+ 'short_label': "Midterm",
+ 'weight': 0.5,
+ },
+ ])
+
+ emptyGrader = graders.grader_from_conf([])
+
+ graded = weightedGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
+ self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
+ self.assertEqual(len(graded['grade_breakdown']), 3)
+
+ graded = emptyGrader.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.0)
+ self.assertEqual(len(graded['section_breakdown']), 0)
+ self.assertEqual(len(graded['grade_breakdown']), 0)
+
+ #Test that graders can also be used instead of lists of dictionaries
+ homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2)
+ homeworkGrader2 = graders.grader_from_conf(homeworkGrader)
+
+ graded = homeworkGrader2.grade(self.test_gradesheet)
+ self.assertAlmostEqual(graded['percent'], 0.11)
+ self.assertEqual(len(graded['section_breakdown']), 12 + 1)
+
+ #TODO: How do we test failure cases? The parser only logs an error when
+ #it can't parse something. Maybe it should throw exceptions?
+
diff --git a/common/lib/xmodule/xmodule/tests/test_progress.py b/common/lib/xmodule/xmodule/tests/test_progress.py
new file mode 100644
index 0000000000..94a0a19d7c
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_progress.py
@@ -0,0 +1,138 @@
+"""Module progress tests"""
+
+import unittest
+
+from xmodule.progress import Progress
+from xmodule import x_module
+
+from . import i4xs
+
+class ProgressTest(unittest.TestCase):
+ ''' Test that basic Progress objects work. A Progress represents a
+ fraction between 0 and 1.
+ '''
+ not_started = Progress(0, 17)
+ part_done = Progress(2, 6)
+ half_done = Progress(3, 6)
+ also_half_done = Progress(1, 2)
+ done = Progress(7, 7)
+
+ def test_create_object(self):
+ # These should work:
+ p = Progress(0, 2)
+ p = Progress(1, 2)
+ p = Progress(2, 2)
+
+ p = Progress(2.5, 5.0)
+ p = Progress(3.7, 12.3333)
+
+ # These shouldn't
+ self.assertRaises(ValueError, Progress, 0, 0)
+ self.assertRaises(ValueError, Progress, 2, 0)
+ self.assertRaises(ValueError, Progress, 1, -2)
+
+ self.assertRaises(TypeError, Progress, 0, "all")
+ # check complex numbers just for the heck of it :)
+ self.assertRaises(TypeError, Progress, 2j, 3)
+
+ def test_clamp(self):
+ self.assertEqual((2, 2), Progress(3, 2).frac())
+ self.assertEqual((0, 2), Progress(-2, 2).frac())
+
+ def test_frac(self):
+ p = Progress(1, 2)
+ (a, b) = p.frac()
+ self.assertEqual(a, 1)
+ self.assertEqual(b, 2)
+
+ def test_percent(self):
+ self.assertEqual(self.not_started.percent(), 0)
+ self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333)
+ self.assertEqual(self.half_done.percent(), 50)
+ self.assertEqual(self.done.percent(), 100)
+
+ self.assertEqual(self.half_done.percent(), self.also_half_done.percent())
+
+ def test_started(self):
+ self.assertFalse(self.not_started.started())
+
+ self.assertTrue(self.part_done.started())
+ self.assertTrue(self.half_done.started())
+ self.assertTrue(self.done.started())
+
+ def test_inprogress(self):
+ # only true if working on it
+ self.assertFalse(self.done.inprogress())
+ self.assertFalse(self.not_started.inprogress())
+
+ self.assertTrue(self.part_done.inprogress())
+ self.assertTrue(self.half_done.inprogress())
+
+ def test_done(self):
+ self.assertTrue(self.done.done())
+ self.assertFalse(self.half_done.done())
+ self.assertFalse(self.not_started.done())
+
+ def test_str(self):
+ self.assertEqual(str(self.not_started), "0/17")
+ self.assertEqual(str(self.part_done), "2/6")
+ self.assertEqual(str(self.done), "7/7")
+
+ def test_ternary_str(self):
+ self.assertEqual(self.not_started.ternary_str(), "none")
+ self.assertEqual(self.half_done.ternary_str(), "in_progress")
+ self.assertEqual(self.done.ternary_str(), "done")
+
+ def test_to_js_status(self):
+ '''Test the Progress.to_js_status_str() method'''
+
+ self.assertEqual(Progress.to_js_status_str(self.not_started), "none")
+ self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress")
+ self.assertEqual(Progress.to_js_status_str(self.done), "done")
+ self.assertEqual(Progress.to_js_status_str(None), "NA")
+
+ def test_to_js_detail_str(self):
+ '''Test the Progress.to_js_detail_str() method'''
+ f = Progress.to_js_detail_str
+ for p in (self.not_started, self.half_done, self.done):
+ self.assertEqual(f(p), str(p))
+ # But None should be encoded as NA
+ self.assertEqual(f(None), "NA")
+
+ def test_add(self):
+ '''Test the Progress.add_counts() method'''
+ p = Progress(0, 2)
+ p2 = Progress(1, 3)
+ p3 = Progress(2, 5)
+ pNone = None
+ add = lambda a, b: Progress.add_counts(a, b).frac()
+
+ self.assertEqual(add(p, p), (0, 4))
+ self.assertEqual(add(p, p2), (1, 5))
+ self.assertEqual(add(p2, p3), (3, 8))
+
+ self.assertEqual(add(p2, pNone), p2.frac())
+ self.assertEqual(add(pNone, p2), p2.frac())
+
+ def test_equality(self):
+ '''Test that comparing Progress objects for equality
+ works correctly.'''
+ p = Progress(1, 2)
+ p2 = Progress(2, 4)
+ p3 = Progress(1, 2)
+ self.assertTrue(p == p3)
+ self.assertFalse(p == p2)
+
+ # Check != while we're at it
+ self.assertTrue(p != p2)
+ self.assertFalse(p != p3)
+
+
+class ModuleProgressTest(unittest.TestCase):
+ ''' Test that get_progress() does the right thing for the different modules
+ '''
+ def test_xmodule_default(self):
+ '''Make sure default get_progress exists, returns None'''
+ xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {})
+ p = xm.get_progress()
+ self.assertEqual(p, None)
diff --git a/common/static/js/capa/README b/common/static/js/capa/README
new file mode 100644
index 0000000000..bb698ef00e
--- /dev/null
+++ b/common/static/js/capa/README
@@ -0,0 +1 @@
+These files really should be in the capa module, but we don't have a way to load js from there at the moment. (TODO)
diff --git a/common/static/js/capa/chemical_equation_preview.js b/common/static/js/capa/chemical_equation_preview.js
new file mode 100644
index 0000000000..90ce27ad11
--- /dev/null
+++ b/common/static/js/capa/chemical_equation_preview.js
@@ -0,0 +1,24 @@
+(function () {
+ update = function() {
+ function create_handler(saved_div) {
+ return (function(response) {
+ if (response.error) {
+ saved_div.html("
" + response.error + " ");
+ } else {
+ saved_div.html(response.preview);
+ }
+ });
+ }
+
+ prev_id = "#" + this.id + "_preview";
+ preview_div = $(prev_id)
+
+ $.get("/preview/chemcalc/", {"formula" : this.value}, create_handler(preview_div));
+ }
+
+ inputs = $('.chemicalequationinput input');
+ // update on load
+ inputs.each(update);
+ // and on every change
+ inputs.bind("input", update);
+}).call(this);
diff --git a/lms/djangoapps/courseware/grades.py b/lms/djangoapps/courseware/grades.py
index 555f1c5f89..36932f9e42 100644
--- a/lms/djangoapps/courseware/grades.py
+++ b/lms/djangoapps/courseware/grades.py
@@ -329,9 +329,15 @@ def progress_summary(student, request, course, student_module_cache):
def get_score(course_id, user, problem_descriptor, module_creator, student_module_cache):
"""
Return the score for a user on a problem, as a tuple (correct, total).
+ e.g. (5,7) if you got 5 out of 7 points.
+
+ If this problem doesn't have a score, or we couldn't load it, returns (None,
+ None).
user: a Student object
- problem: an XModule
+ problem_descriptor: an XModuleDescriptor
+ module_creator: a function that takes a descriptor, and returns the corresponding XModule for this user.
+ Can return None if user doesn't have access, or if something else went wrong.
cache: A StudentModuleCache
"""
if not (problem_descriptor.stores_state and problem_descriptor.has_score):
@@ -339,14 +345,16 @@ def get_score(course_id, user, problem_descriptor, module_creator, student_modul
return (None, None)
correct = 0.0
-
+
instance_module = student_module_cache.lookup(
course_id, problem_descriptor.category, problem_descriptor.location.url())
-
+
if not instance_module:
# If the problem was not in the cache, we need to instantiate the problem.
- # Otherwise, the max score (cached in instance_module) won't be available
+ # Otherwise, the max score (cached in instance_module) won't be available
problem = module_creator(problem_descriptor)
+ if problem is None:
+ return (None, None)
instance_module = get_instance_module(course_id, user, problem, student_module_cache)
# If this problem is ungraded/ungradable, bail
@@ -361,7 +369,7 @@ def get_score(course_id, user, problem_descriptor, module_creator, student_modul
weight = getattr(problem_descriptor, 'weight', None)
if weight is not None:
if total == 0:
- log.exception("Cannot reweight a problem with zero weight. Problem: " + str(instance_module))
+ log.exception("Cannot reweight a problem with zero total points. Problem: " + str(instance_module))
return (correct, total)
correct = correct * weight / total
total = weight
diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py
index 22ab6df67b..1e45822ebf 100644
--- a/lms/djangoapps/courseware/module_render.py
+++ b/lms/djangoapps/courseware/module_render.py
@@ -1,6 +1,7 @@
import hashlib
import json
import logging
+import pyparsing
import sys
from django.conf import settings
@@ -13,6 +14,7 @@ from django.views.decorators.csrf import csrf_exempt
from requests.auth import HTTPBasicAuth
from capa.xqueue_interface import XQueueInterface
+from capa.chem import chemcalc
from courseware.access import has_access
from mitxmako.shortcuts import render_to_string
from models import StudentModule, StudentModuleCache
@@ -471,3 +473,42 @@ def modx_dispatch(request, dispatch, location, course_id):
# Return whatever the module wanted to return to the client/caller
return HttpResponse(ajax_return)
+
+def preview_chemcalc(request):
+ """
+ Render an html preview of a chemical formula or equation. The fact that
+ this is here is a bit of hack. See the note in lms/urls.py about why it's
+ here. (Victor is to blame.)
+
+ request should be a GET, with a key 'formula' and value 'some formula string'.
+
+ Returns a json dictionary:
+ {
+ 'preview' : 'the-preview-html' or ''
+ 'error' : 'the-error' or ''
+ }
+ """
+ if request.method != "GET":
+ raise Http404
+
+ result = {'preview': '',
+ 'error': '' }
+ formula = request.GET.get('formula')
+ if formula is None:
+ result['error'] = "No formula specified."
+
+ return HttpResponse(json.dumps(result))
+
+ try:
+ result['preview'] = chemcalc.render_to_html(formula)
+ except pyparsing.ParseException as p:
+ result['error'] = "Couldn't parse formula: {0}".format(p)
+ except Exception:
+ # this is unexpected, so log
+ log.warning("Error while previewing chemical formula", exc_info=True)
+ result['error'] = "Error while rendering preview"
+
+ return HttpResponse(json.dumps(result))
+
+
+
diff --git a/lms/djangoapps/courseware/tabs.py b/lms/djangoapps/courseware/tabs.py
index e97622a5ef..adb1ab3c0f 100644
--- a/lms/djangoapps/courseware/tabs.py
+++ b/lms/djangoapps/courseware/tabs.py
@@ -15,6 +15,8 @@ import logging
from django.conf import settings
from django.core.urlresolvers import reverse
+from fs.errors import ResourceNotFoundError
+
from courseware.access import has_access
from static_replace import replace_urls
@@ -266,7 +268,8 @@ def get_static_tab_contents(course, tab):
try:
with fs.open(p) as tabfile:
# TODO: redundant with module_render.py. Want to be helper methods in static_replace or something.
- contents = replace_urls(tabfile.read(), course.metadata['data_dir'])
+ text = tabfile.read().decode('utf-8')
+ contents = replace_urls(text, course.metadata['data_dir'])
return replace_urls(contents, staticfiles_prefix='/courses/'+course.id, replace_prefix='/course/')
except (ResourceNotFoundError) as err:
log.exception("Couldn't load tab contents from '{0}': {1}".format(p, err))
diff --git a/lms/djangoapps/courseware/views.py b/lms/djangoapps/courseware/views.py
index d5b7ae0893..58bd6924af 100644
--- a/lms/djangoapps/courseware/views.py
+++ b/lms/djangoapps/courseware/views.py
@@ -356,7 +356,7 @@ def static_tab(request, course_id, tab_slug):
tab = tabs.get_static_tab_by_slug(course, tab_slug)
if tab is None:
raise Http404
-
+
contents = tabs.get_static_tab_contents(course, tab)
if contents is None:
raise Http404
@@ -413,6 +413,16 @@ def course_about(request, course_id):
'show_courseware_link' : show_courseware_link})
+@ensure_csrf_cookie
+@cache_if_anonymous
+def static_university_profile(request, org_id):
+ """
+ Return the profile for the particular org_id that does not have any courses.
+ """
+ template_file = "university_profile/{0}.html".format(org_id).lower()
+ context = dict(courses=[], org_id=org_id)
+ return render_to_response(template_file, context)
+
@ensure_csrf_cookie
@cache_if_anonymous
def university_profile(request, org_id):
@@ -485,7 +495,7 @@ def progress(request, course_id, student_id=None):
courseware_summary = grades.progress_summary(student, request, course,
student_module_cache)
grade_summary = grades.grade(student, request, course, student_module_cache)
-
+
if courseware_summary is None:
#This means the student didn't have access to the course (which the instructor requested)
raise Http404
@@ -498,4 +508,3 @@ def progress(request, course_id, student_id=None):
context.update()
return render_to_response('courseware/progress.html', context)
-
diff --git a/lms/djangoapps/django_comment_client/management/commands/seed_permissions_roles.py b/lms/djangoapps/django_comment_client/management/commands/seed_permissions_roles.py
index f303abf930..3faa846033 100644
--- a/lms/djangoapps/django_comment_client/management/commands/seed_permissions_roles.py
+++ b/lms/djangoapps/django_comment_client/management/commands/seed_permissions_roles.py
@@ -14,6 +14,7 @@ class Command(BaseCommand):
course_id = args[0]
administrator_role = Role.objects.get_or_create(name="Administrator", course_id=course_id)[0]
moderator_role = Role.objects.get_or_create(name="Moderator", course_id=course_id)[0]
+ community_ta_role = Role.objects.get_or_create(name="Community TA", course_id=course_id)[0]
student_role = Role.objects.get_or_create(name="Student", course_id=course_id)[0]
for per in ["vote", "update_thread", "follow_thread", "unfollow_thread",
@@ -30,4 +31,7 @@ class Command(BaseCommand):
moderator_role.inherit_permissions(student_role)
+ # For now, Community TA == Moderator, except for the styling.
+ community_ta_role.inherit_permissions(moderator_role)
+
administrator_role.inherit_permissions(moderator_role)
diff --git a/lms/djangoapps/instructor/management/__init__.py b/lms/djangoapps/instructor/management/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lms/djangoapps/instructor/management/commands/__init__.py b/lms/djangoapps/instructor/management/commands/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lms/djangoapps/instructor/management/commands/dump_grades.py b/lms/djangoapps/instructor/management/commands/dump_grades.py
new file mode 100644
index 0000000000..65825271f3
--- /dev/null
+++ b/lms/djangoapps/instructor/management/commands/dump_grades.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+#
+# django management command: dump grades to csv files
+# for use by batch processes
+
+import os, sys, string
+import datetime
+import json
+
+from instructor.views import *
+from courseware.courses import get_course_by_id
+from xmodule.modulestore.django import modulestore
+
+from django.conf import settings
+from django.core.management.base import BaseCommand
+
+class Command(BaseCommand):
+ help = "dump grades to CSV file. Usage: dump_grades course_id_or_dir filename dump_type\n"
+ help += " course_id_or_dir: either course_id or course_dir\n"
+ help += " filename: where the output CSV is to be stored\n"
+ # help += " start_date: end date as M/D/Y H:M (defaults to end of available data)"
+ help += " dump_type: 'all' or 'raw' (see instructor dashboard)"
+
+ def handle(self, *args, **options):
+
+ # current grading logic and data schema doesn't handle dates
+ # datetime.strptime("21/11/06 16:30", "%m/%d/%y %H:%M")
+
+ print "args = ", args
+
+ course_id = 'MITx/8.01rq_MW/Classical_Mechanics_Reading_Questions_Fall_2012_MW_Section'
+ fn = "grades.csv"
+ get_raw_scores = False
+
+ if len(args)>0:
+ course_id = args[0]
+ if len(args)>1:
+ fn = args[1]
+ if len(args)>2:
+ get_raw_scores = args[2].lower()=='raw'
+
+ request = self.DummyRequest()
+ try:
+ course = get_course_by_id(course_id)
+ except Exception as err:
+ if course_id in modulestore().courses:
+ course = modulestore().courses[course_id]
+ else:
+ print "-----------------------------------------------------------------------------"
+ print "Sorry, cannot find course %s" % course_id
+ print "Please provide a course ID or course data directory name, eg content-mit-801rq"
+ return
+
+ print "-----------------------------------------------------------------------------"
+ print "Dumping grades from %s to file %s (get_raw_scores=%s)" % (course.id, fn, get_raw_scores)
+ datatable = get_student_grade_summary_data(request, course, course.id, get_raw_scores=get_raw_scores)
+
+ fp = open(fn,'w')
+
+ writer = csv.writer(fp, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
+ writer.writerow(datatable['header'])
+ for datarow in datatable['data']:
+ encoded_row = [unicode(s).encode('utf-8') for s in datarow]
+ writer.writerow(encoded_row)
+
+ fp.close()
+ print "Done: %d records dumped" % len(datatable['data'])
+
+ class DummyRequest(object):
+ META = {}
+ def __init__(self):
+ return
+ def get_host(self):
+ return 'edx.mit.edu'
+ def is_secure(self):
+ return False
+
+
+
diff --git a/lms/djangoapps/licenses/__init__.py b/lms/djangoapps/licenses/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lms/djangoapps/licenses/management/__init__.py b/lms/djangoapps/licenses/management/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lms/djangoapps/licenses/management/commands/__init__.py b/lms/djangoapps/licenses/management/commands/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lms/djangoapps/licenses/management/commands/generate_serial_numbers.py b/lms/djangoapps/licenses/management/commands/generate_serial_numbers.py
new file mode 100644
index 0000000000..7c6b0d310e
--- /dev/null
+++ b/lms/djangoapps/licenses/management/commands/generate_serial_numbers.py
@@ -0,0 +1,65 @@
+import os.path
+from uuid import uuid4
+from optparse import make_option
+
+from django.utils.html import escape
+from django.core.management.base import BaseCommand, CommandError
+
+from xmodule.modulestore.django import modulestore
+
+from licenses.models import CourseSoftware, UserLicense
+
+
+class Command(BaseCommand):
+ help = """Generate random serial numbers for software used in a course.
+
+ Usage: generate_serial_numbers
+
+ is the number of numbers to generate.
+
+ Example:
+
+ import_serial_numbers MITx/6.002x/2012_Fall matlab 100
+
+ """
+ args = "course_id software_id count"
+
+ def handle(self, *args, **options):
+ """
+ """
+ course_id, software_name, count = self._parse_arguments(args)
+
+ software, _ = CourseSoftware.objects.get_or_create(course_id=course_id,
+ name=software_name)
+ self._generate_serials(software, count)
+
+ def _parse_arguments(self, args):
+ if len(args) != 3:
+ raise CommandError("Incorrect number of arguments")
+
+ course_id = args[0]
+ courses = modulestore().get_courses()
+ known_course_ids = set(c.id for c in courses)
+
+ if course_id not in known_course_ids:
+ raise CommandError("Unknown course_id")
+
+ software_name = escape(args[1].lower())
+
+ try:
+ count = int(args[2])
+ except ValueError:
+ raise CommandError("Invalid argument.")
+
+ return course_id, software_name, count
+
+ def _generate_serials(self, software, count):
+ print "Generating {0} serials".format(count)
+
+ # add serial numbers them to the database
+ for _ in xrange(count):
+ serial = str(uuid4())
+ license = UserLicense(software=software, serial=serial)
+ license.save()
+
+ print "{0} new serial numbers generated.".format(count)
diff --git a/lms/djangoapps/licenses/management/commands/import_serial_numbers.py b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py
new file mode 100644
index 0000000000..a3a8c0bad1
--- /dev/null
+++ b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py
@@ -0,0 +1,70 @@
+import os.path
+from optparse import make_option
+
+from django.utils.html import escape
+from django.core.management.base import BaseCommand, CommandError
+
+from xmodule.modulestore.django import modulestore
+
+from licenses.models import CourseSoftware, UserLicense
+
+
+class Command(BaseCommand):
+ help = """Imports serial numbers for software used in a course.
+
+ Usage: import_serial_numbers
+
+ is a text file that list one available serial number per line.
+
+ Example:
+
+ import_serial_numbers MITx/6.002x/2012_Fall matlab serials.txt
+
+ """
+ args = "course_id software_id serial_file"
+
+ def handle(self, *args, **options):
+ """
+ """
+ course_id, software_name, filename = self._parse_arguments(args)
+
+ software, _ = CourseSoftware.objects.get_or_create(course_id=course_id,
+ name=software_name)
+ self._import_serials(software, filename)
+
+ def _parse_arguments(self, args):
+ if len(args) != 3:
+ raise CommandError("Incorrect number of arguments")
+
+ course_id = args[0]
+ courses = modulestore().get_courses()
+ known_course_ids = set(c.id for c in courses)
+
+ if course_id not in known_course_ids:
+ raise CommandError("Unknown course_id")
+
+ software_name = escape(args[1].lower())
+
+ filename = os.path.abspath(args[2])
+ if not os.path.exists(filename):
+ raise CommandError("Cannot find filename {0}".format(filename))
+
+ return course_id, software_name, filename
+
+ def _import_serials(self, software, filename):
+ print "Importing serial numbers for {0}.".format(software)
+
+ serials = set(unicode(l.strip()) for l in open(filename))
+
+ # remove serial numbers we already have
+ licenses = UserLicense.objects.filter(software=software)
+ known_serials = set(l.serial for l in licenses)
+ if known_serials:
+ serials = serials.difference(known_serials)
+
+ # add serial numbers them to the database
+ for serial in serials:
+ license = UserLicense(software=software, serial=serial)
+ license.save()
+
+ print "{0} new serial numbers imported.".format(len(serials))
diff --git a/lms/djangoapps/licenses/migrations/0001_initial.py b/lms/djangoapps/licenses/migrations/0001_initial.py
new file mode 100644
index 0000000000..bdc1d3ead4
--- /dev/null
+++ b/lms/djangoapps/licenses/migrations/0001_initial.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding model 'CourseSoftware'
+ db.create_table('licenses_coursesoftware', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ))
+ db.send_create_signal('licenses', ['CourseSoftware'])
+
+ # Adding model 'UserLicense'
+ db.create_table('licenses_userlicense', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('software', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['licenses.CourseSoftware'])),
+ ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
+ ('serial', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ))
+ db.send_create_signal('licenses', ['UserLicense'])
+
+
+ def backwards(self, orm):
+ # Deleting model 'CourseSoftware'
+ db.delete_table('licenses_coursesoftware')
+
+ # Deleting model 'UserLicense'
+ db.delete_table('licenses_userlicense')
+
+
+ models = {
+ 'auth.group': {
+ 'Meta': {'object_name': 'Group'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
+ 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
+ },
+ 'auth.permission': {
+ 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
+ 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
+ },
+ 'auth.user': {
+ 'Meta': {'object_name': 'User'},
+ 'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
+ 'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
+ 'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
+ 'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
+ 'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
+ 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
+ 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
+ 'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
+ 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
+ 'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
+ 'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
+ 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
+ 'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
+ 'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
+ 'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
+ 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
+ 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
+ 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
+ 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
+ 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
+ 'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
+ 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
+ 'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
+ 'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
+ 'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
+ 'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
+ 'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
+ 'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
+ 'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
+ 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
+ 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
+ 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
+ },
+ 'contenttypes.contenttype': {
+ 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
+ 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
+ },
+ 'licenses.coursesoftware': {
+ 'Meta': {'object_name': 'CourseSoftware'},
+ 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ 'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
+ },
+ 'licenses.userlicense': {
+ 'Meta': {'object_name': 'UserLicense'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'serial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+ 'software': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['licenses.CourseSoftware']"}),
+ 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
+ }
+ }
+
+ complete_apps = ['licenses']
\ No newline at end of file
diff --git a/lms/djangoapps/licenses/migrations/__init__.py b/lms/djangoapps/licenses/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lms/djangoapps/licenses/models.py b/lms/djangoapps/licenses/models.py
new file mode 100644
index 0000000000..d259892f5d
--- /dev/null
+++ b/lms/djangoapps/licenses/models.py
@@ -0,0 +1,78 @@
+import logging
+
+from django.db import models, transaction
+
+from student.models import User
+
+log = logging.getLogger("mitx.licenses")
+
+
+class CourseSoftware(models.Model):
+ name = models.CharField(max_length=255)
+ full_name = models.CharField(max_length=255)
+ url = models.CharField(max_length=255)
+ course_id = models.CharField(max_length=255)
+
+ def __unicode__(self):
+ return u'{0} for {1}'.format(self.name, self.course_id)
+
+
+class UserLicense(models.Model):
+ software = models.ForeignKey(CourseSoftware, db_index=True)
+ user = models.ForeignKey(User, null=True)
+ serial = models.CharField(max_length=255)
+
+
+def get_courses_licenses(user, courses):
+ course_ids = set(course.id for course in courses)
+ all_software = CourseSoftware.objects.filter(course_id__in=course_ids)
+
+ assigned_licenses = UserLicense.objects.filter(software__in=all_software,
+ user=user)
+
+ licenses = dict.fromkeys(all_software, None)
+ for license in assigned_licenses:
+ licenses[license.software] = license
+
+ log.info(assigned_licenses)
+ log.info(licenses)
+
+ return licenses
+
+
+def get_license(user, software):
+ try:
+ license = UserLicense.objects.get(user=user, software=software)
+ except UserLicense.DoesNotExist:
+ license = None
+
+ return license
+
+
+def get_or_create_license(user, software):
+ license = get_license(user, software)
+ if license is None:
+ license = _create_license(user, software)
+
+ return license
+
+
+def _create_license(user, software):
+ license = None
+
+ try:
+ # find one license that has not been assigned, locking the
+ # table/rows with select_for_update to prevent race conditions
+ with transaction.commit_on_success():
+ selected = UserLicense.objects.select_for_update()
+ license = selected.filter(user__isnull=True, software=software)[0]
+ license.user = user
+ license.save()
+ except IndexError:
+ # there are no free licenses
+        log.error('No serial numbers available for %s', software)
+ license = None
+ # TODO [rocha]look if someone has unenrolled from the class
+ # and already has a serial number
+
+ return license
diff --git a/lms/djangoapps/licenses/tests.py b/lms/djangoapps/licenses/tests.py
new file mode 100644
index 0000000000..f06899d2de
--- /dev/null
+++ b/lms/djangoapps/licenses/tests.py
@@ -0,0 +1,85 @@
+import logging
+from uuid import uuid4
+from random import shuffle
+from tempfile import NamedTemporaryFile
+
+from django.test import TestCase
+from django.core.management import call_command
+
+from models import CourseSoftware, UserLicense
+
+COURSE_1 = 'MITx/6.002x/2012_Fall'
+
+SOFTWARE_1 = 'matlab'
+SOFTWARE_2 = 'stata'
+
+log = logging.getLogger(__name__)
+
+
+class CommandTest(TestCase):
+ def test_import_serial_numbers(self):
+ size = 20
+
+ log.debug('Adding one set of serials for {0}'.format(SOFTWARE_1))
+ with generate_serials_file(size) as temp_file:
+ args = [COURSE_1, SOFTWARE_1, temp_file.name]
+ call_command('import_serial_numbers', *args)
+
+ log.debug('Adding one set of serials for {0}'.format(SOFTWARE_2))
+ with generate_serials_file(size) as temp_file:
+ args = [COURSE_1, SOFTWARE_2, temp_file.name]
+ call_command('import_serial_numbers', *args)
+
+ log.debug('There should be only 2 course-software entries')
+ software_count = CourseSoftware.objects.all().count()
+ self.assertEqual(2, software_count)
+
+ log.debug('We added two sets of {0} serials'.format(size))
+ licenses_count = UserLicense.objects.all().count()
+ self.assertEqual(2 * size, licenses_count)
+
+ log.debug('Adding more serial numbers to {0}'.format(SOFTWARE_1))
+ with generate_serials_file(size) as temp_file:
+ args = [COURSE_1, SOFTWARE_1, temp_file.name]
+ call_command('import_serial_numbers', *args)
+
+ log.debug('There should be still only 2 course-software entries')
+ software_count = CourseSoftware.objects.all().count()
+ self.assertEqual(2, software_count)
+
+        log.debug('Now we should have 3 sets of {0} serials'.format(size))
+ licenses_count = UserLicense.objects.all().count()
+ self.assertEqual(3 * size, licenses_count)
+
+ cs = CourseSoftware.objects.get(pk=1)
+
+ lics = UserLicense.objects.filter(software=cs)[:size]
+ known_serials = list(l.serial for l in lics)
+ known_serials.extend(generate_serials(10))
+
+ shuffle(known_serials)
+
+ log.debug('Adding some new and old serials to {0}'.format(SOFTWARE_1))
+ with NamedTemporaryFile() as f:
+ f.write('\n'.join(known_serials))
+ f.flush()
+ args = [COURSE_1, SOFTWARE_1, f.name]
+ call_command('import_serial_numbers', *args)
+
+ log.debug('Check if we added only the new ones')
+ licenses_count = UserLicense.objects.filter(software=cs).count()
+ self.assertEqual((2 * size) + 10, licenses_count)
+
+
+def generate_serials(size=20):
+ return [str(uuid4()) for _ in range(size)]
+
+
+def generate_serials_file(size=20):
+ serials = generate_serials(size)
+
+ temp_file = NamedTemporaryFile()
+ temp_file.write('\n'.join(serials))
+ temp_file.flush()
+
+ return temp_file
diff --git a/lms/djangoapps/licenses/views.py b/lms/djangoapps/licenses/views.py
new file mode 100644
index 0000000000..7d804fbd3d
--- /dev/null
+++ b/lms/djangoapps/licenses/views.py
@@ -0,0 +1,84 @@
+import logging
+import json
+import re
+from urlparse import urlparse
+from collections import namedtuple, defaultdict
+
+
+from mitxmako.shortcuts import render_to_string
+
+from django.contrib.auth.models import User
+from django.http import HttpResponse, Http404
+from django.views.decorators.csrf import requires_csrf_token, csrf_protect
+
+from models import CourseSoftware
+from models import get_courses_licenses, get_or_create_license, get_license
+
+
+log = logging.getLogger("mitx.licenses")
+
+
+License = namedtuple('License', 'software serial')
+
+
+def get_licenses_by_course(user, courses):
+ licenses = get_courses_licenses(user, courses)
+ licenses_by_course = defaultdict(list)
+
+ # create missing licenses and group by course_id
+ for software, license in licenses.iteritems():
+ if license is None:
+ licenses[software] = get_or_create_license(user, software)
+
+ course_id = software.course_id
+ serial = license.serial if license else None
+ licenses_by_course[course_id].append(License(software, serial))
+
+ # render elements
+ data_by_course = {}
+ for course_id, licenses in licenses_by_course.iteritems():
+ context = {'licenses': licenses}
+ template = 'licenses/serial_numbers.html'
+ data_by_course[course_id] = render_to_string(template, context)
+
+ return data_by_course
+
+
+@requires_csrf_token
+def user_software_license(request):
+ if request.method != 'POST' or not request.is_ajax():
+ raise Http404
+
+ # get the course id from the referer
+ url_path = urlparse(request.META.get('HTTP_REFERER', '')).path
+    pattern = re.compile('^/courses/(?P<id>[^/]+/[^/]+/[^/]+)/.*/?$')
+ match = re.match(pattern, url_path)
+
+ if not match:
+ raise Http404
+ course_id = match.groupdict().get('id', '')
+
+ user_id = request.session.get('_auth_user_id')
+ software_name = request.POST.get('software')
+ generate = request.POST.get('generate', False) == 'true'
+
+ try:
+ software = CourseSoftware.objects.get(name=software_name,
+ course_id=course_id)
+        log.debug(software)
+ except CourseSoftware.DoesNotExist:
+ raise Http404
+
+ user = User.objects.get(id=user_id)
+
+ if generate:
+ license = get_or_create_license(user, software)
+ else:
+ license = get_license(user, software)
+
+ if license:
+ response = {'serial': license.serial}
+ else:
+ response = {'error': 'No serial number found'}
+
+ return HttpResponse(json.dumps(response), mimetype='application/json')
diff --git a/lms/envs/common.py b/lms/envs/common.py
index a927da8e98..9b98e4ecfd 100644
--- a/lms/envs/common.py
+++ b/lms/envs/common.py
@@ -626,6 +626,7 @@ INSTALLED_APPS = (
'certificates',
'instructor',
'psychometrics',
+ 'licenses',
#For the wiki
'wiki', # The new django-wiki from benjaoming
diff --git a/lms/static/coffee/src/discussion/utils.coffee b/lms/static/coffee/src/discussion/utils.coffee
index 769aaa0bb1..a032c0248f 100644
--- a/lms/static/coffee/src/discussion/utils.coffee
+++ b/lms/static/coffee/src/discussion/utils.coffee
@@ -25,6 +25,10 @@ class @DiscussionUtil
staff = _.union(@roleIds['Staff'], @roleIds['Moderator'], @roleIds['Administrator'])
_.include(staff, parseInt(user_id))
+ @isTA: (user_id) ->
+ ta = _.union(@roleIds['Community TA'])
+ _.include(ta, parseInt(user_id))
+
@bulkUpdateContentInfo: (infos) ->
for id, info of infos
Content.getContent(id).updateInfo(info)
@@ -157,7 +161,7 @@ class @DiscussionUtil
@makeWmdEditor: ($content, $local, cls_identifier) ->
elem = $local(".#{cls_identifier}")
placeholder = elem.data('placeholder')
- id = elem.data("id")
+ id = elem.attr("data-id") # use attr instead of data because we want to avoid type coercion
appended_id = "-#{cls_identifier}-#{id}"
imageUploadUrl = @urlFor('upload')
_processor = (_this) ->
@@ -170,12 +174,12 @@ class @DiscussionUtil
@getWmdEditor: ($content, $local, cls_identifier) ->
elem = $local(".#{cls_identifier}")
- id = elem.data("id")
+ id = elem.attr("data-id") # use attr instead of data because we want to avoid type coercion
@wmdEditors["#{cls_identifier}-#{id}"]
@getWmdInput: ($content, $local, cls_identifier) ->
elem = $local(".#{cls_identifier}")
- id = elem.data("id")
+ id = elem.attr("data-id") # use attr instead of data because we want to avoid type coercion
$local("#wmd-input-#{cls_identifier}-#{id}")
@getWmdContent: ($content, $local, cls_identifier) ->
diff --git a/lms/static/coffee/src/discussion/views/discussion_thread_list_view.coffee b/lms/static/coffee/src/discussion/views/discussion_thread_list_view.coffee
index e97ca364da..c5f66c87ec 100644
--- a/lms/static/coffee/src/discussion/views/discussion_thread_list_view.coffee
+++ b/lms/static/coffee/src/discussion/views/discussion_thread_list_view.coffee
@@ -156,7 +156,11 @@ if Backbone?
@$(".post-list").append(view.el)
threadSelected: (e) =>
- thread_id = $(e.target).closest("a").data("id")
+ # Use .attr('data-id') rather than .data('id') because .data does type
+ # coercion. Usually, this is fine, but when Mongo gives an object id with
+ # no letters, it casts it to a Number.
+
+ thread_id = $(e.target).closest("a").attr("data-id")
@setActiveThread(thread_id)
@trigger("thread:selected", thread_id) # This triggers a callback in the DiscussionRouter which calls the line above...
false
diff --git a/lms/static/coffee/src/discussion/views/response_comment_show_view.coffee b/lms/static/coffee/src/discussion/views/response_comment_show_view.coffee
index 7ae8f779c5..e6c8064978 100644
--- a/lms/static/coffee/src/discussion/views/response_comment_show_view.coffee
+++ b/lms/static/coffee/src/discussion/views/response_comment_show_view.coffee
@@ -32,3 +32,5 @@ if Backbone?
markAsStaff: ->
if DiscussionUtil.isStaff(@model.get("user_id"))
@$el.find("a.profile-link").after('staff ')
+ else if DiscussionUtil.isTA(@model.get("user_id"))
+ @$el.find("a.profile-link").after('')
diff --git a/lms/static/coffee/src/discussion/views/thread_response_show_view.coffee b/lms/static/coffee/src/discussion/views/thread_response_show_view.coffee
index 6da57c845d..32683fe6f6 100644
--- a/lms/static/coffee/src/discussion/views/thread_response_show_view.coffee
+++ b/lms/static/coffee/src/discussion/views/thread_response_show_view.coffee
@@ -37,6 +37,9 @@ if Backbone?
if DiscussionUtil.isStaff(@model.get("user_id"))
@$el.addClass("staff")
@$el.prepend('staff
')
+ else if DiscussionUtil.isTA(@model.get("user_id"))
+ @$el.addClass("community-ta")
+ @$el.prepend('')
toggleVote: (event) ->
event.preventDefault()
diff --git a/lms/static/images/press/uts-seal_109x84.jpg b/lms/static/images/press/uts-seal_109x84.jpg
new file mode 100644
index 0000000000..94ba3a6a0a
Binary files /dev/null and b/lms/static/images/press/uts-seal_109x84.jpg differ
diff --git a/lms/static/images/university/ut/ut-cover_2025x550.jpg b/lms/static/images/university/ut/ut-cover_2025x550.jpg
new file mode 100644
index 0000000000..4f3f7e8a9b
Binary files /dev/null and b/lms/static/images/university/ut/ut-cover_2025x550.jpg differ
diff --git a/lms/static/images/university/ut/ut-rollover_160x90.png b/lms/static/images/university/ut/ut-rollover_160x90.png
new file mode 100644
index 0000000000..1bc7f1621d
Binary files /dev/null and b/lms/static/images/university/ut/ut-rollover_160x90.png differ
diff --git a/lms/static/images/university/ut/ut-standalone_187x80.png b/lms/static/images/university/ut/ut-standalone_187x80.png
new file mode 100644
index 0000000000..386eb76142
Binary files /dev/null and b/lms/static/images/university/ut/ut-standalone_187x80.png differ
diff --git a/lms/static/images/university/ut/ut.png b/lms/static/images/university/ut/ut.png
new file mode 100644
index 0000000000..1659cd75f7
Binary files /dev/null and b/lms/static/images/university/ut/ut.png differ
diff --git a/lms/static/sass/_discussion.scss b/lms/static/sass/_discussion.scss
index 459e49577c..809c968fe6 100644
--- a/lms/static/sass/_discussion.scss
+++ b/lms/static/sass/_discussion.scss
@@ -1376,6 +1376,11 @@ body.discussion {
border-color: #009fe2;
}
+ &.community-ta{
+ padding-top: 38px;
+ border-color: #449944;
+ }
+
.staff-banner {
position: absolute;
top: 0;
@@ -1392,6 +1397,23 @@ body.discussion {
text-transform: uppercase;
}
+ .community-ta-banner{
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 14px;
+ padding: 1px 5px;
+ @include box-sizing(border-box);
+ border-radius: 2px 2px 0 0;
+ background: #449944;
+ font-size: 9px;
+ font-weight: 700;
+ color: #fff;
+ text-transform: uppercase;
+ }
+
+
&.loading {
height: 0;
margin: 0;
@@ -1556,11 +1578,11 @@ body.discussion {
}
}
- .moderator-label {
+ .community-ta-label{
margin-left: 2px;
padding: 0 4px;
border-radius: 2px;
- background: #55dc9e;
+ background: #449944;
font-size: 9px;
font-weight: 700;
font-style: normal;
diff --git a/lms/templates/courseware/progress.html b/lms/templates/courseware/progress.html
index 87ac06bae6..81268ff081 100644
--- a/lms/templates/courseware/progress.html
+++ b/lms/templates/courseware/progress.html
@@ -65,16 +65,19 @@ ${progress_graph.body(grade_summary, course.grade_cutoffs, "grade-detail-graph")
%endif
- %if len(section['scores']) > 0:
-
+
+ %if len(section['scores']) > 0:
${ "Problem Scores: " if section['graded'] else "Practice Scores: "}
%for score in section['scores']:
${"{0:.3n}/{1:.3n}".format(float(score.earned),float(score.possible))}
%endfor
-
- %endif
+ %else:
+ No problem scores in this section
+ %endif
+
+
%endfor
diff --git a/lms/templates/feed.rss b/lms/templates/feed.rss
index 68a9f11965..0515515ba2 100644
--- a/lms/templates/feed.rss
+++ b/lms/templates/feed.rss
@@ -6,16 +6,25 @@
##
EdX Blog
- 2012-07-16T14:08:12-07:00
+ 2012-10-14T14:08:12-07:00
- tag:www.edx.org,2012:Post/5
- 2012-09-25T14:00:00-07:00
- 2012-09-25T14:00:00-07:00
-
- Elsevier collaborates with edX
- <img src="${static.url('images/press/foundations-of-analog-109x84.jpg')}" />
- <p>Free course textbook made available to edX students</p>
+ tag:www.edx.org,2012:Post/6
+ 2012-10-15T14:00:00-07:00
+ 2012-10-14T14:00:00-07:00
+
+ The University of Texas System joins edX
+ <img src="${static.url('images/press/uts-seal_109x84.jpg')}" />
+ <p>Nine universities and six health institutions</p>
+
+
+
+
+
+
+
+
+
tag:www.edx.org,2012:Post/4
2012-09-06T14:00:00-07:00
diff --git a/lms/templates/index.html b/lms/templates/index.html
index fc6f1f336b..151525f715 100644
--- a/lms/templates/index.html
+++ b/lms/templates/index.html
@@ -65,7 +65,7 @@
diff --git a/lms/templates/licenses/serial_numbers.html b/lms/templates/licenses/serial_numbers.html
new file mode 100644
index 0000000000..18f0ff8a9b
--- /dev/null
+++ b/lms/templates/licenses/serial_numbers.html
@@ -0,0 +1,10 @@
+
diff --git a/lms/templates/static_templates/faq.html b/lms/templates/static_templates/faq.html
index e2f3f9efbe..a8f6268bd5 100644
--- a/lms/templates/static_templates/faq.html
+++ b/lms/templates/static_templates/faq.html
@@ -1,107 +1,102 @@
-<%! from django.core.urlresolvers import reverse %>
-<%namespace name='static' file='../static_content.html'/>
-
-<%inherit file="../main.html" />
-
-<%block name="title">
-
-%if user.is_authenticated():
- <%include file="../signup_modal.html" />
-%endif
+<%! from django.core.urlresolvers import reverse %>
+<%namespace name='static' file='../static_content.html'/>
+
+<%inherit file="../main.html" />
+
+<%block name="title">
+
+%if user.is_authenticated():
+ <%include file="../signup_modal.html" />
+%endif
diff --git a/lms/templates/static_templates/press_releases/UT_joins_edX.html b/lms/templates/static_templates/press_releases/UT_joins_edX.html
new file mode 100644
index 0000000000..890789efc7
--- /dev/null
+++ b/lms/templates/static_templates/press_releases/UT_joins_edX.html
@@ -0,0 +1,110 @@
+<%! from django.core.urlresolvers import reverse %>
+<%inherit file="../../main.html" />
+
+<%namespace name='static' file='../../static_content.html'/>
+
+<%block name="title">
diff --git a/lms/templates/university_profile/utx.html b/lms/templates/university_profile/utx.html
new file mode 100644
index 0000000000..b9378f6ce3
--- /dev/null
+++ b/lms/templates/university_profile/utx.html
@@ -0,0 +1,24 @@
+<%inherit file="base.html" />
+<%namespace name='static' file='../static_content.html'/>
+
+<%block name="title">
Educating students, providing care for patients, conducting groundbreaking research and serving the needs of Texans and the nation for more than 130 years, The University of Texas System is one of the largest public university systems in the United States, with nine academic universities and six health science centers. Student enrollment exceeded 215,000 in the 2011 academic year. The UT System confers more than one-third of the state’s undergraduate degrees and educates nearly three-fourths of the state’s health care professionals annually. The UT System has an annual operating budget of $13.1 billion (FY 2012) including $2.3 billion in sponsored programs funded by federal, state, local and private sources. With roughly 87,000 employees, the UT System is one of the largest employers in the state.
+%block>
+
+${parent.body()}
diff --git a/lms/urls.py b/lms/urls.py
index 78ac898216..f3867cb7af 100644
--- a/lms/urls.py
+++ b/lms/urls.py
@@ -52,6 +52,7 @@ urlpatterns = ('',
url(r'^heartbeat$', include('heartbeat.urls')),
+ url(r'^university_profile/UTx$', 'courseware.views.static_university_profile', name="static_university_profile", kwargs={'org_id':'UTx'}),
url(r'^university_profile/(?P