From 9e02b3ce34e86f2dcac8b71b269d404bc967325a Mon Sep 17 00:00:00 2001 From: Victor Shnayder Date: Mon, 8 Oct 2012 15:52:18 -0400 Subject: [PATCH 01/65] Move tests around: - move progress tests into test_progress.py - move grader tests into test_graders.py - move the capa tests into capa/tests/ --- common/lib/capa/capa/tests/__init__.py | 19 + .../test_files/choiceresponse_checkbox.xml | 0 .../tests/test_files/choiceresponse_radio.xml | 0 .../capa}/tests/test_files/coderesponse.xml | 0 .../coderesponse_externalresponseformat.xml | 0 .../test_files/formularesponse_with_hint.xml | 0 .../capa}/tests/test_files/imageresponse.xml | 0 .../tests/test_files/javascriptresponse.xml | 0 .../c9a9cd4242d84c924fe5f8324e9ae79d.js | 0 .../js/compiled/javascriptresponse.js | 0 .../test_files/js/mersenne-twister-min.js | 0 .../test_files/js/test_problem_display.coffee | 0 .../test_files/js/test_problem_display.js | 0 .../js/test_problem_generator.coffee | 0 .../test_files/js/test_problem_generator.js | 0 .../test_files/js/test_problem_grader.coffee | 0 .../test_files/js/test_problem_grader.js | 0 .../capa}/tests/test_files/js/xproblem.coffee | 0 .../capa}/tests/test_files/js/xproblem.js | 0 .../capa}/tests/test_files/multi_bare.xml | 0 .../capa}/tests/test_files/multichoice.xml | 0 .../capa}/tests/test_files/optionresponse.xml | 0 .../test_files/stringresponse_with_hint.xml | 0 .../tests/test_files/symbolicresponse.xml | 0 .../capa}/tests/test_files/truefalse.xml | 0 common/lib/capa/capa/tests/test_inputtypes.py | 384 +++++++++ common/lib/xmodule/xmodule/tests/__init__.py | 745 +----------------- .../lib/xmodule/xmodule/tests/test_graders.py | 220 ++++++ .../xmodule/xmodule/tests/test_progress.py | 138 ++++ 29 files changed, 771 insertions(+), 735 deletions(-) create mode 100644 common/lib/capa/capa/tests/__init__.py rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/choiceresponse_checkbox.xml (100%) rename common/lib/{xmodule/xmodule => 
capa/capa}/tests/test_files/choiceresponse_radio.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/coderesponse.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/coderesponse_externalresponseformat.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/formularesponse_with_hint.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/imageresponse.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/javascriptresponse.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/compiled/javascriptresponse.js (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/mersenne-twister-min.js (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/test_problem_display.coffee (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/test_problem_display.js (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/test_problem_generator.coffee (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/test_problem_generator.js (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/test_problem_grader.coffee (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/test_problem_grader.js (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/xproblem.coffee (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/js/xproblem.js (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/multi_bare.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/multichoice.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/optionresponse.xml (100%) rename common/lib/{xmodule/xmodule => 
capa/capa}/tests/test_files/stringresponse_with_hint.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/symbolicresponse.xml (100%) rename common/lib/{xmodule/xmodule => capa/capa}/tests/test_files/truefalse.xml (100%) create mode 100644 common/lib/capa/capa/tests/test_inputtypes.py create mode 100644 common/lib/xmodule/xmodule/tests/test_graders.py create mode 100644 common/lib/xmodule/xmodule/tests/test_progress.py diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py new file mode 100644 index 0000000000..ebbfe16a29 --- /dev/null +++ b/common/lib/capa/capa/tests/__init__.py @@ -0,0 +1,19 @@ +import fs +import fs.osfs +import os + +from mock import Mock + +test_system = Mock( + ajax_url='courses/course_id/modx/a_location', + track_function=Mock(), + get_module=Mock(), + render_template=Mock(), + replace_urls=Mock(), + user=Mock(), + filestore=fs.osfs.OSFS(os.path.dirname(os.path.realpath(__file__))+"/test_files"), + debug=True, + xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10}, + node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), + anonymous_student_id = 'student' +) diff --git a/common/lib/xmodule/xmodule/tests/test_files/choiceresponse_checkbox.xml b/common/lib/capa/capa/tests/test_files/choiceresponse_checkbox.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/choiceresponse_checkbox.xml rename to common/lib/capa/capa/tests/test_files/choiceresponse_checkbox.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/choiceresponse_radio.xml b/common/lib/capa/capa/tests/test_files/choiceresponse_radio.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/choiceresponse_radio.xml rename to common/lib/capa/capa/tests/test_files/choiceresponse_radio.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/coderesponse.xml 
b/common/lib/capa/capa/tests/test_files/coderesponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/coderesponse.xml rename to common/lib/capa/capa/tests/test_files/coderesponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/coderesponse_externalresponseformat.xml b/common/lib/capa/capa/tests/test_files/coderesponse_externalresponseformat.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/coderesponse_externalresponseformat.xml rename to common/lib/capa/capa/tests/test_files/coderesponse_externalresponseformat.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/formularesponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/formularesponse_with_hint.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/formularesponse_with_hint.xml rename to common/lib/capa/capa/tests/test_files/formularesponse_with_hint.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/imageresponse.xml b/common/lib/capa/capa/tests/test_files/imageresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/imageresponse.xml rename to common/lib/capa/capa/tests/test_files/imageresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/javascriptresponse.xml b/common/lib/capa/capa/tests/test_files/javascriptresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/javascriptresponse.xml rename to common/lib/capa/capa/tests/test_files/javascriptresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js b/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js rename to common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js diff --git 
a/common/lib/xmodule/xmodule/tests/test_files/js/compiled/javascriptresponse.js b/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/compiled/javascriptresponse.js rename to common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/mersenne-twister-min.js b/common/lib/capa/capa/tests/test_files/js/mersenne-twister-min.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/mersenne-twister-min.js rename to common/lib/capa/capa/tests/test_files/js/mersenne-twister-min.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_display.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.coffee rename to common/lib/capa/capa/tests/test_files/js/test_problem_display.coffee diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.js b/common/lib/capa/capa/tests/test_files/js/test_problem_display.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.js rename to common/lib/capa/capa/tests/test_files/js/test_problem_display.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.coffee rename to common/lib/capa/capa/tests/test_files/js/test_problem_generator.coffee diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.js b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.js rename to 
common/lib/capa/capa/tests/test_files/js/test_problem_generator.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.coffee rename to common/lib/capa/capa/tests/test_files/js/test_problem_grader.coffee diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.js b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.js rename to common/lib/capa/capa/tests/test_files/js/test_problem_grader.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/xproblem.coffee b/common/lib/capa/capa/tests/test_files/js/xproblem.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/xproblem.coffee rename to common/lib/capa/capa/tests/test_files/js/xproblem.coffee diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/xproblem.js b/common/lib/capa/capa/tests/test_files/js/xproblem.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/xproblem.js rename to common/lib/capa/capa/tests/test_files/js/xproblem.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/multi_bare.xml b/common/lib/capa/capa/tests/test_files/multi_bare.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/multi_bare.xml rename to common/lib/capa/capa/tests/test_files/multi_bare.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/multichoice.xml b/common/lib/capa/capa/tests/test_files/multichoice.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/multichoice.xml rename to common/lib/capa/capa/tests/test_files/multichoice.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/optionresponse.xml 
b/common/lib/capa/capa/tests/test_files/optionresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/optionresponse.xml rename to common/lib/capa/capa/tests/test_files/optionresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/stringresponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/stringresponse_with_hint.xml rename to common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/symbolicresponse.xml b/common/lib/capa/capa/tests/test_files/symbolicresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/symbolicresponse.xml rename to common/lib/capa/capa/tests/test_files/symbolicresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/truefalse.xml b/common/lib/capa/capa/tests/test_files/truefalse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/truefalse.xml rename to common/lib/capa/capa/tests/test_files/truefalse.xml diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py new file mode 100644 index 0000000000..af3d1e87a7 --- /dev/null +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -0,0 +1,384 @@ +""" +Tests of input types (and actually responsetypes too) +""" + + +from datetime import datetime +import json +from nose.plugins.skip import SkipTest +import os +import unittest + +from . 
import test_system + +import capa.capa_problem as lcp +from capa.correctmap import CorrectMap +from capa.util import convert_files_to_filenames +from capa.xqueue_interface import dateformat + +class MultiChoiceTest(unittest.TestCase): + def test_MC_grade(self): + multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" + test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_foil3'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': 'choice_foil2'} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + def test_MC_bare_grades(self): + multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" + test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': 'choice_1'} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + def test_TF_grade(self): + truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" + test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': ['choice_foil1']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + false_answers = {'1_2_1': ['choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + 
false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + +class ImageResponseTest(unittest.TestCase): + def test_ir_grade(self): + imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" + test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': '(490,11)-(556,98)', + '1_2_2': '(242,202)-(296,276)'} + test_answers = {'1_2_1': '[500,20]', + '1_2_2': '[250,300]', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + + +class SymbolicResponseTest(unittest.TestCase): + def test_sr_grade(self): + raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test + symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml" + test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', + '1_2_1_dynamath': ''' + + + + cos + + ( + θ + ) + + + + + [ + + + + 1 + + + 0 + + + + + 0 + + + 1 + + + + ] + + + + i + + + sin + + ( + θ + ) + + + + + [ + + + + 0 + + + 1 + + + + + 1 + + + 0 + + + + ] + + + +''', + } + wrong_answers = {'1_2_1': '2', + '1_2_1_dynamath': ''' + + + 2 + +''', + } + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') + + +class OptionResponseTest(unittest.TestCase): + ''' + Run this with + + python manage.py test courseware.OptionResponseTest + ''' + def test_or_grade(self): + optionresponse_file = os.path.dirname(__file__) + 
"/test_files/optionresponse.xml" + test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'True', + '1_2_2': 'False'} + test_answers = {'1_2_1': 'True', + '1_2_2': 'True', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + + +class FormulaResponseWithHintTest(unittest.TestCase): + ''' + Test Formula response problem with a hint + This problem also uses calc. + ''' + def test_or_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': '2.5*x-5.0'} + test_answers = {'1_2_1': '0.4*x-5.0'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + cmap = test_lcp.grade_answers(test_answers) + self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') + self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) + + +class StringResponseWithHintTest(unittest.TestCase): + ''' + Test String response problem with a hint + ''' + def test_or_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'Michigan'} + test_answers = {'1_2_1': 'Minnesota'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + cmap = test_lcp.grade_answers(test_answers) + self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') + self.assertTrue('St. 
Paul' in cmap.get_hint('1_2_1')) + + +class CodeResponseTest(unittest.TestCase): + ''' + Test CodeResponse + TODO: Add tests for external grader messages + ''' + @staticmethod + def make_queuestate(key, time): + timestr = datetime.strftime(time, dateformat) + return {'key': key, 'time': timestr} + + def test_is_queued(self): + """ + Simple test of whether LoncapaProblem knows when it's been queued + """ + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.is_queued(), False) + + # Now we queue the LCP + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) + cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.is_queued(), True) + + + def test_update_score(self): + ''' + Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state + old_cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now()) + old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + + # Message format common to external graders + grader_msg = 'MESSAGE' # Must be valid XML + correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg}) + incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg}) + + xserver_msgs = {'correct': correct_score_msg, + 'incorrect': incorrect_score_msg,} + + # Incorrect queuekey, state should not be updated + for correctness in ['correct', 'incorrect']: + test_lcp.correct_map = CorrectMap() + test_lcp.correct_map.update(old_cmap) # Deep copy + + test_lcp.update_score(xserver_msgs[correctness], queuekey=0) + self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison + + for answer_id in answer_ids: + self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered + + # Correct queuekey, state should be updated + for correctness in ['correct', 'incorrect']: + for i, answer_id in enumerate(answer_ids): + test_lcp.correct_map = CorrectMap() + test_lcp.correct_map.update(old_cmap) + + new_cmap = CorrectMap() + new_cmap.update(old_cmap) + npoints = 1 if correctness=='correct' else 0 + new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) + + test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) + self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) + + for j, test_id in enumerate(answer_ids): + if j == i: + self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered + else: + self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered + + + def 
test_recentmost_queuetime(self): + ''' + Test whether the LoncapaProblem knows about the time of queue requests + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.get_recentmost_queuetime(), None) + + # CodeResponse requires internal CorrectMap state. Build it now in the queued state + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + latest_timestamp = datetime.now() + queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp) + cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) + test_lcp.correct_map.update(cmap) + + # Queue state only tracks up to second + latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) + + self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) + + def test_convert_files_to_filenames(self): + ''' + Test whether file objects are converted to filenames without altering other structures + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as fp: + answers_with_file = {'1_2_1': 'String-based answer', + '1_3_1': ['answer1', 'answer2', 'answer3'], + '1_4_1': [fp, fp]} + answers_converted = convert_files_to_filenames(answers_with_file) + self.assertEquals(answers_converted['1_2_1'], 'String-based answer') + self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) + self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) + + +class 
ChoiceResponseTest(unittest.TestCase): + + def test_cr_rb_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2', + '1_3_1': ['choice_2', 'choice_3']} + test_answers = {'1_2_1': 'choice_2', + '1_3_1': 'choice_2', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + + def test_cr_cb_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2', + '1_3_1': ['choice_2', 'choice_3'], + '1_4_1': ['choice_2', 'choice_3']} + test_answers = {'1_2_1': 'choice_2', + '1_3_1': 'choice_2', + '1_4_1': ['choice_2', 'choice_3'], + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') + +class JavascriptResponseTest(unittest.TestCase): + + def test_jr_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" + coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" + os.system("coffee -c %s" % (coffee_file_path)) + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': json.dumps({0: 4})} + incorrect_answers = {'1_2_1': json.dumps({0: 5})} + + self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + diff --git 
a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index 654b6beb15..ed64c45118 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -1,31 +1,22 @@ -# -# unittests for xmodule (and capa) -# -# Note: run this using a like like this: -# -# django-admin.py test --settings=lms.envs.test_ike --pythonpath=. common/lib/xmodule +""" +unittests for xmodule + +Run like this: + + rake test_common/lib/xmodule + +""" import unittest import os import fs import fs.osfs -import json -import json import numpy -import xmodule import capa.calc as calc -import capa.capa_problem as lcp -from capa.correctmap import CorrectMap -from capa.util import convert_files_to_filenames -from capa.xqueue_interface import dateformat -from datetime import datetime -from xmodule import graders, x_module +import xmodule from xmodule.x_module import ModuleSystem -from xmodule.graders import Score, aggregate_scores -from xmodule.progress import Progress -from nose.plugins.skip import SkipTest from mock import Mock i4xs = ModuleSystem( @@ -35,7 +26,7 @@ i4xs = ModuleSystem( render_template=Mock(), replace_urls=Mock(), user=Mock(), - filestore=fs.osfs.OSFS(os.path.dirname(os.path.realpath(__file__))+"/test_files"), + filestore=Mock(), debug=True, xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10}, node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), @@ -94,719 +85,3 @@ class ModelsTest(unittest.TestCase): exception_happened = True self.assertTrue(exception_happened) -#----------------------------------------------------------------------------- -# tests of capa_problem inputtypes - - -class MultiChoiceTest(unittest.TestCase): - def test_MC_grade(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 
'choice_foil3'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_foil2'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - def test_MC_bare_grades(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'choice_2'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_1'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - def test_TF_grade(self): - truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" - test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': ['choice_foil1']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - -class ImageResponseTest(unittest.TestCase): - def test_ir_grade(self): - imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" - test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': '(490,11)-(556,98)', - '1_2_2': 
'(242,202)-(296,276)'} - test_answers = {'1_2_1': '[500,20]', - '1_2_2': '[250,300]', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') - - -class SymbolicResponseTest(unittest.TestCase): - def test_sr_grade(self): - raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test - symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml" - test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', - '1_2_1_dynamath': ''' - - - - cos - - ( - θ - ) - - - - - [ - - - - 1 - - - 0 - - - - - 0 - - - 1 - - - - ] - - + - i - - - sin - - ( - θ - ) - - - - - [ - - - - 0 - - - 1 - - - - - 1 - - - 0 - - - - ] - - - -''', - } - wrong_answers = {'1_2_1': '2', - '1_2_1_dynamath': ''' - - - 2 - -''', - } - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') - - -class OptionResponseTest(unittest.TestCase): - ''' - Run this with - - python manage.py test courseware.OptionResponseTest - ''' - def test_or_grade(self): - optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml" - test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'True', - '1_2_2': 'False'} - test_answers = {'1_2_1': 'True', - '1_2_2': 'True', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') - - -class FormulaResponseWithHintTest(unittest.TestCase): - ''' - Test Formula response problem 
with a hint - This problem also uses calc. - ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': '2.5*x-5.0'} - test_answers = {'1_2_1': '0.4*x-5.0'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) - - -class StringResponseWithHintTest(unittest.TestCase): - ''' - Test String response problem with a hint - ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'Michigan'} - test_answers = {'1_2_1': 'Minnesota'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('St. Paul' in cmap.get_hint('1_2_1')) - - -class CodeResponseTest(unittest.TestCase): - ''' - Test CodeResponse - TODO: Add tests for external grader messages - ''' - @staticmethod - def make_queuestate(key, time): - timestr = datetime.strftime(time, dateformat) - return {'key': key, 'time': timestr} - - def test_is_queued(self): - ''' - Simple test of whether LoncapaProblem knows when it's been queued - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. 
Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.is_queued(), False) - - # Now we queue the LCP - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) - cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.is_queued(), True) - - - def test_update_score(self): - ''' - Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state - old_cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now()) - old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - - # Message format common to external graders - grader_msg = 'MESSAGE' # Must be valid XML - correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg}) - incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg}) - - xserver_msgs = {'correct': correct_score_msg, - 'incorrect': incorrect_score_msg,} - - # Incorrect queuekey, state should not be updated - for correctness in ['correct', 'incorrect']: - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) # Deep copy - - test_lcp.update_score(xserver_msgs[correctness], queuekey=0) - self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison - - for answer_id in answer_ids: - self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered - - # Correct queuekey, state should be updated - for correctness in ['correct', 'incorrect']: - for i, answer_id in enumerate(answer_ids): - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) - - new_cmap = CorrectMap() - new_cmap.update(old_cmap) - npoints = 1 if correctness=='correct' else 0 - new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) - - test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) - self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) - - for j, test_id in enumerate(answer_ids): - if j == i: - self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered - else: - self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered - - - def 
test_recentmost_queuetime(self): - ''' - Test whether the LoncapaProblem knows about the time of queue requests - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.get_recentmost_queuetime(), None) - - # CodeResponse requires internal CorrectMap state. Build it now in the queued state - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - latest_timestamp = datetime.now() - queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp) - cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) - test_lcp.correct_map.update(cmap) - - # Queue state only tracks up to second - latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) - - self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) - - def test_convert_files_to_filenames(self): - ''' - Test whether file objects are converted to filenames without altering other structures - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as fp: - answers_with_file = {'1_2_1': 'String-based answer', - '1_3_1': ['answer1', 'answer2', 'answer3'], - '1_4_1': [fp, fp]} - answers_converted = convert_files_to_filenames(answers_with_file) - self.assertEquals(answers_converted['1_2_1'], 'String-based answer') - self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) - self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) - - -class 
ChoiceResponseTest(unittest.TestCase): - - def test_cr_rb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - - def test_cr_cb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3'], - '1_4_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - '1_4_1': ['choice_2', 'choice_3'], - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') - -class JavascriptResponseTest(unittest.TestCase): - - def test_jr_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" - coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" - os.system("coffee -c %s" % (coffee_file_path)) - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': json.dumps({0: 4})} - incorrect_answers = {'1_2_1': json.dumps({0: 5})} - - self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - 
-#----------------------------------------------------------------------------- -# Grading tests - - -class GradesheetTest(unittest.TestCase): - - def test_weighted_grading(self): - scores = [] - Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible) - - all, graded = aggregate_scores(scores) - self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary")) - self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) - - scores.append(Score(earned=0, possible=5, graded=False, section="summary")) - all, graded = aggregate_scores(scores) - self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary")) - self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) - - scores.append(Score(earned=3, possible=5, graded=True, section="summary")) - all, graded = aggregate_scores(scores) - self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary")) - self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary")) - - scores.append(Score(earned=2, possible=5, graded=True, section="summary")) - all, graded = aggregate_scores(scores) - self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary")) - self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary")) - - -class GraderTest(unittest.TestCase): - - empty_gradesheet = { - } - - incomplete_gradesheet = { - 'Homework': [], - 'Lab': [], - 'Midterm': [], - } - - test_gradesheet = { - 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'), - Score(earned=16, possible=16.0, graded=True, section='hw2')], - #The dropped scores should be from the assignments that don't exist yet - - 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped - Score(earned=1, possible=1.0, graded=True, section='lab2'), - Score(earned=1, possible=1.0, graded=True, 
section='lab3'), - Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped - Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped - Score(earned=6, possible=7.0, graded=True, section='lab6'), - Score(earned=5, possible=6.0, graded=True, section='lab7')], - - 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ], - } - - def test_SingleSectionGrader(self): - midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") - lab4Grader = graders.SingleSectionGrader("Lab", "lab4") - badLabGrader = graders.SingleSectionGrader("Lab", "lab42") - - for graded in [midtermGrader.grade(self.empty_gradesheet), - midtermGrader.grade(self.incomplete_gradesheet), - badLabGrader.grade(self.test_gradesheet)]: - self.assertEqual(len(graded['section_breakdown']), 1) - self.assertEqual(graded['percent'], 0.0) - - graded = midtermGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.505) - self.assertEqual(len(graded['section_breakdown']), 1) - - graded = lab4Grader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.2) - self.assertEqual(len(graded['section_breakdown']), 1) - - def test_AssignmentFormatGrader(self): - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0) - #Even though the minimum number is 3, this should grade correctly when 7 assignments are found - overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2) - labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) - - #Test the grading of an empty gradesheet - for graded in [homeworkGrader.grade(self.empty_gradesheet), - noDropGrader.grade(self.empty_gradesheet), - homeworkGrader.grade(self.incomplete_gradesheet), - noDropGrader.grade(self.incomplete_gradesheet)]: - self.assertAlmostEqual(graded['percent'], 0.0) - #Make sure the breakdown includes 12 sections, plus one summary - 
self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - graded = homeworkGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments - self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - graded = noDropGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments - self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - graded = overflowGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments - self.assertEqual(len(graded['section_breakdown']), 7 + 1) - - graded = labGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.9226190476190477) - self.assertEqual(len(graded['section_breakdown']), 7 + 1) - - def test_WeightedSubsectionsGrader(self): - #First, a few sub graders - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) - midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") - - weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25), (labGrader, labGrader.category, 0.25), - (midtermGrader, midtermGrader.category, 0.5)]) - - overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5), (labGrader, labGrader.category, 0.5), - (midtermGrader, midtermGrader.category, 0.5)]) - - #The midterm should have all weight on this one - zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0), - (midtermGrader, midtermGrader.category, 0.5)]) - - #This should always have a final percent of zero - allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0), - (midtermGrader, midtermGrader.category, 
0.0)]) - - emptyGrader = graders.WeightedSubsectionsGrader([]) - - graded = weightedGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.5106547619047619) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = overOneWeightsGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.7688095238095238) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = zeroWeightsGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.2525) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = allZeroWeightsGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - for graded in [weightedGrader.grade(self.empty_gradesheet), - weightedGrader.grade(self.incomplete_gradesheet), - zeroWeightsGrader.grade(self.empty_gradesheet), - allZeroWeightsGrader.grade(self.empty_gradesheet)]: - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = emptyGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), 0) - self.assertEqual(len(graded['grade_breakdown']), 0) - - def test_graderFromConf(self): - - #Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test - #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs. 
- - weightedGrader = graders.grader_from_conf([ - { - 'type': "Homework", - 'min_count': 12, - 'drop_count': 2, - 'short_label': "HW", - 'weight': 0.25, - }, - { - 'type': "Lab", - 'min_count': 7, - 'drop_count': 3, - 'category': "Labs", - 'weight': 0.25 - }, - { - 'type': "Midterm", - 'name': "Midterm Exam", - 'short_label': "Midterm", - 'weight': 0.5, - }, - ]) - - emptyGrader = graders.grader_from_conf([]) - - graded = weightedGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.5106547619047619) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = emptyGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), 0) - self.assertEqual(len(graded['grade_breakdown']), 0) - - #Test that graders can also be used instead of lists of dictionaries - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - homeworkGrader2 = graders.grader_from_conf(homeworkGrader) - - graded = homeworkGrader2.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.11) - self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - #TODO: How do we test failure cases? The parser only logs an error when it can't parse something. Maybe it should throw exceptions? - -# -------------------------------------------------------------------------- -# Module progress tests - - -class ProgressTest(unittest.TestCase): - ''' Test that basic Progress objects work. A Progress represents a - fraction between 0 and 1. 
- ''' - not_started = Progress(0, 17) - part_done = Progress(2, 6) - half_done = Progress(3, 6) - also_half_done = Progress(1, 2) - done = Progress(7, 7) - - def test_create_object(self): - # These should work: - p = Progress(0, 2) - p = Progress(1, 2) - p = Progress(2, 2) - - p = Progress(2.5, 5.0) - p = Progress(3.7, 12.3333) - - # These shouldn't - self.assertRaises(ValueError, Progress, 0, 0) - self.assertRaises(ValueError, Progress, 2, 0) - self.assertRaises(ValueError, Progress, 1, -2) - - self.assertRaises(TypeError, Progress, 0, "all") - # check complex numbers just for the heck of it :) - self.assertRaises(TypeError, Progress, 2j, 3) - - def test_clamp(self): - self.assertEqual((2, 2), Progress(3, 2).frac()) - self.assertEqual((0, 2), Progress(-2, 2).frac()) - - def test_frac(self): - p = Progress(1, 2) - (a, b) = p.frac() - self.assertEqual(a, 1) - self.assertEqual(b, 2) - - def test_percent(self): - self.assertEqual(self.not_started.percent(), 0) - self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333) - self.assertEqual(self.half_done.percent(), 50) - self.assertEqual(self.done.percent(), 100) - - self.assertEqual(self.half_done.percent(), self.also_half_done.percent()) - - def test_started(self): - self.assertFalse(self.not_started.started()) - - self.assertTrue(self.part_done.started()) - self.assertTrue(self.half_done.started()) - self.assertTrue(self.done.started()) - - def test_inprogress(self): - # only true if working on it - self.assertFalse(self.done.inprogress()) - self.assertFalse(self.not_started.inprogress()) - - self.assertTrue(self.part_done.inprogress()) - self.assertTrue(self.half_done.inprogress()) - - def test_done(self): - self.assertTrue(self.done.done()) - self.assertFalse(self.half_done.done()) - self.assertFalse(self.not_started.done()) - - def test_str(self): - self.assertEqual(str(self.not_started), "0/17") - self.assertEqual(str(self.part_done), "2/6") - self.assertEqual(str(self.done), "7/7") - - def 
test_ternary_str(self): - self.assertEqual(self.not_started.ternary_str(), "none") - self.assertEqual(self.half_done.ternary_str(), "in_progress") - self.assertEqual(self.done.ternary_str(), "done") - - def test_to_js_status(self): - '''Test the Progress.to_js_status_str() method''' - - self.assertEqual(Progress.to_js_status_str(self.not_started), "none") - self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress") - self.assertEqual(Progress.to_js_status_str(self.done), "done") - self.assertEqual(Progress.to_js_status_str(None), "NA") - - def test_to_js_detail_str(self): - '''Test the Progress.to_js_detail_str() method''' - f = Progress.to_js_detail_str - for p in (self.not_started, self.half_done, self.done): - self.assertEqual(f(p), str(p)) - # But None should be encoded as NA - self.assertEqual(f(None), "NA") - - def test_add(self): - '''Test the Progress.add_counts() method''' - p = Progress(0, 2) - p2 = Progress(1, 3) - p3 = Progress(2, 5) - pNone = None - add = lambda a, b: Progress.add_counts(a, b).frac() - - self.assertEqual(add(p, p), (0, 4)) - self.assertEqual(add(p, p2), (1, 5)) - self.assertEqual(add(p2, p3), (3, 8)) - - self.assertEqual(add(p2, pNone), p2.frac()) - self.assertEqual(add(pNone, p2), p2.frac()) - - def test_equality(self): - '''Test that comparing Progress objects for equality - works correctly.''' - p = Progress(1, 2) - p2 = Progress(2, 4) - p3 = Progress(1, 2) - self.assertTrue(p == p3) - self.assertFalse(p == p2) - - # Check != while we're at it - self.assertTrue(p != p2) - self.assertFalse(p != p3) - - -class ModuleProgressTest(unittest.TestCase): - ''' Test that get_progress() does the right thing for the different modules - ''' - def test_xmodule_default(self): - '''Make sure default get_progress exists, returns None''' - xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {}) - p = xm.get_progress() - self.assertEqual(p, None) diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py 
b/common/lib/xmodule/xmodule/tests/test_graders.py new file mode 100644 index 0000000000..fa0e94d2d5 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_graders.py @@ -0,0 +1,220 @@ +"""Grading tests""" +import unittest + +from xmodule import graders +from xmodule.graders import Score, aggregate_scores + +class GradesheetTest(unittest.TestCase): + + def test_weighted_grading(self): + scores = [] + Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible) + + all, graded = aggregate_scores(scores) + self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary")) + self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) + + scores.append(Score(earned=0, possible=5, graded=False, section="summary")) + all, graded = aggregate_scores(scores) + self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary")) + self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) + + scores.append(Score(earned=3, possible=5, graded=True, section="summary")) + all, graded = aggregate_scores(scores) + self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary")) + self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary")) + + scores.append(Score(earned=2, possible=5, graded=True, section="summary")) + all, graded = aggregate_scores(scores) + self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary")) + self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary")) + + +class GraderTest(unittest.TestCase): + + empty_gradesheet = { + } + + incomplete_gradesheet = { + 'Homework': [], + 'Lab': [], + 'Midterm': [], + } + + test_gradesheet = { + 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'), + Score(earned=16, possible=16.0, graded=True, section='hw2')], + #The dropped scores should be from the assignments 
that don't exist yet + + 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped + Score(earned=1, possible=1.0, graded=True, section='lab2'), + Score(earned=1, possible=1.0, graded=True, section='lab3'), + Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped + Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped + Score(earned=6, possible=7.0, graded=True, section='lab6'), + Score(earned=5, possible=6.0, graded=True, section='lab7')], + + 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ], + } + + def test_SingleSectionGrader(self): + midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") + lab4Grader = graders.SingleSectionGrader("Lab", "lab4") + badLabGrader = graders.SingleSectionGrader("Lab", "lab42") + + for graded in [midtermGrader.grade(self.empty_gradesheet), + midtermGrader.grade(self.incomplete_gradesheet), + badLabGrader.grade(self.test_gradesheet)]: + self.assertEqual(len(graded['section_breakdown']), 1) + self.assertEqual(graded['percent'], 0.0) + + graded = midtermGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.505) + self.assertEqual(len(graded['section_breakdown']), 1) + + graded = lab4Grader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.2) + self.assertEqual(len(graded['section_breakdown']), 1) + + def test_AssignmentFormatGrader(self): + homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) + noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0) + #Even though the minimum number is 3, this should grade correctly when 7 assignments are found + overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2) + labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) + + #Test the grading of an empty gradesheet + for graded in [homeworkGrader.grade(self.empty_gradesheet), + noDropGrader.grade(self.empty_gradesheet), + 
homeworkGrader.grade(self.incomplete_gradesheet), + noDropGrader.grade(self.incomplete_gradesheet)]: + self.assertAlmostEqual(graded['percent'], 0.0) + #Make sure the breakdown includes 12 sections, plus one summary + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + graded = homeworkGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + graded = noDropGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + graded = overflowGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments + self.assertEqual(len(graded['section_breakdown']), 7 + 1) + + graded = labGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.9226190476190477) + self.assertEqual(len(graded['section_breakdown']), 7 + 1) + + def test_WeightedSubsectionsGrader(self): + #First, a few sub graders + homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) + labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) + midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") + + weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25), + (labGrader, labGrader.category, 0.25), + (midtermGrader, midtermGrader.category, 0.5)]) + + overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5), + (labGrader, labGrader.category, 0.5), + (midtermGrader, midtermGrader.category, 0.5)]) + + #The midterm should have all weight on this one + zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), + (labGrader, labGrader.category, 0.0), + (midtermGrader, midtermGrader.category, 0.5)]) + + #This should 
always have a final percent of zero + allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), + (labGrader, labGrader.category, 0.0), + (midtermGrader, midtermGrader.category, 0.0)]) + + emptyGrader = graders.WeightedSubsectionsGrader([]) + + graded = weightedGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.5106547619047619) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = overOneWeightsGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.7688095238095238) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = zeroWeightsGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.2525) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = allZeroWeightsGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + for graded in [weightedGrader.grade(self.empty_gradesheet), + weightedGrader.grade(self.incomplete_gradesheet), + zeroWeightsGrader.grade(self.empty_gradesheet), + allZeroWeightsGrader.grade(self.empty_gradesheet)]: + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = emptyGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), 0) + self.assertEqual(len(graded['grade_breakdown']), 0) + + def test_graderFromConf(self): + + #Confs always produce a graders.WeightedSubsectionsGrader, so we 
test this by repeating the test + #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs. + + weightedGrader = graders.grader_from_conf([ + { + 'type': "Homework", + 'min_count': 12, + 'drop_count': 2, + 'short_label': "HW", + 'weight': 0.25, + }, + { + 'type': "Lab", + 'min_count': 7, + 'drop_count': 3, + 'category': "Labs", + 'weight': 0.25 + }, + { + 'type': "Midterm", + 'name': "Midterm Exam", + 'short_label': "Midterm", + 'weight': 0.5, + }, + ]) + + emptyGrader = graders.grader_from_conf([]) + + graded = weightedGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.5106547619047619) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = emptyGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), 0) + self.assertEqual(len(graded['grade_breakdown']), 0) + + #Test that graders can also be used instead of lists of dictionaries + homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) + homeworkGrader2 = graders.grader_from_conf(homeworkGrader) + + graded = homeworkGrader2.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.11) + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + #TODO: How do we test failure cases? The parser only logs an error when + #it can't parse something. Maybe it should throw exceptions? + diff --git a/common/lib/xmodule/xmodule/tests/test_progress.py b/common/lib/xmodule/xmodule/tests/test_progress.py new file mode 100644 index 0000000000..94a0a19d7c --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_progress.py @@ -0,0 +1,138 @@ +"""Module progress tests""" + +import unittest + +from xmodule.progress import Progress +from xmodule import x_module + +from . import i4xs + +class ProgressTest(unittest.TestCase): + ''' Test that basic Progress objects work. 
A Progress represents a + fraction between 0 and 1. + ''' + not_started = Progress(0, 17) + part_done = Progress(2, 6) + half_done = Progress(3, 6) + also_half_done = Progress(1, 2) + done = Progress(7, 7) + + def test_create_object(self): + # These should work: + p = Progress(0, 2) + p = Progress(1, 2) + p = Progress(2, 2) + + p = Progress(2.5, 5.0) + p = Progress(3.7, 12.3333) + + # These shouldn't + self.assertRaises(ValueError, Progress, 0, 0) + self.assertRaises(ValueError, Progress, 2, 0) + self.assertRaises(ValueError, Progress, 1, -2) + + self.assertRaises(TypeError, Progress, 0, "all") + # check complex numbers just for the heck of it :) + self.assertRaises(TypeError, Progress, 2j, 3) + + def test_clamp(self): + self.assertEqual((2, 2), Progress(3, 2).frac()) + self.assertEqual((0, 2), Progress(-2, 2).frac()) + + def test_frac(self): + p = Progress(1, 2) + (a, b) = p.frac() + self.assertEqual(a, 1) + self.assertEqual(b, 2) + + def test_percent(self): + self.assertEqual(self.not_started.percent(), 0) + self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333) + self.assertEqual(self.half_done.percent(), 50) + self.assertEqual(self.done.percent(), 100) + + self.assertEqual(self.half_done.percent(), self.also_half_done.percent()) + + def test_started(self): + self.assertFalse(self.not_started.started()) + + self.assertTrue(self.part_done.started()) + self.assertTrue(self.half_done.started()) + self.assertTrue(self.done.started()) + + def test_inprogress(self): + # only true if working on it + self.assertFalse(self.done.inprogress()) + self.assertFalse(self.not_started.inprogress()) + + self.assertTrue(self.part_done.inprogress()) + self.assertTrue(self.half_done.inprogress()) + + def test_done(self): + self.assertTrue(self.done.done()) + self.assertFalse(self.half_done.done()) + self.assertFalse(self.not_started.done()) + + def test_str(self): + self.assertEqual(str(self.not_started), "0/17") + self.assertEqual(str(self.part_done), "2/6") + 
self.assertEqual(str(self.done), "7/7") + + def test_ternary_str(self): + self.assertEqual(self.not_started.ternary_str(), "none") + self.assertEqual(self.half_done.ternary_str(), "in_progress") + self.assertEqual(self.done.ternary_str(), "done") + + def test_to_js_status(self): + '''Test the Progress.to_js_status_str() method''' + + self.assertEqual(Progress.to_js_status_str(self.not_started), "none") + self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress") + self.assertEqual(Progress.to_js_status_str(self.done), "done") + self.assertEqual(Progress.to_js_status_str(None), "NA") + + def test_to_js_detail_str(self): + '''Test the Progress.to_js_detail_str() method''' + f = Progress.to_js_detail_str + for p in (self.not_started, self.half_done, self.done): + self.assertEqual(f(p), str(p)) + # But None should be encoded as NA + self.assertEqual(f(None), "NA") + + def test_add(self): + '''Test the Progress.add_counts() method''' + p = Progress(0, 2) + p2 = Progress(1, 3) + p3 = Progress(2, 5) + pNone = None + add = lambda a, b: Progress.add_counts(a, b).frac() + + self.assertEqual(add(p, p), (0, 4)) + self.assertEqual(add(p, p2), (1, 5)) + self.assertEqual(add(p2, p3), (3, 8)) + + self.assertEqual(add(p2, pNone), p2.frac()) + self.assertEqual(add(pNone, p2), p2.frac()) + + def test_equality(self): + '''Test that comparing Progress objects for equality + works correctly.''' + p = Progress(1, 2) + p2 = Progress(2, 4) + p3 = Progress(1, 2) + self.assertTrue(p == p3) + self.assertFalse(p == p2) + + # Check != while we're at it + self.assertTrue(p != p2) + self.assertFalse(p != p3) + + +class ModuleProgressTest(unittest.TestCase): + ''' Test that get_progress() does the right thing for the different modules + ''' + def test_xmodule_default(self): + '''Make sure default get_progress exists, returns None''' + xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {}) + p = xm.get_progress() + self.assertEqual(p, None) From 
66604fd0b7fe4c011439129a23973c8701eb9883 Mon Sep 17 00:00:00 2001 From: Victor Shnayder Date: Mon, 8 Oct 2012 16:44:36 -0400 Subject: [PATCH 02/65] add nltk to requirements - needed for chemical equation parsing, which is about to appear in capa --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index c3322c5b7c..379d3c08d2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -49,3 +49,4 @@ networkx pygraphviz -r repo-requirements.txt pil +nltk From 758e76dc14eed847c983a6ff4ce595e0a37abd22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80?= Date: Tue, 9 Oct 2012 15:08:43 +0300 Subject: [PATCH 03/65] added js to input for cr --- common/lib/capa/capa/responsetypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index ab5eaf950c..462ce1dbb9 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -856,7 +856,7 @@ def sympy_check2(): """}] response_tag = 'customresponse' - allowed_inputfields = ['textline', 'textbox'] + allowed_inputfields = ['textline', 'textbox', 'javascriptinput'] def setup_response(self): xml = self.xml From 708c11a1ea66dd959497d1670142b6f00f1bcb6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80?= Date: Tue, 9 Oct 2012 19:13:54 +0300 Subject: [PATCH 04/65] added input type --- common/lib/capa/capa/inputtypes.py | 17 +++++++++++++++++ .../capa/capa/templates/crystallography.html | 11 +++++++++++ 2 files changed, 28 insertions(+) create mode 100644 common/lib/capa/capa/templates/crystallography.html diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 466adcbf01..41b98eacae 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -624,3 +624,20 @@ def imageinput(element, value, status, 
render_template, msg=''): } html = render_template("imageinput.html", context) return etree.XML(html) + + +#----------------------------------------------------------------------------- +@register_render_function +def crystallography(element, value, status, render_template, msg=''): + eid = element.get('id') + height = element.get('height') + width = element.get('width') + display_file = element.get('display_file') + context = { + 'id': eid, + 'width': width, + 'height': height, + 'display_file': display_file, + } + html = render_template("crystallography.html", context) + return etree.XML(html) \ No newline at end of file diff --git a/common/lib/capa/capa/templates/crystallography.html b/common/lib/capa/capa/templates/crystallography.html new file mode 100644 index 0000000000..104092a2ca --- /dev/null +++ b/common/lib/capa/capa/templates/crystallography.html @@ -0,0 +1,11 @@ + + +
+
+
+
+
+ + + +
From 386480905fe4d2d347db8685bd2e44a8817e98e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Tue, 9 Oct 2012 17:48:46 -0400 Subject: [PATCH 05/65] Fix openid provider URL matching for usernames with non-word characters --- lms/urls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lms/urls.py b/lms/urls.py index 662e41235e..862621b7e1 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -244,7 +244,7 @@ if settings.MITX_FEATURES.get('AUTH_USE_OPENID'): if settings.MITX_FEATURES.get('AUTH_USE_OPENID_PROVIDER'): urlpatterns += ( url(r'^openid/provider/login/$', 'external_auth.views.provider_login', name='openid-provider-login'), - url(r'^openid/provider/login/(?:[\w%\. ]+)$', 'external_auth.views.provider_identity', name='openid-provider-login-identity'), + url(r'^openid/provider/login/(?:.+)$', 'external_auth.views.provider_identity', name='openid-provider-login-identity'), url(r'^openid/provider/identity/$', 'external_auth.views.provider_identity', name='openid-provider-identity'), url(r'^openid/provider/xrds/$', 'external_auth.views.provider_xrds', name='openid-provider-xrds') ) From c4359b6f247060b90c9b974a6f8d14bf6b957bf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80?= Date: Wed, 10 Oct 2012 19:35:09 +0300 Subject: [PATCH 06/65] added crystresponse --- common/lib/capa/capa/responsetypes.py | 56 ++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index 462ce1dbb9..e1ca420d9b 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -856,7 +856,7 @@ def sympy_check2(): """}] response_tag = 'customresponse' - allowed_inputfields = ['textline', 'textbox', 'javascriptinput'] + allowed_inputfields = ['textline', 'textbox', 'crystallography'] def setup_response(self): xml = self.xml @@ -1762,6 +1762,57 @@ class 
ImageResponse(LoncapaResponse): def get_answers(self): return dict([(ie.get('id'), ie.get('rectangle')) for ie in self.ielements]) + +#----------------------------------------------------------------------------- + + +class CrystResponse(LoncapaResponse): + + response_tag = 'crystresponse' + hint_tag = 'crystresponse' + allowed_inputfields = ['crystallography'] + required_attributes = ['answer'] + max_inputfields = 1 + + def setup_response(self): + xml = self.xml + context = self.context + self.correct_answer = contextualize_text(xml.get('answer'), context) + # try: + # self.tolerance_xml = xml.xpath('//*[@id=$id]//responseparam[@type="tolerance"]/@default', + # id=xml.get('id'))[0] + # self.tolerance = contextualize_text(self.tolerance_xml, context) + # except Exception: + # self.tolerance = 0 + try: + self.answer_id = xml.xpath('//*[@id=$id]//crystallography/@id', + id=xml.get('id'))[0] + except Exception: + self.answer_id = None + + def get_score(self, student_answers): + '''Grade a numeric response ''' + student_answer = student_answers[self.answer_id] + try: + correct = compare_with_tolerance(evaluator(dict(), dict(), student_answer), + complex(self.correct_answer), self.tolerance) + # We should catch this explicitly. 
+ # I think this is just pyparsing.ParseException, calc.UndefinedVariable: + # But we'd need to confirm + except: + raise StudentInputError("Invalid input: could not interpret '%s' as a number" % + cgi.escape(student_answer)) + + if correct: + return CorrectMap(self.answer_id, 'correct') + else: + return CorrectMap(self.answer_id, 'incorrect') + + # TODO: add check_hint_condition(self, hxml_set, student_answers) + + def get_answers(self): + return {self.answer_id: self.correct_answer} + #----------------------------------------------------------------------------- # TEMPORARY: List of all response subclasses # FIXME: To be replaced by auto-registration @@ -1779,4 +1830,5 @@ __all__ = [CodeResponse, ChoiceResponse, MultipleChoiceResponse, TrueFalseResponse, - JavascriptResponse] + JavascriptResponse, + CrystResponse] From 42bd9e8b9e02acc97c56549e41563b12bcf33444 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80?= Date: Wed, 10 Oct 2012 19:36:03 +0300 Subject: [PATCH 07/65] added test information --- common/lib/capa/capa/templates/crystallography.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/lib/capa/capa/templates/crystallography.html b/common/lib/capa/capa/templates/crystallography.html index 104092a2ca..f621f414d1 100644 --- a/common/lib/capa/capa/templates/crystallography.html +++ b/common/lib/capa/capa/templates/crystallography.html @@ -6,6 +6,6 @@
- - + + From f374a33606f90b7e0b5c1c5371478a2beb289861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80?= Date: Wed, 10 Oct 2012 20:54:17 +0300 Subject: [PATCH 08/65] added not hidden field --- common/lib/capa/capa/templates/crystallography.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/common/lib/capa/capa/templates/crystallography.html b/common/lib/capa/capa/templates/crystallography.html index f621f414d1..76478ac8c4 100644 --- a/common/lib/capa/capa/templates/crystallography.html +++ b/common/lib/capa/capa/templates/crystallography.html @@ -7,5 +7,6 @@
- + + From e256b2390bf4b251b559ff27751a28336b1fe96f Mon Sep 17 00:00:00 2001 From: Bridger Maxwell Date: Wed, 10 Oct 2012 14:53:39 -0400 Subject: [PATCH 09/65] Added an indicator on Progress page when there are no problem scores in a section. --- lms/templates/courseware/progress.html | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lms/templates/courseware/progress.html b/lms/templates/courseware/progress.html index 87ac06bae6..81268ff081 100644 --- a/lms/templates/courseware/progress.html +++ b/lms/templates/courseware/progress.html @@ -65,16 +65,19 @@ ${progress_graph.body(grade_summary, course.grade_cutoffs, "grade-detail-graph") %endif

- %if len(section['scores']) > 0: -
+
+ %if len(section['scores']) > 0:

${ "Problem Scores: " if section['graded'] else "Practice Scores: "}

    %for score in section['scores']:
  1. ${"{0:.3n}/{1:.3n}".format(float(score.earned),float(score.possible))}
  2. %endfor
-
- %endif + %else: +

No problem scores in this section

+ %endif +
+ %endfor From c30f57022b3066185cd57f8e3041912f4d6f9212 Mon Sep 17 00:00:00 2001 From: Victor Shnayder Date: Mon, 8 Oct 2012 16:44:54 -0400 Subject: [PATCH 10/65] Move more tests around, start prep for inputtype refactor --- common/lib/capa/capa/inputtypes.py | 8 +- common/lib/capa/capa/tests/__init__.py | 4 +- common/lib/capa/capa/tests/test_inputtypes.py | 383 +---------------- .../lib/capa/capa/tests/test_responsetypes.py | 384 ++++++++++++++++++ 4 files changed, 410 insertions(+), 369 deletions(-) create mode 100644 common/lib/capa/capa/tests/test_responsetypes.py diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 466adcbf01..2858b2171f 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -124,8 +124,8 @@ def register_render_function(fn, names=None, cls=SimpleInput): else: raise NotImplementedError - def wrapped(): - return fn + def wrapped(*args, **kwargs): + return fn(*args, **kwargs) return wrapped #----------------------------------------------------------------------------- @@ -146,12 +146,14 @@ def optioninput(element, value, status, render_template, msg=''): raise Exception( "[courseware.capa.inputtypes.optioninput] Missing options specification in " + etree.tostring(element)) + + # parse the set of possible options oset = shlex.shlex(options[1:-1]) oset.quotes = "'" oset.whitespace = "," oset = [x[1:-1] for x in list(oset)] - # make ordered list with (key,value) same + # make ordered list with (key, value) same osetdict = [(oset[x], oset[x]) for x in range(len(oset))] # TODO: allow ordering to be randomized diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py index ebbfe16a29..c72d2a1538 100644 --- a/common/lib/capa/capa/tests/__init__.py +++ b/common/lib/capa/capa/tests/__init__.py @@ -4,6 +4,8 @@ import os from mock import Mock +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + test_system = Mock( 
ajax_url='courses/course_id/modx/a_location', track_function=Mock(), @@ -11,7 +13,7 @@ test_system = Mock( render_template=Mock(), replace_urls=Mock(), user=Mock(), - filestore=fs.osfs.OSFS(os.path.dirname(os.path.realpath(__file__))+"/test_files"), + filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")), debug=True, xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10}, node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index af3d1e87a7..8451f963d5 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -10,375 +10,28 @@ import os import unittest from . import test_system +from capa import inputtypes -import capa.capa_problem as lcp -from capa.correctmap import CorrectMap -from capa.util import convert_files_to_filenames -from capa.xqueue_interface import dateformat +from lxml import etree -class MultiChoiceTest(unittest.TestCase): - def test_MC_grade(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_foil3'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_foil2'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - def test_MC_bare_grades(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_1'} - 
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - def test_TF_grade(self): - truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" - test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': ['choice_foil1']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - -class ImageResponseTest(unittest.TestCase): - def test_ir_grade(self): - imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" - test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': '(490,11)-(556,98)', - '1_2_2': '(242,202)-(296,276)'} - test_answers = {'1_2_1': '[500,20]', - '1_2_2': '[250,300]', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') - - -class SymbolicResponseTest(unittest.TestCase): - def test_sr_grade(self): - raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. 
Until we have figured that out, we'll just skip this test - symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml" - test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', - '1_2_1_dynamath': ''' - - - - cos - - ( - θ - ) - - - - - [ - - - - 1 - - - 0 - - - - - 0 - - - 1 - - - - ] - - + - i - - - sin - - ( - θ - ) - - - - - [ - - - - 0 - - - 1 - - - - - 1 - - - 0 - - - - ] - - - -''', - } - wrong_answers = {'1_2_1': '2', - '1_2_1_dynamath': ''' - - - 2 - -''', - } - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') - - -class OptionResponseTest(unittest.TestCase): +class OptionInputTest(unittest.TestCase): ''' - Run this with - - python manage.py test courseware.OptionResponseTest + Make sure option inputs work ''' - def test_or_grade(self): - optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml" - test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'True', - '1_2_2': 'False'} - test_answers = {'1_2_1': 'True', - '1_2_2': 'True', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + def test_rendering(self): + xml = """""" + element = etree.fromstring(xml) + + value = 'Down' + status = 'incorrect' + rendered_element = inputtypes.optioninput(element, value, status, test_system.render_template) + rendered_str = etree.tostring(rendered_element) + print rendered_str + self.assertTrue(False) -class FormulaResponseWithHintTest(unittest.TestCase): - ''' - Test Formula response problem with a hint - This problem also uses calc. 
- ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': '2.5*x-5.0'} - test_answers = {'1_2_1': '0.4*x-5.0'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) - - -class StringResponseWithHintTest(unittest.TestCase): - ''' - Test String response problem with a hint - ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'Michigan'} - test_answers = {'1_2_1': 'Minnesota'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('St. Paul' in cmap.get_hint('1_2_1')) - - -class CodeResponseTest(unittest.TestCase): - ''' - Test CodeResponse - TODO: Add tests for external grader messages - ''' - @staticmethod - def make_queuestate(key, time): - timestr = datetime.strftime(time, dateformat) - return {'key': key, 'time': timestr} - - def test_is_queued(self): - """ - Simple test of whether LoncapaProblem knows when it's been queued - """ - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. 
Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.is_queued(), False) - - # Now we queue the LCP - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) - cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.is_queued(), True) - - - def test_update_score(self): - ''' - Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state - old_cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now()) - old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - - # Message format common to external graders - grader_msg = 'MESSAGE' # Must be valid XML - correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg}) - incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg}) - - xserver_msgs = {'correct': correct_score_msg, - 'incorrect': incorrect_score_msg,} - - # Incorrect queuekey, state should not be updated - for correctness in ['correct', 'incorrect']: - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) # Deep copy - - test_lcp.update_score(xserver_msgs[correctness], queuekey=0) - self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison - - for answer_id in answer_ids: - self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered - - # Correct queuekey, state should be updated - for correctness in ['correct', 'incorrect']: - for i, answer_id in enumerate(answer_ids): - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) - - new_cmap = CorrectMap() - new_cmap.update(old_cmap) - npoints = 1 if correctness=='correct' else 0 - new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) - - test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) - self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) - - for j, test_id in enumerate(answer_ids): - if j == i: - self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered - else: - self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered - - - def 
test_recentmost_queuetime(self): - ''' - Test whether the LoncapaProblem knows about the time of queue requests - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.get_recentmost_queuetime(), None) - - # CodeResponse requires internal CorrectMap state. Build it now in the queued state - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - latest_timestamp = datetime.now() - queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp) - cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) - test_lcp.correct_map.update(cmap) - - # Queue state only tracks up to second - latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) - - self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) - - def test_convert_files_to_filenames(self): - ''' - Test whether file objects are converted to filenames without altering other structures - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as fp: - answers_with_file = {'1_2_1': 'String-based answer', - '1_3_1': ['answer1', 'answer2', 'answer3'], - '1_4_1': [fp, fp]} - answers_converted = convert_files_to_filenames(answers_with_file) - self.assertEquals(answers_converted['1_2_1'], 'String-based answer') - self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) - self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) - - -class 
ChoiceResponseTest(unittest.TestCase): - - def test_cr_rb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - - def test_cr_cb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3'], - '1_4_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - '1_4_1': ['choice_2', 'choice_3'], - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') - -class JavascriptResponseTest(unittest.TestCase): - - def test_jr_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" - coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" - os.system("coffee -c %s" % (coffee_file_path)) - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': json.dumps({0: 4})} - incorrect_answers = {'1_2_1': json.dumps({0: 5})} - - self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + # TODO: split each inputtype into a 
get_render_context function and a + # template property, and have the rendering done in one place. (and be + # able to test the logic without dealing with xml at least on the output + # end) diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py new file mode 100644 index 0000000000..f2fa873080 --- /dev/null +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -0,0 +1,384 @@ +""" +Tests of responsetypes +""" + + +from datetime import datetime +import json +from nose.plugins.skip import SkipTest +import os +import unittest + +from . import test_system + +import capa.capa_problem as lcp +from capa.correctmap import CorrectMap +from capa.util import convert_files_to_filenames +from capa.xqueue_interface import dateformat + +class MultiChoiceTest(unittest.TestCase): + def test_MC_grade(self): + multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" + test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_foil3'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': 'choice_foil2'} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + def test_MC_bare_grades(self): + multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" + test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': 'choice_1'} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + def test_TF_grade(self): + truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" + test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system) + correct_answers 
= {'1_2_1': ['choice_foil2', 'choice_foil1']} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': ['choice_foil1']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + false_answers = {'1_2_1': ['choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + +class ImageResponseTest(unittest.TestCase): + def test_ir_grade(self): + imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" + test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': '(490,11)-(556,98)', + '1_2_2': '(242,202)-(296,276)'} + test_answers = {'1_2_1': '[500,20]', + '1_2_2': '[250,300]', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + + +class SymbolicResponseTest(unittest.TestCase): + def test_sr_grade(self): + raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. 
Until we have figured that out, we'll just skip this test + symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml" + test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', + '1_2_1_dynamath': ''' + + + + cos + + ( + θ + ) + + + + + [ + + + + 1 + + + 0 + + + + + 0 + + + 1 + + + + ] + + + + i + + + sin + + ( + θ + ) + + + + + [ + + + + 0 + + + 1 + + + + + 1 + + + 0 + + + + ] + + + +''', + } + wrong_answers = {'1_2_1': '2', + '1_2_1_dynamath': ''' + + + 2 + +''', + } + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') + + +class OptionResponseTest(unittest.TestCase): + ''' + Run this with + + python manage.py test courseware.OptionResponseTest + ''' + def test_or_grade(self): + optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml" + test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'True', + '1_2_2': 'False'} + test_answers = {'1_2_1': 'True', + '1_2_2': 'True', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + + +class FormulaResponseWithHintTest(unittest.TestCase): + ''' + Test Formula response problem with a hint + This problem also uses calc. 
+ ''' + def test_or_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': '2.5*x-5.0'} + test_answers = {'1_2_1': '0.4*x-5.0'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + cmap = test_lcp.grade_answers(test_answers) + self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') + self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) + + +class StringResponseWithHintTest(unittest.TestCase): + ''' + Test String response problem with a hint + ''' + def test_or_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'Michigan'} + test_answers = {'1_2_1': 'Minnesota'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + cmap = test_lcp.grade_answers(test_answers) + self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') + self.assertTrue('St. Paul' in cmap.get_hint('1_2_1')) + + +class CodeResponseTest(unittest.TestCase): + ''' + Test CodeResponse + TODO: Add tests for external grader messages + ''' + @staticmethod + def make_queuestate(key, time): + timestr = datetime.strftime(time, dateformat) + return {'key': key, 'time': timestr} + + def test_is_queued(self): + """ + Simple test of whether LoncapaProblem knows when it's been queued + """ + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. 
Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.is_queued(), False) + + # Now we queue the LCP + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) + cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.is_queued(), True) + + + def test_update_score(self): + ''' + Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state + old_cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now()) + old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + + # Message format common to external graders + grader_msg = 'MESSAGE' # Must be valid XML + correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg}) + incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg}) + + xserver_msgs = {'correct': correct_score_msg, + 'incorrect': incorrect_score_msg,} + + # Incorrect queuekey, state should not be updated + for correctness in ['correct', 'incorrect']: + test_lcp.correct_map = CorrectMap() + test_lcp.correct_map.update(old_cmap) # Deep copy + + test_lcp.update_score(xserver_msgs[correctness], queuekey=0) + self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison + + for answer_id in answer_ids: + self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered + + # Correct queuekey, state should be updated + for correctness in ['correct', 'incorrect']: + for i, answer_id in enumerate(answer_ids): + test_lcp.correct_map = CorrectMap() + test_lcp.correct_map.update(old_cmap) + + new_cmap = CorrectMap() + new_cmap.update(old_cmap) + npoints = 1 if correctness=='correct' else 0 + new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) + + test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) + self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) + + for j, test_id in enumerate(answer_ids): + if j == i: + self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered + else: + self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered + + + def 
test_recentmost_queuetime(self): + ''' + Test whether the LoncapaProblem knows about the time of queue requests + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.get_recentmost_queuetime(), None) + + # CodeResponse requires internal CorrectMap state. Build it now in the queued state + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + latest_timestamp = datetime.now() + queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp) + cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) + test_lcp.correct_map.update(cmap) + + # Queue state only tracks up to second + latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) + + self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) + + def test_convert_files_to_filenames(self): + ''' + Test whether file objects are converted to filenames without altering other structures + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as fp: + answers_with_file = {'1_2_1': 'String-based answer', + '1_3_1': ['answer1', 'answer2', 'answer3'], + '1_4_1': [fp, fp]} + answers_converted = convert_files_to_filenames(answers_with_file) + self.assertEquals(answers_converted['1_2_1'], 'String-based answer') + self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) + self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) + + +class 
ChoiceResponseTest(unittest.TestCase): + + def test_cr_rb_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2', + '1_3_1': ['choice_2', 'choice_3']} + test_answers = {'1_2_1': 'choice_2', + '1_3_1': 'choice_2', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + + def test_cr_cb_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2', + '1_3_1': ['choice_2', 'choice_3'], + '1_4_1': ['choice_2', 'choice_3']} + test_answers = {'1_2_1': 'choice_2', + '1_3_1': 'choice_2', + '1_4_1': ['choice_2', 'choice_3'], + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') + +class JavascriptResponseTest(unittest.TestCase): + + def test_jr_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" + coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" + os.system("coffee -c %s" % (coffee_file_path)) + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': json.dumps({0: 4})} + incorrect_answers = {'1_2_1': json.dumps({0: 5})} + + self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + From 
d10b568c13a6adc099ab2c84a27a5899ec9b52e9 Mon Sep 17 00:00:00 2001 From: Victor Shnayder Date: Mon, 8 Oct 2012 20:08:48 -0400 Subject: [PATCH 11/65] Add chemcalc to capa package, to context for customresponse --- common/lib/capa/capa/capa_problem.py | 7 +- common/lib/capa/capa/chem/__init__.py | 1 + common/lib/capa/capa/chem/chemcalc.py | 540 ++++++++++++++++++++++++++ 3 files changed, 546 insertions(+), 2 deletions(-) create mode 100644 common/lib/capa/capa/chem/__init__.py create mode 100644 common/lib/capa/capa/chem/chemcalc.py diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py index 9a5a15a696..29e9b7eb97 100644 --- a/common/lib/capa/capa/capa_problem.py +++ b/common/lib/capa/capa/capa_problem.py @@ -30,6 +30,8 @@ import sys from lxml import etree from xml.sax.saxutils import unescape +import chem +import chem.chemcalc import calc from correctmap import CorrectMap import eia @@ -72,7 +74,8 @@ global_context = {'random': random, 'math': math, 'scipy': scipy, 'calc': calc, - 'eia': eia} + 'eia': eia, + 'chemcalc': chem.chemcalc} # These should be removed from HTML output, including all subelements html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup"] @@ -436,7 +439,7 @@ class LoncapaProblem(object): sys.path = original_path + self._extract_system_path(script) stype = script.get('type') - + if stype: if 'javascript' in stype: continue # skip javascript diff --git a/common/lib/capa/capa/chem/__init__.py b/common/lib/capa/capa/chem/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/common/lib/capa/capa/chem/__init__.py @@ -0,0 +1 @@ + diff --git a/common/lib/capa/capa/chem/chemcalc.py b/common/lib/capa/capa/chem/chemcalc.py new file mode 100644 index 0000000000..65d1887d2e --- /dev/null +++ b/common/lib/capa/capa/chem/chemcalc.py @@ -0,0 +1,540 @@ +from __future__ import division +import copy +import logging +import math +import operator +import re +import unittest 
+import numpy +import numbers +import scipy.constants + +from pyparsing import Literal, Keyword, Word, nums, StringEnd, Optional, Forward, OneOrMore +from pyparsing import ParseException +import nltk +from nltk.tree import Tree + +local_debug = None + + +def log(s, output_type=None): + if local_debug: + print s + if output_type == 'html': + f.write(s + '\n
\n') + +## Defines a simple pyparsing tokenizer for chemical equations +elements = ['Ac','Ag','Al','Am','Ar','As','At','Au','B','Ba','Be', + 'Bh','Bi','Bk','Br','C','Ca','Cd','Ce','Cf','Cl','Cm', + 'Cn','Co','Cr','Cs','Cu','Db','Ds','Dy','Er','Es','Eu', + 'F','Fe','Fl','Fm','Fr','Ga','Gd','Ge','H','He','Hf', + 'Hg','Ho','Hs','I','In','Ir','K','Kr','La','Li','Lr', + 'Lu','Lv','Md','Mg','Mn','Mo','Mt','N','Na','Nb','Nd', + 'Ne','Ni','No','Np','O','Os','P','Pa','Pb','Pd','Pm', + 'Po','Pr','Pt','Pu','Ra','Rb','Re','Rf','Rg','Rh','Rn', + 'Ru','S','Sb','Sc','Se','Sg','Si','Sm','Sn','Sr','Ta', + 'Tb','Tc','Te','Th','Ti','Tl','Tm','U','Uuo','Uup', + 'Uus','Uut','V','W','Xe','Y','Yb','Zn','Zr'] +digits = map(str, range(10)) +symbols = list("[](){}^+-/") +phases = ["(s)", "(l)", "(g)", "(aq)"] +tokens = reduce(lambda a, b: a ^ b, map(Literal, elements + digits + symbols + phases)) +tokenizer = OneOrMore(tokens) + StringEnd() + + +def orjoin(l): + return "'" + "' | '".join(l) + "'" + +## Defines an NLTK parser for tokenized equations +grammar = """ + S -> multimolecule | multimolecule '+' S + multimolecule -> count molecule | molecule + count -> number | number '/' number + molecule -> unphased | unphased phase + unphased -> group | paren_group_round | paren_group_square + element -> """ + orjoin(elements) + """ + digit -> """ + orjoin(digits) + """ + phase -> """ + orjoin(phases) + """ + number -> digit | digit number + group -> suffixed | suffixed group + paren_group_round -> '(' group ')' + paren_group_square -> '[' group ']' + plus_minus -> '+' | '-' + number_suffix -> number + ion_suffix -> '^' number plus_minus | '^' plus_minus + suffix -> number_suffix | number_suffix ion_suffix | ion_suffix + unsuffixed -> element | paren_group_round | paren_group_square + + suffixed -> unsuffixed | unsuffixed suffix +""" +parser = nltk.ChartParser(nltk.parse_cfg(grammar)) + + +def clean_parse_tree(tree): + ''' The parse tree contains a lot of redundant + nodes. E.g. 
paren_groups have groups as children, etc. This will + clean up the tree. + ''' + def unparse_number(n): + ''' Go from a number parse tree to a number ''' + if len(n) == 1: + rv = n[0][0] + else: + rv = n[0][0] + unparse_number(n[1]) + return rv + + def null_tag(n): + ''' Remove a tag ''' + return n[0] + + def ion_suffix(n): + '''1. "if" part handles special case + 2. "else" part is general behaviour ''' + + if n[1:][0].node == 'number' and n[1:][0][0][0] == '1': + # if suffix is explicitly 1, like ^1- + # strip 1, leave only sign: ^- + return nltk.tree.Tree(n.node, n[2:]) + else: + return nltk.tree.Tree(n.node, n[1:]) + + dispatch = {'number': lambda x: nltk.tree.Tree("number", [unparse_number(x)]), + 'unphased': null_tag, + 'unsuffixed': null_tag, + 'number_suffix': lambda x: nltk.tree.Tree('number_suffix', [unparse_number(x[0])]), + 'suffixed': lambda x: len(x) > 1 and x or x[0], + 'ion_suffix': ion_suffix, + 'paren_group_square': lambda x: nltk.tree.Tree(x.node, x[1]), + 'paren_group_round': lambda x: nltk.tree.Tree(x.node, x[1])} + + if type(tree) == str: + return tree + + old_node = None + ## This loop means that if a node is processed, and returns a child, + ## the child will be processed. + while tree.node in dispatch and tree.node != old_node: + old_node = tree.node + tree = dispatch[tree.node](tree) + + children = [] + for child in tree: + child = clean_parse_tree(child) + children.append(child) + + tree = nltk.tree.Tree(tree.node, children) + + return tree + + +def merge_children(tree, tags): + ''' nltk, by documentation, cannot do arbitrary length + groups. 
Instead of: + (group 1 2 3 4) + It has to handle this recursively: + (group 1 (group 2 (group 3 (group 4)))) + We do the cleanup of converting from the latter to the former (as a + ''' + if type(tree) == str: + return tree + + merged_children = [] + done = False + #print '00000', tree + ## Merge current tag + while not done: + done = True + for child in tree: + if type(child) == nltk.tree.Tree and child.node == tree.node and tree.node in tags: + merged_children = merged_children + list(child) + done = False + else: + merged_children = merged_children + [child] + tree = nltk.tree.Tree(tree.node, merged_children) + merged_children = [] + #print '======',tree + + # And recurse + children = [] + for child in tree: + children.append(merge_children(child, tags)) + + #return tree + return nltk.tree.Tree(tree.node, children) + + +def render_to_html(tree): + ''' Renders a cleaned tree to HTML ''' + + def molecule_count(tree, children): + # If an integer, return that integer + if len(tree) == 1: + return tree[0][0] + # If a fraction, return the fraction + if len(tree) == 3: + return " {num}{den} ".format(num=tree[0][0], den=tree[2][0]) + return "Error" + + def subscript(tree, children): + return "{sub}".format(sub=children) + + def superscript(tree, children): + return "{sup}".format(sup=children) + + def round_brackets(tree, children): + return "({insider})".format(insider=children) + + def square_brackets(tree, children): + return "[{insider}]".format(insider=children) + + dispatch = {'count': molecule_count, + 'number_suffix': subscript, + 'ion_suffix': superscript, + 'paren_group_round': round_brackets, + 'paren_group_square': square_brackets} + + if type(tree) == str: + return tree + else: + children = "".join(map(render_to_html, tree)) + if tree.node in dispatch: + return dispatch[tree.node](tree, children) + else: + return children.replace(' ', '') + + +def clean_and_render_to_html(s): + ''' render a string to html ''' + status = render_to_html(get_finale_tree(s)) + 
return status + + +def get_finale_tree(s): + ''' return final tree after merge and clean ''' + tokenized = tokenizer.parseString(s) + parsed = parser.parse(tokenized) + merged = merge_children(parsed, {'S','group'}) + final = clean_parse_tree(merged) + return final + + +def check_equality(tuple1, tuple2): + ''' return True if tuples of multimolecules are equal ''' + list1 = list(tuple1) + list2 = list(tuple2) + + # Hypo: trees where are levels count+molecule vs just molecule + # cannot be sorted properly (tested on test_complex_additivity) + # But without factors and phases sorting seems to work. + + # Also for lists of multimolecules without factors and phases + # sorting seems to work fine. + list1.sort() + list2.sort() + return list1 == list2 + + +def compare_chemical_expression(s1, s2, ignore_state=False): + ''' It does comparison between two equations. + It uses divide_chemical_expression and check if division is 1 + ''' + return divide_chemical_expression(s1, s2, ignore_state) == 1 + + +def divide_chemical_expression(s1, s2, ignore_state=False): + ''' Compare chemical equations for difference + in factors. 
Ideas: + - extract factors and phases to standalone lists, + - compare equations without factors and phases, + - divide lists of factors for each other and check + for equality of every element in list, + - return result of factor division ''' + + # parsed final trees + treedic = {} + treedic['1'] = get_finale_tree(s1) + treedic['2'] = get_finale_tree(s2) + + # strip phases and factors + # collect factors in list + for i in ('1', '2'): + treedic[i + ' cleaned_mm_list'] = [] + treedic[i + ' factors'] = [] + treedic[i + ' phases'] = [] + for el in treedic[i].subtrees(filter=lambda t: t.node == 'multimolecule'): + count_subtree = [t for t in el.subtrees() if t.node == 'count'] + group_subtree = [t for t in el.subtrees() if t.node == 'group'] + phase_subtree = [t for t in el.subtrees() if t.node == 'phase'] + if count_subtree: + if len(count_subtree[0]) > 1: + treedic[i + ' factors'].append( + int(count_subtree[0][0][0]) / + int(count_subtree[0][2][0])) + else: + treedic[i + ' factors'].append(int(count_subtree[0][0][0])) + else: + treedic[i + ' factors'].append(1.0) + if phase_subtree: + treedic[i + ' phases'].append(phase_subtree[0][0]) + else: + treedic[i + ' phases'].append(' ') + treedic[i + ' cleaned_mm_list'].append( + Tree('multimolecule', [Tree('molecule', group_subtree)])) + + # order of factors and phases must mirror the order of multimolecules, + # use 'decorate, sort, undecorate' pattern + treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases'] = zip( + *sorted(zip(treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases']))) + + treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases'] = zip( + *sorted(zip(treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases']))) + + # check if equations are correct without factors + if not check_equality(treedic['1 cleaned_mm_list'], treedic['2 cleaned_mm_list']): + return False + + # phases are ruled by ingore_state flag + if not ignore_state: # phases matters 
+ if treedic['1 phases'] != treedic['2 phases']: + return False + + if any(map(lambda x, y: x / y - treedic['1 factors'][0] / treedic['2 factors'][0], + treedic['1 factors'], treedic['2 factors'])): + log('factors are not proportional') + return False + else: # return ratio + return int(max(treedic['1 factors'][0] / treedic['2 factors'][0], + treedic['2 factors'][0] / treedic['1 factors'][0])) + + +class Test_Compare_Equations(unittest.TestCase): + + def test_compare_incorrect_order_of_atoms_in_molecule(self): + self.assertFalse(compare_chemical_expression("H2O + CO2", "O2C + OH2")) + + def test_compare_same_order_no_phases_no_factors_no_ions(self): + self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2+H2O")) + + def test_compare_different_order_no_phases_no_factors_no_ions(self): + self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2 + H2O")) + + def test_compare_different_order_three_multimolecule(self): + self.assertTrue(compare_chemical_expression("H2O + Fe(OH)3 + CO2", "CO2 + H2O + Fe(OH)3")) + + def test_compare_same_factors(self): + self.assertTrue(compare_chemical_expression("3H2O + 2CO2", "2CO2 + 3H2O ")) + + def test_compare_different_factors(self): + self.assertFalse(compare_chemical_expression("2H2O + 3CO2", "2CO2 + 3H2O ")) + + def test_compare_correct_ions(self): + self.assertTrue(compare_chemical_expression("H^+ + OH^-", " OH^- + H^+ ")) + + def test_compare_wrong_ions(self): + self.assertFalse(compare_chemical_expression("H^+ + OH^-", " OH^- + H^- ")) + + def test_compare_parent_groups_ions(self): + self.assertTrue(compare_chemical_expression("Fe(OH)^2- + (OH)^-", " (OH)^- + Fe(OH)^2- ")) + + def test_compare_correct_factors_ions_and_one(self): + self.assertTrue(compare_chemical_expression("3H^+ + 2OH^-", " 2OH^- + 3H^+ ")) + + def test_compare_wrong_factors_ions(self): + self.assertFalse(compare_chemical_expression("2H^+ + 3OH^-", " 2OH^- + 3H^+ ")) + + def test_compare_float_factors(self): + 
self.assertTrue(compare_chemical_expression("7/2H^+ + 3/5OH^-", " 3/5OH^- + 7/2H^+ ")) + + # Phases tests + def test_compare_phases_ignored(self): + self.assertTrue(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2", ignore_state=True)) + + def test_compare_phases_not_ignored_explicitly(self): + self.assertFalse(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2", ignore_state=False)) + + def test_compare_phases_not_ignored(self): # same as previous + self.assertFalse(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2")) + + def test_compare_phases_not_ignored_explicitly(self): + self.assertTrue(compare_chemical_expression( + "H2O(s) + CO2", "H2O(s)+CO2", ignore_state=False)) + + # all in one cases + def test_complex_additivity(self): + self.assertTrue(compare_chemical_expression( + "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", + "7/2HCl + 2H20 + H2O + 5(H1H212)^70010-")) + + def test_complex_additivity_wrong(self): + self.assertFalse(compare_chemical_expression( + "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", + "2H20 + 7/2HCl + H2O + 5(H1H212)^70011-")) + + def test_complex_all_grammar(self): + self.assertTrue(compare_chemical_expression( + "5[Ni(NH3)4]^2+ + 5/2SO4^2-", + "5/2SO4^2- + 5[Ni(NH3)4]^2+")) + + # special cases + + def test_compare_one_superscript_explicitly_set(self): + self.assertTrue(compare_chemical_expression("H^+ + OH^1-", " OH^- + H^+ ")) + + def test_compare_equal_factors_differently_set(self): + self.assertTrue(compare_chemical_expression("6/2H^+ + OH^-", " OH^- + 3H^+ ")) + + def test_compare_one_subscript_explicitly_set(self): + self.assertFalse(compare_chemical_expression("H2 + CO2", "H2 + C102")) + + +class Test_Divide_Equations(unittest.TestCase): + ''' as compare_ use divide_, + tests here must consider different + division (not equality) cases ''' + + def test_divide_wrong_factors(self): + self.assertFalse(divide_chemical_expression( + "5(H1H212)^70010- + 10H2O", "5H2O + 10(H1H212)^70010-")) + + def test_divide_right(self): + 
self.assertEqual(divide_chemical_expression( + "5(H1H212)^70010- + 10H2O", "10H2O + 5(H1H212)^70010-"), 1) + + def test_divide_wrong_reagents(self): + self.assertFalse(divide_chemical_expression( + "H2O + CO2", "CO2")) + + def test_divide_right_simple(self): + self.assertEqual(divide_chemical_expression( + "H2O + CO2", "H2O+CO2"), 1) + + def test_divide_right_phases(self): + self.assertEqual(divide_chemical_expression( + "H2O(s) + CO2", "2H2O(s)+2CO2"), 2) + + def test_divide_wrong_phases(self): + self.assertFalse(divide_chemical_expression( + "H2O(s) + CO2", "2H2O+2CO2(s)")) + + def test_divide_wrong_phases_but_phases_ignored(self): + self.assertEqual(divide_chemical_expression( + "H2O(s) + CO2", "2H2O+2CO2(s)", ignore_state=True), 2) + + def test_divide_order(self): + self.assertEqual(divide_chemical_expression( + "2CO2 + H2O", "2H2O+4CO2"), 2) + + def test_divide_fract_to_int(self): + self.assertEqual(divide_chemical_expression( + "3/2CO2 + H2O", "2H2O+3CO2"), 2) + + def test_divide_fract_to_frac(self): + self.assertEqual(divide_chemical_expression( + "3/4CO2 + H2O", "2H2O+9/6CO2"), 2) + + def test_divide_fract_to_frac_wrog(self): + self.assertFalse(divide_chemical_expression( + "6/2CO2 + H2O", "2H2O+9/6CO2"), 2) + + +class Test_Render_Equations(unittest.TestCase): + + def test_render1(self): + s = "H2O + CO2" + out = clean_and_render_to_html(s) + correct = "H2O+CO2" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_uncorrect_reaction(self): + s = "O2C + OH2" + out = clean_and_render_to_html(s) + correct = "O2C+OH2" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render2(self): + s = "CO2 + H2O + Fe(OH)3" + out = clean_and_render_to_html(s) + correct = "CO2+H2O+Fe(OH)3" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render3(self): + s = "3H2O + 2CO2" + out = clean_and_render_to_html(s) + correct = "3H2O+2CO2" + log(out + ' ------- ' + 
correct, 'html') + self.assertEqual(out, correct) + + def test_render4(self): + s = "H^+ + OH^-" + out = clean_and_render_to_html(s) + correct = "H++OH-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render5(self): + s = "Fe(OH)^2- + (OH)^-" + out = clean_and_render_to_html(s) + correct = "Fe(OH)2-+(OH)-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render6(self): + s = "7/2H^+ + 3/5OH^-" + out = clean_and_render_to_html(s) + correct = "72H++35OH-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render7(self): + s = "5(H1H212)^70010- + 2H2O + 7/2HCl + H2O" + out = clean_and_render_to_html(s) + correct = "5(H1H212)70010-+2H2O+72HCl+H2O" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render8(self): + s = "H2O(s) + CO2" + out = clean_and_render_to_html(s) + correct = "H2O(s)+CO2" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render9(self): + s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-" + #import ipdb; ipdb.set_trace() + out = clean_and_render_to_html(s) + correct = "5[Ni(NH3)4]2++52SO42-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_error(self): + s = "5.2H20" + self.assertRaises(ParseException, clean_and_render_to_html, s) + + def test_render_simple_brackets(self): + s = "(Ar)" + out = clean_and_render_to_html(s) + correct = "(Ar)" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + +def suite(): + + testcases = [Test_Compare_Equations, Test_Divide_Equations, Test_Render_Equations] + suites = [] + for testcase in testcases: + suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase)) + return unittest.TestSuite(suites) + +if __name__ == "__main__": + local_debug = True + with open('render.html', 'w') as f: + unittest.TextTestRunner(verbosity=2).run(suite()) + # open render.html to 
look at rendered equations From db175807103172d2f7f91e5e72f1fae2c3ebca16 Mon Sep 17 00:00:00 2001 From: Victor Shnayder Date: Tue, 9 Oct 2012 18:47:32 -0400 Subject: [PATCH 12/65] Chemcalc refactor, improvement * Move tests into a separate file * add a chemical_equations_equal function to compare equations, not expressions * rename some internal functions with a leading _ --- common/lib/capa/capa/chem/chemcalc.py | 340 ++++++-------------------- common/lib/capa/capa/chem/tests.py | 296 ++++++++++++++++++++++ 2 files changed, 377 insertions(+), 259 deletions(-) create mode 100644 common/lib/capa/capa/chem/tests.py diff --git a/common/lib/capa/capa/chem/chemcalc.py b/common/lib/capa/capa/chem/chemcalc.py index 65d1887d2e..b2198b5537 100644 --- a/common/lib/capa/capa/chem/chemcalc.py +++ b/common/lib/capa/capa/chem/chemcalc.py @@ -1,27 +1,19 @@ from __future__ import division import copy +from fractions import Fraction import logging import math import operator import re -import unittest import numpy import numbers import scipy.constants -from pyparsing import Literal, Keyword, Word, nums, StringEnd, Optional, Forward, OneOrMore -from pyparsing import ParseException +from pyparsing import (Literal, Keyword, Word, nums, StringEnd, Optional, + Forward, OneOrMore, ParseException) import nltk from nltk.tree import Tree -local_debug = None - - -def log(s, output_type=None): - if local_debug: - print s - if output_type == 'html': - f.write(s + '\n
\n') ## Defines a simple pyparsing tokenizer for chemical equations elements = ['Ac','Ag','Al','Am','Ar','As','At','Au','B','Ba','Be', @@ -42,19 +34,19 @@ tokens = reduce(lambda a, b: a ^ b, map(Literal, elements + digits + symbols + p tokenizer = OneOrMore(tokens) + StringEnd() -def orjoin(l): +def _orjoin(l): return "'" + "' | '".join(l) + "'" -## Defines an NLTK parser for tokenized equations +## Defines an NLTK parser for tokenized expressions grammar = """ S -> multimolecule | multimolecule '+' S multimolecule -> count molecule | molecule count -> number | number '/' number molecule -> unphased | unphased phase unphased -> group | paren_group_round | paren_group_square - element -> """ + orjoin(elements) + """ - digit -> """ + orjoin(digits) + """ - phase -> """ + orjoin(phases) + """ + element -> """ + _orjoin(elements) + """ + digit -> """ + _orjoin(digits) + """ + phase -> """ + _orjoin(phases) + """ number -> digit | digit number group -> suffixed | suffixed group paren_group_round -> '(' group ')' @@ -70,7 +62,7 @@ grammar = """ parser = nltk.ChartParser(nltk.parse_cfg(grammar)) -def clean_parse_tree(tree): +def _clean_parse_tree(tree): ''' The parse tree contains a lot of redundant nodes. E.g. paren_groups have groups as children, etc. This will clean up the tree. @@ -119,7 +111,7 @@ def clean_parse_tree(tree): children = [] for child in tree: - child = clean_parse_tree(child) + child = _clean_parse_tree(child) children.append(child) tree = nltk.tree.Tree(tree.node, children) @@ -127,7 +119,7 @@ def clean_parse_tree(tree): return tree -def merge_children(tree, tags): +def _merge_children(tree, tags): ''' nltk, by documentation, cannot do arbitrary length groups. 
Instead of: (group 1 2 3 4) @@ -157,13 +149,13 @@ def merge_children(tree, tags): # And recurse children = [] for child in tree: - children.append(merge_children(child, tags)) + children.append(_merge_children(child, tags)) #return tree return nltk.tree.Tree(tree.node, children) -def render_to_html(tree): +def _render_to_html(tree): ''' Renders a cleaned tree to HTML ''' def molecule_count(tree, children): @@ -196,29 +188,29 @@ def render_to_html(tree): if type(tree) == str: return tree else: - children = "".join(map(render_to_html, tree)) + children = "".join(map(_render_to_html, tree)) if tree.node in dispatch: return dispatch[tree.node](tree, children) else: return children.replace(' ', '') -def clean_and_render_to_html(s): +def render_to_html(s): ''' render a string to html ''' - status = render_to_html(get_finale_tree(s)) + status = _render_to_html(_get_final_tree(s)) return status -def get_finale_tree(s): +def _get_final_tree(s): ''' return final tree after merge and clean ''' tokenized = tokenizer.parseString(s) parsed = parser.parse(tokenized) - merged = merge_children(parsed, {'S','group'}) - final = clean_parse_tree(merged) + merged = _merge_children(parsed, {'S','group'}) + final = _clean_parse_tree(merged) return final -def check_equality(tuple1, tuple2): +def _check_equality(tuple1, tuple2): ''' return True if tuples of multimolecules are equal ''' list1 = list(tuple1) list2 = list(tuple2) @@ -242,18 +234,31 @@ def compare_chemical_expression(s1, s2, ignore_state=False): def divide_chemical_expression(s1, s2, ignore_state=False): - ''' Compare chemical equations for difference - in factors. Ideas: + '''Compare two chemical equations for equivalence up to a multiplicative factor: + + - If they are not the same chemicals, returns False. + - If they are the same, "divide" s1 by s2 to returns a factor x such that s1 / s2 == x as a Fraction object. + - if ignore_state is True, ignores phases when doing the comparison. 
+ + Examples: + divide_chemical_expression("H2O", "3H2O") -> Fraction(1,3) + divide_chemical_expression("3H2O", "H2O") -> 3 # actually Fraction(3, 1), but compares == to 3. + divide_chemical_expression("2H2O(s) + 2CO2", "H2O(s)+CO2") -> 2 + divide_chemical_expression("H2O(s) + CO2", "3H2O(s)+2CO2") -> False + + Implementation sketch: - extract factors and phases to standalone lists, - compare equations without factors and phases, - divide lists of factors for each other and check for equality of every element in list, - - return result of factor division ''' + - return result of factor division + + ''' # parsed final trees treedic = {} - treedic['1'] = get_finale_tree(s1) - treedic['2'] = get_finale_tree(s2) + treedic['1'] = _get_final_tree(s1) + treedic['2'] = _get_final_tree(s2) # strip phases and factors # collect factors in list @@ -290,7 +295,7 @@ def divide_chemical_expression(s1, s2, ignore_state=False): *sorted(zip(treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases']))) # check if equations are correct without factors - if not check_equality(treedic['1 cleaned_mm_list'], treedic['2 cleaned_mm_list']): + if not _check_equality(treedic['1 cleaned_mm_list'], treedic['2 cleaned_mm_list']): return False # phases are ruled by ingore_state flag @@ -300,241 +305,58 @@ def divide_chemical_expression(s1, s2, ignore_state=False): if any(map(lambda x, y: x / y - treedic['1 factors'][0] / treedic['2 factors'][0], treedic['1 factors'], treedic['2 factors'])): - log('factors are not proportional') + # factors are not proportional return False - else: # return ratio - return int(max(treedic['1 factors'][0] / treedic['2 factors'][0], - treedic['2 factors'][0] / treedic['1 factors'][0])) + else: + # return ratio + return Fraction(treedic['1 factors'][0] / treedic['2 factors'][0]) -class Test_Compare_Equations(unittest.TestCase): +def chemical_equations_equal(eq1, eq2, ignoreFactor=True): + """ + Check whether two chemical equations are the same. 
If ignoreFactor is True, + then they are considered equal if they differ by a constant factor. - def test_compare_incorrect_order_of_atoms_in_molecule(self): - self.assertFalse(compare_chemical_expression("H2O + CO2", "O2C + OH2")) + arrows matter: ->, and <-> are different. - def test_compare_same_order_no_phases_no_factors_no_ions(self): - self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2+H2O")) + e.g. + chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 -> H2O2') -> True + chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + 2H2 -> H2O2') -> False - def test_compare_different_order_no_phases_no_factors_no_ions(self): - self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2 + H2O")) + chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 <-> H2O2') -> False - def test_compare_different_order_three_multimolecule(self): - self.assertTrue(compare_chemical_expression("H2O + Fe(OH)3 + CO2", "CO2 + H2O + Fe(OH)3")) + If there's a syntax error, we raise pyparsing.ParseException. + """ + # for now, we do a manual parse for the arrow. + arrows = ('<->', '->') # order matters -- need to try <-> first + def split_on_arrow(s): + """Split a string on an arrow. Returns left, arrow, right, or raises ParseException if there isn't an arrow""" + for arrow in arrows: + left, a, right = s.partition(arrow) + if a != '': + return left, a, right + raise ParseException("Could not find arrow. Legal arrows: {0}".format(arrows)) - def test_compare_same_factors(self): - self.assertTrue(compare_chemical_expression("3H2O + 2CO2", "2CO2 + 3H2O ")) + left1, arrow1, right1 = split_on_arrow(eq1) + left2, arrow2, right2 = split_on_arrow(eq2) - def test_compare_different_factors(self): - self.assertFalse(compare_chemical_expression("2H2O + 3CO2", "2CO2 + 3H2O ")) + # TODO: may want to be able to give student helpful feedback about why things didn't work. 
+ if arrow1 != arrow2: + # arrows don't match + return False - def test_compare_correct_ions(self): - self.assertTrue(compare_chemical_expression("H^+ + OH^-", " OH^- + H^+ ")) + factor_left = divide_chemical_expression(left1, left2) + if not factor_left: + # left sides don't match + return False - def test_compare_wrong_ions(self): - self.assertFalse(compare_chemical_expression("H^+ + OH^-", " OH^- + H^- ")) + factor_right = divide_chemical_expression(right1, right2) + if not factor_right: + # right sides don't match + return False - def test_compare_parent_groups_ions(self): - self.assertTrue(compare_chemical_expression("Fe(OH)^2- + (OH)^-", " (OH)^- + Fe(OH)^2- ")) + if factor_left != factor_right: + # factors don't match (molecule counts to add up) + return False - def test_compare_correct_factors_ions_and_one(self): - self.assertTrue(compare_chemical_expression("3H^+ + 2OH^-", " 2OH^- + 3H^+ ")) - - def test_compare_wrong_factors_ions(self): - self.assertFalse(compare_chemical_expression("2H^+ + 3OH^-", " 2OH^- + 3H^+ ")) - - def test_compare_float_factors(self): - self.assertTrue(compare_chemical_expression("7/2H^+ + 3/5OH^-", " 3/5OH^- + 7/2H^+ ")) - - # Phases tests - def test_compare_phases_ignored(self): - self.assertTrue(compare_chemical_expression( - "H2O(s) + CO2", "H2O+CO2", ignore_state=True)) - - def test_compare_phases_not_ignored_explicitly(self): - self.assertFalse(compare_chemical_expression( - "H2O(s) + CO2", "H2O+CO2", ignore_state=False)) - - def test_compare_phases_not_ignored(self): # same as previous - self.assertFalse(compare_chemical_expression( - "H2O(s) + CO2", "H2O+CO2")) - - def test_compare_phases_not_ignored_explicitly(self): - self.assertTrue(compare_chemical_expression( - "H2O(s) + CO2", "H2O(s)+CO2", ignore_state=False)) - - # all in one cases - def test_complex_additivity(self): - self.assertTrue(compare_chemical_expression( - "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", - "7/2HCl + 2H20 + H2O + 5(H1H212)^70010-")) - - def 
test_complex_additivity_wrong(self): - self.assertFalse(compare_chemical_expression( - "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", - "2H20 + 7/2HCl + H2O + 5(H1H212)^70011-")) - - def test_complex_all_grammar(self): - self.assertTrue(compare_chemical_expression( - "5[Ni(NH3)4]^2+ + 5/2SO4^2-", - "5/2SO4^2- + 5[Ni(NH3)4]^2+")) - - # special cases - - def test_compare_one_superscript_explicitly_set(self): - self.assertTrue(compare_chemical_expression("H^+ + OH^1-", " OH^- + H^+ ")) - - def test_compare_equal_factors_differently_set(self): - self.assertTrue(compare_chemical_expression("6/2H^+ + OH^-", " OH^- + 3H^+ ")) - - def test_compare_one_subscript_explicitly_set(self): - self.assertFalse(compare_chemical_expression("H2 + CO2", "H2 + C102")) - - -class Test_Divide_Equations(unittest.TestCase): - ''' as compare_ use divide_, - tests here must consider different - division (not equality) cases ''' - - def test_divide_wrong_factors(self): - self.assertFalse(divide_chemical_expression( - "5(H1H212)^70010- + 10H2O", "5H2O + 10(H1H212)^70010-")) - - def test_divide_right(self): - self.assertEqual(divide_chemical_expression( - "5(H1H212)^70010- + 10H2O", "10H2O + 5(H1H212)^70010-"), 1) - - def test_divide_wrong_reagents(self): - self.assertFalse(divide_chemical_expression( - "H2O + CO2", "CO2")) - - def test_divide_right_simple(self): - self.assertEqual(divide_chemical_expression( - "H2O + CO2", "H2O+CO2"), 1) - - def test_divide_right_phases(self): - self.assertEqual(divide_chemical_expression( - "H2O(s) + CO2", "2H2O(s)+2CO2"), 2) - - def test_divide_wrong_phases(self): - self.assertFalse(divide_chemical_expression( - "H2O(s) + CO2", "2H2O+2CO2(s)")) - - def test_divide_wrong_phases_but_phases_ignored(self): - self.assertEqual(divide_chemical_expression( - "H2O(s) + CO2", "2H2O+2CO2(s)", ignore_state=True), 2) - - def test_divide_order(self): - self.assertEqual(divide_chemical_expression( - "2CO2 + H2O", "2H2O+4CO2"), 2) - - def test_divide_fract_to_int(self): - 
self.assertEqual(divide_chemical_expression( - "3/2CO2 + H2O", "2H2O+3CO2"), 2) - - def test_divide_fract_to_frac(self): - self.assertEqual(divide_chemical_expression( - "3/4CO2 + H2O", "2H2O+9/6CO2"), 2) - - def test_divide_fract_to_frac_wrog(self): - self.assertFalse(divide_chemical_expression( - "6/2CO2 + H2O", "2H2O+9/6CO2"), 2) - - -class Test_Render_Equations(unittest.TestCase): - - def test_render1(self): - s = "H2O + CO2" - out = clean_and_render_to_html(s) - correct = "H2O+CO2" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render_uncorrect_reaction(self): - s = "O2C + OH2" - out = clean_and_render_to_html(s) - correct = "O2C+OH2" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render2(self): - s = "CO2 + H2O + Fe(OH)3" - out = clean_and_render_to_html(s) - correct = "CO2+H2O+Fe(OH)3" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render3(self): - s = "3H2O + 2CO2" - out = clean_and_render_to_html(s) - correct = "3H2O+2CO2" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render4(self): - s = "H^+ + OH^-" - out = clean_and_render_to_html(s) - correct = "H++OH-" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render5(self): - s = "Fe(OH)^2- + (OH)^-" - out = clean_and_render_to_html(s) - correct = "Fe(OH)2-+(OH)-" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render6(self): - s = "7/2H^+ + 3/5OH^-" - out = clean_and_render_to_html(s) - correct = "72H++35OH-" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render7(self): - s = "5(H1H212)^70010- + 2H2O + 7/2HCl + H2O" - out = clean_and_render_to_html(s) - correct = "5(H1H212)70010-+2H2O+72HCl+H2O" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render8(self): - s = "H2O(s) + CO2" - 
out = clean_and_render_to_html(s) - correct = "H2O(s)+CO2" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render9(self): - s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-" - #import ipdb; ipdb.set_trace() - out = clean_and_render_to_html(s) - correct = "5[Ni(NH3)4]2++52SO42-" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - def test_render_error(self): - s = "5.2H20" - self.assertRaises(ParseException, clean_and_render_to_html, s) - - def test_render_simple_brackets(self): - s = "(Ar)" - out = clean_and_render_to_html(s) - correct = "(Ar)" - log(out + ' ------- ' + correct, 'html') - self.assertEqual(out, correct) - - -def suite(): - - testcases = [Test_Compare_Equations, Test_Divide_Equations, Test_Render_Equations] - suites = [] - for testcase in testcases: - suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase)) - return unittest.TestSuite(suites) - -if __name__ == "__main__": - local_debug = True - with open('render.html', 'w') as f: - unittest.TextTestRunner(verbosity=2).run(suite()) - # open render.html to look at rendered equations + return True diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/capa/capa/chem/tests.py new file mode 100644 index 0000000000..433fe6feea --- /dev/null +++ b/common/lib/capa/capa/chem/tests.py @@ -0,0 +1,296 @@ +from fractions import Fraction +from pyparsing import ParseException +import unittest + +from chemcalc import (compare_chemical_expression, divide_chemical_expression, + render_to_html, chemical_equations_equal) + +local_debug = None + +def log(s, output_type=None): + if local_debug: + print s + if output_type == 'html': + f.write(s + '\n
\n') + + +class Test_Compare_Equations(unittest.TestCase): + def test_simple_equation(self): + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 -> H2O2')) + # left sides don't match + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + 2H2 -> H2O2')) + # right sides don't match + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 -> H2O')) + + # factors don't match + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 -> 2H2O2')) + + def test_different_factor(self): + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + self.assertFalse(chemical_equations_equal('2H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + + def test_different_arrows(self): + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 <-> 2H2O2')) + + + def test_syntax_errors(self): + self.assertRaises(ParseException, chemical_equations_equal, + 'H2 + O2 a-> H2O2', + '2O2 + 2H2 -> 2H2O2') + + self.assertRaises(ParseException, chemical_equations_equal, + 'H2 + O2 ==> H2O2', # strange arrow + '2O2 + 2H2 -> 2H2O2') + + +class Test_Compare_Expressions(unittest.TestCase): + + def test_compare_incorrect_order_of_atoms_in_molecule(self): + self.assertFalse(compare_chemical_expression("H2O + CO2", "O2C + OH2")) + + def test_compare_same_order_no_phases_no_factors_no_ions(self): + self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2+H2O")) + + def test_compare_different_order_no_phases_no_factors_no_ions(self): + self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2 + H2O")) + + def test_compare_different_order_three_multimolecule(self): + self.assertTrue(compare_chemical_expression("H2O + Fe(OH)3 + CO2", "CO2 + H2O + Fe(OH)3")) + + def test_compare_same_factors(self): + self.assertTrue(compare_chemical_expression("3H2O + 2CO2", "2CO2 + 3H2O ")) + + def 
test_compare_different_factors(self): + self.assertFalse(compare_chemical_expression("2H2O + 3CO2", "2CO2 + 3H2O ")) + + def test_compare_correct_ions(self): + self.assertTrue(compare_chemical_expression("H^+ + OH^-", " OH^- + H^+ ")) + + def test_compare_wrong_ions(self): + self.assertFalse(compare_chemical_expression("H^+ + OH^-", " OH^- + H^- ")) + + def test_compare_parent_groups_ions(self): + self.assertTrue(compare_chemical_expression("Fe(OH)^2- + (OH)^-", " (OH)^- + Fe(OH)^2- ")) + + def test_compare_correct_factors_ions_and_one(self): + self.assertTrue(compare_chemical_expression("3H^+ + 2OH^-", " 2OH^- + 3H^+ ")) + + def test_compare_wrong_factors_ions(self): + self.assertFalse(compare_chemical_expression("2H^+ + 3OH^-", " 2OH^- + 3H^+ ")) + + def test_compare_float_factors(self): + self.assertTrue(compare_chemical_expression("7/2H^+ + 3/5OH^-", " 3/5OH^- + 7/2H^+ ")) + + # Phases tests + def test_compare_phases_ignored(self): + self.assertTrue(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2", ignore_state=True)) + + def test_compare_phases_not_ignored_explicitly(self): + self.assertFalse(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2", ignore_state=False)) + + def test_compare_phases_not_ignored(self): # same as previous + self.assertFalse(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2")) + + def test_compare_phases_not_ignored_explicitly(self): + self.assertTrue(compare_chemical_expression( + "H2O(s) + CO2", "H2O(s)+CO2", ignore_state=False)) + + # all in one cases + def test_complex_additivity(self): + self.assertTrue(compare_chemical_expression( + "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", + "7/2HCl + 2H20 + H2O + 5(H1H212)^70010-")) + + def test_complex_additivity_wrong(self): + self.assertFalse(compare_chemical_expression( + "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", + "2H20 + 7/2HCl + H2O + 5(H1H212)^70011-")) + + def test_complex_all_grammar(self): + self.assertTrue(compare_chemical_expression( + "5[Ni(NH3)4]^2+ + 
5/2SO4^2-", + "5/2SO4^2- + 5[Ni(NH3)4]^2+")) + + # special cases + + def test_compare_one_superscript_explicitly_set(self): + self.assertTrue(compare_chemical_expression("H^+ + OH^1-", " OH^- + H^+ ")) + + def test_compare_equal_factors_differently_set(self): + self.assertTrue(compare_chemical_expression("6/2H^+ + OH^-", " OH^- + 3H^+ ")) + + def test_compare_one_subscript_explicitly_set(self): + self.assertFalse(compare_chemical_expression("H2 + CO2", "H2 + C102")) + + +class Test_Divide_Expressions(unittest.TestCase): + ''' as compare_ use divide_, + tests here must consider different + division (not equality) cases ''' + + def test_divide_by_zero(self): + self.assertFalse(divide_chemical_expression( + "0H2O", "H2O")) + + def test_divide_wrong_factors(self): + self.assertFalse(divide_chemical_expression( + "5(H1H212)^70010- + 10H2O", "5H2O + 10(H1H212)^70010-")) + + def test_divide_right(self): + self.assertEqual(divide_chemical_expression( + "5(H1H212)^70010- + 10H2O", "10H2O + 5(H1H212)^70010-"), 1) + + def test_divide_wrong_reagents(self): + self.assertFalse(divide_chemical_expression( + "H2O + CO2", "CO2")) + + def test_divide_right_simple(self): + self.assertEqual(divide_chemical_expression( + "H2O + CO2", "H2O+CO2"), 1) + + def test_divide_right_phases(self): + self.assertEqual(divide_chemical_expression( + "H2O(s) + CO2", "2H2O(s)+2CO2"), Fraction(1, 2)) + + def test_divide_right_phases_other_order(self): + self.assertEqual(divide_chemical_expression( + "2H2O(s) + 2CO2", "H2O(s)+CO2"), 2) + + def test_divide_wrong_phases(self): + self.assertFalse(divide_chemical_expression( + "H2O(s) + CO2", "2H2O+2CO2(s)")) + + def test_divide_wrong_phases_but_phases_ignored(self): + self.assertEqual(divide_chemical_expression( + "H2O(s) + CO2", "2H2O+2CO2(s)", ignore_state=True), Fraction(1, 2)) + + def test_divide_order(self): + self.assertEqual(divide_chemical_expression( + "2CO2 + H2O", "2H2O+4CO2"), Fraction(1, 2)) + + def test_divide_fract_to_int(self): + 
self.assertEqual(divide_chemical_expression( + "3/2CO2 + H2O", "2H2O+3CO2"), Fraction(1, 2)) + + def test_divide_fract_to_frac(self): + self.assertEqual(divide_chemical_expression( + "3/4CO2 + H2O", "2H2O+9/6CO2"), Fraction(1, 2)) + + def test_divide_fract_to_frac_wrog(self): + self.assertFalse(divide_chemical_expression( + "6/2CO2 + H2O", "2H2O+9/6CO2"), 2) + + +class Test_Render_Equations(unittest.TestCase): + + def test_render1(self): + s = "H2O + CO2" + out = render_to_html(s) + correct = "H2O+CO2" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_uncorrect_reaction(self): + s = "O2C + OH2" + out = render_to_html(s) + correct = "O2C+OH2" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render2(self): + s = "CO2 + H2O + Fe(OH)3" + out = render_to_html(s) + correct = "CO2+H2O+Fe(OH)3" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render3(self): + s = "3H2O + 2CO2" + out = render_to_html(s) + correct = "3H2O+2CO2" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render4(self): + s = "H^+ + OH^-" + out = render_to_html(s) + correct = "H++OH-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render5(self): + s = "Fe(OH)^2- + (OH)^-" + out = render_to_html(s) + correct = "Fe(OH)2-+(OH)-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render6(self): + s = "7/2H^+ + 3/5OH^-" + out = render_to_html(s) + correct = "72H++35OH-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render7(self): + s = "5(H1H212)^70010- + 2H2O + 7/2HCl + H2O" + out = render_to_html(s) + correct = "5(H1H212)70010-+2H2O+72HCl+H2O" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render8(self): + s = "H2O(s) + CO2" + out = render_to_html(s) + correct = "H2O(s)+CO2" + 
log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render9(self): + s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-" + #import ipdb; ipdb.set_trace() + out = render_to_html(s) + correct = "5[Ni(NH3)4]2++52SO42-" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_error(self): + s = "5.2H20" + self.assertRaises(ParseException, render_to_html, s) + + def test_render_simple_brackets(self): + s = "(Ar)" + out = render_to_html(s) + correct = "(Ar)" + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + +def suite(): + + testcases = [Test_Compare_Expressions, Test_Divide_Expressions, Test_Render_Equations] + suites = [] + for testcase in testcases: + suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase)) + return unittest.TestSuite(suites) + +if __name__ == "__main__": + local_debug = True + with open('render.html', 'w') as f: + unittest.TextTestRunner(verbosity=2).run(suite()) + # open render.html to look at rendered equations From 6f60af71fd799f71ced3eb004ca2f1fca6a7d10a Mon Sep 17 00:00:00 2001 From: Tom Giannattasio Date: Wed, 10 Oct 2012 16:14:32 -0400 Subject: [PATCH 13/65] tweaked collapsible selector to target the proper section; functionality like this should have its own class to avoid selector conflicts, but it may be too late to fix this now --- common/lib/xmodule/xmodule/js/src/collapsible.coffee | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/lib/xmodule/xmodule/js/src/collapsible.coffee b/common/lib/xmodule/xmodule/js/src/collapsible.coffee index 314e7ca868..2f4b84e253 100644 --- a/common/lib/xmodule/xmodule/js/src/collapsible.coffee +++ b/common/lib/xmodule/xmodule/js/src/collapsible.coffee @@ -11,7 +11,7 @@ class @Collapsible ### el.find('.longform').hide() el.find('.shortform').append('See full output') - el.find('.collapsible section').hide() + el.find('.collapsible header + section').hide() el.find('.full').click @toggleFull 
el.find('.collapsible header a').click @toggleHint From 00e2f093b9b3af578eb0509e026667558571dfeb Mon Sep 17 00:00:00 2001 From: Tom Giannattasio Date: Wed, 10 Oct 2012 16:19:03 -0400 Subject: [PATCH 14/65] removed extra padding on nested sections --- common/lib/xmodule/xmodule/css/capa/display.scss | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index aa3f96c2e7..fd67a3804e 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -572,7 +572,7 @@ section.problem { } } - section { + > section { padding: 9px; } } From 58b0829b7975873c75e69a576a6db81a23aac931 Mon Sep 17 00:00:00 2001 From: Arjun Singh Date: Thu, 11 Oct 2012 06:22:00 -0700 Subject: [PATCH 15/65] Fix some issues with integer ids in the discussion forum --- lms/templates/discussion/_underscore_templates.html | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lms/templates/discussion/_underscore_templates.html b/lms/templates/discussion/_underscore_templates.html index 0a691ac36f..d1d3f6db56 100644 --- a/lms/templates/discussion/_underscore_templates.html +++ b/lms/templates/discussion/_underscore_templates.html @@ -1,5 +1,5 @@ + +
+
+

The University of Texas System joins edX

+
+
+

The University of Texas System joins Harvard, MIT and UC Berkeley in not-for-profit online learning collaborative

+ +

CAMBRIDGE, MA/AUSTIN, TX – October 15, 2012 — edX, the online non-profit learning initiative founded by Harvard University (Harvard) and the Massachusetts Institute of Technology (MIT) and launched in May, announced today the addition of The University of Texas (UT) System to its platform. The UT System, one of the largest public university systems in the United States with nine academic universities and six health institutions, will collaborate with edX to expand the group of participating “X Universities” – universities offering their courses on the edX platform.

+ +

The UT System includes the University of Texas at Austin, ranked 25th in the 2012-2013 Times Higher Education World University Rankings, UT Southwestern Medical Center, home to one of the nation's top 25 medical schools, and UT MD Anderson Cancer Center, the nation's No. 1-ranked cancer center. The system's institutions serve 212,000 students and employ 19,000 faculty members.

+ +

Through edX, the “X Universities” provide online interactive education wherever there is access to the Internet, with a goal to enhance teaching and learning through research about how students learn, and how technologies can facilitate effective teaching both on campus and online. The University of California, Berkeley (UC Berkeley) joined edX in July 2012. edX plans to add other “X Universities” from around the world to the edX platform in the coming months.

+ +

Francisco G. Cigarroa, Chancellor of The University of Texas System announced the partnership following a unanimous vote of approval by the UT System's Board of Regents on Monday.

+ +

“New technologies are positively impacting how professors teach and how course content is delivered,” Chancellor Cigarroa said. “The University of Texas System will help lead this revolution and fundamentally alter the direction of online education. We are excited about this partnership with edX and honored to be in the company of such exceptional institutions as MIT, Harvard and Berkeley. The mission of edX aligns perfectly with that of the UT System and keeps the learner as its central focus.”

+ +

The University of Texas System plans to offer at least four courses on edX within the next year.

+ +

In addition to serving a global community of online students, the UT System plans to redesign general education courses and traditional entry-level courses that are too often made up of several hundred students. Through its Institute for Transformational Learning, the UT System plans to give students more options by offering courses that are customized to student needs. For example, the UT System plans to offer courses that use a combination of technology and face-to-face interaction, courses that allow students to manage their own time by accelerating through sections they have already mastered or spending more time on areas they find challenging, and fully online courses so students are not limited by their location.

+ +

“As Texas' flagship university, UT Austin is committed not only to embracing breakthroughs in education, but helping create them,” said William Powers, Jr., President of UT Austin. “We're proud to be partnering with these top peer universities on edX.”

+ +

As part of a bold and innovative plan, the UT System also plans to offer courses through edX that will allow students to earn college credits toward a degree. “Our goal through our partnership with edX is to better meet the learning needs of a wide range of students, raise graduation rates and cut the cost of higher education, all while maintaining our commitment to education of the highest quality,” said Gene Powell, chairman of the UT System Board of Regents.

+ +

The UT System brings a large and diverse student body to the edX family. Its six health institutions offer a unique opportunity to provide groundbreaking health and medical courses via edX in the near future. The UT System also brings special expertise in analytics – assessing student learning, online course design and creating interactive learning environments.

+ +

edX courses are designed to provide students with a wealth of innovative resources, including interactive laboratories, virtual reality environments and access to online tutors and tutorials. Students who take UT System courses through edX won't work in isolation, but will have the opportunity to participate in online forums, network with instructors and fellow students and take part in exciting collaborative projects. “We are excited that The University of Texas System is joining edX's efforts to revolutionize learning,” said Anant Agarwal, President of edX. “The institutions within The University of Texas System bring a wide range of expertise to the edX mission, and with them edX is now positioned to continue to increase our offering of high-quality, online courses.”

+ +

edX was created by Harvard and MIT in May, with each university committing to contribute $30 million toward the online partnership.

+ +

“Today's announcement is another important step toward our shared objectives of expanding access to high quality educational content while enhancing teaching and learning online and in the classroom,” said Harvard President Drew Faust. “The addition of The University of Texas System to the edX platform will allow us to deepen our understanding of learning, develop new approaches to teaching that build on that knowledge, and strengthen both the on-campus and online learning experience.” + +

“At MIT, we are energetically exploring the ways that online instruction can help us reimagine our campus residential education even as it allows us to reach an unprecedented number of learners around the world,” said MIT President L. Rafael Reif. “It is thrilling to be joined by The University of Texas System in the pursuit of that dual goal.”

+ +

The edX classes to be offered by the UT System will be announced soon and will join other new edX courses planned for Spring, Summer and Fall 2013. As with all edX courses, online learners who obtain a passing grade in the UT System courses will receive a certificate of mastery. edX will also offer the option of proctored examinations for the UT System courses.

+ + +

About edX

+ +

edX is a not-for-profit enterprise of its founding partners Harvard University and the Massachusetts Institute of Technology that features learning designed specifically for interactive study via the web. Based on a long history of collaboration and their shared educational missions the founders are creating a new online-learning experience. Anant Agarwal, former Director of MIT's Computer Science and Artificial Intelligence Laboratory, serves as the first president of edX. Along with offering online courses, the institutions will use edX to research how students learn and how technology can transform learning—both on-campus and worldwide. edX is based in Cambridge, Massachusetts and is governed by MIT and Harvard.

+ +

About Harvard University

+ +

Harvard University is devoted to excellence in teaching, learning and research, and to developing leaders in many disciplines who make a difference globally. Harvard Faculty are engaged with teaching and research to push the boundaries of human knowledge. The University has twelve degree-granting Schools in addition to the Radcliffe Institute for Advanced Study.

+ +

Established in 1636, Harvard is the oldest institution of higher education in the United States. The University, which is based in Cambridge and Boston, Massachusetts, has an enrollment of over 20,000 degree candidates, including undergraduate, graduate and professional students. Harvard has more than 360,000 alumni around the world.

+ +

About MIT

+

The Massachusetts Institute of Technology — a coeducational, privately endowed research university founded in 1861 — is dedicated to advancing knowledge and educating students in science, technology and other areas of scholarship that will best serve the nation and the world in the 21st century. The Institute has close to 1,000 faculty and 10,000 undergraduate and graduate students. It is organized into five Schools: Architecture and Urban Planning; Engineering; Humanities, Arts, and Social Sciences; Sloan School of Management; and Science.

+ +

MIT's commitment to innovation has led to a host of scientific breakthroughs and technological advances. Achievements of the Institute's faculty and graduates have included the first chemical synthesis of penicillin and vitamin A, the development of inertial guidance systems, modern technologies for artificial limbs and the magnetic core memory that made possible the development of digital computers. Seventy-eight alumni, faculty, researchers and staff have won Nobel Prizes.

+ +

Current areas of research and education include neuroscience and the study of the brain and mind, bioengineering, cancer, energy, the environment and sustainable development, information sciences and technology, new media, financial technology and entrepreneurship.

+ +

About the University of California, Berkeley

+ +

The University of California, Berkeley is the world's premier public university with a mission to excel in teaching, research and public service. This longstanding mission has led to the university's distinguished record of Nobel-level scholarship, constant innovation, a concern for the betterment of our world, and consistently high rankings of its schools and departments. The campus offers superior, high value education for extraordinarily talented students from all walks of life; operational excellence and a commitment to the competitiveness and prosperity of California and the nation.

+ +

The University of California was chartered in 1868 and its flagship campus in Berkeley, on San Francisco Bay, was envisioned as a “City of Learning.” Today, there are more than 1,500 fulltime and 500 part-time faculty members dispersed among more than 130 academic departments and more than 80 interdisciplinary research units. Twenty-two Nobel Prizes have been garnered by faculty and 28 by UC Berkeley alumni. There are 9 Nobel Laureates, 32 MacArthur Fellows, and 4 Pulitzer Prize winners among the current faculty.

+ +

About The University of Texas System

+ +

Educating students, providing care for patients, conducting groundbreaking research and serving the needs of Texans and the nation for more than 130 years, The University of Texas System is one of the largest public university systems in the United States, with nine academic universities and six health science centers. Student enrollment exceeded 215,000 in the 2011 academic year. The UT System confers more than one-third of the state's undergraduate degrees and educates nearly three-fourths of the state's health care professionals annually. The UT System has an annual operating budget of $13.1 billion (FY 2012) including $2.3 billion in sponsored programs funded by federal, state, local and private sources. With roughly 87,000 employees, the UT System is one of the largest employers in the state. www.utsystem.edu

+ +
+

edX Contact: Dan O’Connell

+

oconnell@edx.org

+

617-480-6585

+
+

UT System Contact: Jenny LaCoste-Caputo

+

jcaputo@utsystem.edu

+

512-499-4361

+
+ + +
+
+
diff --git a/lms/templates/university_profile/utx.html b/lms/templates/university_profile/utx.html new file mode 100644 index 0000000000..756c9dc62b --- /dev/null +++ b/lms/templates/university_profile/utx.html @@ -0,0 +1,24 @@ +<%inherit file="base.html" /> +<%namespace name='static' file='../static_content.html'/> + +<%block name="title">UTx + +<%block name="university_header"> + + + + +<%block name="university_description"> +

Educating students, providing care for patients, conducting groundbreaking research and serving the needs of Texans and the nation for more than 130 years, The University of Texas System is one of the largest public university systems in the United States, with nine academic universities and six health science centers. Student enrollment exceeded 215,000 in the 2011 academic year. The UT System confers more than one-third of the state’s undergraduate degrees and educates nearly three-fourths of the state’s health care professionals annually. The UT System has an annual operating budget of $13.1 billion (FY 2012) including $2.3 billion in sponsored programs funded by federal, state, local and private sources. With roughly 87,000 employees, the UT System is one of the largest employers in the state.

+ + +${parent.body()} diff --git a/lms/urls.py b/lms/urls.py index 035db95596..89a541ab06 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -52,6 +52,7 @@ urlpatterns = ('', url(r'^heartbeat$', include('heartbeat.urls')), + url(r'^university_profile/UTx$', 'courseware.views.static_university_profile', name="static_university_profile", kwargs={'org_id':'UTx'}), url(r'^university_profile/(?P[^/]+)$', 'courseware.views.university_profile', name="university_profile"), #Semi-static views (these need to be rendered and have the login bar, but don't change) @@ -88,6 +89,8 @@ urlpatterns = ('', {'template': 'press_releases/edX_announces_proctored_exam_testing.html'}, name="press/edX-announces-proctored-exam-testing"), url(r'^press/elsevier-collaborates-with-edx$', 'static_template_view.views.render', {'template': 'press_releases/Elsevier_collaborates_with_edX.html'}, name="press/elsevier-collaborates-with-edx"), + url(r'^press/ut-joins-edx$', 'static_template_view.views.render', + {'template': 'press_releases/UT_joins_edX.html'}, name="press/ut-joins-edx"), # Should this always update to point to the latest press release? From 516daa47107e5af664ddf9c5e37bb2ec0f6d48a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Mon, 15 Oct 2012 06:58:39 -0400 Subject: [PATCH 48/65] Converted FAQ template to unix line endings --- lms/templates/static_templates/faq.html | 234 ++++++++++++------------ 1 file changed, 117 insertions(+), 117 deletions(-) diff --git a/lms/templates/static_templates/faq.html b/lms/templates/static_templates/faq.html index 2a9df7c5fc..d0f2191b8f 100644 --- a/lms/templates/static_templates/faq.html +++ b/lms/templates/static_templates/faq.html @@ -1,117 +1,117 @@ -<%! from django.core.urlresolvers import reverse %> -<%namespace name='static' file='../static_content.html'/> - -<%inherit file="../main.html" /> - -<%block name="title">FAQ - -
- -
-
-
-

Organization

-
-

What is edX?

- -

Massachusetts Institute of Technology (MIT) and Harvard University that offers online learning to on-campus students and to millions of people around the world. To do so, edX is building an open-source online learning platform and hosts an online web portal at www.edx.org for online education.

-

EdX currently offers HarvardX, MITx and BerkeleyX classes online for free. Beginning in Summer 2013, edX will also offer UTx (University of Texas) classes online for free. The University of Texas System includes nine universities and six health institutions. The edX institutions aim to extend their collective reach to build a global community of online students. Along with offering online courses, the three universities undertake research on how students learn and how technology can transform learning – both on-campus and online throughout the world.

-
-
-

What are "X Universities"?

-

Harvard, MIT and UC Berkeley, as the first universities whose courses are delivered on the edX website, are "X Universities." The three institutions will work collaboratively to establish the "X University" Consortium, whose membership will expand to include additional "X Universities" as soon as possible. Each member of the consortium will offer courses on the edX platform as an "X University." The gathering of many universities’ educational content together on one site will enable learners worldwide to access the course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

-
-
-

Why is UC Berkeley joining edX?

-

Like Harvard and MIT, UC Berkeley seeks to transform education in quality, efficiency and scale through technology and research, for the benefit of campus-based students and the global community of online learners.

-

UC Berkeley also shares the edX commitment to the not-for-profit and open-platform model as a way to enhance human fulfillment worldwide.

-
-
-

What will UC Berkeley's direct participation entail?

-

UC Berkeley will begin by offering two courses on edX in Fall 2012, and will collaborate on the development of the technology platform. We will explore, experiment and innovate together.

-

UC Berkeley will also serve as the inaugural chair of the "X University" Consortium for an initial 5 year period. As Chair, UC Berkeley will participate on the edX Board on behalf of the X Universities.

-
-
-

Why is The University of Texas System joining edX?

-

Joining edX not only allows UT faculty to showcase their work on a global stage, but also provides UT students the opportunity to take classes from their choice of UT institutions, as well as MIT, Harvard, UC Berkeley and future “X” Universities.

-

The UT System closely examined all the alternatives and determined that edX offered the best fit in terms of alignment of mission, platform and revenue model. The strength and reputation of the partner institutions – MIT, Harvard and UC Berkeley – was also a huge consideration. EdX is committed to both blended and online learning and to a non-profit, open source model. It is also governed by a board of academics with a commitment to excellence in learning.

-
-
-

What will The UT System’s direct participation entail?

-

The UT System will begin by offering one course on edX from The University of Texas at Austin in Summer 2013, and four courses in Fall 2013, likely at least one of those courses from one of its health institutions. The UT System is also making a $5 million investment in the edX platform. We will explore, experiment and innovate together.

-
-
-

Will edX be adding additional X Universities?

-

More than 140 institutions from around the world have expressed interest in collaborating with edX since Harvard and MIT announced its creation in May. EdX is focused above all on quality and developing the best not-for-profit model for online education. In addition to providing online courses on the edX platform, the “X University” Consortium will be a forum in which members can share experiences around online learning. Harvard, MIT, UC Berkeley and the UT System will work collaboratively to establish the “X University” Consortium, whose membership will expand to include additional “X Universities” as soon as possible. Each member of the consortium will offer courses on the edX platform as an “X University.” The gathering of many universities’ educational content together on one site will enable learners worldwide to access the course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

-

EdX will actively explore the addition of other institutions from around the world to the edX platform, and we look forward to adding more “X Universities” as capacity increases.

-
-
- -
-

Students

-
-

Who can take edX courses? Will there be an admissions process?

-

EdX will be available to anyone in the world with an internet connection, and in general, there will not be an admissions process.

-
-
-

Will certificates be awarded?

-

Yes. Online learners who demonstrate mastery of subjects can earn a certificate of completion. Certificates will be issued by edX under the name of the underlying "X University" from where the course originated, i.e. HarvardX, MITx or BerkeleyX. For the courses in Fall 2012, those certificates will be free. There is a plan to charge a modest fee for certificates in the future.

-
-
-

What will the scope of the online courses be? How many? Which faculty?

-

Our goal is to offer a wide variety of courses across disciplines. There are currently seven courses offered for Fall 2012.

-
-
-

Who is the learner? Domestic or international? Age range?

-

Improving teaching and learning for students on our campuses is one of our primary goals. Beyond that, we don’t have a target group of potential learners, as the goal is to make these courses available to anyone in the world – from any demographic – who has interest in advancing their own knowledge. The only requirement is to have a computer with an internet connection. More than 150,000 students from over 160 countries registered for MITx's first course, 6.002x: Circuits and Electronics. The age range of students certified in this course was from 14 to 74 years-old.

-
-
-

Will participating universities’ standards apply to all courses offered on the edX platform?

-

Yes: the reach changes exponentially, but the rigor remains the same.

-
-
-

How do you intend to test whether this approach is improving learning?

-

EdX institutions have assembled faculty members who will collect and analyze data to assess results and the impact edX is having on learning.

-
-
-

How may I apply to study with edX?

-

Simply complete the online signup form. Enrolling will create your unique student record in the edX database, allow you to register for classes, and to receive a certificate on successful completion.

-
-
-

How may another university participate in edX?

-

If you are from a university interested in discussing edX, please email university@edx.org

-
-
- -
-

Technology Platform

-
-

What technology will edX use?

-

The edX open-source online learning platform will feature interactive learning designed specifically for the web. Features will include: self-paced learning, online discussion groups, wiki-based collaborative learning, assessment of learning as a student progresses through a course, and online laboratories and other interactive learning tools. The platform will also serve as a laboratory from which data will be gathered to better understand how students learn. Because it is open source, the platform will be continuously improved by a worldwide community of collaborators, with new features added as needs arise.

-

The first version of the technology was used in the first MITx course, 6.002x Circuits and Electronics, which launched in Spring, 2012.

-
-
-

How is this different from what other universities are doing online?

-

EdX is a not-for-profit enterprise built upon the shared educational missions of its founding partners, Harvard University and MIT. The edX platform will be available as open source. Also, a primary goal of edX is to improve teaching and learning on campus by experimenting with blended models of learning and by supporting faculty in conducting significant research on how students learn.

-
-
- -
- - -
-
- -%if user.is_authenticated(): - <%include file="../signup_modal.html" /> -%endif +<%! from django.core.urlresolvers import reverse %> +<%namespace name='static' file='../static_content.html'/> + +<%inherit file="../main.html" /> + +<%block name="title">FAQ + +
+ +
+
+
+

Organization

+
+

What is edX?

+ +

Massachusetts Institute of Technology (MIT) and Harvard University that offers online learning to on-campus students and to millions of people around the world. To do so, edX is building an open-source online learning platform and hosts an online web portal at www.edx.org for online education.

+

EdX currently offers HarvardX, MITx and BerkeleyX classes online for free. Beginning in Summer 2013, edX will also offer UTx (University of Texas) classes online for free. The University of Texas System includes nine universities and six health institutions. The edX institutions aim to extend their collective reach to build a global community of online students. Along with offering online courses, the three universities undertake research on how students learn and how technology can transform learning – both on-campus and online throughout the world.

+
+
+

What are "X Universities"?

+

Harvard, MIT and UC Berkeley, as the first universities whose courses are delivered on the edX website, are "X Universities." The three institutions will work collaboratively to establish the "X University" Consortium, whose membership will expand to include additional "X Universities" as soon as possible. Each member of the consortium will offer courses on the edX platform as an "X University." The gathering of many universities’ educational content together on one site will enable learners worldwide to access the course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

+
+
+

Why is UC Berkeley joining edX?

+

Like Harvard and MIT, UC Berkeley seeks to transform education in quality, efficiency and scale through technology and research, for the benefit of campus-based students and the global community of online learners.

+

UC Berkeley also shares the edX commitment to the not-for-profit and open-platform model as a way to enhance human fulfillment worldwide.

+
+
+

What will UC Berkeley's direct participation entail?

+

UC Berkeley will begin by offering two courses on edX in Fall 2012, and will collaborate on the development of the technology platform. We will explore, experiment and innovate together.

+

UC Berkeley will also serve as the inaugural chair of the "X University" Consortium for an initial 5 year period. As Chair, UC Berkeley will participate on the edX Board on behalf of the X Universities.

+
+
+

Why is The University of Texas System joining edX?

+

Joining edX not only allows UT faculty to showcase their work on a global stage, but also provides UT students the opportunity to take classes from their choice of UT institutions, as well as MIT, Harvard, UC Berkeley and future “X” Universities.

+

The UT System closely examined all the alternatives and determined that edX offered the best fit in terms of alignment of mission, platform and revenue model. The strength and reputation of the partner institutions – MIT, Harvard and UC Berkeley – was also a huge consideration. EdX is committed to both blended and online learning and to a non-profit, open source model. It is also governed by a board of academics with a commitment to excellence in learning.

+
+
+

What will The UT System’s direct participation entail?

+

The UT System will begin by offering one course on edX from The University of Texas at Austin in Summer 2013, and four courses in Fall 2013, likely at least one of those courses from one of its health institutions. The UT System is also making a $5 million investment in the edX platform. We will explore, experiment and innovate together.

+
+
+

Will edX be adding additional X Universities?

+

More than 140 institutions from around the world have expressed interest in collaborating with edX since Harvard and MIT announced its creation in May. EdX is focused above all on quality and developing the best not-for-profit model for online education. In addition to providing online courses on the edX platform, the “X University” Consortium will be a forum in which members can share experiences around online learning. Harvard, MIT, UC Berkeley and the UT System will work collaboratively to establish the “X University” Consortium, whose membership will expand to include additional “X Universities” as soon as possible. Each member of the consortium will offer courses on the edX platform as an “X University.” The gathering of many universities’ educational content together on one site will enable learners worldwide to access the course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

+

EdX will actively explore the addition of other institutions from around the world to the edX platform, and we look forward to adding more “X Universities” as capacity increases.

+
+
+ +
+

Students

+
+

Who can take edX courses? Will there be an admissions process?

+

EdX will be available to anyone in the world with an internet connection, and in general, there will not be an admissions process.

+
+
+

Will certificates be awarded?

+

Yes. Online learners who demonstrate mastery of subjects can earn a certificate of completion. Certificates will be issued by edX under the name of the underlying "X University" from where the course originated, i.e. HarvardX, MITx or BerkeleyX. For the courses in Fall 2012, those certificates will be free. There is a plan to charge a modest fee for certificates in the future.

+
+
+

What will the scope of the online courses be? How many? Which faculty?

+

Our goal is to offer a wide variety of courses across disciplines. There are currently seven courses offered for Fall 2012.

+
+
+

Who is the learner? Domestic or international? Age range?

+

Improving teaching and learning for students on our campuses is one of our primary goals. Beyond that, we don’t have a target group of potential learners, as the goal is to make these courses available to anyone in the world – from any demographic – who has interest in advancing their own knowledge. The only requirement is to have a computer with an internet connection. More than 150,000 students from over 160 countries registered for MITx's first course, 6.002x: Circuits and Electronics. The age range of students certified in this course was from 14 to 74 years-old.

+
+
+

Will participating universities’ standards apply to all courses offered on the edX platform?

+

Yes: the reach changes exponentially, but the rigor remains the same.

+
+
+

How do you intend to test whether this approach is improving learning?

+

EdX institutions have assembled faculty members who will collect and analyze data to assess results and the impact edX is having on learning.

+
+
+

How may I apply to study with edX?

+

Simply complete the online signup form. Enrolling will create your unique student record in the edX database, allow you to register for classes, and to receive a certificate on successful completion.

+
+
+

How may another university participate in edX?

+

If you are from a university interested in discussing edX, please email university@edx.org

+
+
+ +
+

Technology Platform

+
+

What technology will edX use?

+

The edX open-source online learning platform will feature interactive learning designed specifically for the web. Features will include: self-paced learning, online discussion groups, wiki-based collaborative learning, assessment of learning as a student progresses through a course, and online laboratories and other interactive learning tools. The platform will also serve as a laboratory from which data will be gathered to better understand how students learn. Because it is open source, the platform will be continuously improved by a worldwide community of collaborators, with new features added as needs arise.

+

The first version of the technology was used in the first MITx course, 6.002x Circuits and Electronics, which launched in Spring, 2012.

+
+
+

How is this different from what other universities are doing online?

+

EdX is a not-for-profit enterprise built upon the shared educational missions of its founding partners, Harvard University and MIT. The edX platform will be available as open source. Also, a primary goal of edX is to improve teaching and learning on campus by experimenting with blended models of learning and by supporting faculty in conducting significant research on how students learn.

+
+
+ +
+ + +
+
+ +%if user.is_authenticated(): + <%include file="../signup_modal.html" /> +%endif From 40dd6fa10510f0c4ff659600777c4d5c9bbb20c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Mon, 15 Oct 2012 07:16:13 -0400 Subject: [PATCH 49/65] Add missing text to FAQ --- lms/templates/static_templates/faq.html | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lms/templates/static_templates/faq.html b/lms/templates/static_templates/faq.html index d0f2191b8f..7290df165e 100644 --- a/lms/templates/static_templates/faq.html +++ b/lms/templates/static_templates/faq.html @@ -18,8 +18,7 @@

Organization

What is edX?

- -

Massachusetts Institute of Technology (MIT) and Harvard University that offers online learning to on-campus students and to millions of people around the world. To do so, edX is building an open-source online learning platform and hosts an online web portal at www.edx.org for online education.

+

edX is a not-for-profit enterprise of its founding partners, the Massachusetts Institute of Technology (MIT) and Harvard University that offers online learning to on-campus students and to millions of people around the world. To do so, edX is building an open-source online learning platform and hosts an online web portal at www.edx.org for online education.

EdX currently offers HarvardX, MITx and BerkeleyX classes online for free. Beginning in Summer 2013, edX will also offer UTx (University of Texas) classes online for free. The University of Texas System includes nine universities and six health institutions. The edX institutions aim to extend their collective reach to build a global community of online students. Along with offering online courses, the three universities undertake research on how students learn and how technology can transform learning – both on-campus and online throughout the world.

From b13412d3b22d030c6a9737276d78b7b067c40ca3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Mon, 15 Oct 2012 11:12:22 -0400 Subject: [PATCH 50/65] Make corrections of UT announcement --- .../images/university/ut/ut-rollover_160x90.png | Bin 0 -> 12266 bytes .../university/ut/ut-standalone_187x80.png | Bin 0 -> 14625 bytes lms/templates/feed.rss | 3 ++- lms/templates/index.html | 2 +- lms/templates/university_profile/utx.html | 2 +- 5 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 lms/static/images/university/ut/ut-rollover_160x90.png create mode 100644 lms/static/images/university/ut/ut-standalone_187x80.png diff --git a/lms/static/images/university/ut/ut-rollover_160x90.png b/lms/static/images/university/ut/ut-rollover_160x90.png new file mode 100644 index 0000000000000000000000000000000000000000..4c4422d5447839022e88c8f7754ce284b55cbdb8 GIT binary patch literal 12266 zcmeHtWl)^Uwl*F-xC9>@0yDS`Zh_!#L1zZ{!QDM*a0!xN!4raekO09#Ah>&Q4R%TP zKKt%d-#uT|sk-;ynW~wd?zNt0t$uoSztvT5l$we>4(1a~1Ox;eg=aDv5AExRuK_yR z!>@Sni(doea9KULrXvjQVderwkc2pzLunQ4%`BlBP&0^^^8gh502Z^>(u3SGrsHL@oDBz&E9YAXh5e4Y-DT9=qq@h;U&%9lr zn%*i}7T&fNLJ)ws7$(?L_<_J43OA$mw6}9`74{Sb{N@#Y`2TAd2%!D#0=E?f{E?KN zvKpg518 z^W<=FrT<0o2Zs#Q)xyQv32yD^K>Lf+%-qopE(!oV*lGVHhgkf@?d0ZS_nR4F0fgE? 
z?V%2ES0EQB7x3@?4@ziQTUx@<4g_*h8!2!TO5x`%{{{sRB{xjh(ehU|<8Qjt3Zvix+&i@?XpSb>E{mu1v@GmmB zHQWyRH>!UG{<9+fMXqAnTAE(257H<(S~{9M2w`Fi{++j-^}n3Oe@6oUx5nD854%wd z>SFKuZ|Cq&bNolzVBo*x2LFB4g{A)xQQGlg7g)d*9AJ*Wi~OH`tR*2XvYv1=h|51V z!T)QQwuXECYpce?x$(O?{sp_5g(KvlNdG9`|A8Ft@~_CXpmw^BE)OTj|A>{fH?#i# z*|lB%Vf`)RAB*?5?fq!EDPQAFY=#`{wKzJ6$JO~$shh2~kN zc?G34+Z%!!DH8*=HV!hu5m;HJVkr>z002LA5ZtP-(nb z?@1}L{Zq_3x~&WQ(y@Ph4E391N+|#f>&93!dJs|82b+A=b?M!{)bhA>Nr0ueA}}Mi za(=QuS4`1j(J-y}*~A7aPIN}jlhunE_}g=f+s+SE{F^*q7x1lZ7-$ZSAkI*Q{nXux z88=<;pE@cvjmknUj3oD0&RguYrj1L!J;OGTkro6s#9n-Qvyy)KZ(cmhSn{BK!$lc) zFO1J;!b+lJ$pOi1mp|B}H7?@%2k$68h^k~#V+0l_&n30&kvgy_z6j8FJZQlTK@3O2 zQf%{QHR8C9L`#1R|i5S0ytjfQ`0Rc;5B za+4D38pjEd#K7LnyRROP$BLFRb&n(jRx%JEh^Jnid_Hln-1;z|Ftx{&rxJc#-j3!d z57fYnem(fD5H=iRHFGJJ;v%#b#Dcb?Duo}}t?5Bfs;~3DY7s}%UwBTL8UGNCAcv*Z z6LE&0Qugj<{Tauz?jp{QM2l324jZe)22jt z%`jQa;T2A2CFL6uOfrqjAhb+Fe49FYl9<=x1mU_>;XqDltx^f_laSO(HMHP2&9*$4g)r1e`1n^Ky-e`ie1koSgG~BGdrcLeoOY z`}uW}$wJvPmC#kL*oI@Ier-0&04R@2TN|=PWhT~DAcei?e2Mz@XglampNk!J4z(Wz-ubhe$mysgj4V$=nVOd^^*_&d|>d*7E; zBKhfWh#c@u>#FIxKbyED=IQM572q6H6EhfJkz{#-e;9O{xc&@Cf%--X4X5Ty=i~Yn zg=o(_&A9GP{4{gTUS1>kZI0mE;qJM{n1oEja7HX(P$L`wdwTa|-10@xhqtAdw3dke zH2t5>c?mP!4*Q+&Mq(X#qD1tk?7!SBGBJ(}mzx*|x$Gfg=Tzl=L(cSvU-r98_V?t{MjoGDAfxc3UjRUaD#=X2>#rg6HpQ)}j?NWS-}u9}6N z^t&9#8h13@57BosgRe&w_jny0j-vGwDatBrh>t~0$oU<=^UbY2e^E;6QtwG|L4GTi z?S2#Dg;+Z^kP4GFySx)hwWdmK7c^ef(~wmaxVS6(B@%2jJ?gC-p?KvkNa?lyRWZr$ zbJl3GKTU`SZc-WQ3KI@UQBxtzcQxvM>GFw!ZqiFyoMZ=v`M!&$m%Lf;gW!A@+w4F~ zq@~l0oh6P>g(s$&AlyEa*~qqVIU=P}nc?Q$quNSzpAelJmxVB^-TLLk4-^QPic3zo z+&NNQCBZ#guA#k#2vJE+ug$ARFZNS)z>?8>PnE7MSu3*)qf8cBuwC0-fq*jfR5oEQ z^DFT5OPzeTU9M=$uVk{OdD>P#&(6au&%Q=Kw_;Po7dR4{za~N^Nl>BpOaD4-Y9!GV z8{~<+9d>VqKrqj&7#+YsOd?N3bCE<1@XLe?g=i-6WGqD&%w2HW?=>(eo(S!KTd$6# zjtNl(V>nvSxW}@}Mf67hs6Z)u2NNe!=sjnrYG9A^hk{4M> z)PeJj{`6CIY2hF^w!piMr5{w|cN4L@;^cwx3^l8*MdB+XU8Z>>f%f=wRO2z1cRtUu z6h;WSwVth^R_EO3jND^R`N{~V@x~yku#=Oz-fP*RzD%NCYTtCd=kwjKWQcJlEKSw_ 
z`Jyd;v@PuZE|52wv@({oK6myV;4D#P1@&VVZ`Tyo#jK2tDt~8jOmOi_oMsZ3>X7t$ z&J^v+$;wi)YNwro?2kyLcf0DV`y!6rSr>~hWl@rL{Jz!?iCfJVI25t^rQ2yQhYJ-R zcBwk_G>^B3lZxTsv7idAuSKiE0Wv$LM-KCo#Z&B=BJ=jJx=_~@M+`J z9-S^u${hrMS#J>&`oJC1B9CK_r67_rZh@26#;kggbr8~|ikRpOkCMm8cBvl6MVQI_ zS>!HTZPc)d^*IyC;)KfOPk7M4%9V4JI)yn<`P!6qyi>s@MAj5pP1|De0F zGBxl80EG%Sd)f_wLX)|RO1mJwU~0Frk#dkzKQO$P34SrXbbe_zTApZK(_u3#l~+#V z-%c~k!Ex&Mc+NrK6RnMr?-=$|6K;W{kHfHtx~j*Bm=4@SrTsOq!jfv*v3N@pf6^B< zT6WrlPS0N=`xhhI``lrmUZqH;(LjX*1~}iWBt2tbQ$p_wLR3QMUnzl|E{*17@g_OY zyMYi^hfF!ZU7PzGQC2qs74bQzLgR}GnA>lJigwe7DScINvW}<#YM4*GUo90R zQ$sdx!b++RxRP6km!=2$=!c}F&X<5iKZ+J_fkfu{Wj@5`l|AIk+~`;M9nVkyL_HH%fk2CQV(i!h3F z^4t6Y)L|qPw=P36kKY~a$%iU6b81FZ17mqrDe0ddSC-js8}Y#7J{D8Ik=3_~wFF^g zCwoqdQ$C45vJ3Aa*l-P0i?n+6S4m z^eTLkzmU$wTc&@iR5my3?77Q^Gi@VdGgQalpoMI04jC86Vj&akrA8h=@fxXur=PXU z{?r{GE?T^djoP|A_021m$g>S&;v?qSze1Da%m_Aq*aEF@JN*-?rl8^{-$dKR1Lx)_ zqfgj@}8%y zDEOx#+zl#_awlI!AUe3TqfY35>6m9g8XL_PPXBJaW;qc#b6lO5`dNZ7#FgM;*<+}G zC5(G+`ct^RmFYK;PCKE)421Lyhc?hWm6XRMhL-3piJ13}tf!&yXmVCmH=;5i4zPAA z?2}z=&-)|}5Z6HwCG&cUA|W{|Sui&#DY&bkpa++{(Fiw0GQeB$30dGgUq?Kr^efdz zCgIqL#^PnU0R(=`PiRrpzKg{pv_<8;X7kf=5mPe_mckWIax7bVO`rBi_=}`Mo0V^< zE4Ls^^+~Az1@@Lm83cCzoJv-pr@ zc|GGq>tv=)deINJ{Pxm#34nBoc&NX}W1HxcL}R1yPZoVE-80_i9mb(jX2+{yt%hYI z>r$GoJiYN0G1}!0!Jm>4fm|jE&RnoOaV-93D&Y!}zW6+byb(n)i{4_^o*$CMF}*CG zkf1i0J3rRsGlyBWUGvY+s7|%GSt6<8RA_?cwYGOI?8MfM24ZQdW+nBz-dfU#3%92#y5@9&hYtiqO0<(HBLo92L+OyO$; zFNaP9KYD7M<{+eSC|t>4$v06nZ~#MIN09r_E!uKr4aiT++`YUVp1o?K+It>M+Uvf* zDQEmt5m(Orr~scty10af{kv8?)!Ri&6jQTsZexPt0qrclbpI);t?5?eiZ!1P5g*7B zp`WHmbJ7my&mFu(5R2Xo4N1x+`?a9++Te_jkK@rNXCzz5g2+Ngc2`qOv0*qF*<6JJiJ8avW{&TIWg@9w*yN-@EQUz03RnjF_#;Cv z-p?DIaUr}jXOHn3V_2=3rbAIX$t}mz>Oq-Bagf$6fH>>p=J!0p3rWyb08H1kp{=!R zFS_p4TQNk5ljR`qDN_~ddg`}<;R=+$Orzp+rtHdI4rcUc2gVvRGa{5!(c)b~Oo;#C_I? 
z(KLN+rRg}y{F;&}{0Xg8wt+!pjAg8x7P`y|CZ#JyKCP;Jg+}6cZN@U%Dt5M`<^_Dc zAGNP_6VdpV6)PT|havr5_TO0?w1MTIYdJUvKD>7pB^b(F_HkF_lg1+mN zsM;4ei2K8S5zZy$5rLJmBW+BAHaqwXCe`+{5!JdL-6c9~Ni;)Kv*AxOD8|c2Oo|Mu zJJg6eZHAai`i}(EPiI`0(7Pmo@U<>&O*V_yDJ_y#t`k?qc1iq{MF?^zmhzo{#Hymc204%#13`+`R z4v_`LbGM#GI=USW;*3OE`5sKU`ZTJ$(M?lMT_-%~=4%_CHiW81rm>KB0-Ov+1D18M z>HG~qhsl9D=Tn4uk#Onvx1L^(%FJ%wW^p64>?zCEdLCGe&UMbnAj zsX(G2ib6z%BoabN!3x}C+_l{&w|#1r7_>_EO&C_9F2|GGV!@@A$FTe`vue8;nG|BZ zvFSn?)rP$l{QtcITuvBp9OV|v`9NN8~t^`gL*49UWtVr z`9^agMyj~3Wy?O7zAaU?(S8Vs9bYWpED$ML`IKn4885+BouWwS2vyAvdv&9LEaj5`Ej6YD z>@&)cY(nY#(V;tGME<0XPuZF7Uq5)M@9M4u3XwivZAH<+ln~|%X&AFQoNkIE%X!Wf zu83H7IEcB1bt&qHe&sG<$qyjH?yj0VXA(~rDiJ31aRcSAk7X=5uf6^cU=c>xAaL@14d!rat$UP8unVg$U;jHP-WU23>`F zyWe-a(bbG0D+#Tw0X3|;#rK;^!nG1FpMOY8AeuZR~N%qcG<8w=ZY&0(c2T$Wb6 z^T(T-Pu0$)s@YDYF3Gn8j6;w|r4#tkAIPvYLC43R5_WN-On6u_M9DV<6J=6S#V>!Z zvkZR3{Jzm6=wPGGh^T z$+jo`8HdsssCJTz1?8OHeUGl)S>6^t`uR=Kl0bQ7kIb>?@_Rbnjpx;+P#4LrcNbIN zm)*UVG39oWEZ0h!oSf2znR#xvyOkX3SPTeF@XVODpDP-~urtVC{lv&Fy8?a@5~EXO zG3gbmA@;pB^i@h-LwmRG-cy@$N{vl&VzLnsK}gqmYXO6ZPNmy}+u6wkV?6mdSCdR$ zmOw0*%lgmea-H_%Dx=@6uC{MR#~t3ZGx*_zG<$^J%`+)x-v`=$T~Qn@Ebz&#AsDwg z2~(AcvNysxTieSEWtzu+kx5Rk^*+)*$7-v(P$4pyP%`3);cSR)Zb-n7d)oJz{fsQ1D})X4Zcf(G9sF?p@QG)d=eG8a)Dq}h_#WB066enlt5)YB z9MgzjJkg=4kbMhAVgFF7GG)GekSDj?r5~voy05C;y|GpPQ)c*`6Xr-aD>wPb+jB8> zdlw{TWBIzOi-UfOY=|)~a_~6;c@3%}f<{-5_ja~+RWGgA2T7QlT-Oe=)P*X70b%A>mhU%hF({^d2gkp3$Ppk6Xy4?I9T}1^8w!i%7e~wXw`<(ReQ&>z{0v`6 zQz^)b*L{m<*!rx5u3f9gUylC`kc-S=vrK|&vxR!orn{Y<4VLEPiB#_d0x@(!QWO1? 
znK>GX1S8s;qm^l7l(as>p2sf>Nc4MB2Ykk|2LdCrlJ-r$OvV)&x#1V(p1O&-3+PI$ z6ddQO^n6vmyo~F?1E!}nD!%8#(5yPXebut1lFdnVR2q`6<{2S6zxQL>AS6KSC&6BX zjhRpt8B88~BF_KaVlyelxi+&gH3t=M8`$!%+34MNMGlxZYJ$wxo)8lK9>_l8eKBn&|zA}m+BmMKEp^X=wK7I05 zcIAEWTZHHhZ3&Zv=r4Xap{sgG?|-<=b2*pF+jE+Nvyql4@2c}`8-&$swbE~-1uuRa z)$qvhIp+=uG$NZ5W(~ioOfs9+aYE)a4aAID&twoV6~4}ocShjNrV&TsU#6a_#j0jw zZM?y&yPGIj-Npvz!E*CZ6z!>)Z;yWOlDlk}ef@sQ@ zg|Lh`*sp2BCHp?vMDne!nRf|Qcpx%c97((WN7VK2J@18dzKU4rO=O2bNz7FO$Ph!& z94{<8RfD2I96_5^@|TCgC>~-FqcI;PVAZcBbE5*>!UjK|?#ag^0+Kvlqx_9ZD8(qOmrT8;WRi=UKIB&&2{zTrI_XX;nRU zbFsb`@70)FzMk@_QISckbr6c~TZ#=aBVgxhaI-BDyGED_3mDu>VQe~QTbMpX4_S5? z16)yO^LE`(8e%r=Vzm0b-h_kCK*pQk%;|UM>j9>HtseAc3He~QzE4fY?`GR@>M)OM zR&H3+ljKj*GlL@HL-8m*G zLCy>r#z;T;$Sd^bMUf1ft*Dh*(Jz{poqU#Y#`Q|2(jA@XibpYL$5i(GF1@7mi8?DC z-b2!nRMM_r)oB&hJB406=PbETh>wTC(6f;b--G)O+5!{~xx8ZHs6KL4(c;fu<-ki;e z+Q_2LV(r4T;3#%De~KJsKWG52uz!;H+tLBDP)QW|YY!hM`I<+iwRoKR#gZWN4uL4= z^30j)kBxERfI!*h)rQH@_?s9~`Zv`mlfbq>i7JX9mB$PHA1fWUwi-7Gx6@9Iw8+I| ziELCFDT#5atJmAo3+r76(%!ma$_(Va03f7$xoF#Mgn%1u@Jqto$>q}-eQz97W)GdT zb~aa4U^tA|j(xI9x!4jMCc2$|&+fcLaIO<1@0wNLSBLJ(AD-bT#HjR5S;_LPIA5+|7x8>{Nhn^4}<)}7%K@9|Lxp^$S#t~1Qp$MuMBu;i{CAX*9mzwf< zG^U>#{>VO~zqAEChE0(b?#|@SR<+oOrMuzfe6oh8Bl6Trmm(2b+)+|PtpD|cgb{Aq zmp~^oy7%6wI#%8w1M6;Lit(`wapHA3DzTlN&dNo9nv(;cCvTK#Rt#&H(kIbEBhQY3 z*-~#kIBjxo2+t#=mjz}X!Kxo$)U-}MQBQO8?-CG2&O#Ne1^26$Gu-E`8JQ>sPi+SL zOc2_w)?k$X{)V*hJZ@^Nag&9u?E}Fot5d`LH12iHi_6t-tz;Q_IIRX@2gV-62E(uP z49hzM3~g$R4g$HJNI|XHzOeI{+a+RH9}B|XTpr=rW3drec)oU~)aV@vnOg3Mi-AcO zPgtBkrj}A4^q*s}l^ZC*M@Q*KZw$8jUQNpUttFT`3(Kc_#xl7x%_$NIxj*d+%>C?t=jd)w-Xe!in?u;D|18wOj3?w4^xUq`Z_UDS{ zDI-N&8ytV7hDAf8?l8^ItVCPZvl=d54@>PJPq6TkBdqUbGo>AG6RxG1Z_;`TAxF=BsgE|E%zGKL$;HXFu1P^Tr+%@P2H(vPt z&1wD)`N!84FdCIZDnkZz-%6X#yrdr#*MN4HBD@zwddoJ}a)IeKU#bFd+NGjcc}MQw z*mPjKF&i(`;$QE~R$GtwA0rK2u8xuI+hj+NryvS;V@A8jO?v|Zy+|m8m9`tsy3P08 z$(HQ4JN#_+95F^|CNBj&YYVVzWXUBRDI$5H+>hHFw8&uC}->O@DyW(-O zu%v1$1;4BmJxGWV1vPM~S$Ap5uU>8^f{m1d!dZ7dwP*-6nX@)n%OlobB(T5z%H+Ld 
zRVk@+%RiSh7Nw0Q4*5Yx#Hk;=r$Ap5UtkQpO8nutFB$SQ_~L~#9ow8!uZsN5_>ez_ z(qV#(YQ0;4v`LbqPRj?M+$1Cn*=PdAR!obkc3e?;+Vh4%1#MEuqqpU>Q#RE$h1h4tX0vdVH<-ZBZvI*};Plk|i1sg!&q5PRT6|IW zwB-}TcYWb@<&u3fFP!uy^++eh_sE^ZJnL}>A>-5?Ktcr6ZSnE1job!F_dmft7dOE- z*hR5aIn%+5wO%xD?+c1(nn+*Fj*e7V!?(g__x>WtmN zu?SC(Xt_l>w4;x@&r*Rn=dZ&c>f}Ag5$J}`f_W+IbBw_*GFEc z3FnoP_!#?|CuT*4sxi?I6K(zMh&u;nw4W{gEQ7Ke4K;%2T|No2t5Fx?Yd77my5cY8 zwyDija4{6#b?Sd$q$G&D}|PvIvo!_B}VwiNEDND z!-SU~4K$0-Jl1t}EkHurC=47#o{F+Szwl9Id>$I0OkS#%Ffe~{x~QfLJ8CIY_H**^ zD?W@#&U#~=q;!}8*)M4sa@1z&9>mP1u4ofaoyMYlPl0bdLCLS$2h{Fgi*mJq#Jt#c zd7nA4r!Cg<&A}BD{NlPwECmwWF35B#eq`ciFS1~^=uSzNlbc%G@(v7DPyq{mr#vKA z=A=pS=dPo}$3y9`hM;c|9~O}>k8b^_!2h(6{G!znH8ZEn{{yFWW)aJ!NuqG^;ERtV z>m=h0UHdW4tW45g2KD(8RT}8fDmbPJU`%gr86%wPHaAGlq`Vo|3G4cba$M^flOBOW t8<}Jz!|#_ZkY4|}B+^zO6C*_iV5_G z4wH+O?W;8e1W?e$7Gz*y1SZipGBLB}Cp&5BAR{p|5B4KA@WnyJCVCG~a;b3E8<6z}vWoIB^WoG6EFmnOem>F5Pd6_wQ*;z>bdXWhr zk^t=ujd>NtB>r;u+T$lP1%qvQ0RU%bXC`MhCL4Pb01FQf4}h5!z{<+_s=?^sY7GXt zFj_m1{{itQhM19qfxVe6*v!V7t<`Q81KR$)uJ)8 z<3GT_W?)OBe}MX%zD}}+Kan@ zL5B8!TLk}8E@}pL{nt{(*ShgLJN^Z_f`N_UYm)w%zW)n2*#2LED;rs=+StEVkpIU|C{tTjekzwKP>;(+4%F`Ygu`%Xn?<$v_Gfmuj2E6*8FDc{|x;v;Qu)J zw;28pUH_r$-(uk3QvOeN{fDl9i-CVj`9In9|BWuh|K1cCS-;FiZ zueu4MDe(y*A>4OVZ>Y-rW#r_ReYeWz@IJ_n2Ji9dBK+i(huCSg)%B*`y<<1yr0J&h zTb>XJgfPq6}TMP~I$Pb>(M;lETrs^$O2!xX`40Y6;vD5ZR;;8CP;?T1w70&B%UD zaRV4iVx=0D2=P7)0Y)Fan_wCQE+P%4PMXG`iZ+q%dxxa)`f?~j0c;Xm@^LK$bcF!0 z-|AtYWU1drf+~&r=J5&`ZHMFquc}4UpWOm0W0wKUj(%G3cLJSRotI(Vqd)N9D(8Ko zZCkGXnI*T!OQFz9ySC>*ZX`x^A64IdcuH#I0jcqgSW;g)Y<6~{yk2w3HOgfl%izoT zii(w%Wae^5Q*QkQq0<;3r8$5WbE_rn7D|Wy6I1fCU@o)g>X_L9SH%~bw#K?I{7y^r ztXOh*P|`JseS+FB-&TV!WdQy~6qo{$I86>aT>v=P6*v4=a|BiZap=DD3l8~I97|yv z*Y-if^`SL$i2!a=dP8Qt9l25yvz6cD3(N!K=36$WWh{C(OOekqa_zys&d=cfK2L=r zJlp+nV&U{mB!ql=#7rcCWa!}nnA^;DlE}&ELCMaO;PU0DgelM-ua18f=CbXOSP0vm z?PLkYsQDp|ok8=?*whkO&V~uING|5Zs%qB+KTdTurO@3<@w3b`1ngSI!loVvPYlZy zhABp(>FPJ)0RayLXk%Uf3|v)c3>G)s6lfh{Eo+G-bw6JHAbv;x?)KM~A|mfV2Wmw0x9uDI&tpT%7_A`k?!Mo}FX${mA#X 
zIQ%S*m!uZM;xT6L_IO*dn^ff_Z@Q>(zDBK2&{yVH-ZLBOvk-h_JjHv!Ii}oO`G!CH z)4+698Pd@YLm-&&7vD{fmA%MF1m(kqx~aN5nuaGQ#sI3n6kWGk6VluIXTA?Lid?l` z2?*0PgLi|@8g5vjV&RpebAlWt%`yiDt2eACqvb(>J>>R%MVd`a+sE2^oVqo)qx*}u zgE*P~E6b4)@ek`0QtKE>MWu>DmJUMw6{I&%cqS0ifu%9Mrjt%NkU>L*A<{Db?S?cF zjx!}LCu|8z+}H@vu(=jDoXQnTKAEM`%lOgXR=X=2?zNuYClPsb+Y2WVB_iHIs}>Ca zH58`bph7osQok?F56Zh(`?S;4^674zH(`YbH85FDvZn@h{Ua90!W}j3oNe+TuA3@S zxKh=x@mB0Ig>iy{W~~MX{p%$eXTPuPSLlU)&H|aX(}spzukS2UGjz({Rfx0Ja2`(h zbb@Zg{b9TmD5Je0%U9CFB2U5Db54wg%bKLJwItX9{ZbSgl&VEvh!{&ax(_e>-LFy@ zpqaeRKkmtdi|VWbM7AA9svXAgJdTvs-pvTLQsIUnH^<_5Ew`C|WD=j8ABj=og&aEo zYq2?#9&Ezt-9NUh0a{>f_%6*J!s3w5J7CC?d*$ybVB^z$n) zqa4d%fDCl-o~=382ql?dwdx?8nObp&SV9{Odm&N?P<}MjgH{r9)wr9na6|EA$Hs)R zCj(qDgYv6#a4vQkdn-~fj-)a|Ok%?$F?h7(s4sb7_ky>{srchR72@}0^!Xx z2w>;HrA1gcD~8&5bh2MbUQp)4sdkIoHD-+xDL&E#=Fh=~e!zI_T;3GhJZSTojKp+h zarv0#&oP{v0=%J?8lmf5D>Ia37?V(pz^i9mZp;w((P@Duwx0Jl7_)M5|K+4cAZJ)a zq3`{fJjkTD%e;Q0qSPSPd{FY|*5N4uTiQ1RHib{BsEH}!QSV09ITx9q=Q_H4RkI{d zCOHj10=o7NRG!u>d+^INc|qoH?wB&PRZ`4cyj;dY3nDCvyP56qM$3Oi@bQ6oSKd@p zqqPopbsKt2;77|jQW2%`!G*FQ_R<@jvNT^jN~TZD8}zoxvk|y?GVnQqD505fY`ZWG zQwbi-7fv|&Y8+}wzb3CAe=W+bFGh8R<%en?s^TekF&`*ytZqke9Yeud6~J%Fc0tk( z*7Q%FOim=_WPGy9=hm0mx}tZyTe;PUMQ=*HHb<4-wyXj{Ri4&=QZXEGO)PHZh`e^gW?L&nh8Xt} zlO>ypyT+)}KI-3}wt3{uqSCvs5R>HBHgn~&o2zN}!j`Qcor^Sb^szBF2e&3!AaHEg zUxd7`5zyWl?|Vi53Wv>MOq7$VC3?2DGeeH>g1AM{BTb`6bWG^3bK~ z<;x8zYZ0)ED0l+Jj&J%~D$#bhl&p6*m|F>Tx?WZlaiZ;1COSy^O%Qav#ZM*JRd;AS zE*tTOc8P+>A;~VIt}6AfT^IbcLJ}-1O(p^=);vC_TqiyZERb0{@5y8~NMhun z(tUa0y#z1LG0X-b!o`k^yAPh-~*YW4sz+cjPor>C?cBVHN3-Kb-};;7v{jKUBN4ubz0fy?6n&8M+H_)E;WQ z1iw2ZNs^HXk-OfDf(DKXn%Ip6?z1%=sC?tx^_-?ni%erACxxYByXCXh^T&{tFsJ!F zNFj1DOjPGh+GOWgmT_UhoD&n%93R09k&ArG*n+de>`7+4SJ}y$TxZJRW&(tpPlv|l zMFu*oRp!lH9GR85oTERCeIh2%oUGnz3n~Upx%^OovC0+LnQoLSiG;OhlKuflok?Bn zb%r=EcKnTI_->z19UR4~4;<$c=18Elu$DcOit)Uo;pTm?z06ErKROg$f48YOb64F> z5KL-Ny#BQM0v~UmIN$7wtn;|HiB@u_V0&_8?66~Ou)Z(a&>gl=D1AG(v2c5t*s(Se zt=mN{^Wr180!BB+>L>X;+&^i6n0aD+b9@HFw_#)2X_C&h8SU+j#I`We{xt77VVI~N 
zNt1XrfJn=RMk|vNyz2EN_#R8EW4X&;h6k0DHBxDy(=RhtHG1Y7chu;G&3>J%ItP^4wLm6;!TF)T;nHk`@ z`S{iDp`0tHupU`5N=5%=-5d9%x!6GH7?Q16<%b>O18FB3iVS>TkA20#U zqYjGNVJvc1!n*T|$PjFG;F~78U-4osiIoMAL9cB#$7UeXV3ZX#v^)CI;b!FOrYucF z&DSs-d3*7g&y`8e{;;FT;)B$5KJJoWgAP~o7ueb7M{1M~4NY;~kI1pNT zTymQ4ZV6;K#V+9~EBD%S3&AGt<)RyKd!JlpW~6XZpfvLlBv;hJgAy0*6f%8LvZ)3u z%Tc%(c5HTJ2y&i;cC&879r0r_6GI948Vnv9tW5QOh~+BeY?$&qc+#T~0RtGW@0tNz zoyOm?TO}wa{PS%%dyY{ePtc>^Z?m{@A3XSR<^T8^!^=sm|K4lM8a+(^9X2{DP`oqp z7YJZ1r0le@XLC6;WnVP%@O}OGGfc<>f|gcG z9t4ZfKqFjpl!%vX!Ub9^!9bR;^A6?&8My=N+1z3X>oP`P$)iaKirmJaU>@Z>>Nwid zo!+u^&gZrC!Woo}U(hMf@vT-Jvc~uBAMR1(SxU(JX>F65PBYipu*GZL@{(7r;5hgw z?P_N6glw$H%~u`}bpX;R*U>BEqk zr4?%rCiA8mlLK>*WrVc+Kt=!iI%lq&uU`+#B5kwAc{p{Uv)Nr)KYW0#hEYT5_#Q1D z{+_kvEN4TF^5V!m1y_9G+Y3pNY?`e~mH#wO6QJ?o;8JRWB+?``8o9rxJXia(Na)m2 zuz;%ePXqSQZhonjlx>3NX_P(%46c-u_G=ZZgDmUwvE z$L^IKh&v%~npz#95~?DxzxXa-J(%XpLObJp?Ix5pKj3mhncP(iQY<3q@~ARU^jN%B zJ7S+a+-_Q&yjqQ+i~M_QMB2|C5;I`Dt@Vltt8l)`=DB)u-nTiN08y{I0Kj*Lm&BA$IzJx140X+x^0caquRpfTJra z3i{r=#9U#1T)z#d3Msj@0!YtfAh}=%dES0ys?LX;nCXJE#PGtc%q39H6W_ZrUk|FnD#WcPL``uz3v6%qU}wH&hxn$H{U~J(YvDd9Fper1w2X!8CUcfsE?B zJ$uT0xG9+>s;Jw{`yc~5Vow{St2-$*-8qa-O7x(e!p)KW+D_biC&K#tepg@wG4c_u zMzkg&{PBep^VSYAd2R{T*NRp=Z9If#e!6+Nx^8`kyz)7}O~N*sN?_CzL)Ya4w8s!D z{K0exc0M1$DYb^?`o+DUj&`A9*R~U!EAPwdHuz1OAY4ZPuIfiC?F1`{PiRY@9jSe8 zVF*es`my1nNVwuDN3CgFtM!pm`QQE=C6M}&ruWNztc6(U(6NXN>S(xqGlFm+n2iVZ zqeO=ow0Io_%}=T@wr+PX>;=rghJf%`b`3Cd@RRl13l|f_E|U+udjU8=T(75R0;gzf1D3%RcgG_+|ZfN$%m3`^owVKO<6)4I)h7Y~IaL2LD!_iLhz2P24>8w((g8plQ)TTBiL}Xgol6V$ zn_CIzgcp>lmlDKXf=)Spfp*;#%53!=3p4wQAS~?(WgdbzyN-AZ!en#1S-d_b4HDJw z;~EK90dHrJHO~%`GIO<6bbXwWud;vn0zX0(#uKb2$ci&5pDZDm+R1^P;xuGca5S@RBtCQV< z-cI$CuxW(-oSs`AG|H^3`6tBtTc>t4@RZN!k=ujDLU1c-A{2m0-KVwA(6YoLU}@Kv zY)x2K^lgU6=5P!wf_#7@UjK_|8Rbt1)3_S=;N3Ui_C)@G{q4G~_dH29Q*_-kb?p7B zZyQ*jC>c?8B+{7#A5iE1X_(3A+6zCQyC+W%=K|mV|vd2@GoLp|Wj@4IcdMEpV-w%8;aI-b(t|T=>(O3|j)U?GoSa-LM z49phna6}60hH3PYCs}p8V{xJ(xTemxiop@8zm`v5Tvx;xfZh`6UqX 
z<=lYV^4cJqFJpcm60xJyI2;Df<1cFR`Q9%d>-}Q>5QHQW@gdY?j%~o~n*r3@#&c`p zRYIS(5l9nN=pV4iT227Vpk5R)*EQixMH~h$?(bU(?%Qoo4;r5YcH`YVVfa5y^!3O% zxdyIxgdCqDmr`W7N%^1PoS)BH)q!eLW-snIT)SosS(^>&&am2r6XnwxULvNxmBN39 z(I^%478seLIT;ZO(~~r9a(o90ya`k#NSdl|gtW=6f3mC7xVsr0?4zL%tbt3E%;tw zR<)jhH5=8f8b9kCuy%KlN>UQ5NN_LMxHfi7Ss!53Vcj_QjJz^z74j)$(xnVjBR1O3 z+sxLj?C22?bs_nX0IU`JIujN?47={^U;;b9B)$_cPbDFuP8GE3$jGkFtF~xqs|jR& zQ-I9frUR{?>PK0$KS{7e0wsBS?G1FjMDeI)AhBPb$6OcA4-KWV6`Uk}zK~3SjzHDy zSQR7L9{6Hb8#EVK+0W&ykBvSSrscS?joR9qoc!k0-jA01!2(*4g5;vcP-nXPh9+t1 z*6z4H2+243QDt4ZZxf3Z5r49d6l?o7#SdwBYZgJ|U8;wu_0ygGbpumJ!NQb5(4wXB3IHYdRRgL1k=pGF$?=zaj@)O(bII$`#8s}& zX)*NGaPiv#c_K(HPXfK~jNe!oEtTzahUfFLX51t9m<^q=S6%#!d@#d4onh8<=(Jnm z@T0mC35g~XCw8^hN$NZt;0mCu$wmt)4ulNx&tcurSbB&oI$;Q#Qb81dL-d)5Te9PT zHtY+hK-Op@3f6HU zB$VXESvSnm-U>+0byfpWdATkmpxKx_mDP`&+I{ua{sGsb3@aaj=es4Qwv=lu#tN%V z?SNDYL2>4e%O6YPrH5Bg8XEB&?k9TV#euIRsFW0PUb8y)9?%=su^e0t z{Wf-=gEYSwGozQid@*W9u$}0aCOwIP-Zu&wA@BJ$tZx&&d7Lq5(z;Bf0C!Z^$aU_| zjCY8xn=Rv#?(rB)n@?zBRq8ggRD;V1T8hg|TG7iLwcQ$Qzc z5h5rK-L9tOI!|0MCj)5vsvAoo*8yjfx*r@p7u$k`o{y}{*fqVw~+Sbv|hi35MBOy+Q8Q~TA zkja?eRRS4luh*9}=O;@(t}#ga(l8)+JTTN1zu?Z_nKZ%{L$}&9MK?b8p|3v~v1-Zf zuH+Tm$=cJqjzB=VLVZfh7R$I1@MTWk6K%8wj9P$>$;s6vuqf><$0=CxEXLQJCDoB) z;@8i6@8rV7GPh{9RuPYY`=%-f8k9&3k1*OGvYqk;pT~t{p`#@Ix(xBO_V-A`<6=Pq zL^d|7;RUU&Yob%z?`Z|BM>;J(!W9sq9^vtPDZ{Y5*oE5<0k0{)KKYU0pqqCmG`Lsq z(I0~agTS2yYe6#)^&h}tSTSsP3FH?-1ADLAe#SDL&&+;E!?FWutINJqen8;qQMyz^ zonF8wdA8JS6~m@+_V7k^ z!hsX9wd(sDVi^tUe)kwtJCfy`&hz*0I4xw>R-WHySUh7CkdxITc?BaxZQtZq>hrO^ zfMO1mPYbD8RW^Gz63jjH0bQ)eGp;;Lha>zBOcIjOZ{qJ-C_PbTQTY_pv`rUQBig)O zAqls)X*4~!w0>po$)V*_T~eF37O#Spi)$ipT?FL#&u6fb>bx{7E2t~J<~!bGgna+J zIO{DVG4tJ-IOh?>=i1~PLVVNFwFXG2v(qCeawwXt|B*2GjEXxfWbVAE zeqTuH7KeFajX8U-q-%tJ_!z%eOUW)FL#un> z{9(F8mtY=0bYp%<>Q1EFr{pjPmH%?W4DR_#*>rn&$JVKEn_?wR9~??30jKfpS8!7i zvVMPYd`(>L6@GQ}LMt~V+*^r+wTUxhS$qMUhkJ%9u)5wk4GY^CV!Wy!Y%J^%MhS}y zi-J8=D5T~dGJakDf&S1hwsKmvX|ll3$yl*U{MwDN3AW1Ta7GPqV;v5=A#?NS`?8f9 
z{8U<6wT2TKMK?NIs~lYN9o;vjDlo5K&}AJN@=AbvODF4ZzwGn=X>+VNy(<(G zE*rtL1X6e9s%~Gn>3lioT1Bot(s#`KpXDNz8iANp2*a%s1^9e1F*yMe2dKwM*Dgl$ zq*M8%RIp^Qv-C=yjXdp8GAP&v>i)pJWI(zALUJ^ISTyHxf01YKos8b;$StSrEDgyN zHE7~7ea(T_?sU75nb^ud6?wWv|JSA`)yPvUS9zF>gzM3BQofDwT4|<7$j4 zK$`dP{ps1xwdA2g{9|jo)+k21pGCjHL4NecEw*k>@l4QAk(^eMft=`pxJC`p10BBY z^gQblkN+h74!^E(`@rn4euYG2?JK9}K|wriMa#0<%@O~$`KlIt3zSQcV|;XK2|VXo;usQi5rB&Su-5K|+z03=c}j zJ=p=5M-)h~nNWY{g-%J)4QC=diWf~szSrJG`4gIrZ1IFHqtM<4>S*t$k&^t97IU(~&G=@|Tgs;v7z1y3?>q1ua z!_H-WJw|OfrN27)Dtt|?9m)F5Mbzoo0(_7dmznCCC(OjD{X|QO;Zqrt_iKXL}K9$?pbXX{I2Bj)a@XBb=!9 zb-ez}8J)G?zr27sc@!(?vz6C+VYU{=9yO)5sF_py!(9acw87nugmqX`> z{)82FVhvF$&gY|>i6T>w!(z2orZ1pv6v1uVWo^fz`oa|Xy%sCcH^DnRjPj2z&x$O@ z_w+d0Ljx-)EM7@U{yu^}$?m}diBTPc)j5Ujxrzih$32Sd1BO z6Qz7zXN{LgDua8MLp!``P(%^!&khb4eTG7aK0zf_fej*En5n*4Oqw|5_h$#;IkVC& zW;Bv*!)!hg?Dd?jq3F!ugm-vVL2*#?ezIil{G|J~Clp#O!@B!(g}|+>@7lB+E+!yI zELZ64aU_VzkOsiYjWc00z_ZerzQvX~^6A@@^#M85gf|I$k?w|p<4dc`fm+;n z+@9|@Ex6B$tR76*PA^YQIYT(ZVS42jy`;uy77W$g6vn_l>VY9Bz7AqDRyLvx8sX+) zf4ewd537;RbtV=s9p&JGw4k-vMNHpW%7E7q93YeS$Lbys>5!x-DeNF;d=5n#^=!IN4we$X6&inSAnEW zdx%PNh3AQ1*Va!o%l3U0sr`X+(Bk?~;05)n*7o;;^cbL)xj;`8LKGLZsNLN+`igaK z8&!q|#Lt}4zWZl#vOn1KNthr}XA?F`p^^6DxUId)iirks+;NWgdjh!W5z~2N*o@J% za3L>M=0H0mCwUL^P<6k)1XFRmixWUK$9%di)4Z;{fOMNk>Q#h9_cIYZv|!i>t3e|E zzGsZyo0(h9#ZG!~20`-d1x2NKnNk=RN&(G+ybqJq5qW^|^n(AZBG zw92hl7+7|85$!Z~TZ!Lu3}XvMayqH4x5KsDJe}8phs7MB3|~ru5wf&Tk4zHUzZ5lC z0QYH$$0shwLmVGsygt#wjy6HM6M4f_2*A~q?$6)+_^M#iD{2iJ#_OhVbP~WQjZ~|G zkS{w1Uxm0FN$8STtN#ttnc@K*drz!&03mY{#kCgx01Fv*wHX-urwi0?0cWc zrxr&mF`NP^h0lWlm*oW~@a^RXKF1y35Alr&VmOrNt4JlAT-4{`5e&Z*Ya|%1ssvi| zi1rgAHT8rTv%0x1^IKBLXbNeXp(%~@5S@s18FwnLNL(FU$juiy4|8%wyznKi0 z28;up&nIF-hDH&R?z223s|7o|#V}yU+mP+cVK0Z%Ci3r402qihF}wvZaM`zPo4<&Z zcd6Gzjh66riEXwEi?iXCXa;XS8srTOjnP@=?Zbhn-mjfFssU0f5z44b$7DT zC}iN}!oDNom-)8Rt0j+S#8xu4n@ga4u4LA!88?x7jT~)b7aihl%r5HlB#9eWi{?f! 
zt$H9;i~_?A3W7L=%J)h+{_tVYOaMXXZykDP4m$S$R@HAiS7%sD{bh0z^XG$FO^_4>-@+jB}=y5j6iLyHu^=ohD-3c5SZnisj z9Ot`N@3FQXw*U2c)&F^@^?x32y^BtGq3(p}$ZB`PNC;B?<4=py;&NgYB6@!R1tZ;Z AX#fBK literal 0 HcmV?d00001 diff --git a/lms/templates/feed.rss b/lms/templates/feed.rss index fe4b620709..56ca08182e 100644 --- a/lms/templates/feed.rss +++ b/lms/templates/feed.rss @@ -13,7 +13,8 @@ 2012-10-14T14:00:00-07:00 The University of Texas System joins edX - <img src="${static.url('images/press/uts-seal_109x84.jpg')}" /> + <img src="${static.url('images/press/uts-seal_109x84.jpg')}" /> + <p>nine universities and six health institutions</p> diff --git a/lms/templates/index.html b/lms/templates/index.html index 96af61aad8..b1d9925416 100644 --- a/lms/templates/index.html +++ b/lms/templates/index.html @@ -67,7 +67,7 @@
  • - +
    UTx
    diff --git a/lms/templates/university_profile/utx.html b/lms/templates/university_profile/utx.html index 756c9dc62b..b9378f6ce3 100644 --- a/lms/templates/university_profile/utx.html +++ b/lms/templates/university_profile/utx.html @@ -8,7 +8,7 @@
  • -
    -

    What are "X Universities"?

    -

    Harvard, MIT and UC Berkeley, as the first universities whose courses are delivered on the edX website, are "X Universities." The three institutions will work collaboratively to establish the "X University" Consortium, whose membership will expand to include additional "X Universities" as soon as possible. Each member of the consortium will offer courses on the edX platform as an "X University." The gathering of many universities’ educational content together on one site will enable learners worldwide to access the course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

    -
    -
    -

    Why is UC Berkeley joining edX?

    -

    Like Harvard and MIT, UC Berkeley seeks to transform education in quality, efficiency and scale through technology and research, for the benefit of campus-based students and the global community of online learners.

    -

    UC Berkeley also shares the edX commitment to the not-for-profit and open-platform model as a way to enhance human fulfillment worldwide.

    -
    -
    -

    What will UC Berkeley's direct participation entail?

    -

    UC Berkeley will begin by offering two courses on edX in Fall 2012, and will collaborate on the development of the technology platform. We will explore, experiment and innovate together.

    -

    UC Berkeley will also serve as the inaugural chair of the "X University" Consortium for an initial 5 year period. As Chair, UC Berkeley will participate on the edX Board on behalf of the X Universities.

    -

    Why is The University of Texas System joining edX?

    Joining edX not only allows UT faculty to showcase their work on a global stage, but also provides UT students the opportunity to take classes from their choice of UT institutions, as well as MIT, Harvard, UC Berkeley and future “X” Universities.

    From 57f49353d8cad3cbce25730272fd122ab91eb005 Mon Sep 17 00:00:00 2001 From: kimth Date: Mon, 15 Oct 2012 22:09:19 +0000 Subject: [PATCH 55/65] Fix CR showanswer template --- common/lib/capa/capa/templates/textbox.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/lib/capa/capa/templates/textbox.html b/common/lib/capa/capa/templates/textbox.html index 91aa6d41c8..6e44712d9f 100644 --- a/common/lib/capa/capa/templates/textbox.html +++ b/common/lib/capa/capa/templates/textbox.html @@ -5,8 +5,6 @@ % endif >${value|h} - -
    % if state == 'unsubmitted': Unanswered @@ -26,6 +24,8 @@

    ${state}

    + +
    ${msg|n}
    From 24d5c9162c8bceddca1b6b19bf3af1c42f0f7e61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Tue, 21 Aug 2012 10:20:26 -0400 Subject: [PATCH 56/65] Created Django App with commands to import software license numbers per class. --- lms/djangoapps/licenses/__init__.py | 0 .../licenses/management/__init__.py | 0 .../licenses/management/commands/__init__.py | 0 .../management/commands/import_serials.py | 77 +++++++++++++++++++ lms/djangoapps/licenses/models.py | 19 +++++ lms/djangoapps/licenses/views.py | 0 lms/envs/common.py | 1 + 7 files changed, 97 insertions(+) create mode 100644 lms/djangoapps/licenses/__init__.py create mode 100644 lms/djangoapps/licenses/management/__init__.py create mode 100644 lms/djangoapps/licenses/management/commands/__init__.py create mode 100644 lms/djangoapps/licenses/management/commands/import_serials.py create mode 100644 lms/djangoapps/licenses/models.py create mode 100644 lms/djangoapps/licenses/views.py diff --git a/lms/djangoapps/licenses/__init__.py b/lms/djangoapps/licenses/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/licenses/management/__init__.py b/lms/djangoapps/licenses/management/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/licenses/management/commands/__init__.py b/lms/djangoapps/licenses/management/commands/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/licenses/management/commands/import_serials.py b/lms/djangoapps/licenses/management/commands/import_serials.py new file mode 100644 index 0000000000..c65cc356f9 --- /dev/null +++ b/lms/djangoapps/licenses/management/commands/import_serials.py @@ -0,0 +1,77 @@ +import os.path + +from optparse import make_option + +from django.utils.html import escape +from django.core.management.base import BaseCommand, CommandError + +from xmodule.modulestore.django import modulestore + +from licenses.models import Software, 
StudentLicense + + +class Command(BaseCommand): + help = """Imports serial numbers for software used in a course. + + Usage: import_serials course_id software_id serial_file + + serial_file is a text file that list one available serial number per line. + + Example: + import_serials.py MITx/6.002x/2012_Fall matlab /tmp/matlab-serials.txt + """ + + args = "course_id software_id serial_file" + + def handle(self, *args, **options): + """ + """ + course_id, software_name, filename = self._parse_arguments(args) + + software = self._find_software(course_id, software_name) + + self._import_serials(software, filename) + + def _parse_arguments(self, args): + if len(args) != 3: + raise CommandError("Incorrect number of arguments") + + course_id = args[0] + courses = modulestore().get_courses() + known_course_ids = set(c.id for c in courses) + + if course_id not in known_course_ids: + raise CommandError("Unknown course_id") + + software_name = escape(args[1].lower()) + + filename = os.path.abspath(args[2]) + if not os.path.exists(filename): + raise CommandError("Cannot find filename {0}".format(filename)) + + return course_id, software_name, filename + + def _find_software(self, course_id, software_name): + try: + software = Software.objects.get(course_id=course_id, name=software_name) + except Software.DoesNotExist: + software = Software(name=software_name, course_id=course_id) + software.save() + + return software + + def _import_serials(self, software, filename): + print "Importing serial numbers for {0} {1}".format( + software.name, software.course_id) + + known_serials = set(l.serial for l in StudentLicense.objects.filter(software=software)) + + count = 0 + serials = list(l.strip() for l in open(filename)) + for s in serials: + if s not in known_serials: + license = StudentLicense(software=software, serial=s) + license.save() + count += 1 + + print "{0} new serial numbers imported.".format(count) diff --git a/lms/djangoapps/licenses/models.py 
b/lms/djangoapps/licenses/models.py new file mode 100644 index 0000000000..61f270c163 --- /dev/null +++ b/lms/djangoapps/licenses/models.py @@ -0,0 +1,19 @@ +""" +""" + +from django.db import models + +from student.models import User + + +class Software(models.Model): + name = models.CharField(max_length=255) + full_name = models.CharField(max_length=255) + url = models.CharField(max_length=255) + course_id = models.CharField(max_length=255) + + +class StudentLicense(models.Model): + software = models.ForeignKey(Software, db_index=True) + serial = models.CharField(max_length=255) + user = models.ForeignKey(User, null=True, blank=True) diff --git a/lms/djangoapps/licenses/views.py b/lms/djangoapps/licenses/views.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/envs/common.py b/lms/envs/common.py index a927da8e98..9b98e4ecfd 100644 --- a/lms/envs/common.py +++ b/lms/envs/common.py @@ -626,6 +626,7 @@ INSTALLED_APPS = ( 'certificates', 'instructor', 'psychometrics', + 'licenses', #For the wiki 'wiki', # The new django-wiki from benjaoming From ed88708d716123acac5d8f9ca7304b79d363852a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Tue, 21 Aug 2012 19:04:11 -0400 Subject: [PATCH 57/65] Renamed models and commands for importing serial numbers. 
--- ...rt_serials.py => import_serial_numbers.py} | 44 ++++++++----------- lms/djangoapps/licenses/models.py | 13 +++--- 2 files changed, 25 insertions(+), 32 deletions(-) rename lms/djangoapps/licenses/management/commands/{import_serials.py => import_serial_numbers.py} (54%) diff --git a/lms/djangoapps/licenses/management/commands/import_serials.py b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py similarity index 54% rename from lms/djangoapps/licenses/management/commands/import_serials.py rename to lms/djangoapps/licenses/management/commands/import_serial_numbers.py index c65cc356f9..846327966d 100644 --- a/lms/djangoapps/licenses/management/commands/import_serials.py +++ b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py @@ -7,20 +7,18 @@ from django.core.management.base import BaseCommand, CommandError from xmodule.modulestore.django import modulestore -from licenses.models import Software, StudentLicense - +from licenses.models import CourseSoftware, UserLicense class Command(BaseCommand): help = """Imports serial numbers for software used in a course. - Usage: import_serials course_id software_id serial_file + Usage: import_serial_numbers serial_file is a text file that list one available serial number per line. 
Example: - import_serials.py MITx/6.002x/2012_Fall matlab /tmp/matlab-serials.txt + django-admin.py import_serial_numbers MITx/6.002x/2012_Fall matlab /tmp/matlab-serials.txt """ - args = "course_id software_id serial_file" def handle(self, *args, **options): @@ -28,8 +26,8 @@ class Command(BaseCommand): """ course_id, software_name, filename = self._parse_arguments(args) - software = self._find_software(course_id, software_name) - + software, _ = CourseSoftware.objects.get_or_create(course_id=course_id, + name=software_name) self._import_serials(software, filename) def _parse_arguments(self, args): @@ -51,27 +49,21 @@ class Command(BaseCommand): return course_id, software_name, filename - def _find_software(self, course_id, software_name): - try: - software = Software.objects.get(course_id=course_id, name=software_name) - except Software.DoesNotExist: - software = Software(name=software_name, course_id=course_id) - software.save() - - return software def _import_serials(self, software, filename): - print "Importing serial numbers for {0} {1}".format( - software.name, software.course_id) + print "Importing serial numbers for {0}.".format(software) - known_serials = set(l.serial for l in StudentLicense.objects.filter(software=software)) + serials = set(unicode(l.strip()) for l in open(filename)) - count = 0 - serials = list(l.strip() for l in open(filename)) - for s in serials: - if s not in known_serials: - license = StudentLicense(software=software, serial=s) - license.save() - count += 1 + # remove serial numbers we already have + licenses = UserLicense.objects.filter(software=software) + known_serials = set(l.serial for l in licenses) + if known_serials: + serials = serials.difference(known_serials) - print "{0} new serial numbers imported.".format(count) + # add serial numbers them to the database + for serial in serials: + license = UserLicense(software=software, serial=serial) + license.save() + + print "{0} new serial numbers imported.".format(len(serials)) 
diff --git a/lms/djangoapps/licenses/models.py b/lms/djangoapps/licenses/models.py index 61f270c163..78da5d14cb 100644 --- a/lms/djangoapps/licenses/models.py +++ b/lms/djangoapps/licenses/models.py @@ -1,19 +1,20 @@ """ """ - from django.db import models - from student.models import User -class Software(models.Model): +class CourseSoftware(models.Model): name = models.CharField(max_length=255) full_name = models.CharField(max_length=255) url = models.CharField(max_length=255) course_id = models.CharField(max_length=255) + def __unicode__(self): + return u'{0} for {1}'.format(self.name, self.course_id) -class StudentLicense(models.Model): - software = models.ForeignKey(Software, db_index=True) + +class UserLicense(models.Model): + software = models.ForeignKey(CourseSoftware, db_index=True) + user = models.ForeignKey(User, null=True) serial = models.CharField(max_length=255) - user = models.ForeignKey(User, null=True, blank=True) From b2de8199b7670c795c9d91012ee9085156affe50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Tue, 21 Aug 2012 19:04:53 -0400 Subject: [PATCH 58/65] Added licenses view helper functions. 
--- lms/djangoapps/licenses/views.py | 60 ++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/lms/djangoapps/licenses/views.py b/lms/djangoapps/licenses/views.py index e69de29bb2..b4ab4ea909 100644 --- a/lms/djangoapps/licenses/views.py +++ b/lms/djangoapps/licenses/views.py @@ -0,0 +1,60 @@ +import logging +from itertools import groupby +from collections import Iterable + +from django.db.models import Q + +from models import CourseSoftware, UserLicense + +log = logging.getLogger("mitx.licenses") + + +def get_or_create_courses_licenses(user, courses): + user_licenses = get_courses_licenses(user, courses) + + for software, license in user_licenses.iteritems(): + if license is None: + user_licenses[software] = get_or_create_user_license(user, software) + + log.info(user_licenses) + + return user_licenses + + +def get_courses_licenses(user, courses): + course_ids = set(course.id for course in courses) + all_software = CourseSoftware.objects.filter(course_id__in=course_ids) + + user_licenses = dict.fromkeys(all_software, None) + + assigned_licenses = UserLicense.objects.filter(software__in=all_software, user=user) + assigned_by_software = {lic.software:lic for lic in assigned_licenses} + + for software, license in assigned_by_software.iteritems(): + user_licenses[software] = license + + return user_licenses + + +def get_or_create_user_license(user, software): + license = None + try: + # Find a licenses associated with the user or with no user + # associated. 
+ query = (Q(user__isnull=True) | Q(user=user)) & Q(software=software) + + # TODO fix a race condition in this code when more than one + # user is getting a license assigned + + license = UserLicense.objects.filter(query)[0] + + if license.user is not user: + license.user = user + license.save() + + except IndexError: + # TODO look if someone has unenrolled from the class and already has a serial number + log.error('No serial numbers available for {0}', software) + + + return license From 516c41f342c54559cb1e82976083ee063cc6e16b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Wed, 22 Aug 2012 11:53:04 -0400 Subject: [PATCH 59/65] Added html view of serial numbers. Refactored code. --- .../commands/import_serial_numbers.py | 10 +-- lms/djangoapps/licenses/models.py | 64 ++++++++++++++++- lms/djangoapps/licenses/views.py | 71 ++++++------------- lms/templates/licenses/serial_numbers.html | 10 +++ 4 files changed, 100 insertions(+), 55 deletions(-) create mode 100644 lms/templates/licenses/serial_numbers.html diff --git a/lms/djangoapps/licenses/management/commands/import_serial_numbers.py b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py index 846327966d..465940ce20 100644 --- a/lms/djangoapps/licenses/management/commands/import_serial_numbers.py +++ b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py @@ -9,15 +9,18 @@ from xmodule.modulestore.django import modulestore from licenses.models import CourseSoftware, UserLicense + class Command(BaseCommand): help = """Imports serial numbers for software used in a course. - Usage: import_serial_numbers + Usage: import_serial_numbers - serial_file is a text file that list one available serial number per line. + is a text file that list one available serial number per line. 
Example: - django-admin.py import_serial_numbers MITx/6.002x/2012_Fall matlab /tmp/matlab-serials.txt + + import_serial_numbers MITx/6.002x/2012_Fall matlab serials.txt + """ args = "course_id software_id serial_file" @@ -49,7 +52,6 @@ class Command(BaseCommand): return course_id, software_name, filename - def _import_serials(self, software, filename): print "Importing serial numbers for {0}.".format(software) diff --git a/lms/djangoapps/licenses/models.py b/lms/djangoapps/licenses/models.py index 78da5d14cb..929fba10ec 100644 --- a/lms/djangoapps/licenses/models.py +++ b/lms/djangoapps/licenses/models.py @@ -1,8 +1,11 @@ -""" -""" -from django.db import models +import logging + +from django.db import models, transaction + from student.models import User +log = logging.getLogger("mitx.licenses") + class CourseSoftware(models.Model): name = models.CharField(max_length=255) @@ -18,3 +21,58 @@ class UserLicense(models.Model): software = models.ForeignKey(CourseSoftware, db_index=True) user = models.ForeignKey(User, null=True) serial = models.CharField(max_length=255) + + +def get_courses_licenses(user, courses): + course_ids = set(course.id for course in courses) + all_software = CourseSoftware.objects.filter(course_id__in=course_ids) + + assigned_licenses = UserLicense.objects.filter(software__in=all_software, + user=user) + + licenses = dict.fromkeys(all_software, None) + for license in assigned_licenses: + licenses[license.software] = license + + log.info(assigned_licenses) + log.info(licenses) + + return licenses + + +def get_license(user, software): + try: + license = UserLicense.objects.get(user=user, software=software) + except UserLicense.DoesNotExist: + license = None + + return license + + +def get_or_create_license(user, software): + license = get_license(user, software) + if license is None: + license = _create_license(user, software) + + return license + + +def _create_license(user, software): + license = None + + try: + # find one license that has not 
been assigned, locking the + # table/rows with select_for_update to prevent race conditions + with transaction.commit_on_success(): + selected = UserLicense.objects.select_for_update() + license = selected.filter(user__isnull=True)[0] + license.user = user + license.save() + except IndexError: + # there are no free licenses + log.error('No serial numbers available for {0}', software) + license = None + # TODO [rocha]look if someone has unenrolled from the class + # and already has a serial number + + return license diff --git a/lms/djangoapps/licenses/views.py b/lms/djangoapps/licenses/views.py index b4ab4ea909..7cf8e6591e 100644 --- a/lms/djangoapps/licenses/views.py +++ b/lms/djangoapps/licenses/views.py @@ -1,60 +1,35 @@ import logging -from itertools import groupby -from collections import Iterable +from collections import namedtuple, defaultdict -from django.db.models import Q +from mitxmako.shortcuts import render_to_string + +from models import get_courses_licenses, get_or_create_license -from models import CourseSoftware, UserLicense log = logging.getLogger("mitx.licenses") -def get_or_create_courses_licenses(user, courses): - user_licenses = get_courses_licenses(user, courses) +License = namedtuple('License', 'software serial') - for software, license in user_licenses.iteritems(): + +def get_licenses_by_course(user, courses): + licenses = get_courses_licenses(user, courses) + licenses_by_course = defaultdict(list) + + # create missing licenses and group by course_id + for software, license in licenses.iteritems(): if license is None: - user_licenses[software] = get_or_create_user_license(user, software) + licenses[software] = get_or_create_license(user, software) - log.info(user_licenses) + course_id = software.course_id + serial = license.serial if license else None + licenses_by_course[course_id].append(License(software, serial)) - return user_licenses + # render elements + data_by_course = {} + for course_id, licenses in licenses_by_course.iteritems(): 
+ context = {'licenses': licenses} + template = 'licenses/serial_numbers.html' + data_by_course[course_id] = render_to_string(template, context) - -def get_courses_licenses(user, courses): - course_ids = set(course.id for course in courses) - all_software = CourseSoftware.objects.filter(course_id__in=course_ids) - - user_licenses = dict.fromkeys(all_software, None) - - assigned_licenses = UserLicense.objects.filter(software__in=all_software, user=user) - assigned_by_software = {lic.software:lic for lic in assigned_licenses} - - for software, license in assigned_by_software.iteritems(): - user_licenses[software] = license - - return user_licenses - - -def get_or_create_user_license(user, software): - license = None - try: - # Find a licenses associated with the user or with no user - # associated. - query = (Q(user__isnull=True) | Q(user=user)) & Q(software=software) - - # TODO fix a race condition in this code when more than one - # user is getting a license assigned - - license = UserLicense.objects.filter(query)[0] - - if license.user is not user: - license.user = user - license.save() - - except IndexError: - # TODO look if someone has unenrolled from the class and already has a serial number - log.error('No serial numbers available for {0}', software) - - - return license + return data_by_course diff --git a/lms/templates/licenses/serial_numbers.html b/lms/templates/licenses/serial_numbers.html new file mode 100644 index 0000000000..18f0ff8a9b --- /dev/null +++ b/lms/templates/licenses/serial_numbers.html @@ -0,0 +1,10 @@ +
    +% for license in licenses: +
    ${license.software.name}:
    + % if license.serial: +
    ${license.serial}
    + % else: +
    None Available
    + % endif +% endfor +
    From 9cf8c02dc65615e4750b1181a60f8e1dbd5fd4bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Wed, 22 Aug 2012 14:23:36 -0400 Subject: [PATCH 60/65] Added import_serial_numbers command test. --- lms/djangoapps/licenses/tests.py | 85 ++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 lms/djangoapps/licenses/tests.py diff --git a/lms/djangoapps/licenses/tests.py b/lms/djangoapps/licenses/tests.py new file mode 100644 index 0000000000..f06899d2de --- /dev/null +++ b/lms/djangoapps/licenses/tests.py @@ -0,0 +1,85 @@ +import logging +from uuid import uuid4 +from random import shuffle +from tempfile import NamedTemporaryFile + +from django.test import TestCase +from django.core.management import call_command + +from models import CourseSoftware, UserLicense + +COURSE_1 = 'MITx/6.002x/2012_Fall' + +SOFTWARE_1 = 'matlab' +SOFTWARE_2 = 'stata' + +log = logging.getLogger(__name__) + + +class CommandTest(TestCase): + def test_import_serial_numbers(self): + size = 20 + + log.debug('Adding one set of serials for {0}'.format(SOFTWARE_1)) + with generate_serials_file(size) as temp_file: + args = [COURSE_1, SOFTWARE_1, temp_file.name] + call_command('import_serial_numbers', *args) + + log.debug('Adding one set of serials for {0}'.format(SOFTWARE_2)) + with generate_serials_file(size) as temp_file: + args = [COURSE_1, SOFTWARE_2, temp_file.name] + call_command('import_serial_numbers', *args) + + log.debug('There should be only 2 course-software entries') + software_count = CourseSoftware.objects.all().count() + self.assertEqual(2, software_count) + + log.debug('We added two sets of {0} serials'.format(size)) + licenses_count = UserLicense.objects.all().count() + self.assertEqual(2 * size, licenses_count) + + log.debug('Adding more serial numbers to {0}'.format(SOFTWARE_1)) + with generate_serials_file(size) as temp_file: + args = [COURSE_1, SOFTWARE_1, temp_file.name] + call_command('import_serial_numbers', *args) 
+ + log.debug('There should be still only 2 course-software entries') + software_count = CourseSoftware.objects.all().count() + self.assertEqual(2, software_count) + + log.debug('Now we should have 3 sets of 20 serials'.format(size)) + licenses_count = UserLicense.objects.all().count() + self.assertEqual(3 * size, licenses_count) + + cs = CourseSoftware.objects.get(pk=1) + + lics = UserLicense.objects.filter(software=cs)[:size] + known_serials = list(l.serial for l in lics) + known_serials.extend(generate_serials(10)) + + shuffle(known_serials) + + log.debug('Adding some new and old serials to {0}'.format(SOFTWARE_1)) + with NamedTemporaryFile() as f: + f.write('\n'.join(known_serials)) + f.flush() + args = [COURSE_1, SOFTWARE_1, f.name] + call_command('import_serial_numbers', *args) + + log.debug('Check if we added only the new ones') + licenses_count = UserLicense.objects.filter(software=cs).count() + self.assertEqual((2 * size) + 10, licenses_count) + + +def generate_serials(size=20): + return [str(uuid4()) for _ in range(size)] + + +def generate_serials_file(size=20): + serials = generate_serials(size) + + temp_file = NamedTemporaryFile() + temp_file.write('\n'.join(serials)) + temp_file.flush() + + return temp_file From 3808ff85a8f867be5ef4bd7359f6a4593f4a9707 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Tue, 28 Aug 2012 11:46:12 -0400 Subject: [PATCH 61/65] Added django command to generate random serial numbers. 
--- .../commands/generate_serial_numbers.py | 65 +++++++++++++++++++ .../commands/import_serial_numbers.py | 1 - 2 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 lms/djangoapps/licenses/management/commands/generate_serial_numbers.py diff --git a/lms/djangoapps/licenses/management/commands/generate_serial_numbers.py b/lms/djangoapps/licenses/management/commands/generate_serial_numbers.py new file mode 100644 index 0000000000..7c6b0d310e --- /dev/null +++ b/lms/djangoapps/licenses/management/commands/generate_serial_numbers.py @@ -0,0 +1,65 @@ +import os.path +from uuid import uuid4 +from optparse import make_option + +from django.utils.html import escape +from django.core.management.base import BaseCommand, CommandError + +from xmodule.modulestore.django import modulestore + +from licenses.models import CourseSoftware, UserLicense + + +class Command(BaseCommand): + help = """Generate random serial numbers for software used in a course. + + Usage: generate_serial_numbers + + is the number of numbers to generate. 
+ + Example: + + import_serial_numbers MITx/6.002x/2012_Fall matlab 100 + + """ + args = "course_id software_id count" + + def handle(self, *args, **options): + """ + """ + course_id, software_name, count = self._parse_arguments(args) + + software, _ = CourseSoftware.objects.get_or_create(course_id=course_id, + name=software_name) + self._generate_serials(software, count) + + def _parse_arguments(self, args): + if len(args) != 3: + raise CommandError("Incorrect number of arguments") + + course_id = args[0] + courses = modulestore().get_courses() + known_course_ids = set(c.id for c in courses) + + if course_id not in known_course_ids: + raise CommandError("Unknown course_id") + + software_name = escape(args[1].lower()) + + try: + count = int(args[2]) + except ValueError: + raise CommandError("Invalid argument.") + + return course_id, software_name, count + + def _generate_serials(self, software, count): + print "Generating {0} serials".format(count) + + # add serial numbers them to the database + for _ in xrange(count): + serial = str(uuid4()) + license = UserLicense(software=software, serial=serial) + license.save() + + print "{0} new serial numbers generated.".format(count) diff --git a/lms/djangoapps/licenses/management/commands/import_serial_numbers.py b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py index 465940ce20..a3a8c0bad1 100644 --- a/lms/djangoapps/licenses/management/commands/import_serial_numbers.py +++ b/lms/djangoapps/licenses/management/commands/import_serial_numbers.py @@ -1,5 +1,4 @@ import os.path - from optparse import make_option from django.utils.html import escape From e9ec63e4b6488ad278edfa98f4566d861da796a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Mon, 15 Oct 2012 18:45:36 -0400 Subject: [PATCH 62/65] Add ajax endpoint to retrieve and assign course software licenses --- lms/djangoapps/licenses/models.py | 2 +- lms/djangoapps/licenses/views.py | 53 ++++++++++++++++++++++++++++++- 
lms/urls.py | 8 +++++ 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/lms/djangoapps/licenses/models.py b/lms/djangoapps/licenses/models.py index 929fba10ec..d259892f5d 100644 --- a/lms/djangoapps/licenses/models.py +++ b/lms/djangoapps/licenses/models.py @@ -65,7 +65,7 @@ def _create_license(user, software): # table/rows with select_for_update to prevent race conditions with transaction.commit_on_success(): selected = UserLicense.objects.select_for_update() - license = selected.filter(user__isnull=True)[0] + license = selected.filter(user__isnull=True, software=software)[0] license.user = user license.save() except IndexError: diff --git a/lms/djangoapps/licenses/views.py b/lms/djangoapps/licenses/views.py index 7cf8e6591e..9b32478e0e 100644 --- a/lms/djangoapps/licenses/views.py +++ b/lms/djangoapps/licenses/views.py @@ -1,9 +1,18 @@ import logging +import json +import re +from urlparse import urlparse from collections import namedtuple, defaultdict + from mitxmako.shortcuts import render_to_string -from models import get_courses_licenses, get_or_create_license +from django.contrib.auth.models import User +from django.http import HttpResponse, Http404 +from django.views.decorators.csrf import requires_csrf_token, csrf_protect + +from models import CourseSoftware +from models import get_courses_licenses, get_or_create_license, get_license log = logging.getLogger("mitx.licenses") @@ -33,3 +42,45 @@ def get_licenses_by_course(user, courses): data_by_course[course_id] = render_to_string(template, context) return data_by_course + + +@requires_csrf_token +def user_software_license(request): + if request.method != 'POST' or not request.is_ajax(): + raise Http404 + + # get the course id from the referer + url_path = urlparse(request.META.get('HTTP_REFERER', '')).path + pattern = re.compile('^/courses/(?P[^/]+/[^/]+/[^/]+)/.*/?$') + match = re.match(pattern, url_path) + + if not match: + raise Http404 + course_id = match.groupdict().get('id', '') + + 
user_id = request.session.get('_auth_user_id') + software_name = request.POST.get('software') + generate = request.POST.get('generate', False) == 'true' + + print user_id, software_name, generate + + try: + software = CourseSoftware.objects.get(name=software_name, + course_id=course_id) + print software + except CourseSoftware.DoesNotExist: + raise Http404 + + user = User.objects.get(id=user_id) + + if generate: + license = get_or_create_license(user, software) + else: + license = get_license(user, software) + + if license: + response = {'serial': license.serial} + else: + response = {'error': 'No serial number found'} + + return HttpResponse(json.dumps(response), mimetype='application/json') diff --git a/lms/urls.py b/lms/urls.py index 89a541ab06..e025478387 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -154,6 +154,14 @@ if settings.COURSEWARE_ENABLED: url(r'^preview/chemcalc', 'courseware.module_render.preview_chemcalc', name='preview_chemcalc'), + # Software Licenses + + # TODO: for now, this is the endpoint of an ajax replay + # service that retrieve and assigns license numbers for + # software assigned to a course. The numbers have to be loaded + # into the database. 
+ url(r'^software-licenses$', 'licenses.views.user_software_license', name="user_software_license"), + url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/xqueue/(?P[^/]*)/(?P.*?)/(?P[^/]*)$', 'courseware.module_render.xqueue_callback', name='xqueue_callback'), From a8cedf8ab9712534e8747d62975c7536e8698cd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Mon, 15 Oct 2012 21:04:10 -0300 Subject: [PATCH 63/65] Update lms/djangoapps/licenses/views.py Remove annoying print statements --- lms/djangoapps/licenses/views.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lms/djangoapps/licenses/views.py b/lms/djangoapps/licenses/views.py index 9b32478e0e..7d804fbd3d 100644 --- a/lms/djangoapps/licenses/views.py +++ b/lms/djangoapps/licenses/views.py @@ -62,8 +62,6 @@ def user_software_license(request): software_name = request.POST.get('software') generate = request.POST.get('generate', False) == 'true' - print user_id, software_name, generate - try: software = CourseSoftware.objects.get(name=software_name, course_id=course_id) From e7c62b0fc659e3597e30181091144c0f16792f23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Tue, 16 Oct 2012 11:57:38 -0400 Subject: [PATCH 64/65] Add migrations to license django application --- .../licenses/migrations/0001_initial.py | 118 ++++++++++++++++++ .../licenses/migrations/__init__.py | 0 2 files changed, 118 insertions(+) create mode 100644 lms/djangoapps/licenses/migrations/0001_initial.py create mode 100644 lms/djangoapps/licenses/migrations/__init__.py diff --git a/lms/djangoapps/licenses/migrations/0001_initial.py b/lms/djangoapps/licenses/migrations/0001_initial.py new file mode 100644 index 0000000000..bdc1d3ead4 --- /dev/null +++ b/lms/djangoapps/licenses/migrations/0001_initial.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def 
forwards(self, orm): + # Adding model 'CourseSoftware' + db.create_table('licenses_coursesoftware', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('name', self.gf('django.db.models.fields.CharField')(max_length=255)), + ('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)), + ('url', self.gf('django.db.models.fields.CharField')(max_length=255)), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255)), + )) + db.send_create_signal('licenses', ['CourseSoftware']) + + # Adding model 'UserLicense' + db.create_table('licenses_userlicense', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('software', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['licenses.CourseSoftware'])), + ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)), + ('serial', self.gf('django.db.models.fields.CharField')(max_length=255)), + )) + db.send_create_signal('licenses', ['UserLicense']) + + + def backwards(self, orm): + # Deleting model 'CourseSoftware' + db.delete_table('licenses_coursesoftware') + + # Deleting model 'UserLicense' + db.delete_table('licenses_userlicense') + + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { + 'Meta': {'object_name': 'User'}, + 'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}), + 'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), + 'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}), + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), + 'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), + 'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), + 'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 
'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), + 'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}), + 'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), + 'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), + 'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), + 'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), + 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + }, + 'licenses.coursesoftware': { + 'Meta': {'object_name': 'CourseSoftware'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}), + 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), + 'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}) + }, + 'licenses.userlicense': { + 'Meta': {'object_name': 'UserLicense'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'serial': ('django.db.models.fields.CharField', [], {'max_length': '255'}), + 'software': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['licenses.CourseSoftware']"}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}) + } + } + + complete_apps = ['licenses'] \ No newline at end of file diff --git a/lms/djangoapps/licenses/migrations/__init__.py b/lms/djangoapps/licenses/migrations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From b479c85310571312fcfe3dd21e6eb8d74f8af915 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Andr=C3=A9s=20Rocha?= Date: Tue, 16 Oct 2012 13:02:13 -0400 Subject: [PATCH 65/65] Change order of X Universities in the front page --- lms/templates/index.html | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lms/templates/index.html b/lms/templates/index.html index b1d9925416..151525f715 100644 --- a/lms/templates/index.html +++ b/lms/templates/index.html @@ -66,14 +66,6 @@
  • - - -
    - UTx -
    -
    -
  • -
  • @@ -81,6 +73,14 @@
  • +
  • + + +
    + UTx +
    +
    +