diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py index 8e4a016a0f..c0ab9ec60e 100644 --- a/cms/djangoapps/contentstore/tests/test_contentstore.py +++ b/cms/djangoapps/contentstore/tests/test_contentstore.py @@ -263,7 +263,33 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): # note, we know the link it should be because that's what in the 'full' course in the test data self.assertContains(resp, '/c4x/edX/full/asset/handouts_schematic_tutorial.pdf') + def test_export_course_with_unknown_metadata(self): + ms = modulestore('direct') + cs = contentstore() + import_from_xml(ms, 'common/test/data/', ['full']) + location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') + + root_dir = path(mkdtemp_clean()) + + course = ms.get_item(location) + + # add a bool piece of unknown metadata so we can verify we don't throw an exception + course.metadata['new_metadata'] = True + + ms.update_metadata(location, course.metadata) + + print 'Exporting to tempdir = {0}'.format(root_dir) + + # export out to a tempdir + bExported = False + try: + export_to_xml(ms, cs, location, root_dir, 'test_export') + bExported = True + except Exception: + pass + + self.assertTrue(bExported) class ContentStoreTest(ModuleStoreTestCase): """ diff --git a/cms/envs/dev.py b/cms/envs/dev.py index 3dee93a398..9164c02e3f 100644 --- a/cms/envs/dev.py +++ b/cms/envs/dev.py @@ -4,9 +4,6 @@ This config file runs the simplest dev environment""" from .common import * from logsettings import get_logger_config -import logging -import sys - DEBUG = True TEMPLATE_DEBUG = DEBUG LOGGING = get_logger_config(ENV_ROOT / "log", @@ -107,3 +104,36 @@ CACHE_TIMEOUT = 0 # Dummy secret key for dev SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd' + +################################ DEBUG TOOLBAR ################################# +INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo') +MIDDLEWARE_CLASSES += 
('django_comment_client.utils.QueryCountDebugMiddleware', + 'debug_toolbar.middleware.DebugToolbarMiddleware',) +INTERNAL_IPS = ('127.0.0.1',) + +DEBUG_TOOLBAR_PANELS = ( + 'debug_toolbar.panels.version.VersionDebugPanel', + 'debug_toolbar.panels.timer.TimerDebugPanel', + 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel', + 'debug_toolbar.panels.headers.HeaderDebugPanel', + 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel', + 'debug_toolbar.panels.sql.SQLDebugPanel', + 'debug_toolbar.panels.signals.SignalDebugPanel', + 'debug_toolbar.panels.logger.LoggingPanel', +# This is breaking Mongo updates-- Christina is investigating. +# 'debug_toolbar_mongo.panel.MongoDebugPanel', + + # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and + # Django=1.3.1/1.4 where requests to views get duplicated (your method gets + # hit twice). So you can uncomment when you need to diagnose performance + # problems, but you shouldn't leave it on. + # 'debug_toolbar.panels.profiling.ProfilingDebugPanel', + ) + +DEBUG_TOOLBAR_CONFIG = { + 'INTERCEPT_REDIRECTS': False +} + +# To see stacktraces for MongoDB queries, set this to True. +# Stacktraces slow down page loads drastically (for pages with lots of queries). 
def is_correct(self, answer_id):
    """Return True when the stored answer is fully or partially correct,
    False when it is incorrect, or None when answer_id has no entry."""
    if answer_id not in self.cmap:
        return None
    return self.cmap[answer_id]['correctness'] in ('correct', 'partially-correct')
class EditAGeneInput(InputTypeBase):
    """
    An input type for editing a gene. Integrates with the genex java applet.
    """

    template = "editageneinput.html"
    tags = ['editageneinput']

    @classmethod
    def get_attributes(cls):
        """
        Note: width, height, dna_sequence, and genex_problem_number are
        all required attributes.
        """
        return [Attribute('width'),
                Attribute('height'),
                Attribute('dna_sequence'),
                Attribute('genex_problem_number')]

    def _extra_context(self):
        """Point the client-side renderer at the applet loader script."""
        return {'applet_loader': '/static/js/capa/edit-a-gene.js'}
def _validate_options(self):
    """Raise ValueError when any option is missing its 'choice' attribute
    or carries a value outside the graded set."""
    valid_choices = ('correct', 'partially-correct', 'incorrect')
    for opt in self.options:
        selected = opt['choice']
        if selected is None:
            raise ValueError('Missing required choice attribute.')
        if selected not in valid_choices:
            raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(
                selected, ', '.join(valid_choices)))
def _extra_context(self):
    """Assemble the template context: the static prompt fields plus the
    decoded student value from _unpack."""
    context = dict(
        title=self.title,
        text=self.text,
        comment=self.comment,
        comment_prompt=self.comment_prompt,
        tag_prompt=self.tag_prompt,
        options=self.options,
        return_to_annotation=self.return_to_annotation,
        debug=self.debug,
    )
    context.update(self._unpack(self.value))
    return context
def _get_scoring_map(self):
    """Return {input_id: {option_id: {'correctness', 'points'}}} built from
    the per-choice scoring table in self.default_scoring."""
    scoring = self.default_scoring
    scoring_map = {}

    for inputfield in self.inputfields:
        per_option = {}
        for option in self._find_options(inputfield):
            choice = option['choice']
            per_option[option['id']] = {
                # a choice outside the scoring table maps to None, exactly
                # like the original identity-dict lookup did
                'correctness': choice if choice in scoring else None,
                'points': scoring.get(choice),
            }
        scoring_map[inputfield.get('id')] = per_option

    return scoring_map
def _unpack(self, json_value):
    """Unpack a student response value submitted as JSON.

    Returns a dict with 'options_value' (list of selected option ids) and
    'comment_value' (the student's free-text commentary).  Malformed input
    degrades to empty values rather than raising.
    """
    d = json.loads(json_value)
    if type(d) != dict:
        d = {}

    comment_value = d.get('comment', '')
    # Bug fix: the original checked isinstance(d, basestring), which is
    # always False for a dict, so every valid comment was blanked out.
    # Validate the comment value itself, matching AnnotationInput._unpack.
    if not isinstance(comment_value, basestring):
        comment_value = ''

    options_value = d.get('options', [])
    if not isinstance(options_value, list):
        options_value = []

    return {
        'options_value': options_value,
        'comment_value': comment_value
    }
b/common/lib/capa/capa/templates/annotationinput.html @@ -0,0 +1,70 @@ +
+
+ +
+ ${title} + + % if return_to_annotation: + Return to Annotation
+ % endif +
+
+ +
${text}
+
${comment}
+ +
${comment_prompt}
+ + +
${tag_prompt}
+
    + % for option in options: +
  • + % if has_options_value: + % if all([c == 'correct' for c in option['choice'], status]): + + % elif all([c == 'partially-correct' for c in option['choice'], status]): + + % elif all([c == 'incorrect' for c in option['choice'], status]): + + % endif + % endif + + + ${option['description']} + +
  • + % endfor +
+ + % if debug: +
+ Rendered with value:
+
${value|h}
+ Current input value:
+ +
+ % else: + + % endif + + % if status == 'unsubmitted': + + % elif status == 'incomplete': + + % elif status == 'incorrect' and not has_options_value: + + % endif + +

+
+ + +% if msg: +${msg|n} +% endif + diff --git a/common/lib/capa/capa/templates/chemicalequationinput.html b/common/lib/capa/capa/templates/chemicalequationinput.html index dd177dc920..17c84114e5 100644 --- a/common/lib/capa/capa/templates/chemicalequationinput.html +++ b/common/lib/capa/capa/templates/chemicalequationinput.html @@ -11,7 +11,7 @@
% endif - +
+
% if status == 'unsubmitted': @@ -8,16 +9,12 @@ % elif status == 'incorrect':
% elif status == 'incomplete': -
+
% endif - - - - - - Applet failed to run. No Java plug-in was found. - - + +
+ +

@@ -37,3 +34,4 @@

% endif
class AnnotationResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating annotation-response XML trees in tests. """

    def create_response_element(self, **kwargs):
        """ Create an annotationresponse element """
        return etree.Element("annotationresponse")

    def create_input_element(self, **kwargs):
        """ Create an annotationinput element with prompt text children and
        an options list of (description, correctness) pairs."""
        input_element = etree.Element("annotationinput")

        # (tag, default text) pairs; kwargs may override any of them
        text_fields = (
            ('title', 'super cool annotation'),
            ('text', 'texty text'),
            ('comment', 'blah blah erudite comment blah blah'),
            ('comment_prompt', 'type a commentary below'),
            ('tag_prompt', 'select one tag'),
        )
        for tag, default in text_fields:
            etree.SubElement(input_element, tag).text = kwargs.get(tag, default)

        options = kwargs.get('options',
                             [('green', 'correct'),
                              ('eggs', 'incorrect'),
                              ('ham', 'partially-correct')])
        options_element = etree.SubElement(input_element, 'options')
        for (description, correctness) in options:
            option_element = etree.SubElement(options_element, 'option',
                                              {'choice': correctness})
            option_element.text = description

        return input_element
import test_system class CapaHtmlRenderTest(unittest.TestCase): + def test_blank_problem(self): + """ + It's important that blank problems don't break, since that's + what you start with in studio. + """ + xml_str = " " + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + # expect that we made it here without blowing up + def test_include_html(self): # Create a test file to include self._create_test_file('test_include.xml', @@ -25,7 +39,7 @@ class CapaHtmlRenderTest(unittest.TestCase): # Create the problem problem = LoncapaProblem(xml_str, '1', system=test_system) - + # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -45,7 +59,7 @@ class CapaHtmlRenderTest(unittest.TestCase): # Create the problem problem = LoncapaProblem(xml_str, '1', system=test_system) - + # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -64,7 +78,7 @@ class CapaHtmlRenderTest(unittest.TestCase): # Create the problem problem = LoncapaProblem(xml_str, '1', system=test_system) - + # Render the HTML rendered_html = etree.XML(problem.get_html()) @@ -99,11 +113,11 @@ class CapaHtmlRenderTest(unittest.TestCase): response_element = rendered_html.find("span") self.assertEqual(response_element.tag, "span") - # Expect that the response + # Expect that the response # that contains a
for the textline textline_element = response_element.find("div") self.assertEqual(textline_element.text, 'Input Template Render') - + # Expect a child
for the solution # with the rendered template solution_element = rendered_html.find("div") @@ -112,14 +126,14 @@ class CapaHtmlRenderTest(unittest.TestCase): # Expect that the template renderer was called with the correct # arguments, once for the textline input and once for # the solution - expected_textline_context = {'status': 'unsubmitted', - 'value': '', - 'preprocessor': None, - 'msg': '', - 'inline': False, - 'hidden': False, - 'do_math': False, - 'id': '1_2_1', + expected_textline_context = {'status': 'unsubmitted', + 'value': '', + 'preprocessor': None, + 'msg': '', + 'inline': False, + 'hidden': False, + 'do_math': False, + 'id': '1_2_1', 'size': None} expected_solution_context = {'id': '1_solution_1'} @@ -148,7 +162,7 @@ class CapaHtmlRenderTest(unittest.TestCase): # Create the problem and render the html problem = LoncapaProblem(xml_str, '1', system=test_system) - + # Grade the problem correctmap = problem.grade_answers({'1_2_1': 'test'}) diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 4a5ea5c429..f670a38746 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -482,27 +482,43 @@ class ChemicalEquationTest(unittest.TestCase): ''' Check that chemical equation inputs work. 
''' - - def test_rendering(self): - size = "42" - xml_str = """""".format(size=size) + def setUp(self): + self.size = "42" + xml_str = """""".format(size=self.size) element = etree.fromstring(xml_str) state = {'value': 'H2OYeah', } - the_input = lookup_tag('chemicalequationinput')(test_system, element, state) + self.the_input = lookup_tag('chemicalequationinput')(test_system, element, state) - context = the_input._get_render_context() + + def test_rendering(self): + ''' Verify that the render context matches the expected render context''' + context = self.the_input._get_render_context() expected = {'id': 'prob_1_2', 'value': 'H2OYeah', 'status': 'unanswered', 'msg': '', - 'size': size, + 'size': self.size, 'previewer': '/static/js/capa/chemical_equation_preview.js', } self.assertEqual(context, expected) + + def test_chemcalc_ajax_sucess(self): + ''' Verify that using the correct dispatch and valid data produces a valid response''' + + data = {'formula': "H"} + response = self.the_input.handle_ajax("preview_chemcalc", data) + + self.assertTrue('preview' in response) + self.assertNotEqual(response['preview'], '') + self.assertEqual(response['error'], "") + + + + class DragAndDropTest(unittest.TestCase): ''' @@ -570,3 +586,65 @@ class DragAndDropTest(unittest.TestCase): context.pop('drag_and_drop_json') expected.pop('drag_and_drop_json') self.assertEqual(context, expected) + + +class AnnotationInputTest(unittest.TestCase): + ''' + Make sure option inputs work + ''' + def test_rendering(self): + xml_str = ''' + + foo + bar + my comment + type a commentary + select a tag + + + + + + +''' + element = etree.fromstring(xml_str) + + value = {"comment": "blah blah", "options": [1]} + json_value = json.dumps(value) + state = { + 'value': json_value, + 'id': 'annotation_input', + 'status': 'answered' + } + + tag = 'annotationinput' + + the_input = lookup_tag(tag)(test_system, element, state) + + context = the_input._get_render_context() + + expected = { + 'id': 
'annotation_input', + 'value': value, + 'status': 'answered', + 'msg': '', + 'title': 'foo', + 'text': 'bar', + 'comment': 'my comment', + 'comment_prompt': 'type a commentary', + 'tag_prompt': 'select a tag', + 'options': [ + {'id': 0, 'description': 'x', 'choice': 'correct'}, + {'id': 1, 'description': 'y', 'choice': 'incorrect'}, + {'id': 2, 'description': 'z', 'choice': 'partially-correct'} + ], + 'value': json_value, + 'options_value': value['options'], + 'has_options_value': len(value['options']) > 0, + 'comment_value': value['comment'], + 'debug': False, + 'return_to_annotation': True + } + + self.maxDiff = None + self.assertDictEqual(context, expected) diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py index 1b78dfce5c..93a7e9628a 100644 --- a/common/lib/capa/capa/tests/test_responsetypes.py +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -906,3 +906,40 @@ class SchematicResponseTest(ResponseTest): # (That is, our script verifies that the context # is what we expect) self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct') + +class AnnotationResponseTest(ResponseTest): + from response_xml_factory import AnnotationResponseXMLFactory + xml_factory_class = AnnotationResponseXMLFactory + + def test_grade(self): + (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect') + + answer_id = '1_2_1' + options = (('x', correct),('y', partially),('z', incorrect)) + make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids })} + + tests = [ + {'correctness': correct, 'points': 2,'answers': make_answer([0]) }, + {'correctness': partially, 'points': 1, 'answers': make_answer([1]) }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([2]) }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([0,1,2]) }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([]) }, + {'correctness': incorrect, 'points': 0, 
'answers': make_answer('') }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer(None) }, + {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null' } }, + ] + + for (index, test) in enumerate(tests): + expected_correctness = test['correctness'] + expected_points = test['points'] + answers = test['answers'] + + problem = self.build_problem(options=options) + correct_map = problem.grade_answers(answers) + actual_correctness = correct_map.get_correctness(answer_id) + actual_points = correct_map.get_npoints(answer_id) + + self.assertEqual(expected_correctness, actual_correctness, + msg="%s should be marked %s" % (answer_id, expected_correctness)) + self.assertEqual(expected_points, actual_points, + msg="%s should have %d points" % (answer_id, expected_points)) diff --git a/common/lib/xmodule/setup.py b/common/lib/xmodule/setup.py index ec369420cd..835085d8ea 100644 --- a/common/lib/xmodule/setup.py +++ b/common/lib/xmodule/setup.py @@ -46,6 +46,7 @@ setup( "custom_tag_template = xmodule.raw_module:RawDescriptor", "about = xmodule.html_module:AboutDescriptor", "graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor", + "annotatable = xmodule.annotatable_module:AnnotatableDescriptor", "foldit = xmodule.foldit_module:FolditDescriptor", ] } diff --git a/common/lib/xmodule/xmodule/annotatable_module.py b/common/lib/xmodule/xmodule/annotatable_module.py new file mode 100644 index 0000000000..f093b76f52 --- /dev/null +++ b/common/lib/xmodule/xmodule/annotatable_module.py @@ -0,0 +1,131 @@ +import logging + +from lxml import etree +from pkg_resources import resource_string, resource_listdir + +from xmodule.x_module import XModule +from xmodule.raw_module import RawDescriptor +from xmodule.modulestore.mongo import MongoModuleStore +from xmodule.modulestore.django import modulestore +from xmodule.contentstore.content import StaticContent + +log = logging.getLogger(__name__) + +class AnnotatableModule(XModule): + js = {'coffee': 
[resource_string(__name__, 'js/src/javascript_loader.coffee'), + resource_string(__name__, 'js/src/collapsible.coffee'), + resource_string(__name__, 'js/src/html/display.coffee'), + resource_string(__name__, 'js/src/annotatable/display.coffee')], + 'js': [] + } + js_module_name = "Annotatable" + css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]} + icon_class = 'annotatable' + + def _get_annotation_class_attr(self, index, el): + """ Returns a dict with the CSS class attribute to set on the annotation + and an XML key to delete from the element. + """ + + attr = {} + cls = ['annotatable-span', 'highlight'] + highlight_key = 'highlight' + color = el.get(highlight_key) + + if color is not None: + if color in self.highlight_colors: + cls.append('highlight-'+color) + attr['_delete'] = highlight_key + attr['value'] = ' '.join(cls) + + return { 'class' : attr } + + def _get_annotation_data_attr(self, index, el): + """ Returns a dict in which the keys are the HTML data attributes + to set on the annotation element. Each data attribute has a + corresponding 'value' and (optional) '_delete' key to specify + an XML attribute to delete. + """ + + data_attrs = {} + attrs_map = { + 'body': 'data-comment-body', + 'title': 'data-comment-title', + 'problem': 'data-problem-id' + } + + for xml_key in attrs_map.keys(): + if xml_key in el.attrib: + value = el.get(xml_key, '') + html_key = attrs_map[xml_key] + data_attrs[html_key] = { 'value': value, '_delete': xml_key } + + return data_attrs + + def _render_annotation(self, index, el): + """ Renders an annotation element for HTML output. 
""" + attr = {} + attr.update(self._get_annotation_class_attr(index, el)) + attr.update(self._get_annotation_data_attr(index, el)) + + el.tag = 'span' + + for key in attr.keys(): + el.set(key, attr[key]['value']) + if '_delete' in attr[key] and attr[key]['_delete'] is not None: + delete_key = attr[key]['_delete'] + del el.attrib[delete_key] + + + def _render_content(self): + """ Renders annotatable content with annotation spans and returns HTML. """ + xmltree = etree.fromstring(self.content) + xmltree.tag = 'div' + if 'display_name' in xmltree.attrib: + del xmltree.attrib['display_name'] + + index = 0 + for el in xmltree.findall('.//annotation'): + self._render_annotation(index, el) + index += 1 + + return etree.tostring(xmltree, encoding='unicode') + + def _extract_instructions(self, xmltree): + """ Removes from the xmltree and returns them as a string, otherwise None. """ + instructions = xmltree.find('instructions') + if instructions is not None: + instructions.tag = 'div' + xmltree.remove(instructions) + return etree.tostring(instructions, encoding='unicode') + return None + + def get_html(self): + """ Renders parameters to template. 
""" + context = { + 'display_name': self.display_name, + 'element_id': self.element_id, + 'instructions_html': self.instructions, + 'content_html': self._render_content() + } + + return self.system.render_template('annotatable.html', context) + + def __init__(self, system, location, definition, descriptor, + instance_state=None, shared_state=None, **kwargs): + XModule.__init__(self, system, location, definition, descriptor, + instance_state, shared_state, **kwargs) + + xmltree = etree.fromstring(self.definition['data']) + + self.instructions = self._extract_instructions(xmltree) + self.content = etree.tostring(xmltree, encoding='unicode') + self.element_id = self.location.html_id() + self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green'] + +class AnnotatableDescriptor(RawDescriptor): + module_class = AnnotatableModule + stores_state = True + template_dir_name = "annotatable" + mako_template = "widgets/raw-edit.html" + diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index 7ab7b60239..b0d3950f06 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -135,8 +135,8 @@ class CapaModule(XModule): self.grace_period = None self.close_date = self.display_due_date - max_attempts = self.metadata.get('attempts', None) - if max_attempts is not None: + max_attempts = self.metadata.get('attempts') + if max_attempts is not None and max_attempts != '': self.max_attempts = int(max_attempts) else: self.max_attempts = None diff --git a/common/lib/xmodule/xmodule/css/annotatable/display.scss b/common/lib/xmodule/xmodule/css/annotatable/display.scss new file mode 100644 index 0000000000..308b379ec1 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/annotatable/display.scss @@ -0,0 +1,169 @@ +$border-color: #C8C8C8; +$body-font-size: em(14); + +.annotatable-header { + margin-bottom: .5em; + .annotatable-title { + font-size: em(22); + text-transform: uppercase; + padding: 
2px 4px; + } +} + +.annotatable-section { + position: relative; + padding: .5em 1em; + border: 1px solid $border-color; + border-radius: .5em; + margin-bottom: .5em; + + &.shaded { background-color: #EDEDED; } + + .annotatable-section-title { + font-weight: bold; + a { font-weight: normal; } + } + .annotatable-section-body { + border-top: 1px solid $border-color; + margin-top: .5em; + padding-top: .5em; + @include clearfix; + } + + ul.instructions-template { + list-style: disc; + margin-left: 4em; + b { font-weight: bold; } + i { font-style: italic; } + code { + display: inline; + white-space: pre; + font-family: Courier New, monospace; + } + } +} + +.annotatable-toggle { + position: absolute; + right: 0; + margin: 2px 1em 2px 0; + &.expanded:after { content: " \2191" } + &.collapsed:after { content: " \2193" } +} + +.annotatable-span { + display: inline; + cursor: pointer; + + @each $highlight in ( + (yellow rgba(255,255,10,0.3) rgba(255,255,10,0.9)), + (red rgba(178,19,16,0.3) rgba(178,19,16,0.9)), + (orange rgba(255,165,0,0.3) rgba(255,165,0,0.9)), + (green rgba(25,255,132,0.3) rgba(25,255,132,0.9)), + (blue rgba(35,163,255,0.3) rgba(35,163,255,0.9)), + (purple rgba(115,9,178,0.3) rgba(115,9,178,0.9))) { + + $marker: nth($highlight,1); + $color: nth($highlight,2); + $selected_color: nth($highlight,3); + + @if $marker == yellow { + &.highlight { + background-color: $color; + &.selected { background-color: $selected_color; } + } + } + &.highlight-#{$marker} { + background-color: $color; + &.selected { background-color: $selected_color; } + } + } + + &.hide { + cursor: none; + background-color: inherit; + .annotatable-icon { + display: none; + } + } + + .annotatable-comment { + display: none; + } +} + +.ui-tooltip.qtip.ui-tooltip { + font-size: $body-font-size; + border: 1px solid #333; + border-radius: 1em; + background-color: rgba(0,0,0,.85); + color: #fff; + -webkit-font-smoothing: antialiased; + + .ui-tooltip-titlebar { + font-size: em(16); + color: inherit; + 
background-color: transparent; + padding: 5px 10px; + border: none; + .ui-tooltip-title { + padding: 5px 0px; + border-bottom: 2px solid #333; + font-weight: bold; + } + .ui-tooltip-icon { + right: 10px; + background: #333; + } + .ui-state-hover { + color: inherit; + border: 1px solid #ccc; + } + } + .ui-tooltip-content { + color: inherit; + font-size: em(14); + text-align: left; + font-weight: 400; + padding: 0 10px 10px 10px; + background-color: transparent; + } + p { + color: inherit; + line-height: normal; + } +} + +.ui-tooltip.qtip.ui-tooltip-annotatable { + max-width: 375px; + .ui-tooltip-content { + padding: 0 10px; + .annotatable-comment { + display: block; + margin: 0px 0px 10px 0; + max-height: 225px; + overflow: auto; + } + .annotatable-reply { + display: block; + border-top: 2px solid #333; + padding: 5px 0; + margin: 0; + text-align: center; + } + } + &:after { + content: ''; + display: inline-block; + position: absolute; + bottom: -20px; + left: 50%; + height: 0; + width: 0; + margin-left: -5px; + border: 10px solid transparent; + border-top-color: rgba(0, 0, 0, .85); + } +} + + diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index b705f5146e..2236251ef1 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -231,6 +231,15 @@ section.problem { width: 25px; } + &.partially-correct { + @include inline-block(); + background: url('../images/partially-correct-icon.png') center center no-repeat; + height: 20px; + position: relative; + top: 6px; + width: 25px; + } + &.incorrect, &.ui-icon-close { @include inline-block(); background: url('../images/incorrect-icon.png') center center no-repeat; @@ -802,4 +811,91 @@ section.problem { display: none; } } + + .annotation-input { + $yellow: rgba(255,255,10,0.3); + + border: 1px solid #ccc; + border-radius: 1em; + margin: 0 0 1em 0; + + .annotation-header { + font-weight: bold; + 
border-bottom: 1px solid #ccc; + padding: .5em 1em; + } + .annotation-body { padding: .5em 1em; } + a.annotation-return { + float: right; + font: inherit; + font-weight: normal; + } + a.annotation-return:after { content: " \2191" } + + .block, ul.tags { + margin: .5em 0; + padding: 0; + } + .block-highlight { + padding: .5em; + color: #333; + font-style: normal; + background-color: $yellow; + border: 1px solid darken($yellow, 10%); + } + .block-comment { font-style: italic; } + + ul.tags { + display: block; + list-style-type: none; + margin-left: 1em; + li { + display: block; + margin: 1em 0 0 0; + position: relative; + .tag { + display: inline-block; + cursor: pointer; + border: 1px solid rgb(102,102,102); + margin-left: 40px; + &.selected { + background-color: $yellow; + } + } + .tag-status { + position: absolute; + left: 0; + } + .tag-status, .tag { padding: .25em .5em; } + } + } + textarea.comment { + $num-lines-to-show: 5; + $line-height: 1.4em; + $padding: .2em; + width: 100%; + padding: $padding (2 * $padding); + line-height: $line-height; + height: ($num-lines-to-show * $line-height) + (2*$padding) - (($line-height - 1)/2); + } + .answer-annotation { display: block; margin: 0; } + + /* for debugging the input value field. 
enable the debug flag on the inputtype */ + .debug-value { + color: #fff; + padding: 1em; + margin: 1em 0; + background-color: #999; + border: 1px solid #000; + input[type="text"] { width: 100%; } + pre { background-color: #CCC; color: #000; } + &:before { + display: block; + content: "debug input value"; + text-transform: uppercase; + font-weight: bold; + font-size: 1.5em; + } + } + } } diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py index 3990a61183..920a5aed6d 100644 --- a/common/lib/xmodule/xmodule/foldit_module.py +++ b/common/lib/xmodule/xmodule/foldit_module.py @@ -96,8 +96,9 @@ class FolditModule(XModule): self.required_level, self.required_sublevel) - showbasic = (self.metadata.get("show_basic_score").lower() == "true") - showleader = (self.metadata.get("show_leaderboard").lower() == "true") + showbasic = (self.metadata.get("show_basic_score", "").lower() == "true") + showleader = (self.metadata.get("show_leaderboard", "").lower() == "true") + context = { 'due': self.due_str, 'success': self.is_complete(), diff --git a/common/lib/xmodule/xmodule/js/fixtures/annotatable.html b/common/lib/xmodule/xmodule/js/fixtures/annotatable.html new file mode 100644 index 0000000000..61020d95e8 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/fixtures/annotatable.html @@ -0,0 +1,35 @@ +
+
+
+
First Annotation Exercise
+
+
+
+ Instructions + Collapse Instructions +
+
+

The main goal of this exercise is to start practicing the art of slow reading.

+
+
+
+
+ Guided Discussion + Hide Annotations +
+
+
+ |87 No, those who are really responsible are Zeus and Fate [Moira] and the Fury [Erinys] who roams in the mist.
+ |88 They are the ones who
+ |100 He [= Zeus], making a formal declaration [eukhesthai], spoke up at a meeting of all the gods and said:
+ |101 “hear me, all gods and all goddesses,
+ |113 but he swore a great oath. + And right then and there
+
+
+
+ +
Return to Annotation
+
Return to Annotation
+
Return to Annotation
+ diff --git a/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee new file mode 100644 index 0000000000..3adb028f97 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee @@ -0,0 +1,9 @@ +describe 'Annotatable', -> + beforeEach -> + loadFixtures 'annotatable.html' + describe 'constructor', -> + el = $('.xmodule_display.xmodule_AnnotatableModule') + beforeEach -> + @annotatable = new Annotatable(el) + it 'works', -> + expect(1).toBe(1) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee new file mode 100644 index 0000000000..2ad49ae6d7 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee @@ -0,0 +1,197 @@ +class @Annotatable + _debug: false + + # selectors for the annotatable xmodule + toggleAnnotationsSelector: '.annotatable-toggle-annotations' + toggleInstructionsSelector: '.annotatable-toggle-instructions' + instructionsSelector: '.annotatable-instructions' + sectionSelector: '.annotatable-section' + spanSelector: '.annotatable-span' + replySelector: '.annotatable-reply' + + # these selectors are for responding to events from the annotation capa problem type + problemXModuleSelector: '.xmodule_CapaModule' + problemSelector: 'section.problem' + problemInputSelector: 'section.problem .annotation-input' + problemReturnSelector: 'section.problem .annotation-return' + + constructor: (el) -> + console.log 'loaded Annotatable' if @_debug + @el = el + @$el = $(el) + @init() + + $: (selector) -> + $(selector, @el) + + init: () -> + @initEvents() + @initTips() + + initEvents: () -> + # Initialize toggle handlers for the instructions and annotations sections + [@annotationsHidden, @instructionsHidden] = [false, false] + @$(@toggleAnnotationsSelector).bind 'click', @onClickToggleAnnotations + 
@$(@toggleInstructionsSelector).bind 'click', @onClickToggleInstructions + + # Initialize handler for 'reply to annotation' events that scroll to + # the associated problem. The reply buttons are part of the tooltip + # content. It's important that the tooltips be configured to render + # as descendants of the annotation module and *not* the document.body. + @$el.delegate @replySelector, 'click', @onClickReply + + # Initialize handler for 'return to annotation' events triggered from problems. + # 1) There are annotationinput capa problems rendered on the page + # 2) Each one has an embedded return link (see annotation capa problem template). + # Since the capa problem injects HTML content via AJAX, the best we can do is + # is let the click events bubble up to the body and handle them there. + $('body').delegate @problemReturnSelector, 'click', @onClickReturn + + initTips: () -> + # tooltips are used to display annotations for highlighted text spans + @$(@spanSelector).each (index, el) => + $(el).qtip(@getSpanTipOptions el) + + getSpanTipOptions: (el) -> + content: + title: + text: @makeTipTitle(el) + text: @makeTipContent(el) + position: + my: 'bottom center' # of tooltip + at: 'top center' # of target + target: $(el) # where the tooltip was triggered (i.e. 
the annotation span) + container: @$el + adjust: + y: -5 + show: + event: 'click mouseenter' + solo: true + hide: + event: 'click mouseleave' + delay: 500, + fixed: true # don't hide the tooltip if it is moused over + style: + classes: 'ui-tooltip-annotatable' + events: + show: @onShowTip + + onClickToggleAnnotations: (e) => @toggleAnnotations() + + onClickToggleInstructions: (e) => @toggleInstructions() + + onClickReply: (e) => @replyTo(e.currentTarget) + + onClickReturn: (e) => @returnFrom(e.currentTarget) + + onShowTip: (event, api) => + event.preventDefault() if @annotationsHidden + + getSpanForProblemReturn: (el) -> + problem_id = $(@problemReturnSelector).index(el) + @$(@spanSelector).filter("[data-problem-id='#{problem_id}']") + + getProblem: (el) -> + problem_id = @getProblemId(el) + $(@problemSelector).has(@problemInputSelector).eq(problem_id) + + getProblemId: (el) -> + $(el).data('problem-id') + + toggleAnnotations: () -> + hide = (@annotationsHidden = not @annotationsHidden) + @toggleAnnotationButtonText hide + @toggleSpans hide + @toggleTips hide + + toggleTips: (hide) -> + visible = @findVisibleTips() + @hideTips visible + + toggleAnnotationButtonText: (hide) -> + buttonText = (if hide then 'Show' else 'Hide')+' Annotations' + @$(@toggleAnnotationsSelector).text(buttonText) + + toggleInstructions: () -> + hide = (@instructionsHidden = not @instructionsHidden) + @toggleInstructionsButton hide + @toggleInstructionsText hide + + toggleInstructionsButton: (hide) -> + txt = (if hide then 'Expand' else 'Collapse')+' Instructions' + cls = (if hide then ['expanded', 'collapsed'] else ['collapsed','expanded']) + @$(@toggleInstructionsSelector).text(txt).removeClass(cls[0]).addClass(cls[1]) + + toggleInstructionsText: (hide) -> + slideMethod = (if hide then 'slideUp' else 'slideDown') + @$(@instructionsSelector)[slideMethod]() + + toggleSpans: (hide) -> + @$(@spanSelector).toggleClass 'hide', hide, 250 + + replyTo: (buttonEl) -> + offset = -20 + el = 
@getProblem buttonEl + if el.length > 0 + @scrollTo(el, @afterScrollToProblem, offset) + else + console.log('problem not found. event: ', e) if @_debug + + returnFrom: (buttonEl) -> + offset = -200 + el = @getSpanForProblemReturn buttonEl + if el.length > 0 + @scrollTo(el, @afterScrollToSpan, offset) + else + console.log('span not found. event:', e) if @_debug + + scrollTo: (el, after, offset = -20) -> + $('html,body').scrollTo(el, { + duration: 500 + onAfter: @_once => after?.call this, el + offset: offset + }) if $(el).length > 0 + + afterScrollToProblem: (problem_el) -> + problem_el.effect 'highlight', {}, 500 + + afterScrollToSpan: (span_el) -> + span_el.addClass 'selected', 400, 'swing', -> + span_el.removeClass 'selected', 400, 'swing' + + makeTipContent: (el) -> + (api) => + text = $(el).data('comment-body') + comment = @createComment(text) + problem_id = @getProblemId(el) + reply = @createReplyLink(problem_id) + $(comment).add(reply) + + makeTipTitle: (el) -> + (api) => + title = $(el).data('comment-title') + (if title then title else 'Commentary') + + createComment: (text) -> + $("
#{text}
") + + createReplyLink: (problem_id) -> + $("Reply to Annotation") + + findVisibleTips: () -> + visible = [] + @$(@spanSelector).each (index, el) -> + api = $(el).qtip('api') + tip = $(api?.elements.tooltip) + if tip.is(':visible') + visible.push el + visible + + hideTips: (elements) -> + $(elements).qtip('hide') + + _once: (fn) -> + done = false + return => + fn.call this unless done + done = true diff --git a/common/lib/xmodule/xmodule/templates/annotatable/default.yaml b/common/lib/xmodule/xmodule/templates/annotatable/default.yaml new file mode 100644 index 0000000000..31dd489fb4 --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/annotatable/default.yaml @@ -0,0 +1,20 @@ +--- +metadata: + display_name: 'Annotation' +data: | + + +

Enter your (optional) instructions for the exercise in HTML format.

+

Annotations are specified by an <annotation> tag which may may have the following attributes:

+
    +
  • title (optional). Title of the annotation. Defaults to Commentary if omitted.
  • +
  • body (required). Text of the annotation.
  • +
  • problem (optional). Numeric index of the problem associated with this annotation. This is a zero-based index, so the first problem on the page would have problem="0".
  • +
  • highlight (optional). Possible values: yellow, red, orange, green, blue, or purple. Defaults to yellow if this attribute is omitted.
  • +
+
+

Add your HTML with annotation spans here.

+

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut sodales laoreet est, egestas gravida felis egestas nec. Aenean at volutpat erat. Cras commodo viverra nibh in aliquam.

+

Nulla facilisi. Pellentesque id vestibulum libero. Suspendisse potenti. Morbi scelerisque nisi vitae felis dictum mattis. Nam sit amet magna elit. Nullam volutpat cursus est, sit amet sagittis odio vulputate et. Curabitur euismod, orci in vulputate imperdiet, augue lorem tempor purus, id aliquet augue turpis a est. Aenean a sagittis libero. Praesent fringilla pretium magna, non condimentum risus elementum nec. Pellentesque faucibus elementum pharetra. Pellentesque vitae metus eros.

+
+children: [] diff --git a/common/lib/xmodule/xmodule/tests/test_annotatable_module.py b/common/lib/xmodule/xmodule/tests/test_annotatable_module.py new file mode 100644 index 0000000000..30f9c9ff92 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_annotatable_module.py @@ -0,0 +1,129 @@ +"""Module annotatable tests""" + +import unittest + +from lxml import etree +from mock import Mock + +from xmodule.annotatable_module import AnnotatableModule +from xmodule.modulestore import Location + +from . import test_system + +class AnnotatableModuleTestCase(unittest.TestCase): + location = Location(["i4x", "edX", "toy", "annotatable", "guided_discussion"]) + sample_xml = ''' + + Read the text. +

+ Sing, + O goddess, + the anger of Achilles son of Peleus, + that brought countless ills upon the Achaeans. Many a brave soul did it send + hurrying down to Hades, and many a hero did it yield a prey to dogs and +

vultures, for so were the counsels + of Jove fulfilled from the day on which the son of Atreus, king of men, and great + Achilles, first fell out with one another.
+

+ The Iliad of Homer by Samuel Butler +
+ ''' + definition = { 'data': sample_xml } + descriptor = Mock() + instance_state = None + shared_state = None + + def setUp(self): + self.annotatable = AnnotatableModule(test_system(), self.location, self.definition, self.descriptor, self.instance_state, self.shared_state) + + def test_annotation_data_attr(self): + el = etree.fromstring('test') + + expected_attr = { + 'data-comment-body': {'value': 'foo', '_delete': 'body' }, + 'data-comment-title': {'value': 'bar', '_delete': 'title'}, + 'data-problem-id': {'value': '0', '_delete': 'problem'} + } + + actual_attr = self.annotatable._get_annotation_data_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_default(self): + xml = 'test' + el = etree.fromstring(xml) + + expected_attr = { 'class': { 'value': 'annotatable-span highlight' } } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_with_valid_highlight(self): + xml = 'test' + + for color in self.annotatable.highlight_colors: + el = etree.fromstring(xml.format(highlight=color)) + value = 'annotatable-span highlight highlight-{highlight}'.format(highlight=color) + + expected_attr = { 'class': { + 'value': value, + '_delete': 'highlight' } + } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_with_invalid_highlight(self): + xml = 'test' + + for invalid_color in ['rainbow', 'blink', 'invisible', '', None]: + el = etree.fromstring(xml.format(highlight=invalid_color)) + expected_attr = { 'class': { + 'value': 'annotatable-span highlight', + '_delete': 'highlight' } + } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) 
+ self.assertDictEqual(expected_attr, actual_attr) + + def test_render_annotation(self): + expected_html = 'z' + expected_el = etree.fromstring(expected_html) + + actual_el = etree.fromstring('z') + self.annotatable._render_annotation(0, actual_el) + + self.assertEqual(expected_el.tag, actual_el.tag) + self.assertEqual(expected_el.text, actual_el.text) + self.assertDictEqual(dict(expected_el.attrib), dict(actual_el.attrib)) + + def test_render_content(self): + content = self.annotatable._render_content() + el = etree.fromstring(content) + + self.assertEqual('div', el.tag, 'root tag is a div') + + expected_num_annotations = 5 + actual_num_annotations = el.xpath('count(//span[contains(@class,"annotatable-span")])') + self.assertEqual(expected_num_annotations, actual_num_annotations, 'check number of annotations') + + def test_get_html(self): + context = self.annotatable.get_html() + for key in ['display_name', 'element_id', 'content_html', 'instructions_html']: + self.assertIn(key, context) + + def test_extract_instructions(self): + xmltree = etree.fromstring(self.sample_xml) + + expected_xml = u"
Read the text.
" + actual_xml = self.annotatable._extract_instructions(xmltree) + self.assertIsNotNone(actual_xml) + self.assertEqual(expected_xml.strip(), actual_xml.strip()) + + xmltree = etree.fromstring('foo') + actual = self.annotatable._extract_instructions(xmltree) + self.assertIsNone(actual) diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py index a1e3d31d76..6330511fc5 100644 --- a/common/lib/xmodule/xmodule/tests/test_capa_module.py +++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py @@ -44,7 +44,7 @@ class CapaFactory(object): @staticmethod def answer_key(): """ Return the key stored in the capa problem answer dict """ - return ("-".join(['i4x', 'edX', 'capa_test', 'problem', + return ("-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % CapaFactory.num]) + "_2_1") @@ -144,6 +144,8 @@ class CapaModuleTest(unittest.TestCase): "Factory should be creating unique names for each problem") + + def test_correct(self): """ Check that the factory creates correct and incorrect problems properly. 
@@ -332,7 +334,7 @@ class CapaModuleTest(unittest.TestCase): 'input_4': None, 'input_5': [], 'input_6': 5} - + result = CapaModule.make_dict_of_responses(valid_get_dict) # Expect that we get a dict with "input" stripped from key names @@ -475,7 +477,7 @@ class CapaModuleTest(unittest.TestCase): mock_is_queued.return_value = True mock_get_queuetime.return_value = datetime.datetime.now() - + get_request_dict = { CapaFactory.input_key(): '3.14' } result = module.check_problem(get_request_dict) @@ -506,7 +508,7 @@ class CapaModuleTest(unittest.TestCase): def test_reset_problem(self): module = CapaFactory.create() - # Mock the module's capa problem + # Mock the module's capa problem # to simulate that the problem is done mock_problem = MagicMock(capa.capa_problem.LoncapaProblem) mock_problem.done = True @@ -668,7 +670,7 @@ class CapaModuleTest(unittest.TestCase): module = CapaFactory.create(max_attempts=0) self.assertFalse(module.should_show_check_button()) - # If user submitted a problem but hasn't reset, + # If user submitted a problem but hasn't reset, # do NOT show the check button # Note: we can only reset when rerandomize="always" module = CapaFactory.create(rerandomize="always") @@ -707,7 +709,7 @@ class CapaModuleTest(unittest.TestCase): module.lcp.done = True self.assertFalse(module.should_show_reset_button()) - # If the user hasn't submitted an answer yet, + # If the user hasn't submitted an answer yet, # then do NOT show the reset button module = CapaFactory.create() module.lcp.done = False @@ -770,7 +772,7 @@ class CapaModuleTest(unittest.TestCase): # If the user is out of attempts, do NOT show the save button attempts = random.randint(1,10) - module = CapaFactory.create(attempts=attempts, + module = CapaFactory.create(attempts=attempts, max_attempts=attempts, force_save_button="true") module.lcp.done = True @@ -784,6 +786,12 @@ class CapaModuleTest(unittest.TestCase): module.lcp.done = True self.assertTrue(module.should_show_save_button()) + def 
test_no_max_attempts(self): + module = CapaFactory.create(max_attempts='') + html = module.get_problem_html() + # assert that we got here without exploding + + def test_get_problem_html(self): module = CapaFactory.create() @@ -797,7 +805,7 @@ class CapaModuleTest(unittest.TestCase): module.should_show_reset_button = Mock(return_value=show_reset_button) module.should_show_save_button = Mock(return_value=show_save_button) - # Mock the system rendering function + # Mock the system rendering function module.system.render_template = Mock(return_value="
Test Template HTML
") # Patch the capa problem's HTML rendering @@ -809,7 +817,7 @@ class CapaModuleTest(unittest.TestCase): # Also render the problem encapsulated in a
html_encapsulated = module.get_problem_html(encapsulate=True) - + # Expect that we get the rendered template back self.assertEqual(html, "
Test Template HTML
") @@ -831,7 +839,7 @@ class CapaModuleTest(unittest.TestCase): def test_get_problem_html_error(self): - """ + """ In production, when an error occurs with the problem HTML rendering, a "dummy" problem is created with an error message to display to the user. @@ -845,10 +853,10 @@ class CapaModuleTest(unittest.TestCase): # is asked to render itself as HTML module.lcp.get_html = Mock(side_effect=Exception("Test")) - # Stub out the test_system rendering function + # Stub out the test_system rendering function module.system.render_template = Mock(return_value="
Test Template HTML
") - # Turn off DEBUG + # Turn off DEBUG module.system.DEBUG = False # Try to render the module with DEBUG turned off @@ -860,4 +868,4 @@ class CapaModuleTest(unittest.TestCase): self.assertTrue("error" in context['problem']['html']) # Expect that the module has created a new dummy problem with the error - self.assertNotEqual(original_problem, module.lcp) + self.assertNotEqual(original_problem, module.lcp) diff --git a/common/lib/xmodule/xmodule/xml_module.py b/common/lib/xmodule/xmodule/xml_module.py index 773531c528..7087a03759 100644 --- a/common/lib/xmodule/xmodule/xml_module.py +++ b/common/lib/xmodule/xmodule/xml_module.py @@ -379,7 +379,11 @@ class XmlDescriptor(XModuleDescriptor): if attr not in self.metadata_to_strip and attr not in self.metadata_to_export_to_policy: val = val_for_xml(attr) #logging.debug('location.category = {0}, attr = {1}'.format(self.location.category, attr)) - xml_object.set(attr, val) + try: + xml_object.set(attr, val) + except Exception, e: + logging.exception('Failed to serialize metadata attribute {0} with value {1}. This could mean data loss!!! Exception: {2}'.format(attr, val, e)) + pass if self.export_to_file(): # Write the definition to a file diff --git a/common/static/coffee/src/discussion/discussion_module_view.coffee b/common/static/coffee/src/discussion/discussion_module_view.coffee index 2e58b2c0b8..3dde9bf950 100644 --- a/common/static/coffee/src/discussion/discussion_module_view.coffee +++ b/common/static/coffee/src/discussion/discussion_module_view.coffee @@ -88,7 +88,7 @@ if Backbone? 
if @$('section.discussion').length @$('section.discussion').replaceWith($discussion) else - $(".discussion-module").append($discussion) + @$el.append($discussion) @newPostForm = $('.new-post-article') @threadviews = @discussion.map (thread) -> new DiscussionThreadInlineView el: @$("article#thread_#{thread.id}"), model: thread diff --git a/common/static/images/partially-correct-icon.png b/common/static/images/partially-correct-icon.png new file mode 100644 index 0000000000..9ac0fd32f7 Binary files /dev/null and b/common/static/images/partially-correct-icon.png differ diff --git a/common/static/js/capa/annotationinput.js b/common/static/js/capa/annotationinput.js new file mode 100644 index 0000000000..4353fd262a --- /dev/null +++ b/common/static/js/capa/annotationinput.js @@ -0,0 +1,97 @@ +(function () { + var debug = false; + + var module = { + debug: debug, + inputSelector: '.annotation-input', + tagSelector: '.tag', + tagsSelector: '.tags', + commentSelector: 'textarea.comment', + valueSelector: 'input.value', // stash tag selections and comment here as a JSON string... 
+ + singleSelect: true, + + init: function() { + var that = this; + + if(this.debug) { console.log('annotation input loaded: '); } + + $(this.inputSelector).each(function(index, el) { + if(!$(el).data('listening')) { + $(el).delegate(that.tagSelector, 'click', $.proxy(that.onClickTag, that)); + $(el).delegate(that.commentSelector, 'change', $.proxy(that.onChangeComment, that)); + $(el).data('listening', 'yes'); + } + }); + }, + onChangeComment: function(e) { + var value_el = this.findValueEl(e.target); + var current_value = this.loadValue(value_el); + var target_value = $(e.target).val(); + + current_value.comment = target_value; + this.storeValue(value_el, current_value); + }, + onClickTag: function(e) { + var target_el = e.target, target_value, target_index; + var value_el, current_value; + + value_el = this.findValueEl(e.target); + current_value = this.loadValue(value_el); + target_value = $(e.target).data('id'); + + if(!$(target_el).hasClass('selected')) { + if(this.singleSelect) { + current_value.options = [target_value] + } else { + current_value.options.push(target_value); + } + } else { + if(this.singleSelect) { + current_value.options = [] + } else { + target_index = current_value.options.indexOf(target_value); + if(target_index !== -1) { + current_value.options.splice(target_index, 1); + } + } + } + + this.storeValue(value_el, current_value); + + if(this.singleSelect) { + $(target_el).closest(this.tagsSelector) + .find(this.tagSelector) + .not(target_el) + .removeClass('selected') + } + $(target_el).toggleClass('selected'); + }, + findValueEl: function(target_el) { + var input_el = $(target_el).closest(this.inputSelector); + return $(this.valueSelector, input_el); + }, + loadValue: function(value_el) { + var json = $(value_el).val(); + + var result = JSON.parse(json); + if(result === null) { + result = {}; + } + if(!result.hasOwnProperty('options')) { + result.options = []; + } + if(!result.hasOwnProperty('comment')) { + result.comment = ''; + } + + 
return result; + }, + storeValue: function(value_el, new_value) { + var json = JSON.stringify(new_value); + $(value_el).val(json); + } + } + + module.init(); +}).call(this); diff --git a/common/static/js/capa/chemical_equation_preview.js b/common/static/js/capa/chemical_equation_preview.js index 90ce27ad11..10a6b54655 100644 --- a/common/static/js/capa/chemical_equation_preview.js +++ b/common/static/js/capa/chemical_equation_preview.js @@ -11,9 +11,14 @@ } prev_id = "#" + this.id + "_preview"; - preview_div = $(prev_id) + preview_div = $(prev_id); - $.get("/preview/chemcalc/", {"formula" : this.value}, create_handler(preview_div)); + // find the closest parent problems-wrapper and use that url + url = $(this).closest('.problems-wrapper').data('url'); + // grab the input id from the input + input_id = $(this).data('input-id') + + Problem.inputAjax(url, input_id, 'preview_chemcalc', {"formula" : this.value}, create_handler(preview_div)); } inputs = $('.chemicalequationinput input'); diff --git a/common/static/js/capa/edit-a-gene.js b/common/static/js/capa/edit-a-gene.js index 48753e507d..bd6d10cc64 100644 --- a/common/static/js/capa/edit-a-gene.js +++ b/common/static/js/capa/edit-a-gene.js @@ -1,27 +1,44 @@ (function () { var timeout = 1000; - function initializeApplet(applet) { - console.log("Initializing " + applet); - waitForApplet(applet); - } + waitForGenex(); - function waitForApplet(applet) { - if (applet.isActive && applet.isActive()) { - console.log("Applet is ready."); - var answerStr = applet.checkAnswer(); - console.log(answerStr); - var input = $('.editageneinput input'); - console.log(input); - input.val(answerStr); - } else if (timeout > 30 * 1000) { - console.error("Applet did not load on time."); - } else { - console.log("Waiting for applet..."); - setTimeout(function() { waitForApplet(applet); }, timeout); + function waitForGenex() { + if (typeof(genex) !== "undefined" && genex) { + genex.onInjectionDone("genex"); + } + else { + 
setTimeout(function() { waitForGenex(); }, timeout); } } - var applets = $('.editageneinput object'); - applets.each(function(i, el) { initializeApplet(el); }); + //NOTE: + // Genex uses six global functions: + // genexSetDNASequence (exported from GWT) + // genexSetClickEvent (exported from GWT) + // genexSetKeyEvent (exported from GWT) + // genexSetProblemNumber (exported from GWT) + // + // It calls genexIsReady with a deferred command when it has finished + // initialization and has drawn itself + // genexStoreAnswer(answer) is called when the GWT [Store Answer] button + // is clicked + + genexIsReady = function() { + //Load DNA sequence + var dna_sequence = $('#dna_sequence').val(); + genexSetDNASequence(dna_sequence); + //Now load mouse and keyboard handlers + genexSetClickEvent(); + genexSetKeyEvent(); + //Now load problem + var genex_problem_number = $('#genex_problem_number').val(); + genexSetProblemNumber(genex_problem_number); + }; + genexStoreAnswer = function(ans) { + var problem = $('#genex_container').parents('.problem'); + var input_field = problem.find('input[type="hidden"][name!="dna_sequence"][name!="genex_problem_number"]'); + input_field.val(ans); + }; }).call(this); + diff --git a/common/static/js/capa/genex/026A6180B5959B8660E084245FEE5E9E.cache.html b/common/static/js/capa/genex/026A6180B5959B8660E084245FEE5E9E.cache.html new file mode 100644 index 0000000000..13f25ec581 --- /dev/null +++ b/common/static/js/capa/genex/026A6180B5959B8660E084245FEE5E9E.cache.html @@ -0,0 +1,649 @@ + + + + \ No newline at end of file diff --git a/common/static/js/capa/genex/1F433010E1134C95BF6CB43F552F3019.cache.html b/common/static/js/capa/genex/1F433010E1134C95BF6CB43F552F3019.cache.html new file mode 100644 index 0000000000..1e99fe0f19 --- /dev/null +++ b/common/static/js/capa/genex/1F433010E1134C95BF6CB43F552F3019.cache.html @@ -0,0 +1,649 @@ + + + + \ No newline at end of file diff --git 
a/common/static/js/capa/genex/2DDA730EDABB80B88A6B0DFA3AFEACA2.cache.html b/common/static/js/capa/genex/2DDA730EDABB80B88A6B0DFA3AFEACA2.cache.html new file mode 100644 index 0000000000..743492768b --- /dev/null +++ b/common/static/js/capa/genex/2DDA730EDABB80B88A6B0DFA3AFEACA2.cache.html @@ -0,0 +1,639 @@ + + + + \ No newline at end of file diff --git a/common/static/js/capa/genex/4EEB1DCF4B30D366C27968D1B5C0BD04.cache.html b/common/static/js/capa/genex/4EEB1DCF4B30D366C27968D1B5C0BD04.cache.html new file mode 100644 index 0000000000..4aa12e55d4 --- /dev/null +++ b/common/static/js/capa/genex/4EEB1DCF4B30D366C27968D1B5C0BD04.cache.html @@ -0,0 +1,651 @@ + + + + \ No newline at end of file diff --git a/common/static/js/capa/genex/5033ABB047340FB9346B622E2CC7107D.cache.html b/common/static/js/capa/genex/5033ABB047340FB9346B622E2CC7107D.cache.html new file mode 100644 index 0000000000..167a193adb --- /dev/null +++ b/common/static/js/capa/genex/5033ABB047340FB9346B622E2CC7107D.cache.html @@ -0,0 +1,625 @@ + + + \ No newline at end of file diff --git a/common/static/js/capa/genex/DF3D3A7FAEE63D711CF2D95BDB3F538C.cache.html b/common/static/js/capa/genex/DF3D3A7FAEE63D711CF2D95BDB3F538C.cache.html new file mode 100644 index 0000000000..913b90be20 --- /dev/null +++ b/common/static/js/capa/genex/DF3D3A7FAEE63D711CF2D95BDB3F538C.cache.html @@ -0,0 +1,639 @@ + + + + \ No newline at end of file diff --git a/common/static/js/capa/genex/clear.cache.gif b/common/static/js/capa/genex/clear.cache.gif new file mode 100644 index 0000000000..e565824aaf Binary files /dev/null and b/common/static/js/capa/genex/clear.cache.gif differ diff --git a/common/static/js/capa/genex/genex.css b/common/static/js/capa/genex/genex.css new file mode 100644 index 0000000000..a05f31110b --- /dev/null +++ b/common/static/js/capa/genex/genex.css @@ -0,0 +1,109 @@ +.genex-button { + margin-right: -8px; + height: 40px !important; +} + +.genex-label { + /*font: normal normal normal 10pt/normal 'Open Sans', 
Verdana, Geneva, sans-serif !important;*/ + /*padding: 4px 0px 0px 10px !important;*/ + font-family: sans-serif !important; + font-size: 13px !important; + font-style: normal !important; + font-variant: normal !important; + font-weight: bold !important; + padding-top: 6px !important; + margin-left: 18px; +} + +.gwt-HTML { + cursor: default; + overflow-x: auto !important; + overflow-y: auto !important; + background-color: rgb(248, 248, 248) !important; +} + +.genex-scrollpanel { + word-wrap: normal !important; + white-space: pre !important; +} + +pre, #dna-strand { + font-family: 'courier new', courier !important; + font-size: 13px !important; + font-style: normal !important; + font-variant: normal !important; + font-weight: normal !important; + border-style: none !important; + background-color: rgb(248, 248, 248) !important; + word-wrap: normal !important; + white-space: pre !important; + overflow-x: visible !important; + overflow-y: visible !important; +} + +.gwt-DialogBox .Caption { + background: #F1F1F1; + padding: 4px 8px 4px 4px; + cursor: default; + font-family: Arial Unicode MS, Arial, sans-serif; + font-weight: bold; + border-bottom: 1px solid #bbbbbb; + border-top: 1px solid #D2D2D2; +} +.gwt-DialogBox .dialogContent { +} +.gwt-DialogBox .dialogMiddleCenter { + padding: 3px; + background: white; +} +.gwt-DialogBox .dialogBottomCenter { +} +.gwt-DialogBox .dialogMiddleLeft { +} +.gwt-DialogBox .dialogMiddleRight { +} +.gwt-DialogBox .dialogTopLeftInner { + width: 10px; + height: 8px; + zoom: 1; +} +.gwt-DialogBox .dialogTopRightInner { + width: 12px; + zoom: 1; +} +.gwt-DialogBox .dialogBottomLeftInner { + width: 10px; + height: 12px; + zoom: 1; +} +.gwt-DialogBox .dialogBottomRightInner { + width: 12px; + height: 12px; + zoom: 1; +} +.gwt-DialogBox .dialogTopLeft { +} +.gwt-DialogBox .dialogTopRight { +} +.gwt-DialogBox .dialogBottomLeft { +} +.gwt-DialogBox .dialogBottomRight { +} +* html .gwt-DialogBox .dialogTopLeftInner { + width: 10px; + overflow: 
hidden; +} +* html .gwt-DialogBox .dialogTopRightInner { + width: 12px; + overflow: hidden; +} +* html .gwt-DialogBox .dialogBottomLeftInner { + width: 10px; + height: 12px; + overflow: hidden; +} +* html .gwt-DialogBox .dialogBottomRightInner { + width: 12px; + height: 12px; + overflow: hidden; +} \ No newline at end of file diff --git a/common/static/js/capa/genex/genex.nocache.js b/common/static/js/capa/genex/genex.nocache.js new file mode 100644 index 0000000000..07da038234 --- /dev/null +++ b/common/static/js/capa/genex/genex.nocache.js @@ -0,0 +1,18 @@ +function genex(){var P='',xb='" for "gwt:onLoadErrorFn"',vb='" for "gwt:onPropertyErrorFn"',ib='"><\/script>',Z='#',Xb='.cache.html',_='/',lb='//',Qb='026A6180B5959B8660E084245FEE5E9E',Rb='1F433010E1134C95BF6CB43F552F3019',Sb='2DDA730EDABB80B88A6B0DFA3AFEACA2',Tb='4EEB1DCF4B30D366C27968D1B5C0BD04',Ub='5033ABB047340FB9346B622E2CC7107D',Wb=':',pb='::',dc=' + +This html file is for Development Mode support. + diff --git a/doc/public/course_data_formats/course_xml.rst b/doc/public/course_data_formats/course_xml.rst index fe25aa92f2..56d831d972 100644 --- a/doc/public/course_data_formats/course_xml.rst +++ b/doc/public/course_data_formats/course_xml.rst @@ -550,15 +550,84 @@ If you want to customize the courseware tabs displayed for your course, specify ********* Textbooks ********* -Support is currently provided for image-based and PDF-based textbooks. +Support is currently provided for image-based and PDF-based textbooks. In addition to enabling the display of textbooks in tabs (see above), specific information about the location of textbook content must be configured. Image-based Textbooks -^^^^^^^^^^^^^^^^^^^^^ +===================== + +Configuration +------------- + +Image-based textbooks are configured at the course level in the XML markup. Here is an example: + +.. code-block:: xml + + + + + + + + + +Each `textbook` element is displayed on a different tab. 
The `title` attribute is used as the tab's name, and the `book_url` attribute points to the remote directory that contains the images of the text. Note the trailing slash on the end of the `book_url` attribute. + +The images must be stored in the same directory as the `book_url`, with filenames matching `pXXX.png`, where `XXX` is a three-digit number representing the page number (with leading zeroes as necessary). Pages start at `p001.png`. + +Each textbook must also have its own table of contents. This is read from the `book_url` location, by appending `toc.xml`. This file contains a `table_of_contents` parent element, with `entry` elements nested below it. Each `entry` has attributes for `name`, `page_label`, and `page`, as well as an optional `chapter` attribute. An arbitrary number of levels of nesting of `entry` elements within other `entry` elements is supported, but you're likely to only want two levels. The `page` represents the actual page to link to, while the `page_label` matches the displayed page number on that page. Here's an example: + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Linking from Content +-------------------- + +It is possible to add links to specific pages in a textbook by using a URL that encodes the index of the textbook and the page number. The URL is of the form `/course/book/${bookindex}/$page}`. If the page is omitted from the URL, the first page is assumed. + +You can use a `customtag` to create a template for such links. For example, you can create a `book` template in the `customtag` directory, containing: + +.. code-block:: xml + + More information given in the text. + +The course content can then link to page 25 using the `customtag` element: + +.. code-block:: xml + + -TBD. PDF-based Textbooks -^^^^^^^^^^^^^^^^^^^ +=================== + +Configuration +------------- PDF-based textbooks are configured at the course level in the policy file. 
The JSON markup consists of an array of maps, with each map corresponding to a separate textbook. There are two styles to presenting PDF-based material. The first way is as a single PDF on a tab, which requires only a tab title and a URL for configuration. A second way permits the display of multiple PDFs that should be displayed together on a single view. For this view, a side panel of links is available on the left, allowing selection of a particular PDF to view. @@ -566,20 +635,51 @@ PDF-based textbooks are configured at the course level in the policy file. The "pdf_textbooks": [ {"tab_title": "Textbook 1", - "url": "https://www.example.com/book1.pdf" }, + "url": "https://www.example.com/thiscourse/book1/book1.pdf" }, {"tab_title": "Textbook 2", "chapters": [ - { "title": "Chapter 1", "url": "https://www.example.com/Chapter1.pdf" }, - { "title": "Chapter 2", "url": "https://www.example.com/Chapter2.pdf" }, - { "title": "Chapter 3", "url": "https://www.example.com/Chapter3.pdf" }, - { "title": "Chapter 4", "url": "https://www.example.com/Chapter4.pdf" }, - { "title": "Chapter 5", "url": "https://www.example.com/Chapter5.pdf" }, - { "title": "Chapter 6", "url": "https://www.example.com/Chapter6.pdf" }, - { "title": "Chapter 7", "url": "https://www.example.com/Chapter7.pdf" } + { "title": "Chapter 1", "url": "https://www.example.com/thiscourse/book2/Chapter1.pdf" }, + { "title": "Chapter 2", "url": "https://www.example.com/thiscourse/book2/Chapter2.pdf" }, + { "title": "Chapter 3", "url": "https://www.example.com/thiscourse/book2/Chapter3.pdf" }, + { "title": "Chapter 4", "url": "https://www.example.com/thiscourse/book2/Chapter4.pdf" }, + { "title": "Chapter 5", "url": "https://www.example.com/thiscourse/book2/Chapter5.pdf" }, + { "title": "Chapter 6", "url": "https://www.example.com/thiscourse/book2/Chapter6.pdf" }, + { "title": "Chapter 7", "url": "https://www.example.com/thiscourse/book2/Chapter7.pdf" } ] } ] +Some notes: + +* It is not a good idea to include a 
top-level URL and chapter-level URLs in the same textbook configuration. + +Linking from Content +-------------------- + +It is possible to add links to specific pages in a textbook by using a URL that encodes the index of the textbook, the chapter (if chapters are used), and the page number. For a book with no chapters, the URL is of the form `/course/pdfbook/${bookindex}/$page}`. For a book with chapters, use `/course/pdfbook/${bookindex}/chapter/${chapter}/${page}`. If the page is omitted from the URL, the first page is assumed. + +For example, for the book with no chapters configured above, page 25 can be reached using the URL `/course/pdfbook/0/25`. Reaching page 19 in the third chapter of the second book is accomplished with `/course/pdfbook/1/chapter/3/19`. + +You can use a `customtag` to create a template for such links. For example, you can create a `pdfbook` template in the `customtag` directory, containing: + +.. code-block:: xml + + More information given in the text. + +And a `pdfchapter` template containing: + +.. code-block:: xml + + More information given in the text. + +The example pages can then be linked using the `customtag` element: + +.. code-block:: xml + + + + + ************************************* Other file locations (info and about) ************************************* diff --git a/doc/public/course_data_formats/drag_and_drop/drag-n-drop-demo3.xml b/doc/public/course_data_formats/drag_and_drop/drag-n-drop-demo3.xml new file mode 100644 index 0000000000..860f488089 --- /dev/null +++ b/doc/public/course_data_formats/drag_and_drop/drag-n-drop-demo3.xml @@ -0,0 +1,262 @@ + + + + +

[Simple grading example: draggables on draggables]


+

Describe carbon molecule in LCAO-MO.


+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +

[Complex grading example: draggables on draggables]


+

Describe carbon molecule in LCAO-MO.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +

[Complex grading example: no draggables on draggables]


+

Describe carbon molecule in LCAO-MO.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
diff --git a/doc/public/course_data_formats/drag_and_drop/drag_and_drop_input.rst b/doc/public/course_data_formats/drag_and_drop/drag_and_drop_input.rst index 4d61038054..4927deeec6 100644 --- a/doc/public/course_data_formats/drag_and_drop/drag_and_drop_input.rst +++ b/doc/public/course_data_formats/drag_and_drop/drag_and_drop_input.rst @@ -83,9 +83,58 @@ the slider. If no targets are provided, then a draggable can be dragged and placed anywhere on the base image. -correct answer format +Targets on draggables --------------------- +Sometimes it is not enough to have targets only on the base image, and all of the +draggables on these targets. If a complex problem exists where a draggable must +become itself a target (or many targets), then the following extended syntax +can be used: :: + + + + + + ... + + +The attribute list in the tags above ('draggable' and 'target') is the same as for +normal 'draggable' and 'target' tags. The only difference is when you will be +specifying inner target position coordinates. Using the 'x' and 'y' attributes you +are setting the offset of the inner target from the upper-left corner of the +parent draggable (that contains the inner target). + +Limitations of targets on draggables +------------------------------------ + +1.) Currently there is a limitation to the level of nesting of targets. + +Even though you can pile up a large number of draggables on targets that themselves +are on draggables, the Drag and Drop instance will be graded only in the case if +there is a maximum of two levels of targets. The first level are the "base" targets. +They are attached to the base image. The second level are the targets defined on +draggables. + +2.) Another limitation is that the target bounds are not checked against +other targets. + +For now, it is the responsibility of the person who is constructing the course +material to make sure that there is no overlapping of targets. 
It is also preferable +that targets on draggables are smaller than the actual parent draggable. Technically +this is not necessary, but from the usability perspective it is desirable. + +3.) You can have targets on draggables only in the case when there are base targets +defined (base targets are attached to the base image). + +If you do not have base targets, then you can only have a single level of nesting +(draggables on the base image). In this case the client side will be reporting (x,y) +positions of each draggables on the base image. + +Correct answer format +--------------------- + +(NOTE: For specifying answers for targets on draggables please see next section.) + There are two correct answer formats: short and long If short from correct answer is mapping of 'draggable_id' to 'target_id':: @@ -180,7 +229,7 @@ Rules are: exact, anyof, unordered_equal, anyof+number, unordered_equal+number 'rule': 'unordered_equal' }] -- And sometimes you want to allow drag only two 'b' draggables, in these case you sould use 'anyof+number' of 'unordered_equal+number' rule:: +- And sometimes you want to allow drag only two 'b' draggables, in these case you should use 'anyof+number' of 'unordered_equal+number' rule:: correct_answer = [ { @@ -204,6 +253,54 @@ for same number of draggables, anyof is equal to unordered_equal If we have can_reuse=true, than one must use only long form of correct answer. +Answer format for targets on draggables +--------------------------------------- + +As with the cases described above, an answer must provide precise positioning for +each draggable (on which targets it must reside). In the case when a draggable must +be placed on a target that itself is on a draggable, then the answer must contain +the chain of target-draggable-target. It is best to understand this on an example. + +Suppose we have three draggables - 'up', 's', and 'p'. Draggables 's', and 'p' have targets +on themselves. 
More specifically, 'p' has three targets - '1', '2', and '3'. The first +requirement is that 's', and 'p' are positioned on specific targets on the base image. +The second requirement is that draggable 'up' is positioned on specific targets of +draggable 'p'. Below is an excerpt from a problem.:: + + + + + + + + + + + + + + ... + + correct_answer = [ + { + 'draggables': ['p'], + 'targets': ['p-left-target', 'p-right-target'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['s'], + 'targets': ['s-left-target', 's-right-target'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': ['p-left-target[p][1]', 'p-left-target[p][2]', 'p-right-target[p][2]', 'p-right-target[p][3]',], + 'rule': 'unordered_equal' + } + ] + +Note that it is a requirement to specify rules for all draggables, even if some draggable gets included +in more than one chain. Grading logic ------------- @@ -321,3 +418,8 @@ Draggables can be reused ------------------------ .. literalinclude:: drag-n-drop-demo2.xml + +Examples of targets on draggables +------------------------ + +.. literalinclude:: drag-n-drop-demo3.xml diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py index 7877c83bdc..23d27c72ac 100644 --- a/lms/djangoapps/courseware/module_render.py +++ b/lms/djangoapps/courseware/module_render.py @@ -16,7 +16,6 @@ from django.views.decorators.csrf import csrf_exempt from requests.auth import HTTPBasicAuth from capa.xqueue_interface import XQueueInterface -from capa.chem import chemcalc from courseware.access import has_access from mitxmako.shortcuts import render_to_string from models import StudentModule, StudentModuleCache @@ -559,42 +558,6 @@ def modx_dispatch(request, dispatch, location, course_id): return HttpResponse(ajax_return) -def preview_chemcalc(request): - """ - Render an html preview of a chemical formula or equation. The fact that - this is here is a bit of hack. 
See the note in lms/urls.py about why it's - here. (Victor is to blame.) - - request should be a GET, with a key 'formula' and value 'some formula string'. - - Returns a json dictionary: - { - 'preview' : 'the-preview-html' or '' - 'error' : 'the-error' or '' - } - """ - if request.method != "GET": - raise Http404 - - result = {'preview': '', - 'error': ''} - formula = request.GET.get('formula') - if formula is None: - result['error'] = "No formula specified." - - return HttpResponse(json.dumps(result)) - - try: - result['preview'] = chemcalc.render_to_html(formula) - except pyparsing.ParseException as p: - result['error'] = "Couldn't parse formula: {0}".format(p) - except Exception: - # this is unexpected, so log - log.warning("Error while previewing chemical formula", exc_info=True) - result['error'] = "Error while rendering preview" - - return HttpResponse(json.dumps(result)) - def get_score_bucket(grade, max_grade): """ diff --git a/lms/djangoapps/courseware/tests/test_tabs.py b/lms/djangoapps/courseware/tests/test_tabs.py new file mode 100644 index 0000000000..928b9ae0df --- /dev/null +++ b/lms/djangoapps/courseware/tests/test_tabs.py @@ -0,0 +1,259 @@ +from django.test import TestCase +from mock import MagicMock + +import courseware.tabs as tabs + +from django.test.utils import override_settings +from django.core.urlresolvers import reverse + + +class ProgressTestCase(TestCase): + + def setUp(self): + + self.mockuser1 = MagicMock() + self.mockuser0 = MagicMock() + self.course = MagicMock() + self.mockuser1.is_authenticated.return_value = True + self.mockuser0.is_authenticated.return_value = False + self.course.id = 'edX/full/6.002_Spring_2012' + self.tab = {'name': 'same'} + self.active_page1 = 'progress' + self.active_page0 = 'stagnation' + + def test_progress(self): + + self.assertEqual(tabs._progress(self.tab, self.mockuser0, self.course, + self.active_page0), []) + + self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course, + 
self.active_page1)[0].name, 'same') + + self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course, + self.active_page1)[0].link, + reverse('progress', args = [self.course.id])) + + self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course, + self.active_page0)[0].is_active, False) + + self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course, + self.active_page1)[0].is_active, True) + + +class WikiTestCase(TestCase): + + def setUp(self): + + self.user = MagicMock() + self.course = MagicMock() + self.course.id = 'edX/full/6.002_Spring_2012' + self.tab = {'name': 'same'} + self.active_page1 = 'wiki' + self.active_page0 = 'miki' + + @override_settings(WIKI_ENABLED=True) + def test_wiki_enabled(self): + + self.assertEqual(tabs._wiki(self.tab, self.user, + self.course, self.active_page1)[0].name, + 'same') + + self.assertEqual(tabs._wiki(self.tab, self.user, + self.course, self.active_page1)[0].link, + reverse('course_wiki', args=[self.course.id])) + + self.assertEqual(tabs._wiki(self.tab, self.user, + self.course, self.active_page1)[0].is_active, + True) + + self.assertEqual(tabs._wiki(self.tab, self.user, + self.course, self.active_page0)[0].is_active, + False) + + @override_settings(WIKI_ENABLED=False) + def test_wiki_enabled_false(self): + + self.assertEqual(tabs._wiki(self.tab, self.user, + self.course, self.active_page1), []) + + +class ExternalLinkTestCase(TestCase): + + def setUp(self): + + self.user = MagicMock() + self.course = MagicMock() + self.tabby = {'name': 'same', 'link': 'blink'} + self.active_page0 = None + self.active_page00 = True + + def test_external_link(self): + + self.assertEqual(tabs._external_link(self.tabby, self.user, + self.course, self.active_page0)[0].name, + 'same') + + self.assertEqual(tabs._external_link(self.tabby, self.user, + self.course, self.active_page0)[0].link, + 'blink') + + self.assertEqual(tabs._external_link(self.tabby, self.user, + self.course, self.active_page0)[0].is_active, + False) 
+ + self.assertEqual(tabs._external_link(self.tabby, self.user, + self.course, self.active_page00)[0].is_active, + False) + + +class StaticTabTestCase(TestCase): + + def setUp(self): + + self.user = MagicMock() + self.course = MagicMock() + self.tabby = {'name': 'same', 'url_slug': 'schmug'} + self.course.id = 'edX/full/6.002_Spring_2012' + self.active_page1 = 'static_tab_schmug' + self.active_page0 = 'static_tab_schlug' + + def test_static_tab(self): + + self.assertEqual(tabs._static_tab(self.tabby, self.user, + self.course, self.active_page1)[0].name, + 'same') + + self.assertEqual(tabs._static_tab(self.tabby, self.user, + self.course, self.active_page1)[0].link, + reverse('static_tab', args = [self.course.id, + self.tabby['url_slug']])) + + self.assertEqual(tabs._static_tab(self.tabby, self.user, + self.course, self.active_page1)[0].is_active, + True) + + + self.assertEqual(tabs._static_tab(self.tabby, self.user, + self.course, self.active_page0)[0].is_active, + False) + + +class TextbooksTestCase(TestCase): + + def setUp(self): + + self.mockuser1 = MagicMock() + self.mockuser0 = MagicMock() + self.course = MagicMock() + self.tab = MagicMock() + A = MagicMock() + T = MagicMock() + self.mockuser1.is_authenticated.return_value = True + self.mockuser0.is_authenticated.return_value = False + self.course.id = 'edX/full/6.002_Spring_2012' + self.active_page0 = 'textbook/0' + self.active_page1 = 'textbook/1' + self.active_pageX = 'you_shouldnt_be_seein_this' + A.title = 'Algebra' + T.title = 'Topology' + self.course.textbooks = [A, T] + + @override_settings(MITX_FEATURES={'ENABLE_TEXTBOOK': True}) + def test_textbooks1(self): + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_page0)[0].name, + 'Algebra') + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_page0)[0].link, + reverse('book', args=[self.course.id, 0])) + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, 
self.active_page0)[0].is_active, + True) + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_pageX)[0].is_active, + False) + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_page1)[1].name, + 'Topology') + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_page1)[1].link, + reverse('book', args=[self.course.id, 1])) + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_page1)[1].is_active, + True) + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_pageX)[1].is_active, + False) + + @override_settings(MITX_FEATURES={'ENABLE_TEXTBOOK': False}) + def test_textbooks0(self): + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser1, + self.course, self.active_pageX), []) + + self.assertEqual(tabs._textbooks(self.tab, self.mockuser0, + self.course, self.active_pageX), []) + +class KeyCheckerTestCase(TestCase): + + def setUp(self): + + self.expected_keys1 = ['a', 'b'] + self.expected_keys0 = ['a', 'v', 'g'] + self.dictio = {'a': 1, 'b': 2, 'c': 3} + + def test_key_checker(self): + + self.assertIsNone(tabs.key_checker(self.expected_keys1)(self.dictio)) + self.assertRaises(tabs.InvalidTabsException, + tabs.key_checker(self.expected_keys0), self.dictio) + + +class NullValidatorTestCase(TestCase): + + def setUp(self): + + self.d = {} + + def test_null_validator(self): + + self.assertIsNone(tabs.null_validator(self.d)) + + +class ValidateTabsTestCase(TestCase): + + def setUp(self): + + self.courses = [MagicMock() for i in range(0,5)] + + self.courses[0].tabs = None + + self.courses[1].tabs = [{'type':'courseware'}, {'type': 'fax'}] + + self.courses[2].tabs = [{'type':'shadow'}, {'type': 'course_info'}] + + self.courses[3].tabs = [{'type':'courseware'},{'type':'course_info', 'name': 'alice'}, + {'type': 'wiki', 'name':'alice'}, {'type':'discussion', 'name': 'alice'}, + 
{'type':'external_link', 'name': 'alice', 'link':'blink'}, + {'type':'textbooks'}, {'type':'progress', 'name': 'alice'}, + {'type':'static_tab', 'name':'alice', 'url_slug':'schlug'}, + {'type': 'staff_grading'}] + + self.courses[4].tabs = [{'type':'courseware'},{'type': 'course_info'}, {'type': 'flying'}] + + + def test_validate_tabs(self): + + self.assertIsNone(tabs.validate_tabs(self.courses[0])) + self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[1]) + self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[2]) + self.assertIsNone(tabs.validate_tabs(self.courses[3])) + self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[4]) diff --git a/lms/djangoapps/django_comment_client/tests/test_models.py b/lms/djangoapps/django_comment_client/tests/test_models.py new file mode 100644 index 0000000000..6f90b3c4b8 --- /dev/null +++ b/lms/djangoapps/django_comment_client/tests/test_models.py @@ -0,0 +1,55 @@ +import django_comment_client.models as models +import django_comment_client.permissions as permissions +from django.test import TestCase + + +class RoleClassTestCase(TestCase): + def setUp(self): + # For course ID, syntax edx/classname/classdate is important + # because xmodel.course_module.id_to_location looks for a string to split + + self.course_id = "edX/toy/2012_Fall" + self.student_role = models.Role.objects.get_or_create(name="Student", \ + course_id=self.course_id)[0] + self.student_role.add_permission("delete_thread") + self.student_2_role = models.Role.objects.get_or_create(name="Student", \ + course_id=self.course_id)[0] + self.TA_role = models.Role.objects.get_or_create(name="Community TA",\ + course_id=self.course_id)[0] + self.course_id_2 = "edx/6.002x/2012_Fall" + self.TA_role_2 = models.Role.objects.get_or_create(name="Community TA",\ + course_id=self.course_id_2)[0] + class Dummy(): + def render_template(): + pass + d = {"data": { + "textbooks": [], + 'wiki_slug': True, + } + } + 
+ def testHasPermission(self): + # Whenever you add a permission to student_role, + # Roles with the same FORUM_ROLE in same class also receives the same + # permission. + # Is this desirable behavior? + self.assertTrue(self.student_role.has_permission("delete_thread")) + self.assertTrue(self.student_2_role.has_permission("delete_thread")) + self.assertFalse(self.TA_role.has_permission("delete_thread")) + + def testInheritPermissions(self): + + self.TA_role.inherit_permissions(self.student_role) + self.assertTrue(self.TA_role.has_permission("delete_thread")) + # Despite being from 2 different courses, TA_role_2 can still inherit + # permissions from TA_role without error + self.TA_role_2.inherit_permissions(self.TA_role) + + +class PermissionClassTestCase(TestCase): + + def setUp(self): + self.permission = permissions.Permission.objects.get_or_create(name="test")[0] + + def testUnicode(self): + self.assertEqual(str(self.permission), "test") diff --git a/lms/djangoapps/django_comment_client/tests/test_mustache_helpers.py b/lms/djangoapps/django_comment_client/tests/test_mustache_helpers.py index 5b788b3cc4..7db3ba6e86 100644 --- a/lms/djangoapps/django_comment_client/tests/test_mustache_helpers.py +++ b/lms/djangoapps/django_comment_client/tests/test_mustache_helpers.py @@ -3,26 +3,40 @@ import random import collections from django.test import TestCase +from mock import MagicMock +from django.test.utils import override_settings +import django.core.urlresolvers as urlresolvers import django_comment_client.mustache_helpers as mustache_helpers - -class PluralizeTestCase(TestCase): - - def test_pluralize(self): - self.text1 = '0 goat' - self.text2 = '1 goat' - self.text3 = '7 goat' - self.content = 'unused argument' - self.assertEqual(mustache_helpers.pluralize(self.content, self.text1), 'goats') - self.assertEqual(mustache_helpers.pluralize(self.content, self.text2), 'goat') - self.assertEqual(mustache_helpers.pluralize(self.content, self.text3), 'goats') 
+######################################################################################### -class CloseThreadTextTestCase(TestCase): +class PluralizeTest(TestCase): + + def setUp(self): + self.text1 = '0 goat' + self.text2 = '1 goat' + self.text3 = '7 goat' + self.content = 'unused argument' + + def test_pluralize(self): + self.assertEqual(mustache_helpers.pluralize(self.content, self.text1), 'goats') + self.assertEqual(mustache_helpers.pluralize(self.content, self.text2), 'goat') + self.assertEqual(mustache_helpers.pluralize(self.content, self.text3), 'goats') + +######################################################################################### + + +class CloseThreadTextTest(TestCase): + + def setUp(self): + self.contentClosed = {'closed': True} + self.contentOpen = {'closed': False} + + def test_close_thread_text(self): + self.assertEqual(mustache_helpers.close_thread_text(self.contentClosed), 'Re-open thread') + self.assertEqual(mustache_helpers.close_thread_text(self.contentOpen), 'Close thread') + +######################################################################################### - def test_close_thread_text(self): - self.contentClosed = {'closed': True} - self.contentOpen = {'closed': False} - self.assertEqual(mustache_helpers.close_thread_text(self.contentClosed), 'Re-open thread') - self.assertEqual(mustache_helpers.close_thread_text(self.contentOpen), 'Close thread') diff --git a/lms/djangoapps/foldit/views.py b/lms/djangoapps/foldit/views.py index 988c113d23..da361a2a82 100644 --- a/lms/djangoapps/foldit/views.py +++ b/lms/djangoapps/foldit/views.py @@ -130,7 +130,7 @@ def save_scores(user, puzzle_scores): current_score=current_score, best_score=best_score, score_version=score_version) - obj.save() + obj.save() score_responses.append({'PuzzleID': puzzle_id, 'Status': 'Success'}) diff --git a/lms/templates/annotatable.html b/lms/templates/annotatable.html new file mode 100644 index 0000000000..f010305744 --- /dev/null +++ 
b/lms/templates/annotatable.html @@ -0,0 +1,29 @@ +
+
+ % if display_name is not UNDEFINED and display_name is not None: +
${display_name}
+ % endif +
+ + % if instructions_html is not UNDEFINED and instructions_html is not None: +
+
+ Instructions + Collapse Instructions +
+
+ ${instructions_html} +
+
+ % endif + +
+
+ Guided Discussion + Hide Annotations +
+
+ ${content_html} +
+
+
diff --git a/lms/urls.py b/lms/urls.py index 5e5ac9a7f2..5972b49266 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -224,14 +224,6 @@ if settings.COURSEWARE_ENABLED: 'courseware.module_render.modx_dispatch', name='modx_dispatch'), - # TODO (vshnayder): This is a hack. It creates a direct connection from - # the LMS to capa functionality, and really wants to go through the - # input types system so that previews can be context-specific. - # Unfortunately, we don't have time to think through the right way to do - # that (and implement it), and it's not a terrible thing to provide a - # generic chemical-equation rendering service. - url(r'^preview/chemcalc', 'courseware.module_render.preview_chemcalc', - name='preview_chemcalc'), # Software Licenses