[prompt]')
+
+ describe 'insertRubric', ->
+ it 'inserts the template if selection is empty', ->
+ revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('')
+ expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.rubricTemplate)
+ it 'recognizes a proper rubric', ->
+ revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('[rubric]\n+1\n-1\n-2\n[rubric]')
+ expect(revisedSelection).toEqual('[rubric]\n+1\n-1\n-2\n[rubric]')
+
+ describe 'insertTasks', ->
+ it 'inserts the template if selection is empty', ->
+ revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('')
+ expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.tasksTemplate)
+ it 'recognizes a proper task string', ->
+ revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('[tasks](Self)[tasks]')
+ expect(revisedSelection).toEqual('[tasks](Self)[tasks]')
+
+ describe 'markdownToXml', ->
+ # test default templates
+ it 'converts prompt to xml', ->
+ data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[prompt]
+
Prompt!
+ This is my super awesome prompt.
+ [prompt]
+ """)
+ data = data.replace(/[\t\n\s]/gmi,'')
+ expect(data).toEqual("""
+
+
+
Prompt!
+ This is my super awesome prompt.
+
+
+ """.replace(/[\t\n\s]/gmi,''))
+
+ it 'converts rubric to xml', ->
+ data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[rubric]
+ + 1
+ -1
+ -2
+ + 2
+ -1
+ -2
+ +3
+ -1
+ -2
+ -3
+ [rubric]
+ """)
+ data = data.replace(/[\t\n\s]/gmi,'')
+ expect(data).toEqual("""
+
+
+
+
+ 1
+
+
+
+
+ 2
+
+
+
+
+ 3
+
+
+
+
+
+
+
+ """.replace(/[\t\n\s]/gmi,''))
+
+ it 'converts tasks to xml', ->
+ data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[tasks]
+ (Self), ({1-2}AI), ({1-4}AI), ({1-2}Peer
+ [tasks]
+ """)
+ data = data.replace(/[\t\n\s]/gmi,'')
+ equality_list = """
+
+
+
+
+
+ ml_grading.conf
+
+
+ ml_grading.conf
+
+
+ peer_grading.conf
+
+
+ """
+ expect(data).toEqual(equality_list.replace(/[\t\n\s]/gmi,''))
diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee
new file mode 100644
index 0000000000..1b7f9bb4fb
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee
@@ -0,0 +1,282 @@
+class @OpenEndedMarkdownEditingDescriptor extends XModule.Descriptor
+ # TODO really, these templates should come from or also feed the cheatsheet
+ @rubricTemplate : """
+ [rubric]
+ + Ideas
+ - Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
+ - Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
+ - Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
+ - Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
+ + Content
+ - Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
+ - Includes little information and few or no details. Explores only one or two facets of the topic.
+ - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
+ - Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
+ + Organization
+ - Ideas organized illogically, transitions weak, and response difficult to follow.
+ - Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
+ - Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
+ + Style
+ - Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
+ - Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
+ - Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
+ + Voice
+ - Demonstrates language and tone that may be inappropriate to task and reader.
+ - Demonstrates an attempt to adjust language and tone to task and reader.
+ - Demonstrates effective adjustment of language and tone to task and reader.
+ [rubric]
+ """
+
+ @tasksTemplate: "[tasks]\n(Self), ({4-12}AI), ({9-12}Peer)\n[tasks]\n"
+ @promptTemplate: """
+ [prompt]\n
+
Censorship in the Libraries
+
+
'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
+
+
+
+Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
+
+ [prompt]\n
+ """
+
+ constructor: (element) ->
+ @element = element
+
+ if $(".markdown-box", @element).length != 0
+ @markdown_editor = CodeMirror.fromTextArea($(".markdown-box", element)[0], {
+ lineWrapping: true
+ mode: null
+ })
+ @setCurrentEditor(@markdown_editor)
+ # Add listeners for toolbar buttons (only present for markdown editor)
+ @element.on('click', '.xml-tab', @onShowXMLButton)
+ @element.on('click', '.format-buttons a', @onToolbarButton)
+ @element.on('click', '.cheatsheet-toggle', @toggleCheatsheet)
+ # Hide the XML text area
+ $(@element.find('.xml-box')).hide()
+ else
+ @createXMLEditor()
+
+ ###
+ Creates the XML Editor and sets it as the current editor. If text is passed in,
+ it will replace the text present in the HTML template.
+
+ text: optional argument to override the text passed in via the HTML template
+ ###
+ createXMLEditor: (text) ->
+ @xml_editor = CodeMirror.fromTextArea($(".xml-box", @element)[0], {
+ mode: "xml"
+ lineNumbers: true
+ lineWrapping: true
+ })
+ if text
+ @xml_editor.setValue(text)
+ @setCurrentEditor(@xml_editor)
+
+ ###
+ User has clicked to show the XML editor. Before XML editor is swapped in,
+ the user will need to confirm the one-way conversion.
+ ###
+ onShowXMLButton: (e) =>
+ e.preventDefault();
+ if @confirmConversionToXml()
+ @createXMLEditor(OpenEndedMarkdownEditingDescriptor.markdownToXml(@markdown_editor.getValue()))
+ # Need to refresh to get line numbers to display properly (and put cursor position to 0)
+ @xml_editor.setCursor(0)
+ @xml_editor.refresh()
+ # Hide markdown-specific toolbar buttons
+ $(@element.find('.editor-bar')).hide()
+
+ ###
+ Have the user confirm the one-way conversion to XML.
+ Returns true if the user clicked OK, else false.
+ ###
+ confirmConversionToXml: ->
+ # TODO: use something besides a JavaScript confirm dialog?
+ return confirm("If you use the Advanced Editor, this problem will be converted to XML and you will not be able to return to the Simple Editor Interface.\n\nProceed to the Advanced Editor and convert this problem to XML?")
+
+ ###
+ Event listener for toolbar buttons (only possible when markdown editor is visible).
+ ###
+ onToolbarButton: (e) =>
+ e.preventDefault();
+ selection = @markdown_editor.getSelection()
+ revisedSelection = null
+ switch $(e.currentTarget).attr('class')
+ when "rubric-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric(selection)
+ when "prompt-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt(selection)
+ when "tasks-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks(selection)
+ else # ignore click
+
+ if revisedSelection != null
+ @markdown_editor.replaceSelection(revisedSelection)
+ @markdown_editor.focus()
+
+ ###
+ Event listener for toggling cheatsheet (only possible when markdown editor is visible).
+ ###
+ toggleCheatsheet: (e) =>
+ e.preventDefault();
+ if !$(@markdown_editor.getWrapperElement()).find('.simple-editor-open-ended-cheatsheet')[0]
+ @cheatsheet = $($('#simple-editor-open-ended-cheatsheet').html())
+ $(@markdown_editor.getWrapperElement()).append(@cheatsheet)
+
+ setTimeout (=> @cheatsheet.toggleClass('shown')), 10
+
+ ###
+ Stores the current editor and hides the one that is not displayed.
+ ###
+ setCurrentEditor: (editor) ->
+ if @current_editor
+ $(@current_editor.getWrapperElement()).hide()
+ @current_editor = editor
+ $(@current_editor.getWrapperElement()).show()
+ $(@current_editor).focus();
+
+ ###
+ Called when save is called. Listeners are unregistered because editing the block again will
+ result in a new instance of the descriptor. Note that this is NOT the case for cancel--
+ when cancel is called the instance of the descriptor is reused if edit is selected again.
+ ###
+ save: ->
+ @element.off('click', '.xml-tab', @changeEditor)
+ @element.off('click', '.format-buttons a', @onToolbarButton)
+ @element.off('click', '.cheatsheet-toggle', @toggleCheatsheet)
+ if @current_editor == @markdown_editor
+ {
+ data: OpenEndedMarkdownEditingDescriptor.markdownToXml(@markdown_editor.getValue())
+ metadata:
+ markdown: @markdown_editor.getValue()
+ }
+ else
+ {
+ data: @xml_editor.getValue()
+ metadata:
+ markdown: null
+ }
+
+ @insertRubric: (selectedText) ->
+ return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[rubric]', '[rubric]', OpenEndedMarkdownEditingDescriptor.rubricTemplate)
+
+ @insertPrompt: (selectedText) ->
+ return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[prompt]', '[prompt]', OpenEndedMarkdownEditingDescriptor.promptTemplate)
+
+ @insertTasks: (selectedText) ->
+ return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[tasks]', '[tasks]', OpenEndedMarkdownEditingDescriptor.tasksTemplate)
+
+ @insertGenericInput: (selectedText, lineStart, lineEnd, template) ->
+ if selectedText.length > 0
+ new_string = selectedText.replace(/^\s+|\s+$/g,'')
+ if new_string.substring(0,lineStart.length) != lineStart
+ new_string = lineStart + new_string
+ if new_string.substring((new_string.length)-lineEnd.length,new_string.length) != lineEnd
+ new_string = new_string + lineEnd
+ return new_string
+ else
+ return template
+
+ @markdownToXml: (markdown)->
+ toXml = `function(markdown) {
+
+ function template(template_html,data){
+ return template_html.replace(/%(\w*)%/g,function(m,key){return data.hasOwnProperty(key)?data[key]:"";});
+ }
+
+ var xml = markdown;
+
+ // group rubrics
+ xml = xml.replace(/\[rubric\]\n?([^\]]*)\[\/?rubric\]/gmi, function(match, p) {
+ var groupString = '\n\n';
+ var options = p.split('\n');
+ var category_open = false;
+ for(var i = 0; i < options.length; i++) {
+ if(options[i].length > 0) {
+ var value = options[i].replace(/^\s+|\s+$/g,'');
+ if (value.charAt(0)=="+") {
+ if(i>0){
+ if(category_open==true){
+ groupString += "\n";
+ category_open = false;
+ }
+ }
+ groupString += "\n\n";
+ category_open = true;
+ text = value.substr(1);
+ text = text.replace(/^\s+|\s+$/g,'');
+ groupString += text;
+ groupString += "\n\n";
+ } else if (value.charAt(0) == "-") {
+ groupString += "\n";
+ }
+ }
+ if(i==options.length-1 && category_open == true){
+ groupString += "\n\n";
+ }
+ }
+ groupString += '\n\n';
+ return groupString;
+ });
+
+ // group tasks
+ xml = xml.replace(/\[tasks\]\n?([^\]]*)\[\/?tasks\]/gmi, function(match, p) {
+ var open_ended_template = $('#open-ended-template').html();
+ if(open_ended_template == null) {
+ open_ended_template = "%grading_config%";
+ }
+ var groupString = '';
+ var options = p.split(",");
+ for(var i = 0; i < options.length; i++) {
+ if(options[i].length > 0) {
+ var value = options[i].replace(/^\s+|\s+$/g,'');
+ var lower_option = value.toLowerCase();
+ type = lower_option.match(/(peer|self|ai)/gmi)
+ if(type != null) {
+ type = type[0]
+ var min_max = value.match(/\{\n?([^\]]*)\}/gmi);
+ var min_max_string = "";
+ if(min_max!=null) {
+ min_max = min_max[0].replace(/^{|}/gmi,'');
+ min_max = min_max.split("-");
+ min = min_max[0];
+ max = min_max[1];
+ min_max_string = 'min_score_to_attempt="' + min + '" max_score_to_attempt="' + max + '" ';
+ }
+ groupString += "\n"
+ if(type=="self") {
+ groupString +=""
+ } else if (type=="peer") {
+ config = "peer_grading.conf"
+ groupString += template(open_ended_template,{min_max_string: min_max_string, grading_config: config});
+ } else if (type=="ai") {
+ config = "ml_grading.conf"
+ groupString += template(open_ended_template,{min_max_string: min_max_string, grading_config: config});
+ }
+ groupString += "\n"
+ }
+ }
+ }
+ return groupString;
+ });
+
+ // replace prompts
+ xml = xml.replace(/\[prompt\]\n?([^\]]*)\[\/?prompt\]/gmi, function(match, p1) {
+ var selectString = '\n' + p1 + '\n';
+ return selectString;
+ });
+
+ // rid white space
+ xml = xml.replace(/\n\n\n/g, '\n');
+
+ // surround w/ combinedopenended tag
+ xml = '\n' + xml + '\n';
+
+ return xml;
+ }
+ `
+ return toXml markdown
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
index 1404f52300..e289ba72f1 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
@@ -847,8 +847,8 @@ class CombinedOpenEndedV1Descriptor():
if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error
raise ValueError(
- "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
- child))
+ "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. {1}".format(
+ child, xml_object))
def parse_task(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
index b16f0618bb..3e3f943cd7 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
@@ -53,8 +53,9 @@ class GradingService(object):
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
- log.error("Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data))
- raise GradingServiceError, str(err), sys.exc_info()[2]
+ error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)
+ log.error(error_string)
+ raise GradingServiceError(error_string)
return r.text
@@ -71,8 +72,9 @@ class GradingService(object):
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
- log.error("Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params))
- raise GradingServiceError, str(err), sys.exc_info()[2]
+ error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)
+ log.error(error_string)
+ raise GradingServiceError(error_string)
return r.text
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
index 7ba046b2ad..4f772fe0a1 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
@@ -168,7 +168,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
#This is a student_facing_error
return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."}
- qinterface = system.xqueue['interface']
+ xqueue = system.get('xqueue')
+ if xqueue is None:
+ return {'success': False, 'msg': "Couldn't submit feedback."}
+ qinterface = xqueue['interface']
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +
@@ -176,7 +179,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
str(len(self.child_history)))
xheader = xqueue_interface.make_xheader(
- lms_callback_url=system.xqueue['construct_callback'](),
+ lms_callback_url=xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.message_queue_name
)
@@ -219,7 +222,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# Prepare xqueue request
#------------------------------------------------------------
- qinterface = system.xqueue['interface']
+ xqueue = system.get('xqueue')
+ if xqueue is None:
+ return False
+ qinterface = xqueue['interface']
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
@@ -230,7 +236,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
str(len(self.child_history)))
xheader = xqueue_interface.make_xheader(
- lms_callback_url=system.xqueue['construct_callback'](),
+ lms_callback_url=xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.queue_name
)
diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py
index 33249c1fb9..531c4a167b 100644
--- a/common/lib/xmodule/xmodule/peer_grading_module.py
+++ b/common/lib/xmodule/xmodule/peer_grading_module.py
@@ -10,7 +10,7 @@ from .x_module import XModule
from xmodule.raw_module import RawDescriptor
from xmodule.modulestore.django import modulestore
from .timeinfo import TimeInfo
-from xblock.core import Object, String, Scope
+from xblock.core import Object, Integer, Boolean, String, Scope
from xmodule.fields import Date, StringyFloat, StringyInteger, StringyBoolean
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, GradingServiceError, MockPeerGradingService
@@ -32,13 +32,20 @@ class PeerGradingFields(object):
help='When True, only the single problem specified by "Link to Problem Location" is shown. '
'When False, a panel is displayed with all problems available for peer grading.',
default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings)
- link_to_location = String(display_name="Link to Problem Location",
- help='The location of the problem being graded. Only used when "Show Single Problem" is True.',
- default=LINK_TO_LOCATION, scope=Scope.settings)
- # TODO: move boolean default into xfields
- is_graded = StringyBoolean(display_name="Graded",
- help='Whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.',
- default=IS_GRADED, scope=Scope.settings)
+link_to_location = String(display_name="Link to Problem Location",
+ help='The location of the problem being graded. Only used when "Show Single Problem" is True.',
+ default=LINK_TO_LOCATION, scope=Scope.settings)
+# TODO: move boolean default into xfields
+is_graded = StringyBoolean(display_name="Graded",
+ help='Whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.',
+ default=IS_GRADED, scope=Scope.settings)
+
+
+ use_for_single_location = StringyBoolean(help="Whether to use this for a single location or as a panel.",
+ default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings)
+ link_to_location = String(help="The location this problem is linked to.", default=LINK_TO_LOCATION,
+ scope=Scope.settings)
+ is_graded = StringyBoolean(help="Whether or not this module is scored.", default=IS_GRADED, scope=Scope.settings)
due_date = Date(help="Due date that should be displayed.", default=None, scope=Scope.settings)
grace_period_string = String(help="Amount of grace to give on the due date.", default=None, scope=Scope.settings)
max_grade = StringyInteger(help="The maximum grade that a student can receive for this problem.", default=MAX_SCORE,
@@ -596,6 +603,9 @@ class PeerGradingDescriptor(PeerGradingFields, RawDescriptor):
always_recalculate_grades = True
template_dir_name = "peer_grading"
+ #Specify whether or not to pass in open ended interface
+ needs_open_ended_interface = True
+
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields
diff --git a/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml
index 4c35771b0f..789a186402 100644
--- a/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml
+++ b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml
@@ -2,6 +2,7 @@
metadata:
display_name: Open Ended Response
version: 1
+ markdown: ""
data: |
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
index 917e90e575..409347882f 100644
--- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -407,7 +407,7 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
self.assertTrue(changed)
def test_get_max_score(self):
- changed = self.combinedoe.update_task_states()
+ self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
max_score = self.combinedoe.max_score()
@@ -611,11 +611,11 @@ class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
self.assertEqual(module.current_task_number, 1)
#Get html and other data client will request
- html = module.get_html()
+ module.get_html()
legend = module.handle_ajax("get_legend", {})
self.assertTrue(isinstance(legend, basestring))
- status = module.handle_ajax("get_status", {})
+ module.handle_ajax("get_status", {})
module.handle_ajax("skip_post_assessment", {})
self.assertTrue(isinstance(legend, basestring))
diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py
index 2a665cd8a0..284b746249 100644
--- a/lms/djangoapps/courseware/module_render.py
+++ b/lms/djangoapps/courseware/module_render.py
@@ -214,22 +214,27 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
#This is a hacky way to pass settings to the combined open ended xmodule
#It needs an S3 interface to upload images to S3
#It needs the open ended grading interface in order to get peer grading to be done
- #TODO: refactor these settings into module-specific settings when possible.
#this first checks to see if the descriptor is the correct one, and only sends settings if it is
- is_descriptor_combined_open_ended = (descriptor.__class__.__name__ == 'CombinedOpenEndedDescriptor')
- is_descriptor_peer_grading = (descriptor.__class__.__name__ == 'PeerGradingDescriptor')
+
+ #Get descriptor metadata fields indicating needs for various settings
+ needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False)
+ needs_s3_interface = getattr(descriptor, "needs_s3_interface", False)
+
+ #Initialize interfaces to None
open_ended_grading_interface = None
s3_interface = None
- if is_descriptor_combined_open_ended or is_descriptor_peer_grading:
+
+ #Create interfaces if needed
+ if needs_open_ended_interface:
open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE
open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING
open_ended_grading_interface['mock_staff_grading'] = settings.MOCK_STAFF_GRADING
- if is_descriptor_combined_open_ended:
- s3_interface = {
- 'access_key' : getattr(settings,'AWS_ACCESS_KEY_ID',''),
- 'secret_access_key' : getattr(settings,'AWS_SECRET_ACCESS_KEY',''),
- 'storage_bucket_name' : getattr(settings,'AWS_STORAGE_BUCKET_NAME','openended')
- }
+ if needs_s3_interface:
+ s3_interface = {
+ 'access_key': getattr(settings, 'AWS_ACCESS_KEY_ID', ''),
+ 'secret_access_key': getattr(settings, 'AWS_SECRET_ACCESS_KEY', ''),
+ 'storage_bucket_name': getattr(settings, 'AWS_STORAGE_BUCKET_NAME', 'openended')
+ }
def inner_get_module(descriptor):
"""