diff --git a/common/lib/xmodule/xmodule/capa_base.py b/common/lib/xmodule/xmodule/capa_base.py
index b6c0dee2fd..f15f18219e 100644
--- a/common/lib/xmodule/xmodule/capa_base.py
+++ b/common/lib/xmodule/xmodule/capa_base.py
@@ -492,27 +492,31 @@ class CapaMixin(CapaFields):
if answer_id.find(hidden_state_keyword) >= 0:
student_answers.pop(answer_id)
- # Next, generate a fresh LoncapaProblem
+ # Next, generate a fresh LoncapaProblem
self.lcp = self.new_lcp(None)
self.set_state_from_lcp()
# Prepend a scary warning to the student
-            warning = '<div class="capa_reset">'\
-                      '<h2>Warning: The problem has been reset to its initial state!</h2>'\
-                      'The problem\'s state was corrupted by an invalid submission. ' \
-                      'The submission consisted of:'\
-                      '<ul>'
+ _ = self.runtime.service(self, "i18n").ugettext
+ warning_msg = _("Warning: The problem has been reset to its initial state!")
+            warning = '<div class="capa_reset"><h2>' + warning_msg + '</h2>'
+
+ # Translators: Following this message, there will be a bulleted list of items.
+ warning_msg = _("The problem's state was corrupted by an invalid submission. The submission consisted of:")
+            warning += warning_msg + '<ul>'
+
for student_answer in student_answers.values():
if student_answer != '':
                     warning += '<li>' + cgi.escape(student_answer) + '</li>'
-            warning += '</ul>'\
-                       'If this error persists, please contact the course staff.'\
-                       '</div>'
+
+ warning_msg = _('If this error persists, please contact the course staff.')
+            warning += '</ul>' + warning_msg + '</div>'
html = warning
try:
html += self.lcp.get_html()
- except Exception: # Couldn't do it. Give up
+ except Exception: # pylint: disable=broad-except
+ # Couldn't do it. Give up.
log.exception("Unable to generate html from LoncapaProblem")
raise
@@ -541,20 +545,22 @@ class CapaMixin(CapaFields):
else:
check_button = False
- content = {'name': self.display_name_with_default,
- 'html': html,
- 'weight': self.weight,
- }
+ content = {
+ 'name': self.display_name_with_default,
+ 'html': html,
+ 'weight': self.weight,
+ }
- context = {'problem': content,
- 'id': self.id,
- 'check_button': check_button,
- 'reset_button': self.should_show_reset_button(),
- 'save_button': self.should_show_save_button(),
- 'answer_available': self.answer_available(),
- 'attempts_used': self.attempts,
- 'attempts_allowed': self.max_attempts,
- }
+ context = {
+ 'problem': content,
+ 'id': self.id,
+ 'check_button': check_button,
+ 'reset_button': self.should_show_reset_button(),
+ 'save_button': self.should_show_save_button(),
+ 'answer_available': self.answer_available(),
+ 'attempts_used': self.attempts,
+ 'attempts_allowed': self.max_attempts,
+ }
html = self.runtime.render_template('problem.html', context)
@@ -563,7 +569,7 @@ class CapaMixin(CapaFields):
id=self.location.html_id(), ajax_url=self.runtime.ajax_url
         ) + html + "</div>"
- # now do all the substitutions which the LMS module_render normally does, but
+ # Now do all the substitutions which the LMS module_render normally does, but
# we need to do here explicitly since we can get called for our HTML via AJAX
html = self.runtime.replace_urls(html)
if self.runtime.replace_course_urls:
@@ -855,17 +861,19 @@ class CapaMixin(CapaFields):
answers = self.make_dict_of_responses(data)
event_info['answers'] = convert_files_to_filenames(answers)
+ _ = self.runtime.service(self, "i18n").ugettext
+
# Too late. Cannot submit
if self.closed():
event_info['failure'] = 'closed'
self.runtime.track_function('problem_check_fail', event_info)
- raise NotFoundError('Problem is closed')
+ raise NotFoundError(_("Problem is closed."))
# Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == "always":
event_info['failure'] = 'unreset'
self.runtime.track_function('problem_check_fail', event_info)
- raise NotFoundError('Problem must be reset before it can be checked again')
+ raise NotFoundError(_("Problem must be reset before it can be checked again."))
# Problem queued. Students must wait a specified waittime before they are allowed to submit
if self.lcp.is_queued():
@@ -873,7 +881,7 @@ class CapaMixin(CapaFields):
prev_submit_time = self.lcp.get_recentmost_queuetime()
waittime_between_requests = self.runtime.xqueue['waittime']
if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
- msg = u'You must wait at least {wait} seconds between submissions'.format(
+ msg = _(u"You must wait at least {wait} seconds between submissions.").format(
wait=waittime_between_requests)
return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback
@@ -899,7 +907,8 @@ class CapaMixin(CapaFields):
# Otherwise, display just an error message,
# without a stack trace
else:
- msg = u"Error: {msg}".format(msg=inst.message)
+ # Translators: {msg} will be replaced with a problem's error message.
+ msg = _(u"Error: {msg}").format(msg=inst.message)
return {'success': msg}
@@ -936,9 +945,10 @@ class CapaMixin(CapaFields):
# render problem into HTML
html = self.get_problem_html(encapsulate=False)
- return {'success': success,
- 'contents': html,
- }
+ return {
+ 'success': success,
+ 'contents': html,
+ }
def rescore_problem(self):
"""
@@ -958,15 +968,18 @@ class CapaMixin(CapaFields):
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.url()}
+ _ = self.runtime.service(self, "i18n").ugettext
+
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.runtime.track_function('problem_rescore_fail', event_info)
- raise NotImplementedError("Problem's definition does not support rescoring")
+ # Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
+ raise NotImplementedError(_("Problem's definition does not support rescoring."))
if not self.done:
event_info['failure'] = 'unanswered'
self.runtime.track_function('problem_rescore_fail', event_info)
- raise NotFoundError('Problem must be answered before it can be graded again')
+ raise NotFoundError(_("Problem must be answered before it can be graded again."))
# get old score, for comparison:
orig_score = self.lcp.get_score()
@@ -1032,32 +1045,40 @@ class CapaMixin(CapaFields):
answers = self.make_dict_of_responses(data)
event_info['answers'] = answers
+ _ = self.runtime.service(self, "i18n").ugettext
# Too late. Cannot submit
if self.closed() and not self.max_attempts == 0:
event_info['failure'] = 'closed'
self.runtime.track_function('save_problem_fail', event_info)
- return {'success': False,
- 'msg': "Problem is closed"}
+ return {
+ 'success': False,
+ # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
+ 'msg': _("Problem is closed.")
+ }
# Problem submitted. Student should reset before saving
# again.
if self.done and self.rerandomize == "always":
event_info['failure'] = 'done'
self.runtime.track_function('save_problem_fail', event_info)
- return {'success': False,
- 'msg': "Problem needs to be reset prior to save"}
+ return {
+ 'success': False,
+ 'msg': _("Problem needs to be reset prior to save.")
+ }
self.lcp.student_answers = answers
self.set_state_from_lcp()
self.runtime.track_function('save_problem_success', event_info)
- msg = "Your answers have been saved"
+ msg = _("Your answers have been saved.")
if not self.max_attempts == 0:
- msg += " but not graded. Hit 'Check' to grade them."
- return {'success': True,
- 'msg': msg}
+ msg = _("Your answers have been saved but not graded. Click 'Check' to grade them.")
+ return {
+ 'success': True,
+ 'msg': msg,
+ }
def reset_problem(self, _data):
"""
@@ -1074,18 +1095,24 @@ class CapaMixin(CapaFields):
event_info = dict()
event_info['old_state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.url()
+ _ = self.runtime.service(self, "i18n").ugettext
if self.closed():
event_info['failure'] = 'closed'
self.runtime.track_function('reset_problem_fail', event_info)
- return {'success': False,
- 'error': "Problem is closed"}
+ return {
+ 'success': False,
+ # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
+ 'error': _("Problem is closed."),
+ }
if not self.done:
event_info['failure'] = 'not_done'
self.runtime.track_function('reset_problem_fail', event_info)
- return {'success': False,
- 'error': "Refresh the page and make an attempt before resetting."}
+ return {
+ 'success': False,
+ 'error': _("Refresh the page and make an attempt before resetting."),
+ }
if self.rerandomize in ["always", "onreset"]:
# Reset random number generator seed.
@@ -1100,5 +1127,7 @@ class CapaMixin(CapaFields):
event_info['new_state'] = self.lcp.get_state()
self.runtime.track_function('reset_problem', event_info)
- return {'success': True,
- 'html': self.get_problem_html(encapsulate=False)}
+ return {
+ 'success': True,
+ 'html': self.get_problem_html(encapsulate=False),
+ }
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index d00fe6cf76..a6151011b5 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -62,12 +62,14 @@ class CapaModule(CapaMixin, XModule):
'ungraded_response': self.handle_ungraded_response
}
- generic_error_message = (
+ _ = self.runtime.service(self, "i18n").ugettext
+
+ generic_error_message = _(
"We're sorry, there was an error with processing your request. "
"Please try reloading your page and trying again."
)
- not_found_error_message = (
+ not_found_error_message = _(
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
diff --git a/lms/djangoapps/instructor_task/tests/test_integration.py b/lms/djangoapps/instructor_task/tests/test_integration.py
index 56cb0211a0..e6cdb92183 100644
--- a/lms/djangoapps/instructor_task/tests/test_integration.py
+++ b/lms/djangoapps/instructor_task/tests/test_integration.py
@@ -265,10 +265,10 @@ class TestRescoringTask(TestIntegrationTask):
self.assertEqual(instructor_task.task_state, FAILURE)
status = json.loads(instructor_task.task_output)
self.assertEqual(status['exception'], 'NotImplementedError')
- self.assertEqual(status['message'], "Problem's definition does not support rescoring")
+ self.assertEqual(status['message'], "Problem's definition does not support rescoring.")
status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
- self.assertEqual(status['message'], "Problem's definition does not support rescoring")
+ self.assertEqual(status['message'], "Problem's definition does not support rescoring.")
def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
"""