diff --git a/common/djangoapps/student/models.py b/common/djangoapps/student/models.py
index 8220e5507c..7b4a5fb9be 100644
--- a/common/djangoapps/student/models.py
+++ b/common/djangoapps/student/models.py
@@ -271,8 +271,9 @@ class TestCenterUserForm(ModelForm):
         new_user = self.save(commit=False)
         # create additional values here:
         new_user.user_updated_at = datetime.utcnow()
+        new_user.upload_status = ''
         new_user.save()
-        log.info("Updated demographic information for user's test center exam registration: username \"{}\" ".format(new_user.username))
+        log.info("Updated demographic information for user's test center exam registration: username \"{}\" ".format(new_user.user.username))

    # add validation:
@@ -533,6 +534,7 @@ class TestCenterRegistrationForm(ModelForm):
         registration = self.save(commit=False)
         # create additional values here:
         registration.user_updated_at = datetime.utcnow()
+        registration.upload_status = ''
         registration.save()
         log.info("Updated registration information for user's test center exam registration: username \"{}\" course \"{}\", examcode \"{}\"".format(registration.testcenter_user.user.username, registration.course_id, registration.exam_series_code))
diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py
index 8696c2ba28..1a9648835e 100644
--- a/common/djangoapps/student/views.py
+++ b/common/djangoapps/student/views.py
@@ -632,15 +632,18 @@ def begin_exam_registration(request, course_id):
     user = request.user

     try:
-        course = (course_from_id(course_id))
+        course = course_from_id(course_id)
     except ItemNotFoundError:
-        # TODO: do more than just log!!  The rest will fail, so we should fail right now.
-        log.error("User {0} enrolled in non-existent course {1}"
-                  .format(user.username, course_id))
+        log.error("User {0} enrolled in non-existent course {1}".format(user.username, course_id))
+        raise Http404

     # get the exam to be registered for:
     # (For now, we just assume there is one at most.)
+    # if there is no exam now (because someone bookmarked this stupid page),
+    # then return a 404:
     exam_info = course.current_test_center_exam
+    if exam_info is None:
+        raise Http404

     # determine if the user is registered for this course:
     registration = exam_registration_info(user, course)
@@ -675,11 +678,18 @@ def create_exam_registration(request, post_override=None):
     username = post_vars['username']
     user = User.objects.get(username=username)
     course_id = post_vars['course_id']
-    course = (course_from_id(course_id))  # assume it will be found....
+    course = course_from_id(course_id)  # assume it will be found....
+
+    # make sure that any demographic data values received from the page have been stripped.
+    # Whitespace is not an acceptable response for any of these values
+    demographic_data = {}
+    for fieldname in TestCenterUser.user_provided_fields():
+        if fieldname in post_vars:
+            demographic_data[fieldname] = (post_vars[fieldname]).strip()

     try:
         testcenter_user = TestCenterUser.objects.get(user=user)
-        needs_updating = testcenter_user.needs_update(post_vars)
+        needs_updating = testcenter_user.needs_update(demographic_data)
         log.info("User {0} enrolled in course {1} {2}updating demographic info for exam registration".format(user.username, course_id, "" if needs_updating else "not "))
     except TestCenterUser.DoesNotExist:
         # do additional initialization here:
@@ -691,7 +701,7 @@ def create_exam_registration(request, post_override=None):
     if needs_updating:
         # first perform validation on the user information
         # using a Django Form.
-        form = TestCenterUserForm(instance=testcenter_user, data=post_vars)
+        form = TestCenterUserForm(instance=testcenter_user, data=demographic_data)
         if form.is_valid():
             form.update_and_save()
         else:
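The views.py change above strips surrounding whitespace from every user-provided demographic field before it is compared against the stored TestCenterUser record, so a whitespace-only answer counts as empty. A minimal standalone sketch of that cleaning step; the field names and the inline dict standing in for the real POST data are illustrative, not the actual edX fields:

# Sketch only: field names and POST contents below are illustrative placeholders.
def strip_demographic_fields(post_vars, user_provided_fields):
    """Keep only the expected demographic fields, with surrounding whitespace
    removed, so a whitespace-only answer is treated as empty."""
    demographic_data = {}
    for fieldname in user_provided_fields:
        if fieldname in post_vars:
            demographic_data[fieldname] = post_vars[fieldname].strip()
    return demographic_data

cleaned = strip_demographic_fields(
    {'first_name': '  Jane ', 'last_name': 'Doe', 'unrelated': 'ignored'},
    ['first_name', 'last_name', 'address_1'])
print(cleaned)   # -> {'first_name': 'Jane', 'last_name': 'Doe'} (key order may vary)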
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py
index 5fd0bcc618..0b6b1050d2 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_module.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -24,8 +24,6 @@ import open_ended_module
 from combined_open_ended_rubric import CombinedOpenEndedRubric
 from .stringify import stringify_children

-from mitxmako.shortcuts import render_to_string
-
 log = logging.getLogger("mitx.courseware")

 # Set the default number of max attempts.  Should be 1 for production
@@ -142,7 +140,7 @@ class CombinedOpenEndedModule(XModule):
         # completion (doesn't matter if you self-assessed correct/incorrect).
         self._max_score = int(self.metadata.get('max_score', MAX_SCORE))

-        rubric_renderer = CombinedOpenEndedRubric(True)
+        rubric_renderer = CombinedOpenEndedRubric(self.system, True)
         success, rubric_feedback = rubric_renderer.render_rubric(stringify_children(definition['rubric']))
         if not success:
             error_message="Could not parse rubric : {0}".format(definition['rubric'])
@@ -329,7 +327,7 @@ class CombinedOpenEndedModule(XModule):
         Output: HTML rendered directly via Mako
         """
         context = self.get_context()
-        html = render_to_string('combined_open_ended.html', context)
+        html = self.system.render_template('combined_open_ended.html', context)
         return html

     def get_html_base(self):
@@ -379,17 +377,17 @@ class CombinedOpenEndedModule(XModule):
             self.static_data, instance_state=task_state)
         last_response = task.latest_answer()
         last_score = task.latest_score()
-        last_post_assessment = task.latest_post_assessment()
+        last_post_assessment = task.latest_post_assessment(self.system)
         last_post_feedback = ""
         if task_type == "openended":
-            last_post_assessment = task.latest_post_assessment(short_feedback=False, join_feedback=False)
+            last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False)
             if isinstance(last_post_assessment, list):
                 eval_list = []
                 for i in xrange(0, len(last_post_assessment)):
-                    eval_list.append(task.format_feedback_with_evaluation(last_post_assessment[i]))
+                    eval_list.append(task.format_feedback_with_evaluation(self.system, last_post_assessment[i]))
                 last_post_evaluation = "".join(eval_list)
             else:
-                last_post_evaluation = task.format_feedback_with_evaluation(last_post_assessment)
+                last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment)
                 last_post_assessment = last_post_evaluation
             last_correctness = task.is_last_response_correct()
             max_score = task.max_score()
@@ -452,7 +450,7 @@ class CombinedOpenEndedModule(XModule):
         self.update_task_states()
         response_dict = self.get_last_response(task_number)
         context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1}
-        html = render_to_string('combined_open_ended_results.html', context)
+        html = self.system.render_template('combined_open_ended_results.html', context)
         return {'html': html, 'success': True}

     def handle_ajax(self, dispatch, get):
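The combined_open_ended_module.py hunks above drop the module-level mitxmako import and render every template through the injected self.system instead. A toy sketch of that dependency-injection seam, using made-up ToyModule and StubSystem classes rather than the real XModule API:

# Toy sketch of rendering through an injected system instead of a global
# render_to_string import; ToyModule and StubSystem are made-up names.
class StubSystem(object):
    """Test double that fakes the render_template collaborator."""
    def render_template(self, template_name, context):
        return '[%s rendered with keys %s]' % (template_name, sorted(context))

class ToyModule(object):
    def __init__(self, system):
        self.system = system          # injected, so tests never touch Mako

    def get_html(self):
        context = {'task_count': 3, 'state': 'initial'}
        return self.system.render_template('combined_open_ended.html', context)

print(ToyModule(StubSystem()).get_html())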
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
index 37ce18e4f1..ee66f81470 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py
@@ -1,14 +1,14 @@
-from mitxmako.shortcuts import render_to_string
 import logging
 from lxml import etree

 log=logging.getLogger(__name__)

-class CombinedOpenEndedRubric:
+class CombinedOpenEndedRubric(object):

-    def __init__ (self, view_only = False):
+    def __init__ (self, system, view_only = False):
         self.has_score = False
         self.view_only = view_only
+        self.system = system

     '''
     render_rubric: takes in an xml string and outputs the corresponding
@@ -23,7 +23,7 @@ class CombinedOpenEndedRubric:
         success = False
         try:
             rubric_categories = self.extract_categories(rubric_xml)
-            html = render_to_string('open_ended_rubric.html',
+            html = system.render_template('open_ended_rubric.html',
                     {'categories' : rubric_categories,
                      'has_score': self.has_score,
                      'view_only': self.view_only})
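Besides taking the system in its constructor, CombinedOpenEndedRubric (and OpenEndedChild further down) also becomes a new-style class by inheriting from object, which is what makes super(), properties, and other descriptors behave under Python 2. A small illustration of that difference, unrelated to the edX classes themselves:

# Illustration of old-style vs new-style classes under Python 2 semantics;
# on Python 3 every class is already new-style and the distinction disappears.
class OldStyle:               # old-style on Python 2: no proper descriptor support
    pass

class NewStyle(object):       # new-style: super(), @property and __mro__ all work
    @property
    def answer(self):
        return 42

print(NewStyle().answer)      # 42; super(OldStyle, ...) would raise TypeError on Python 2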
diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py
index 9942a77470..c674d330e1 100644
--- a/common/lib/xmodule/xmodule/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_module.py
@@ -30,7 +30,6 @@ from xmodule.modulestore import Location
 from capa.util import *
 import openendedchild

-from mitxmako.shortcuts import render_to_string
 from numpy import median
 from datetime import datetime
@@ -257,7 +256,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         @param system: Modulesystem
         @return: Boolean True (not useful currently)
         """
-        new_score_msg = self._parse_score_msg(score_msg)
+        new_score_msg = self._parse_score_msg(score_msg, system)
         if not new_score_msg['valid']:
             score_msg['feedback'] = 'Invalid grader reply. Please contact the course staff.'
@@ -371,7 +370,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):

         return u"\n".join([feedback_list_part1, feedback_list_part2])

-    def _format_feedback(self, response_items):
+    def _format_feedback(self, response_items, system):
         """
         Input: Dictionary called feedback.  Must contain keys seen below.
@@ -383,14 +382,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         rubric_feedback=""
         feedback = self._convert_longform_feedback_to_html(response_items)
         if response_items['rubric_scores_complete']==True:
-            rubric_renderer = CombinedOpenEndedRubric(True)
+            rubric_renderer = CombinedOpenEndedRubric(system, True)
             success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])

         if not response_items['success']:
             return system.render_template("open_ended_error.html",
                                    {'errors': feedback})

-        feedback_template = render_to_string("open_ended_feedback.html", {
+        feedback_template = system.render_template("open_ended_feedback.html", {
             'grader_type': response_items['grader_type'],
             'score': "{0} / {1}".format(response_items['score'], self.max_score()),
             'feedback': feedback,
@@ -400,7 +399,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):

         return feedback_template

-    def _parse_score_msg(self, score_msg, join_feedback=True):
+    def _parse_score_msg(self, score_msg, system, join_feedback=True):
         """
          Grader reply is a JSON-dump of the following dict
            { 'correct': True/False,
@@ -452,7 +451,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                         'rubric_scores_complete' : score_result['rubric_scores_complete'][i],
                         'rubric_xml' : score_result['rubric_xml'][i],
                         }
-                    feedback_items.append(self._format_feedback(new_score_result))
+                    feedback_items.append(self._format_feedback(new_score_result, system))
                 if join_feedback:
                     feedback = "".join(feedback_items)
                 else:
@@ -460,7 +459,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                 score = int(median(score_result['score']))
             else:
                 #This is for instructor and ML grading
-                feedback = self._format_feedback(score_result)
+                feedback = self._format_feedback(score_result, system)
                 score = score_result['score']

             self.submission_id = score_result['submission_id']
@@ -468,7 +467,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):

         return {'valid': True, 'score': score, 'feedback': feedback}

-    def latest_post_assessment(self, short_feedback=False, join_feedback=True):
+    def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
         """
         Gets the latest feedback, parses, and returns
         @param short_feedback: If the long feedback is wanted or not
@@ -477,7 +476,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         if not self.history:
             return ""

-        feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), join_feedback=join_feedback)
+        feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system, join_feedback=join_feedback)
         if not short_feedback:
             return feedback_dict['feedback'] if feedback_dict['valid'] else ''
         if feedback_dict['valid']:
@@ -485,14 +484,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                 json.loads(self.history[-1].get('post_assessment', "")))
         return short_feedback if feedback_dict['valid'] else ''

-    def format_feedback_with_evaluation(self, feedback):
+    def format_feedback_with_evaluation(self, system, feedback):
         """
         Renders a given html feedback into an evaluation template
         @param feedback: HTML feedback
         @return: Rendered html
         """
         context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50}
-        html = render_to_string('open_ended_evaluation.html', context)
+        html = system.render_template('open_ended_evaluation.html', context)
         return html

     def handle_ajax(self, dispatch, get, system):
@@ -584,7 +583,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         if self.state != self.INITIAL:
             latest = self.latest_answer()
             previous_answer = latest if latest is not None else self.initial_display
-            post_assessment = self.latest_post_assessment()
+            post_assessment = self.latest_post_assessment(system)
             score = self.latest_score()
             correct = 'correct' if self.is_submission_correct(score) else 'incorrect'
         else:
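The _parse_score_msg path above receives a JSON grader reply that may carry one score per peer grader and collapses the list to a single value with numpy's median, while instructor and ML replies carry a single number. A rough standalone sketch of that reduction; the reply shape is simplified and partly guessed from the hunk:

import json
from numpy import median     # the hunk above imports median from numpy

def combined_score(score_msg):
    """Median of the peer graders' scores, or the single score as-is for
    instructor/ML grading. The message shape is simplified for the sketch."""
    reply = json.loads(score_msg)
    score = reply['score']
    if isinstance(score, list):           # peer grading: one entry per grader
        return int(median(score))
    return score                          # instructor / ML grading: one number

peer_reply = json.dumps({'correct': True, 'score': [2, 3, 3], 'grader_type': 'PE'})
print(combined_score(peer_reply))         # 3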
diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py
index 8626a6bb16..62d203987a 100644
--- a/common/lib/xmodule/xmodule/openendedchild.py
+++ b/common/lib/xmodule/xmodule/openendedchild.py
@@ -35,7 +35,7 @@ MAX_ATTEMPTS = 1
 # Overriden by max_score specified in xml.
 MAX_SCORE = 1

-class OpenEndedChild():
+class OpenEndedChild(object):
     """
     States:
@@ -124,7 +124,7 @@ class OpenEndedChild():
             return None
         return self.history[-1].get('score')

-    def latest_post_assessment(self):
+    def latest_post_assessment(self, system):
         """None if not available"""
         if not self.history:
             return ""
diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py
index f6b330be5a..d509970503 100644
--- a/common/lib/xmodule/xmodule/self_assessment_module.py
+++ b/common/lib/xmodule/xmodule/self_assessment_module.py
@@ -122,7 +122,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
         if self.state == self.INITIAL:
             return ''

-        rubric_renderer = CombinedOpenEndedRubric(True)
+        rubric_renderer = CombinedOpenEndedRubric(system, True)
         success, rubric_html = rubric_renderer.render_rubric(self.rubric)

@@ -149,7 +149,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):

         if self.state == self.DONE:
             # display the previous hint
-            latest = self.latest_post_assessment()
+            latest = self.latest_post_assessment(system)
             hint = latest if latest is not None else ''
         else:
             hint = ''
diff --git a/lms/djangoapps/open_ended_grading/grading_service.py b/lms/djangoapps/open_ended_grading/grading_service.py
index 3b17a015fe..e8af5f09f6 100644
--- a/lms/djangoapps/open_ended_grading/grading_service.py
+++ b/lms/djangoapps/open_ended_grading/grading_service.py
@@ -13,6 +13,8 @@ from util.json_request import expect_json
 from xmodule.course_module import CourseDescriptor
 from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric
 from lxml import etree
+from mitxmako.shortcuts import render_to_string
+from xmodule.x_module import ModuleSystem

 log = logging.getLogger(__name__)

@@ -29,6 +31,7 @@ class GradingService(object):
         self.url = config['url']
         self.login_url = self.url + '/login/'
         self.session = requests.session()
+        self.system = ModuleSystem(None, None, None, render_to_string, None)

     def _login(self):
         """
@@ -109,7 +112,7 @@ class GradingService(object):
             response_json = json.loads(response)
             if response_json.has_key('rubric'):
                 rubric = response_json['rubric']
-                rubric_renderer = CombinedOpenEndedRubric(False)
+                rubric_renderer = CombinedOpenEndedRubric(self.system, False)
                 success, rubric_html = rubric_renderer.render_rubric(rubric)
                 if not success:
                     error_message = "Could not render rubric: {0}".format(rubric)
diff --git a/lms/static/sass/multicourse/_dashboard.scss b/lms/static/sass/multicourse/_dashboard.scss
index c22bc14105..4555a426d3 100644
--- a/lms/static/sass/multicourse/_dashboard.scss
+++ b/lms/static/sass/multicourse/_dashboard.scss
@@ -426,6 +426,14 @@
       font-size: 1.2rem;
       font-weight: bold;
     }
+
+    strong {
+      font-weight: 700;
+
+      a {
+        font-weight: 700;
+      }
+    }
   }

   .actions {
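Back in the grading_service.py hunk above, the LMS side builds a bare ModuleSystem whose only live collaborator is render_to_string, with None placeholders for everything else, so the shared rubric renderer can be reused outside any XModule. A hedged stand-in for that idea; StubModuleSystem and fake_render_to_string are hypothetical names, and the only thing taken from the diff is that the renderer sits in the fourth position:

# Hypothetical stand-in for a ModuleSystem that can only render templates;
# fake_render_to_string imitates mitxmako's renderer for the sketch.
def fake_render_to_string(template_name, context):
    return 'rendered %s with %d context keys' % (template_name, len(context))

class StubModuleSystem(object):
    def __init__(self, *collaborators):
        # In the hunk, the template renderer is passed in the fourth position
        # and every other collaborator is None.
        self.render_template = collaborators[3]

system = StubModuleSystem(None, None, None, fake_render_to_string, None)
print(system.render_template('open_ended_rubric.html', {'categories': []}))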
diff --git a/lms/static/sass/multicourse/_testcenter-register.scss b/lms/static/sass/multicourse/_testcenter-register.scss
index 961fffd5d0..6d85fc167f 100644
--- a/lms/static/sass/multicourse/_testcenter-register.scss
+++ b/lms/static/sass/multicourse/_testcenter-register.scss
@@ -232,8 +232,9 @@ $red: rgb(178, 6, 16);
   }

   input, textarea {
+    height: 100%;
     width: 100%;
-    padding: $baseline ($baseline*.75);
+    padding: ($baseline/2);

     &.long {
       width: 100%;
diff --git a/lms/templates/dashboard.html b/lms/templates/dashboard.html
index 0182a8edf1..8ec58a6a28 100644
--- a/lms/templates/dashboard.html
+++ b/lms/templates/dashboard.html
@@ -243,22 +243,15 @@
               % endif
               % if registration.is_rejected:
               % endif
               % if not registration.is_accepted and not registration.is_rejected:
               % endif
             % endif
-            % endif
             <%
diff --git a/lms/templates/test_center_register.html b/lms/templates/test_center_register.html
index 03883d907c..f6c53c0e89 100644
--- a/lms/templates/test_center_register.html
+++ b/lms/templates/test_center_register.html
@@ -128,7 +128,7 @@
    % if registration.registration_is_rejected:
    % endif
@@ -246,25 +246,25 @@
-If you have a specific question pertaining to your registration, you may contact exam-help@edx.org.
+If you have a specific question pertaining to your registration, you may contact edX at exam-help@edx.org.