diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py index 3668cd6cc9..0577a9034b 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py @@ -1,3 +1,5 @@ +from dogapi import dog_stats_api + import logging from .grading_service_module import GradingService @@ -9,33 +11,28 @@ class ControllerQueryService(GradingService): Interface to controller query backend. """ + METRIC_NAME = 'edxapp.open_ended_grading.controller_query_service' + def __init__(self, config, system): config['system'] = system super(ControllerQueryService, self).__init__(config) self.url = config['url'] + config['grading_controller'] self.login_url = self.url + '/login/' self.check_eta_url = self.url + '/get_submission_eta/' - self.is_unique_url = self.url + '/is_name_unique/' self.combined_notifications_url = self.url + '/combined_notifications/' self.grading_status_list_url = self.url + '/get_grading_status_list/' self.flagged_problem_list_url = self.url + '/get_flagged_problem_list/' self.take_action_on_flags_url = self.url + '/take_action_on_flags/' - def check_if_name_is_unique(self, location, problem_id, course_id): - params = { - 'course_id': course_id, - 'location': location, - 'problem_id': problem_id - } - response = self.get(self.is_unique_url, params) - return response - def check_for_eta(self, location): params = { 'location': location, } - response = self.get(self.check_eta_url, params) - return response + data = self.get(self.check_eta_url, params) + self._record_result('check_for_eta', data) + dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), data.get('eta', 0)) + + return data def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed): params = { @@ -45,8 +42,16 @@ class 
ControllerQueryService(GradingService): 'last_time_viewed': last_time_viewed, } log.debug(self.combined_notifications_url) - response = self.get(self.combined_notifications_url, params) - return response + data = self.get(self.combined_notifications_url, params) + + tags = [u'course_id:{}'.format(course_id), u'user_is_staff:{}'.format(user_is_staff)] + tags.extend( + u'{}:{}'.format(key, value) + for key, value in data.items() + if key not in ('success', 'version', 'error') + ) + self._record_result('check_combined_notifications', data, tags) + return data def get_grading_status_list(self, course_id, student_id): params = { @@ -54,16 +59,31 @@ class ControllerQueryService(GradingService): 'course_id': course_id, } - response = self.get(self.grading_status_list_url, params) - return response + data = self.get(self.grading_status_list_url, params) + + tags = [u'course_id:{}'.format(course_id)] + self._record_result('get_grading_status_list', data, tags) + dog_stats_api.histogram( + self._metric_name('get_grading_status_list.length'), + len(data.get('problem_list', [])), + tags=tags + ) + return data def get_flagged_problem_list(self, course_id): params = { 'course_id': course_id, } - response = self.get(self.flagged_problem_list_url, params) - return response + data = self.get(self.flagged_problem_list_url, params) + + tags = [u'course_id:{}'.format(course_id)] + self._record_result('get_flagged_problem_list', data, tags) + dog_stats_api.histogram( + self._metric_name('get_flagged_problem_list.length'), + len(data.get('flagged_submissions', [])) + ) + return data def take_action_on_flags(self, course_id, student_id, submission_id, action_type): params = { @@ -73,8 +93,11 @@ class ControllerQueryService(GradingService): 'action_type': action_type } - response = self.post(self.take_action_on_flags_url, params) - return response + data = self.post(self.take_action_on_flags_url, params) + + tags = [u'course_id:{}'.format(course_id), u'action_type:{}'.format(action_type)] 
+ self._record_result('take_action_on_flags', data, tags) + return data class MockControllerQueryService(object): @@ -85,14 +108,6 @@ class MockControllerQueryService(object): def __init__(self, config, system): pass - def check_if_name_is_unique(self, *args, **kwargs): - """ - Mock later if needed. Stub function for now. - @param params: - @return: - """ - pass - def check_for_eta(self, *args, **kwargs): """ Mock later if needed. Stub function for now. @@ -102,15 +117,47 @@ class MockControllerQueryService(object): pass def check_combined_notifications(self, *args, **kwargs): - combined_notifications = '{"flagged_submissions_exist": false, "version": 1, "new_student_grading_to_view": false, "success": true, "staff_needs_to_grade": false, "student_needs_to_peer_grade": true, "overall_need_to_check": true}' + combined_notifications = { + "flagged_submissions_exist": False, + "version": 1, + "new_student_grading_to_view": False, + "success": True, + "staff_needs_to_grade": False, + "student_needs_to_peer_grade": True, + "overall_need_to_check": True + } return combined_notifications def get_grading_status_list(self, *args, **kwargs): - grading_status_list = '{"version": 1, "problem_list": [{"problem_name": "Science Question -- Machine Assessed", "grader_type": "NA", "eta_available": true, "state": "Waiting to be Graded", "eta": 259200, "location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML"}, {"problem_name": "Humanities Question -- Peer Assessed", "grader_type": "NA", "eta_available": true, "state": "Waiting to be Graded", "eta": 259200, "location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer"}], "success": true}' + grading_status_list = { + "version": 1, + "problem_list": [ + { + "problem_name": "Science Question -- Machine Assessed", + "grader_type": "NA", + "eta_available": True, + "state": "Waiting to be Graded", + "eta": 259200, + "location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML" + }, { + "problem_name": "Humanities Question -- 
Peer Assessed", + "grader_type": "NA", + "eta_available": True, + "state": "Waiting to be Graded", + "eta": 259200, + "location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer" + } + ], + "success": True + } return grading_status_list def get_flagged_problem_list(self, *args, **kwargs): - flagged_problem_list = '{"version": 1, "success": false, "error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall"}' + flagged_problem_list = { + "version": 1, + "success": False, + "error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall" + } return flagged_problem_list def take_action_on_flags(self, *args, **kwargs): @@ -131,5 +178,4 @@ def convert_seconds_to_human_readable(seconds): else: human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1)) - eta_string = "{0}".format(human_string) - return eta_string + return human_string diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py index 6248c63091..e6c947da53 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py @@ -2,9 +2,10 @@ import json import logging import requests +from dogapi import dog_stats_api from requests.exceptions import RequestException, ConnectionError, HTTPError -from .combined_open_ended_rubric import CombinedOpenEndedRubric +from .combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError from lxml import etree log = logging.getLogger(__name__) @@ -44,40 +45,66 @@ class GradingService(object): return response.json() + def _metric_name(self, suffix): + """ + Return a metric name for datadog, using `self.METRIC_NAME` as + a prefix, and `suffix` as the suffix. + + Arguments: + suffix (str): The metric suffix to use. 
+ """ + return '{}.{}'.format(self.METRIC_NAME, suffix) + + def _record_result(self, action, data, tags=None): + """ + Log results from an API call to an ORA service to datadog. + + Arguments: + action (str): The ORA action being recorded. + data (dict): The data returned from the ORA service. Should contain the key 'success'. + tags (list): A list of tags to attach to the logged metric. + """ + if tags is None: + tags = [] + + tags.append(u'result:{}'.format(data.get('success', False))) + tags.append(u'action:{}'.format(action)) + dog_stats_api.increment(self._metric_name('request.count'), tags=tags) + def post(self, url, data, allow_redirects=False): """ - Make a post request to the grading controller + Make a post request to the grading controller. Returns the parsed json results of that request. """ try: op = lambda: self.session.post(url, data=data, allow_redirects=allow_redirects) - r = self._try_with_login(op) - except (RequestException, ConnectionError, HTTPError) as err: + response_json = self._try_with_login(op) + except (RequestException, ConnectionError, HTTPError, ValueError) as err: # reraise as promised GradingServiceError, but preserve stacktrace. #This is a dev_facing_error error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data) log.error(error_string) raise GradingServiceError(error_string) - return r.text + return response_json def get(self, url, params, allow_redirects=False): """ - Make a get request to the grading controller + Make a get request to the grading controller. Returns the parsed json results of that request. """ op = lambda: self.session.get(url, allow_redirects=allow_redirects, params=params) try: - r = self._try_with_login(op) - except (RequestException, ConnectionError, HTTPError) as err: + response_json = self._try_with_login(op) + except (RequestException, ConnectionError, HTTPError, ValueError) as err: # reraise as promised GradingServiceError, but preserve stacktrace. 
#This is a dev_facing_error error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params) log.error(error_string) raise GradingServiceError(error_string) - return r.text + return response_json def _try_with_login(self, operation): """ @@ -101,35 +128,29 @@ class GradingService(object): response = operation() response.raise_for_status() - return response + return resp_json def _render_rubric(self, response, view_only=False): """ - Given an HTTP Response with the key 'rubric', render out the html + Given an HTTP Response json with the key 'rubric', render out the html required to display the rubric and put it back into the response returns the updated response as a dictionary that can be serialized later """ try: - response_json = json.loads(response) - except: - response_json = response - - try: - if 'rubric' in response_json: - rubric = response_json['rubric'] + if 'rubric' in response: + rubric = response['rubric'] rubric_renderer = CombinedOpenEndedRubric(self.system, view_only) rubric_dict = rubric_renderer.render_rubric(rubric) success = rubric_dict['success'] rubric_html = rubric_dict['html'] - response_json['rubric'] = rubric_html - return response_json + response['rubric'] = rubric_html + return response # if we can't parse the rubric into HTML, - except etree.XMLSyntaxError, RubricParsingError: + except (etree.XMLSyntaxError, RubricParsingError): #This is a dev_facing_error - log.exception("Cannot parse rubric string. Raw string: {0}" - .format(rubric)) + log.exception("Cannot parse rubric string. 
Raw string: {0}".format(response['rubric'])) return {'success': False, 'error': 'Error displaying submission'} except ValueError: diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py index 47f6921994..b68d8ad72a 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py @@ -533,10 +533,6 @@ class OpenEndedChild(object): def get_eta(self): if self.controller_qs: response = self.controller_qs.check_for_eta(self.location_string) - try: - response = json.loads(response) - except: - pass else: return "" diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py index 6dc2e15e9a..875cc5f680 100644 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py @@ -1,5 +1,6 @@ import json import logging +from dogapi import dog_stats_api from .grading_service_module import GradingService, GradingServiceError @@ -11,6 +12,8 @@ class PeerGradingService(GradingService): Interface with the grading controller for peer grading """ + METRIC_NAME = 'edxapp.open_ended_grading.peer_grading_service' + def __init__(self, config, system): config['system'] = system super(PeerGradingService, self).__init__(config) @@ -28,54 +31,78 @@ class PeerGradingService(GradingService): def get_data_for_location(self, problem_location, student_id): params = {'location': problem_location, 'student_id': student_id} - response = self.get(self.get_data_for_location_url, params) - return self.try_to_decode(response) + result = self.get(self.get_data_for_location_url, params) + self._record_result('get_data_for_location', result) + for key in result.keys(): + if key in ('success', 
'error', 'version'): + continue + + dog_stats_api.histogram( + self._metric_name('get_data_for_location.{}'.format(key)), + result[key], + ) + return result def get_next_submission(self, problem_location, grader_id): - response = self.get( + result = self._render_rubric(self.get( self.get_next_submission_url, { 'location': problem_location, 'grader_id': grader_id } - ) - return self.try_to_decode(self._render_rubric(response)) + )) + self._record_result('get_next_submission', result) + return result def save_grade(self, **kwargs): data = kwargs data.update({'rubric_scores_complete': True}) - return self.try_to_decode(self.post(self.save_grade_url, data)) + result = self.post(self.save_grade_url, data) + self._record_result('save_grade', result) + return result def is_student_calibrated(self, problem_location, grader_id): params = {'problem_id': problem_location, 'student_id': grader_id} - return self.try_to_decode(self.get(self.is_student_calibrated_url, params)) + result = self.get(self.is_student_calibrated_url, params) + self._record_result( + 'is_student_calibrated', + result, + tags=['calibrated:{}'.format(result.get('calibrated'))] + ) + return result def show_calibration_essay(self, problem_location, grader_id): params = {'problem_id': problem_location, 'student_id': grader_id} - response = self.get(self.show_calibration_essay_url, params) - return self.try_to_decode(self._render_rubric(response)) + result = self._render_rubric(self.get(self.show_calibration_essay_url, params)) + self._record_result('show_calibration_essay', result) + return result def save_calibration_essay(self, **kwargs): data = kwargs data.update({'rubric_scores_complete': True}) - return self.try_to_decode(self.post(self.save_calibration_essay_url, data)) + result = self.post(self.save_calibration_essay_url, data) + self._record_result('save_calibration_essay', result) + return result def get_problem_list(self, course_id, grader_id): params = {'course_id': course_id, 'student_id': 
grader_id} - response = self.get(self.get_problem_list_url, params) - return self.try_to_decode(response) + result = self.get(self.get_problem_list_url, params) + self._record_result('get_problem_list', result) + dog_stats_api.histogram( + self._metric_name('get_problem_list.result.length'), + len(result.get('problem_list',[])) + ) + return result def get_notifications(self, course_id, grader_id): params = {'course_id': course_id, 'student_id': grader_id} - response = self.get(self.get_notifications_url, params) - return self.try_to_decode(response) - - def try_to_decode(self, text): - try: - text = json.loads(text) - except: - pass - return text + result = self.get(self.get_notifications_url, params) + self._record_result( + 'get_notifications', + result, + tags=['needs_to_peer_grade:{}'.format(result.get('student_needs_to_peer_grade'))] + ) + return result """ diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index e6a30e4450..191c05b46f 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -539,8 +539,7 @@ class PeerGradingModule(PeerGradingFields, XModule): error_text = "" problem_list = [] try: - problem_list_json = self.peer_gs.get_problem_list(self.course_id, self.system.anonymous_student_id) - problem_list_dict = problem_list_json + problem_list_dict = self.peer_gs.get_problem_list(self.course_id, self.system.anonymous_student_id) success = problem_list_dict['success'] if 'error' in problem_list_dict: error_text = problem_list_dict['error'] diff --git a/lms/djangoapps/open_ended_grading/open_ended_notifications.py b/lms/djangoapps/open_ended_grading/open_ended_notifications.py index 5826560ab4..426b93dd3c 100644 --- a/lms/djangoapps/open_ended_grading/open_ended_notifications.py +++ b/lms/djangoapps/open_ended_grading/open_ended_notifications.py @@ -165,9 +165,8 @@ def combined_notifications(course, user): try: #Get the 
notifications from the grading controller - controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff, + notifications = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff, last_time_viewed) - notifications = json.loads(controller_response) if notifications.get('success'): if (notifications.get('staff_needs_to_grade') or notifications.get('student_needs_to_peer_grade')): diff --git a/lms/djangoapps/open_ended_grading/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py index db8672aee3..843d112b65 100644 --- a/lms/djangoapps/open_ended_grading/staff_grading_service.py +++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py @@ -44,28 +44,28 @@ class MockStaffGradingService(object): def get_next(self, course_id, location, grader_id): self.cnt += 1 - return json.dumps({'success': True, - 'submission_id': self.cnt, - 'submission': 'Test submission {cnt}'.format(cnt=self.cnt), - 'num_graded': 3, - 'min_for_ml': 5, - 'num_pending': 4, - 'prompt': 'This is a fake prompt', - 'ml_error_info': 'ML info', - 'max_score': 2 + self.cnt % 3, - 'rubric': 'A rubric'}) + return {'success': True, + 'submission_id': self.cnt, + 'submission': 'Test submission {cnt}'.format(cnt=self.cnt), + 'num_graded': 3, + 'min_for_ml': 5, + 'num_pending': 4, + 'prompt': 'This is a fake prompt', + 'ml_error_info': 'ML info', + 'max_score': 2 + self.cnt % 3, + 'rubric': 'A rubric'} def get_problem_list(self, course_id, grader_id): self.cnt += 1 - return json.dumps({'success': True, - 'problem_list': [ - json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', - 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, - 'min_for_ml': 10}), - json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', - 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, - 'min_for_ml': 10}) - ]}) + return {'success': True, + 'problem_list': [ + 
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', + 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, + 'min_for_ml': 10}), + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', + 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, + 'min_for_ml': 10}) + ]} def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged): @@ -77,6 +77,8 @@ class StaffGradingService(GradingService): Interface to staff grading backend. """ + METRIC_NAME = 'edxapp.open_ended_grading.staff_grading_service' + def __init__(self, config): config['system'] = LmsModuleSystem( static_url='/static', @@ -106,7 +108,7 @@ class StaffGradingService(GradingService): grader_id: who is grading this? The anonymous user_id of the grader. Returns: - json string with the response from the service. (Deliberately not + dict with the response from the service. (Deliberately not writing out the fields here--see the docs on the staff_grading view in the grading_controller repo) @@ -114,7 +116,14 @@ class StaffGradingService(GradingService): GradingServiceError: something went wrong with the connection. """ params = {'course_id': course_id, 'grader_id': grader_id} - return self.get(self.get_problem_list_url, params) + result = self.get(self.get_problem_list_url, params) + tags = [u'course_id:{}'.format(course_id)] + self._record_result('get_problem_list', result, tags) + dog_stats_api.histogram( + self._metric_name('get_problem_list.result.length'), + len(result.get('problem_list', [])) + ) + return result def get_next(self, course_id, location, grader_id): """ @@ -127,17 +136,25 @@ class StaffGradingService(GradingService): grader_id: who is grading this? The anonymous user_id of the grader. Returns: - json string with the response from the service. (Deliberately not + dict with the response from the service. 
(Deliberately not writing out the fields here--see the docs on the staff_grading view in the grading_controller repo) Raises: GradingServiceError: something went wrong with the connection. """ - response = self.get(self.get_next_url, - params={'location': location, - 'grader_id': grader_id}) - return json.dumps(self._render_rubric(response)) + result = self._render_rubric( + self.get( + self.get_next_url, + params={ + 'location': location, + 'grader_id': grader_id + } + ) + ) + tags = [u'course_id:{}'.format(course_id)] + self._record_result('get_next', result, tags) + return result def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged): @@ -145,7 +162,7 @@ class StaffGradingService(GradingService): Save a score and feedback for a submission. Returns: - json dict with keys + dict with keys 'success': bool 'error': error msg, if something went wrong. @@ -162,12 +179,20 @@ class StaffGradingService(GradingService): 'rubric_scores_complete': True, 'submission_flagged': submission_flagged} - return self.post(self.save_grade_url, data=data) + result = self._render_rubric(self.post(self.save_grade_url, data=data)) + tags = [u'course_id:{}'.format(course_id)] + self._record_result('save_grade', result, tags) + return result def get_notifications(self, course_id): params = {'course_id': course_id} - response = self.get(self.get_notifications_url, params) - return response + result = self.get(self.get_notifications_url, params) + tags = [ + u'course_id:{}'.format(course_id), + u'staff_needs_to_grade:{}'.format(result.get('staff_needs_to_grade')) + ] + self._record_result('get_notifications', result, tags) + return result # don't initialize until staff_grading_service() is called--means that just @@ -249,7 +274,7 @@ def get_next(request, course_id): p = request.POST location = p['location'] - return HttpResponse(_get_next(course_id, grader_id, location), + return HttpResponse(json.dumps(_get_next(course_id, 
grader_id, location)), mimetype="application/json") def get_problem_list(request, course_id): @@ -278,7 +303,6 @@ def get_problem_list(request, course_id): _check_access(request.user, course_id) try: response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user)) - response = json.loads(response) # If 'problem_list' is in the response, then we got a list of problems from the ORA server. # If it is not, then ORA could not find any problems. @@ -373,14 +397,14 @@ def save_grade(request, course_id): location = p['location'] try: - result_json = staff_grading_service().save_grade(course_id, - grader_id, - p['submission_id'], - p['score'], - p['feedback'], - skipped, - p.getlist('rubric_scores[]'), - p['submission_flagged']) + result = staff_grading_service().save_grade(course_id, + grader_id, + p['submission_id'], + p['score'], + p['feedback'], + skipped, + p.getlist('rubric_scores[]'), + p['submission_flagged']) except GradingServiceError: #This is a dev_facing_error log.exception( @@ -388,9 +412,6 @@ def save_grade(request, course_id): request, course_id)) #This is a staff_facing_error return _err_response(STAFF_ERROR_MESSAGE) - - try: - result = json.loads(result_json) except ValueError: #This is a dev_facing_error log.exception( @@ -406,7 +427,7 @@ def save_grade(request, course_id): return _err_response(STAFF_ERROR_MESSAGE) # Ok, save_grade seemed to work. Get the next submission to grade. - return HttpResponse(_get_next(course_id, grader_id, location), + return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)), mimetype="application/json") diff --git a/lms/djangoapps/open_ended_grading/tests.py b/lms/djangoapps/open_ended_grading/tests.py index 8ac9bab50f..75020ef024 100644 --- a/lms/djangoapps/open_ended_grading/tests.py +++ b/lms/djangoapps/open_ended_grading/tests.py @@ -45,7 +45,7 @@ class EmptyStaffGradingService(object): """ Return a staff grading response that is missing a problem list key. 
""" - return json.dumps({'success': True, 'error': 'No problems found.'}) + return {'success': True, 'error': 'No problems found.'} def make_instructor(course, user_email): @@ -62,10 +62,9 @@ class StudentProblemListMockQuery(object): def get_grading_status_list(self, *args, **kwargs): """ Get a mock grading status list with locations from the open_ended test course. - @returns: json formatted grading status message. + @returns: grading status message dictionary. """ - grading_status_list = json.dumps( - { + return { "version": 1, "problem_list": [ { @@ -95,8 +94,6 @@ class StudentProblemListMockQuery(object): ], "success": True } - ) - return grading_status_list @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) diff --git a/lms/djangoapps/open_ended_grading/utils.py b/lms/djangoapps/open_ended_grading/utils.py index fb523df17e..4833d01fc1 100644 --- a/lms/djangoapps/open_ended_grading/utils.py +++ b/lms/djangoapps/open_ended_grading/utils.py @@ -118,12 +118,10 @@ class StudentProblemList(object): self.success = False try: #Get list of all open ended problems that the grading server knows about - problem_list_json = self.controller_qs.get_grading_status_list(self.course_id, self.user_id) + problem_list_dict = self.controller_qs.get_grading_status_list(self.course_id, self.user_id) except GradingServiceError: log.error("Problem contacting open ended grading service " + self.course_error_ending) return self.success - try: - problem_list_dict = json.loads(problem_list_json) except ValueError: log.error("Problem with results from external grading service for open ended" + self.course_error_ending) return self.success diff --git a/lms/djangoapps/open_ended_grading/views.py b/lms/djangoapps/open_ended_grading/views.py index 50f34f6395..c045d6e56f 100644 --- a/lms/djangoapps/open_ended_grading/views.py +++ b/lms/djangoapps/open_ended_grading/views.py @@ -197,8 +197,7 @@ def flagged_problem_list(request, course_id): # Make a service that can query edX ORA. 
controller_qs = create_controller_query_service() try: - problem_list_json = controller_qs.get_flagged_problem_list(course_id) - problem_list_dict = json.loads(problem_list_json) + problem_list_dict = controller_qs.get_flagged_problem_list(course_id) success = problem_list_dict['success'] if 'error' in problem_list_dict: error_text = problem_list_dict['error'] @@ -326,7 +325,7 @@ def take_action_on_flags(request, course_id): controller_qs = create_controller_query_service() try: response = controller_qs.take_action_on_flags(course_id, student_id, submission_id, action_type) - return HttpResponse(response, mimetype="application/json") + return HttpResponse(json.dumps(response), mimetype="application/json") except GradingServiceError: log.exception( u"Error taking action on flagged peer grading submissions, "