diff --git a/common/djangoapps/xmodule_modifiers.py b/common/djangoapps/xmodule_modifiers.py index e0fda01eef..b171b402ee 100644 --- a/common/djangoapps/xmodule_modifiers.py +++ b/common/djangoapps/xmodule_modifiers.py @@ -64,7 +64,6 @@ def replace_static_urls(get_html, data_dir, course_namespace=None): return static_replace.replace_static_urls(get_html(), data_dir, course_namespace) return _get_html - def grade_histogram(module_id): ''' Print out a histogram of grades on a given problem. Part of staff member debug info. diff --git a/common/lib/capa/capa/tests/test_files/js/test_problem_display.js b/common/lib/capa/capa/tests/test_files/js/test_problem_display.js index 35b619c6ec..b61569acea 100644 --- a/common/lib/capa/capa/tests/test_files/js/test_problem_display.js +++ b/common/lib/capa/capa/tests/test_files/js/test_problem_display.js @@ -1,4 +1,4 @@ -// Generated by CoffeeScript 1.3.3 +// Generated by CoffeeScript 1.4.0 (function() { var MinimaxProblemDisplay, root, __hasProp = {}.hasOwnProperty, diff --git a/common/lib/capa/capa/tests/test_files/js/test_problem_generator.js b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.js index b2f01ed252..4b1d133723 100644 --- a/common/lib/capa/capa/tests/test_files/js/test_problem_generator.js +++ b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.js @@ -1,4 +1,4 @@ -// Generated by CoffeeScript 1.3.3 +// Generated by CoffeeScript 1.4.0 (function() { var TestProblemGenerator, root, __hasProp = {}.hasOwnProperty, diff --git a/common/lib/capa/capa/tests/test_files/js/test_problem_grader.js b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.js index 34dfff35cc..80d7ad1690 100644 --- a/common/lib/capa/capa/tests/test_files/js/test_problem_grader.js +++ b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.js @@ -1,4 +1,4 @@ -// Generated by CoffeeScript 1.3.3 +// Generated by CoffeeScript 1.4.0 (function() { var TestProblemGrader, root, __hasProp = {}.hasOwnProperty, diff --git a/common/lib/capa/capa/tests/test_files/js/xproblem.js b/common/lib/capa/capa/tests/test_files/js/xproblem.js index 512cf22739..55a469f7c1 100644 --- a/common/lib/capa/capa/tests/test_files/js/xproblem.js +++ b/common/lib/capa/capa/tests/test_files/js/xproblem.js @@ -1,4 +1,4 @@ -// Generated by CoffeeScript 1.3.3 +// Generated by CoffeeScript 1.4.0 (function() { var XProblemDisplay, XProblemGenerator, XProblemGrader, root; diff --git a/common/lib/xmodule/setup.py b/common/lib/xmodule/setup.py index 506e3a829a..817af9c10d 100644 --- a/common/lib/xmodule/setup.py +++ b/common/lib/xmodule/setup.py @@ -27,6 +27,7 @@ setup( "html = xmodule.html_module:HtmlDescriptor", "image = xmodule.backcompat_module:TranslateCustomTagDescriptor", "error = xmodule.error_module:ErrorDescriptor", + "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor", "problem = xmodule.capa_module:CapaDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor", "randomize = xmodule.randomize_module:RandomizeDescriptor", diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py index f8b23a2774..6d4a3eebdf 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py @@ -33,7 +33,9 @@ class CombinedOpenEndedRubric(object): 'view_only': self.view_only}) success = True except: - raise RubricParsingError("[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml)) + error_message = 
"[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml) + log.error(error_message) + raise RubricParsingError(error_message) return success, html def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed): diff --git a/lms/djangoapps/open_ended_grading/grading_service.py b/common/lib/xmodule/xmodule/grading_service_module.py similarity index 78% rename from lms/djangoapps/open_ended_grading/grading_service.py rename to common/lib/xmodule/xmodule/grading_service_module.py index 63febb105f..7c18731f53 100644 --- a/lms/djangoapps/open_ended_grading/grading_service.py +++ b/common/lib/xmodule/xmodule/grading_service_module.py @@ -5,16 +5,8 @@ import requests from requests.exceptions import RequestException, ConnectionError, HTTPError import sys -from django.conf import settings -from django.http import HttpResponse, Http404 - -from courseware.access import has_access -from util.json_request import expect_json -from xmodule.course_module import CourseDescriptor from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError from lxml import etree -from mitxmako.shortcuts import render_to_string -from xmodule.x_module import ModuleSystem log = logging.getLogger(__name__) @@ -31,7 +23,7 @@ class GradingService(object): self.url = config['url'] self.login_url = self.url + '/login/' self.session = requests.session() - self.system = ModuleSystem(None, None, None, render_to_string, None) + self.system = config['system'] def _login(self): """ @@ -42,20 +34,20 @@ class GradingService(object): Returns the decoded json dict of the response. """ response = self.session.post(self.login_url, - {'username': self.username, - 'password': self.password,}) + {'username': self.username, + 'password': self.password,}) response.raise_for_status() return response.json - def post(self, url, data, allow_redirects=False): + def post(self, url, data, allow_redirects=False): """ Make a post request to the grading controller """ try: op = lambda: self.session.post(url, data=data, - allow_redirects=allow_redirects) + allow_redirects=allow_redirects) r = self._try_with_login(op) except (RequestException, ConnectionError, HTTPError) as err: # reraise as promised GradingServiceError, but preserve stacktrace. @@ -69,8 +61,8 @@ class GradingService(object): """ log.debug(params) op = lambda: self.session.get(url, - allow_redirects=allow_redirects, - params=params) + allow_redirects=allow_redirects, + params=params) try: r = self._try_with_login(op) except (RequestException, ConnectionError, HTTPError) as err: @@ -78,7 +70,7 @@ class GradingService(object): raise GradingServiceError, str(err), sys.exc_info()[2] return r.text - + def _try_with_login(self, operation): """ @@ -96,8 +88,8 @@ class GradingService(object): r = self._login() if r and not r.get('success'): log.warning("Couldn't log into staff_grading backend. 
Response: %s", - r) - # try again + r) + # try again response = operation() response.raise_for_status() @@ -113,23 +105,23 @@ class GradingService(object): """ try: response_json = json.loads(response) + except: + response_json = response + + try: if 'rubric' in response_json: rubric = response_json['rubric'] rubric_renderer = CombinedOpenEndedRubric(self.system, False) success, rubric_html = rubric_renderer.render_rubric(rubric) response_json['rubric'] = rubric_html return response_json - # if we can't parse the rubric into HTML, + # if we can't parse the rubric into HTML, except etree.XMLSyntaxError, RubricParsingError: log.exception("Cannot parse rubric string. Raw string: {0}" - .format(rubric)) + .format(rubric)) return {'success': False, - 'error': 'Error displaying submission'} + 'error': 'Error displaying submission'} except ValueError: log.exception("Error parsing response: {0}".format(response)) return {'success': False, - 'error': "Error displaying submission"} - - - - + 'error': "Error displaying submission"} \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee new file mode 100644 index 0000000000..45c678bad9 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee @@ -0,0 +1,49 @@ +# This is a simple class that just hides the error container +# and message container when they are empty +# Can (and should be) expanded upon when our problem list +# becomes more sophisticated +class @PeerGrading + constructor: (element) -> + @peer_grading_container = $('.peer-grading') + @use_single_location = @peer_grading_container.data('use-single-location') + @peer_grading_outer_container = $('.peer-grading-container') + @ajax_url = @peer_grading_container.data('ajax-url') + @error_container = $('.error-container') + @error_container.toggle(not @error_container.is(':empty')) + + @message_container = $('.message-container') + @message_container.toggle(not @message_container.is(':empty')) + + @problem_button = $('.problem-button') + @problem_button.click @show_results + + @problem_list = $('.problem-list') + @construct_progress_bar() + + if @use_single_location + @activate_problem() + + construct_progress_bar: () => + problems = @problem_list.find('tr').next() + problems.each( (index, element) => + problem = $(element) + progress_bar = problem.find('.progress-bar') + bar_value = parseInt(problem.data('graded')) + bar_max = parseInt(problem.data('required')) + bar_value + progress_bar.progressbar({value: bar_value, max: bar_max}) + ) + + show_results: (event) => + location_to_fetch = $(event.target).data('location') + data = {'location' : location_to_fetch} + $.postWithPrefix "#{@ajax_url}problem", data, (response) => + if response.success + @peer_grading_outer_container.after(response.html).remove() + backend = new PeerGradingProblemBackend(@ajax_url, false) + new PeerGradingProblem(backend) + else + @gentle_alert response.error + + activate_problem: () => + backend = new PeerGradingProblemBackend(@ajax_url, false) + new PeerGradingProblem(backend) \ No newline at end of file diff --git a/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee similarity index 58% rename from lms/static/coffee/src/peer_grading/peer_grading_problem.coffee rename to common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee index ab16b34d12..ee98905cda 100644 --- 
a/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee +++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee @@ -7,7 +7,7 @@ # Should not be run when we don't have a location to send back # to the server # -# PeerGradingProblemBackend - +# PeerGradingProblemBackend - # makes all the ajax requests and provides a mock interface # for testing purposes # @@ -15,7 +15,7 @@ # handles the rendering and user interactions with the interface # ################################## -class PeerGradingProblemBackend +class @PeerGradingProblemBackend constructor: (ajax_url, mock_backend) -> @mock_backend = mock_backend @ajax_url = ajax_url @@ -32,141 +32,140 @@ class PeerGradingProblemBackend mock: (cmd, data) -> if cmd == 'is_student_calibrated' # change to test each version - response = - success: true + response = + success: true calibrated: @mock_cnt >= 2 else if cmd == 'show_calibration_essay' - #response = + #response = # success: false # error: "There was an error" @mock_cnt++ - response = + response = success: true submission_id: 1 submission_key: 'abcd' student_response: ''' - Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32. + Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32. -The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham. - ''' + The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham. + ''' prompt: ''' -

S11E3: Metal Bands

-

Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

-

* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

-

This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

- ''' +

S11E3: Metal Bands

+

Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

+

* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

+

This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

+ ''' rubric: ''' - - - - - - - - - - - - - - - - - - -
Purpose - - - - - - - -
Organization - - - - - - - -
- ''' + + + + + + + + + + + + + + + + + + +
Purpose + + + + + + + +
Organization + + + + + + + +
+ ''' max_score: 4 else if cmd == 'get_next_submission' - response = + response = success: true submission_id: 1 submission_key: 'abcd' student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa. -Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum. + Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum. -Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. ''' + Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. ''' prompt: ''' -

S11E3: Metal Bands

-

Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

-

* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

-

This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

- ''' +

S11E3: Metal Bands

+

Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

+

* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

+

This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

+ ''' rubric: ''' - - - - - - - - - - - - - - - - - - -
Purpose - - - - - - - -
Organization - - - - - - - -
- ''' + + + + + + + + + + + + + + + + + + +
Purpose + + + + + + + +
Organization + + + + + + + +
+ ''' max_score: 4 else if cmd == 'save_calibration_essay' - response = + response = success: true actual_score: 2 else if cmd == 'save_grade' - response = + response = success: true return response - -class PeerGradingProblem +class @PeerGradingProblem constructor: (backend) -> @prompt_wrapper = $('.prompt-wrapper') @backend = backend - + # get the location of the problem @location = $('.peer-grading').data('location') - # prevent this code from trying to run + # prevent this code from trying to run # when we don't have a location if(!@location) return @@ -208,7 +207,7 @@ class PeerGradingProblem # Set up the click event handlers @action_button.click -> history.back() - @calibration_feedback_button.click => + @calibration_feedback_button.click => @calibration_feedback_panel.hide() @grading_wrapper.show() @is_calibrated_check() @@ -266,7 +265,7 @@ class PeerGradingProblem submit_grade: () => data = @construct_data() @backend.post('save_grade', data, @submission_callback) - + ########## # @@ -301,7 +300,7 @@ class PeerGradingProblem @render_calibration_feedback(response) else if response.error @render_error(response.error) - else + else @render_error("Error saving calibration score") # called after we submit a submission score @@ -330,8 +329,8 @@ class PeerGradingProblem # show button if we have scores for all categories @show_submit_button() - - + + ########## # # Rendering methods and helpers @@ -344,7 +343,7 @@ class PeerGradingProblem # load in all the data @submission_container.html("

Training Essay

") @render_submission_data(response) - # TODO: indicate that we're in calibration mode + # TODO: indicate that we're in calibration mode @calibration_panel.addClass('current-state') @grading_panel.removeClass('current-state') @@ -428,12 +427,12 @@ class PeerGradingProblem if score == actual_score calibration_wrapper.append("

Congratulations! Your score matches the actual score!

") else - calibration_wrapper.append("

Please try to understand the grading critera better to be more accurate next time.

") + calibration_wrapper.append("

Please try to understand the grading critera better to be more accurate next time.

") # disable score selection and submission from the grading interface $("input[name='score-selection']").attr('disabled', true) @submit_button.hide() - + render_interstitial_page: () => @content_panel.hide() @interstitial_page.show() @@ -449,7 +448,7 @@ class PeerGradingProblem @submit_button.show() setup_score_selection: (max_score) => - + # first, get rid of all the old inputs, if any. @score_selection_container.html("""

Overall Score

@@ -460,7 +459,7 @@ class PeerGradingProblem for score in [0..max_score] id = 'score-' + score label = """""" - + input = """ """ # " fix broken parsing in emacs @@ -470,9 +469,7 @@ class PeerGradingProblem $("input[name='score-selection']").change @graded_callback $("input[name='grade-selection']").change @graded_callback - - -mock_backend = false -ajax_url = $('.peer-grading').data('ajax_url') -backend = new PeerGradingProblemBackend(ajax_url, mock_backend) -$(document).ready(() -> new PeerGradingProblem(backend)) +#mock_backend = false +#ajax_url = $('.peer-grading').data('ajax_url') +#backend = new PeerGradingProblemBackend(ajax_url, mock_backend) +#$(document).ready(() -> new PeerGradingProblem(backend)) diff --git a/common/lib/xmodule/xmodule/open_ended_image_submission.py b/common/lib/xmodule/xmodule/open_ended_image_submission.py index abfb2d80ba..8fa4d721d3 100644 --- a/common/lib/xmodule/xmodule/open_ended_image_submission.py +++ b/common/lib/xmodule/xmodule/open_ended_image_submission.py @@ -13,6 +13,10 @@ from urlparse import urlparse import requests from boto.s3.connection import S3Connection from boto.s3.key import Key +#TODO: Settings import is needed now in order to specify the URL and keys for amazon s3 (to upload images). +#Eventually, the goal is to replace the global django settings import with settings specifically +#for this module. There is no easy way to do this now, so piggybacking on the django settings +#makes sense. from django.conf import settings import pickle import logging diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py new file mode 100644 index 0000000000..e853160f4a --- /dev/null +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -0,0 +1,537 @@ +""" +This module provides an interface on the grading-service backend +for peer grading + +Use peer_grading_service() to get the version specified +in settings.PEER_GRADING_INTERFACE + +""" +import json +import logging +import requests +import sys + +from django.conf import settings + +from combined_open_ended_rubric import CombinedOpenEndedRubric +from lxml import etree + +import copy +import itertools +import json +import logging +from lxml.html import rewrite_links +import os + +from pkg_resources import resource_string +from .capa_module import only_one, ComplexEncoder +from .editing_module import EditingDescriptor +from .html_checker import check_html +from progress import Progress +from .stringify import stringify_children +from .x_module import XModule +from .xml_module import XmlDescriptor +from xmodule.modulestore import Location + +from peer_grading_service import peer_grading_service, GradingServiceError + +log = logging.getLogger(__name__) + +USE_FOR_SINGLE_LOCATION = False +LINK_TO_LOCATION = "" +TRUE_DICT = [True, "True", "true", "TRUE"] +MAX_SCORE = 1 +IS_GRADED = True + +class PeerGradingModule(XModule): + _VERSION = 1 + + js = {'coffee': [resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'), + resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'), + resource_string(__name__, 'js/src/collapsible.coffee'), + resource_string(__name__, 'js/src/javascript_loader.coffee'), + ]} + js_module_name = "PeerGrading" + + css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} + + def __init__(self, system, location, definition, descriptor, + instance_state=None, shared_state=None, **kwargs): + XModule.__init__(self, system, location, definition, descriptor, + instance_state, 
shared_state, **kwargs) + + # Load instance state + if instance_state is not None: + instance_state = json.loads(instance_state) + else: + instance_state = {} + + #We need to set the location here so the child modules can use it + system.set('location', location) + self.system = system + self.peer_gs = peer_grading_service(self.system) + + self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION) + if isinstance(self.use_for_single_location, basestring): + self.use_for_single_location = (self.use_for_single_location in TRUE_DICT) + + self.is_graded = self.metadata.get('is_graded', IS_GRADED) + if isinstance(self.is_graded, basestring): + self.is_graded = (self.is_graded in TRUE_DICT) + + self.link_to_location = self.metadata.get('link_to_location', USE_FOR_SINGLE_LOCATION) + if self.use_for_single_location ==True: + #This will raise an exception if the location is invalid + link_to_location_object = Location(self.link_to_location) + + self.ajax_url = self.system.ajax_url + if not self.ajax_url.endswith("/"): + self.ajax_url = self.ajax_url + "/" + + self.student_data_for_location = instance_state.get('student_data_for_location', {}) + self.max_grade = instance_state.get('max_grade', MAX_SCORE) + if not isinstance(self.max_grade, (int, long)): + #This could result in an exception, but not wrapping in a try catch block so it moves up the stack + self.max_grade = int(self.max_grade) + + def _err_response(self, msg): + """ + Return a HttpResponse with a json dump with success=False, and the given error message. + """ + return {'success': False, 'error': msg} + + def _check_required(self, get, required): + actual = set(get.keys()) + missing = required - actual + if len(missing) > 0: + return False, "Missing required keys: {0}".format(', '.join(missing)) + else: + return True, "" + + def get_html(self): + """ + Needs to be implemented by inheritors. Renders the HTML that students see. + @return: + """ + if not self.use_for_single_location: + return self.peer_grading() + else: + return self.peer_grading_problem({'location' : self.link_to_location})['html'] + + def handle_ajax(self, dispatch, get): + """ + Needs to be implemented by child modules. Handles AJAX events. 
+ @return: + """ + handlers = { + 'get_next_submission': self.get_next_submission, + 'show_calibration_essay': self.show_calibration_essay, + 'is_student_calibrated': self.is_student_calibrated, + 'save_grade': self.save_grade, + 'save_calibration_essay' : self.save_calibration_essay, + 'problem' : self.peer_grading_problem, + } + + if dispatch not in handlers: + return 'Error' + + d = handlers[dispatch](get) + + return json.dumps(d, cls=ComplexEncoder) + + def query_data_for_location(self): + student_id = self.system.anonymous_student_id + location = self.system.location + success = False + response = {} + + try: + response = self.peer_gs.get_data_for_location(location, student_id) + count_graded = response['count_graded'] + count_required = response['count_required'] + success = True + except GradingServiceError: + log.exception("Error getting location data from controller for location {0}, student {1}" + .format(location, student_id)) + + return success, response + + def get_progress(self): + pass + + def get_score(self): + if not self.use_for_single_location or not self.is_graded: + return None + + try: + count_graded = self.student_data_for_location['count_graded'] + count_required = self.student_data_for_location['count_required'] + except: + success, response = self.query_data_for_location() + if not success: + log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format( + self.system.location, self.system.anonymous_student_id + )) + return None + count_graded = response['count_graded'] + count_required = response['count_required'] + if count_required>0 and count_graded>=count_required: + self.student_data_for_location = response + + score_dict = { + 'score': int(count_graded>=count_required), + 'total': self.max_grade, + } + + return score_dict + + def max_score(self): + ''' Maximum score. Two notes: + + * This is generic; in abstract, a problem could be 3/5 points on one + randomization, and 5/7 on another + ''' + max_grade = None + if self.use_for_single_location and self.is_graded: + max_grade = self.max_grade + return max_grade + + def get_next_submission(self, get): + """ + Makes a call to the grading controller for the next essay that should be graded + Returns a json dict with the following keys: + + 'success': bool + + 'submission_id': a unique identifier for the submission, to be passed back + with the grade. + + 'submission': the submission, rendered as read-only html for grading + + 'rubric': the rubric, also rendered as html. + + 'submission_key': a key associated with the submission for validation reasons + + 'error': if success is False, will have an error message with more info. + """ + required = set(['location']) + success, message = self._check_required(get, required) + if not success: + return self._err_response(message) + grader_id = self.system.anonymous_student_id + location = get['location'] + + try: + response = self.peer_gs.get_next_submission(location, grader_id) + return response + except GradingServiceError: + log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}" + .format(self.peer_gs.url, location, grader_id)) + return {'success': False, + 'error': 'Could not connect to grading service'} + + def save_grade(self, get): + """ + Saves the grade of a given submission. 
+ Input: + The request should have the following keys: + location - problem location + submission_id - id associated with this submission + submission_key - submission key given for validation purposes + score - the grade that was given to the submission + feedback - the feedback from the student + Returns + A json object with the following keys: + success: bool indicating whether the save was a success + error: if there was an error in the submission, this is the error message + """ + + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged']) + success, message = self._check_required(get, required) + if not success: + return self._err_response(message) + grader_id = self.system.anonymous_student_id + + location = get.get('location') + submission_id = get.get('submission_id') + score = get.get('score') + feedback = get.get('feedback') + submission_key = get.get('submission_key') + rubric_scores = get.getlist('rubric_scores[]') + submission_flagged = get.get('submission_flagged') + + try: + response = self.peer_gs.save_grade(location, grader_id, submission_id, + score, feedback, submission_key, rubric_scores, submission_flagged) + return response + except GradingServiceError: + log.exception("""Error saving grade. server url: {0}, location: {1}, submission_id:{2}, + submission_key: {3}, score: {4}""" + .format(self.peer_gs.url, + location, submission_id, submission_key, score) + ) + return { + 'success': False, + 'error': 'Could not connect to grading service' + } + + def is_student_calibrated(self, get): + """ + Calls the grading controller to see if the given student is calibrated + on the given problem + + Input: + In the request, we need the following arguments: + location - problem location + + Returns: + Json object with the following keys + success - bool indicating whether or not the call was successful + calibrated - true if the grader has fully calibrated and can now move on to grading + - false if the grader is still working on calibration problems + total_calibrated_on_so_far - the number of calibration essays for this problem + that this grader has graded + """ + + required = set(['location']) + success, message = self._check_required(get, required) + if not success: + return self._err_response(message) + grader_id = self.system.anonymous_student_id + + location = get['location'] + + try: + response = self.peer_gs.is_student_calibrated(location, grader_id) + return response + except GradingServiceError: + log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}" + .format(self.peer_gs.url, grader_id, location)) + return { + 'success': False, + 'error': 'Could not connect to grading service' + } + + def show_calibration_essay(self, get): + """ + Fetch the next calibration essay from the grading controller and return it + Inputs: + In the request + location - problem location + + Returns: + A json dict with the following keys + 'success': bool + + 'submission_id': a unique identifier for the submission, to be passed back + with the grade. + + 'submission': the submission, rendered as read-only html for grading + + 'rubric': the rubric, also rendered as html. + + 'submission_key': a key associated with the submission for validation reasons + + 'error': if success is False, will have an error message with more info. 
+ + """ + + required = set(['location']) + success, message = self._check_required(get, required) + if not success: + return self._err_response(message) + + grader_id = self.system.anonymous_student_id + + location = get['location'] + try: + response = self.peer_gs.show_calibration_essay(location, grader_id) + return response + except GradingServiceError: + log.exception("Error from grading service. server url: {0}, location: {0}" + .format(self.peer_gs.url, location)) + return {'success': False, + 'error': 'Could not connect to grading service'} + # if we can't parse the rubric into HTML, + except etree.XMLSyntaxError: + log.exception("Cannot parse rubric string. Raw string: {0}" + .format(rubric)) + return {'success': False, + 'error': 'Error displaying submission'} + + + def save_calibration_essay(self, get): + """ + Saves the grader's grade of a given calibration. + Input: + The request should have the following keys: + location - problem location + submission_id - id associated with this submission + submission_key - submission key given for validation purposes + score - the grade that was given to the submission + feedback - the feedback from the student + Returns + A json object with the following keys: + success: bool indicating whether the save was a success + error: if there was an error in the submission, this is the error message + actual_score: the score that the instructor gave to this calibration essay + + """ + + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]']) + success, message = self._check_required(get, required) + if not success: + return self._err_response(message) + grader_id = self.system.anonymous_student_id + + location = get.get('location') + calibration_essay_id = get.get('submission_id') + submission_key = get.get('submission_key') + score = get.get('score') + feedback = get.get('feedback') + rubric_scores = get.getlist('rubric_scores[]') + + try: + response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id, + submission_key, score, feedback, rubric_scores) + return response + except GradingServiceError: + log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id)) + return self._err_response('Could not connect to grading service') + + def peer_grading(self, get = None): + ''' + Show a peer grading interface + ''' + + # call problem list service + success = False + error_text = "" + problem_list = [] + try: + problem_list_json = self.peer_gs.get_problem_list(self.system.course_id, self.system.anonymous_student_id) + problem_list_dict = problem_list_json + success = problem_list_dict['success'] + if 'error' in problem_list_dict: + error_text = problem_list_dict['error'] + + problem_list = problem_list_dict['problem_list'] + + except GradingServiceError: + error_text = "Error occured while contacting the grading service" + success = False + # catch error if if the json loads fails + except ValueError: + error_text = "Could not get problem list" + success = False + + ajax_url = self.ajax_url + html = self.system.render_template('peer_grading/peer_grading.html', { + 'course_id': self.system.course_id, + 'ajax_url': ajax_url, + 'success': success, + 'problem_list': problem_list, + 'error_text': error_text, + # Checked above + 'staff_access': False, + 'use_single_location' : self.use_for_single_location, + }) + + return html + + def peer_grading_problem(self, get = 
None): + ''' + Show individual problem interface + ''' + if get == None or get.get('location')==None: + if not self.use_for_single_location: + #This is an error case, because it must be set to use a single location to be called without get parameters + return {'html' : "", 'success' : False} + problem_location = self.link_to_location + + elif get.get('location') is not None: + problem_location = get.get('location') + + ajax_url = self.ajax_url + html = self.system.render_template('peer_grading/peer_grading_problem.html', { + 'view_html': '', + 'problem_location': problem_location, + 'course_id': self.system.course_id, + 'ajax_url': ajax_url, + # Checked above + 'staff_access': False, + 'use_single_location' : self.use_for_single_location, + }) + + return {'html' : html, 'success' : True} + + def get_instance_state(self): + """ + Returns the current instance state. The module can be recreated from the instance state. + Input: None + Output: A dictionary containing the instance state. + """ + + state = { + 'student_data_for_location' : self.student_data_for_location, + } + + return json.dumps(state) + +class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor): + """ + Module for adding combined open ended questions + """ + mako_template = "widgets/html-edit.html" + module_class = PeerGradingModule + filename_extension = "xml" + + stores_state = True + has_score = True + template_dir_name = "peer_grading" + + js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} + js_module_name = "HTMLEditingDescriptor" + + @classmethod + def definition_from_xml(cls, xml_object, system): + """ + Pull out the individual tasks, the rubric, and the prompt, and parse + + Returns: + { + 'rubric': 'some-html', + 'prompt': 'some-html', + 'task_xml': dictionary of xml strings, + } + """ + log.debug("In definition") + expected_children = [] + for child in expected_children: + if len(xml_object.xpath(child)) == 0: + raise ValueError("Peer grading definition must include at least one '{0}' tag".format(child)) + + def parse_task(k): + """Assumes that xml_object has child k""" + return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))] + + def parse(k): + """Assumes that xml_object has child k""" + return xml_object.xpath(k)[0] + + return {} + + + def definition_to_xml(self, resource_fs): + '''Return an xml element representing this definition.''' + elt = etree.Element('peergrading') + + def add_child(k): + child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_node = etree.fromstring(child_str) + elt.append(child_node) + + for child in ['task']: + add_child(child) + + return elt \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/peer_grading_service.py b/common/lib/xmodule/xmodule/peer_grading_service.py new file mode 100644 index 0000000000..6b30f4e043 --- /dev/null +++ b/common/lib/xmodule/xmodule/peer_grading_service.py @@ -0,0 +1,160 @@ +import json +import logging +import requests +from requests.exceptions import RequestException, ConnectionError, HTTPError +import sys + +#TODO: Settings import is needed now in order to specify the URL where to find the peer grading service. +#Eventually, the goal is to replace the global django settings import with settings specifically +#for this xmodule. There is no easy way to do this now, so piggybacking on the django settings +#makes sense. 
+from django.conf import settings + +from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError +from lxml import etree +from grading_service_module import GradingService, GradingServiceError + +log=logging.getLogger(__name__) + +class GradingServiceError(Exception): + pass + +class PeerGradingService(GradingService): + """ + Interface with the grading controller for peer grading + """ + def __init__(self, config, system): + config['system'] = system + super(PeerGradingService, self).__init__(config) + self.get_next_submission_url = self.url + '/get_next_submission/' + self.save_grade_url = self.url + '/save_grade/' + self.is_student_calibrated_url = self.url + '/is_student_calibrated/' + self.show_calibration_essay_url = self.url + '/show_calibration_essay/' + self.save_calibration_essay_url = self.url + '/save_calibration_essay/' + self.get_problem_list_url = self.url + '/get_problem_list/' + self.get_notifications_url = self.url + '/get_notifications/' + self.get_data_for_location_url = self.url + '/get_data_for_location/' + self.system = system + + def get_data_for_location(self, problem_location, student_id): + response = self.get(self.get_data_for_location_url, + {'location': problem_location, 'student_id': student_id}) + return self.try_to_decode(response) + + def get_next_submission(self, problem_location, grader_id): + response = self.get(self.get_next_submission_url, + {'location': problem_location, 'grader_id': grader_id}) + return self.try_to_decode(self._render_rubric(response)) + + def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged): + data = {'grader_id' : grader_id, + 'submission_id' : submission_id, + 'score' : score, + 'feedback' : feedback, + 'submission_key': submission_key, + 'location': location, + 'rubric_scores': rubric_scores, + 'rubric_scores_complete': True, + 'submission_flagged' : submission_flagged} + return self.try_to_decode(self.post(self.save_grade_url, data)) + + def is_student_calibrated(self, problem_location, grader_id): + params = {'problem_id' : problem_location, 'student_id': grader_id} + return self.try_to_decode(self.get(self.is_student_calibrated_url, params)) + + def show_calibration_essay(self, problem_location, grader_id): + params = {'problem_id' : problem_location, 'student_id': grader_id} + response = self.get(self.show_calibration_essay_url, params) + return self.try_to_decode(self._render_rubric(response)) + + def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key, + score, feedback, rubric_scores): + data = {'location': problem_location, + 'student_id': grader_id, + 'calibration_essay_id': calibration_essay_id, + 'submission_key': submission_key, + 'score': score, + 'feedback': feedback, + 'rubric_scores[]': rubric_scores, + 'rubric_scores_complete': True} + return self.try_to_decode(self.post(self.save_calibration_essay_url, data)) + + def get_problem_list(self, course_id, grader_id): + params = {'course_id': course_id, 'student_id': grader_id} + response = self.get(self.get_problem_list_url, params) + return self.try_to_decode(response) + + def get_notifications(self, course_id, grader_id): + params = {'course_id': course_id, 'student_id': grader_id} + response = self.get(self.get_notifications_url, params) + return self.try_to_decode(response) + + def try_to_decode(self, text): + try: + text = json.loads(text) + except: + pass + return text + +""" +This is a mock peer grading service that can be 
used for unit tests +without making actual service calls to the grading controller +""" +class MockPeerGradingService(object): + def get_next_submission(self, problem_location, grader_id): + return json.dumps({'success': True, + 'submission_id':1, + 'submission_key': "", + 'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4}) + + def save_grade(self, location, grader_id, submission_id, + score, feedback, submission_key): + return json.dumps({'success': True}) + + def is_student_calibrated(self, problem_location, grader_id): + return json.dumps({'success': True, 'calibrated': True}) + + def show_calibration_essay(self, problem_location, grader_id): + return json.dumps({'success': True, + 'submission_id':1, + 'submission_key': '', + 'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4}) + + def save_calibration_essay(self, problem_location, grader_id, + calibration_essay_id, submission_key, score, feedback): + return {'success': True, 'actual_score': 2} + + def get_problem_list(self, course_id, grader_id): + return json.dumps({'success': True, + 'problem_list': [ + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', + 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5}), + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', + 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5}) + ]}) + +_service = None +def peer_grading_service(system): + """ + Return a peer grading service instance--if settings.MOCK_PEER_GRADING is True, + returns a mock one, otherwise a real one. + + Caches the result, so changing the setting after the first call to this + function will have no effect. + """ + global _service + if _service is not None: + return _service + + if settings.MOCK_PEER_GRADING: + _service = MockPeerGradingService() + else: + _service = PeerGradingService(settings.PEER_GRADING_INTERFACE, system) + + return _service diff --git a/common/test/data/toy/peergrading/init.xml b/common/test/data/toy/peergrading/init.xml new file mode 100644 index 0000000000..2fe3a8fd85 --- /dev/null +++ b/common/test/data/toy/peergrading/init.xml @@ -0,0 +1 @@ + diff --git a/lms/djangoapps/open_ended_grading/controller_query_service.py b/lms/djangoapps/open_ended_grading/controller_query_service.py index d40c9b4428..5d2c40b6ce 100644 --- a/lms/djangoapps/open_ended_grading/controller_query_service.py +++ b/lms/djangoapps/open_ended_grading/controller_query_service.py @@ -3,11 +3,12 @@ import logging import requests from requests.exceptions import RequestException, ConnectionError, HTTPError import sys -from grading_service import GradingService -from grading_service import GradingServiceError +from xmodule.grading_service_module import GradingService, GradingServiceError from django.conf import settings from django.http import HttpResponse, Http404 +from xmodule.x_module import ModuleSystem +from mitxmako.shortcuts import render_to_string log = logging.getLogger(__name__) @@ -16,6 +17,7 @@ class ControllerQueryService(GradingService): Interface to staff grading backend. 
""" def __init__(self, config): + config['system'] = ModuleSystem(None,None,None,render_to_string,None) super(ControllerQueryService, self).__init__(config) self.check_eta_url = self.url + '/get_submission_eta/' self.is_unique_url = self.url + '/is_name_unique/' diff --git a/lms/djangoapps/open_ended_grading/open_ended_notifications.py b/lms/djangoapps/open_ended_grading/open_ended_notifications.py index fec893894f..26f7339291 100644 --- a/lms/djangoapps/open_ended_grading/open_ended_notifications.py +++ b/lms/djangoapps/open_ended_grading/open_ended_notifications.py @@ -1,6 +1,5 @@ from django.conf import settings from staff_grading_service import StaffGradingService -from peer_grading_service import PeerGradingService from open_ended_grading.controller_query_service import ControllerQueryService import json from student.models import unique_id_for_user @@ -10,6 +9,7 @@ import logging from courseware.access import has_access from util.cache import cache import datetime +from xmodule import peer_grading_service log=logging.getLogger(__name__) diff --git a/lms/djangoapps/open_ended_grading/peer_grading_service.py b/lms/djangoapps/open_ended_grading/peer_grading_service.py deleted file mode 100644 index 23e1488d9b..0000000000 --- a/lms/djangoapps/open_ended_grading/peer_grading_service.py +++ /dev/null @@ -1,389 +0,0 @@ -""" -This module provides an interface on the grading-service backend -for peer grading - -Use peer_grading_service() to get the version specified -in settings.PEER_GRADING_INTERFACE - -""" -import json -import logging -import requests -from requests.exceptions import RequestException, ConnectionError, HTTPError -import sys - -from django.conf import settings -from django.http import HttpResponse, Http404 -from grading_service import GradingService -from grading_service import GradingServiceError - -from courseware.access import has_access -from util.json_request import expect_json -from xmodule.course_module import CourseDescriptor -from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric -from student.models import unique_id_for_user -from lxml import etree - -log = logging.getLogger(__name__) - -""" -This is a mock peer grading service that can be used for unit tests -without making actual service calls to the grading controller -""" -class MockPeerGradingService(object): - # TODO: get this rubric parsed and working - rubric = """ - - Description - - - - """ - - def get_next_submission(self, problem_location, grader_id): - return json.dumps({'success': True, - 'submission_id':1, - 'submission_key': "", - 'student_response': 'fake student response', - 'prompt': 'fake submission prompt', - 'rubric': 'fake rubric', - 'max_score': 4}) - - def save_grade(self, location, grader_id, submission_id, - score, feedback, submission_key, rubric_scores, submission_flagged): - return json.dumps({'success': True}) - - def is_student_calibrated(self, problem_location, grader_id): - return json.dumps({'success': True, 'calibrated': True}) - - def show_calibration_essay(self, problem_location, grader_id): - return json.dumps({'success': True, - 'submission_id':1, - 'submission_key': '', - 'student_response': 'fake student response', - 'prompt': 'fake submission prompt', - 'rubric': 'fake rubric', - 'max_score': 4}) - - def save_calibration_essay(self, problem_location, grader_id, - calibration_essay_id, submission_key, score, feedback, rubric_scores): - return json.dumps({'success': True, 'actual_score': 2}) - - def get_problem_list(self, course_id, grader_id): - return 
json.dumps({'success': True,
-                           'problem_list': [
-                               json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
-                                           'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'num_required': 7}),
-                               json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
-                                           'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'num_required': 8})
-                           ]})
-
-class PeerGradingService(GradingService):
-    """
-    Interface with the grading controller for peer grading
-    """
-    def __init__(self, config):
-        super(PeerGradingService, self).__init__(config)
-        self.get_next_submission_url = self.url + '/get_next_submission/'
-        self.save_grade_url = self.url + '/save_grade/'
-        self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
-        self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
-        self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
-        self.get_problem_list_url = self.url + '/get_problem_list/'
-        self.get_notifications_url = self.url + '/get_notifications/'
-
-    def get_next_submission(self, problem_location, grader_id):
-        response = self.get(self.get_next_submission_url,
-                            {'location': problem_location, 'grader_id': grader_id})
-        return json.dumps(self._render_rubric(response))
-
-    def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
-        data = {'grader_id' : grader_id,
-                'submission_id' : submission_id,
-                'score' : score,
-                'feedback' : feedback,
-                'submission_key': submission_key,
-                'location': location,
-                'rubric_scores': rubric_scores,
-                'rubric_scores_complete': True,
-                'submission_flagged' : submission_flagged}
-        return self.post(self.save_grade_url, data)
-
-    def is_student_calibrated(self, problem_location, grader_id):
-        params = {'problem_id' : problem_location, 'student_id': grader_id}
-        return self.get(self.is_student_calibrated_url, params)
-
-    def show_calibration_essay(self, problem_location, grader_id):
-        params = {'problem_id' : problem_location, 'student_id': grader_id}
-        response = self.get(self.show_calibration_essay_url, params)
-        return json.dumps(self._render_rubric(response))
-
-    def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key,
-                               score, feedback, rubric_scores):
-        data = {'location': problem_location,
-                'student_id': grader_id,
-                'calibration_essay_id': calibration_essay_id,
-                'submission_key': submission_key,
-                'score': score,
-                'feedback': feedback,
-                'rubric_scores[]': rubric_scores,
-                'rubric_scores_complete': True}
-        return self.post(self.save_calibration_essay_url, data)
-
-    def get_problem_list(self, course_id, grader_id):
-        params = {'course_id': course_id, 'student_id': grader_id}
-        response = self.get(self.get_problem_list_url, params)
-        return response
-
-    def get_notifications(self, course_id, grader_id):
-        params = {'course_id': course_id, 'student_id': grader_id}
-        response = self.get(self.get_notifications_url, params)
-        return response
-
-
-_service = None
-def peer_grading_service():
-    """
-    Return a peer grading service instance--if settings.MOCK_PEER_GRADING is True,
-    returns a mock one, otherwise a real one.
-
-    Caches the result, so changing the setting after the first call to this
-    function will have no effect.
-    """
-    global _service
-    if _service is not None:
-        return _service
-
-    if settings.MOCK_PEER_GRADING:
-        _service = MockPeerGradingService()
-    else:
-        _service = PeerGradingService(settings.PEER_GRADING_INTERFACE)
-
-    return _service
-
-def _err_response(msg):
-    """
-    Return a HttpResponse with a json dump with success=False, and the given error message.
-    """
-    return HttpResponse(json.dumps({'success': False, 'error': msg}),
-                        mimetype="application/json")
-
-def _check_required(request, required):
-    actual = set(request.POST.keys())
-    missing = required - actual
-    if len(missing) > 0:
-        return False, "Missing required keys: {0}".format(', '.join(missing))
-    else:
-        return True, ""
-
-def _check_post(request):
-    if request.method != 'POST':
-        raise Http404
-
-
-def get_next_submission(request, course_id):
-    """
-    Makes a call to the grading controller for the next essay that should be graded
-    Returns a json dict with the following keys:
-
-    'success': bool
-
-    'submission_id': a unique identifier for the submission, to be passed back
-        with the grade.
-
-    'submission': the submission, rendered as read-only html for grading
-
-    'rubric': the rubric, also rendered as html.
-
-    'submission_key': a key associated with the submission for validation reasons
-
-    'error': if success is False, will have an error message with more info.
-    """
-    _check_post(request)
-    required = set(['location'])
-    success, message = _check_required(request, required)
-    if not success:
-        return _err_response(message)
-    grader_id = unique_id_for_user(request.user)
-    p = request.POST
-    location = p['location']
-
-    try:
-        response = peer_grading_service().get_next_submission(location, grader_id)
-        return HttpResponse(response,
-                            mimetype="application/json")
-    except GradingServiceError:
-        log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}"
-                      .format(peer_grading_service().url, location, grader_id))
-        return json.dumps({'success': False,
-                           'error': 'Could not connect to grading service'})
-
-def save_grade(request, course_id):
-    """
-    Saves the grade of a given submission.
-    Input:
-        The request should have the following keys:
-            location - problem location
-            submission_id - id associated with this submission
-            submission_key - submission key given for validation purposes
-            score - the grade that was given to the submission
-            feedback - the feedback from the student
-    Returns
-        A json object with the following keys:
-        success: bool indicating whether the save was a success
-        error: if there was an error in the submission, this is the error message
-    """
-    _check_post(request)
-    required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged'])
-    success, message = _check_required(request, required)
-    if not success:
-        return _err_response(message)
-    grader_id = unique_id_for_user(request.user)
-    p = request.POST
-    location = p['location']
-    submission_id = p['submission_id']
-    score = p['score']
-    feedback = p['feedback']
-    submission_key = p['submission_key']
-    rubric_scores = p.getlist('rubric_scores[]')
-    submission_flagged = p['submission_flagged']
-    try:
-        response = peer_grading_service().save_grade(location, grader_id, submission_id,
-                                                     score, feedback, submission_key, rubric_scores, submission_flagged)
-        return HttpResponse(response, mimetype="application/json")
-    except GradingServiceError:
-        log.exception("""Error saving grade.
-                      server url: {0}, location: {1}, submission_id:{2},
-                      submission_key: {3}, score: {4}"""
-                      .format(peer_grading_service().url,
-                              location, submission_id, submission_key, score)
-                      )
-        return json.dumps({'success': False,
-                           'error': 'Could not connect to grading service'})
-
-
-
-def is_student_calibrated(request, course_id):
-    """
-    Calls the grading controller to see if the given student is calibrated
-    on the given problem
-
-    Input:
-        In the request, we need the following arguments:
-        location - problem location
-
-    Returns:
-        Json object with the following keys
-        success - bool indicating whether or not the call was successful
-        calibrated - true if the grader has fully calibrated and can now move on to grading
-                   - false if the grader is still working on calibration problems
-        total_calibrated_on_so_far - the number of calibration essays for this problem
-            that this grader has graded
-    """
-    _check_post(request)
-    required = set(['location'])
-    success, message = _check_required(request, required)
-    if not success:
-        return _err_response(message)
-    grader_id = unique_id_for_user(request.user)
-    p = request.POST
-    location = p['location']
-
-    try:
-        response = peer_grading_service().is_student_calibrated(location, grader_id)
-        return HttpResponse(response, mimetype="application/json")
-    except GradingServiceError:
-        log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}"
-                      .format(peer_grading_service().url, grader_id, location))
-        return json.dumps({'success': False,
-                           'error': 'Could not connect to grading service'})
-
-
-
-def show_calibration_essay(request, course_id):
-    """
-    Fetch the next calibration essay from the grading controller and return it
-    Inputs:
-        In the request
-        location - problem location
-
-    Returns:
-        A json dict with the following keys
-        'success': bool
-
-        'submission_id': a unique identifier for the submission, to be passed back
-            with the grade.
-
-        'submission': the submission, rendered as read-only html for grading
-
-        'rubric': the rubric, also rendered as html.
-
-        'submission_key': a key associated with the submission for validation reasons
-
-        'error': if success is False, will have an error message with more info.
-
-    """
-    _check_post(request)
-
-    required = set(['location'])
-    success, message = _check_required(request, required)
-    if not success:
-        return _err_response(message)
-
-    grader_id = unique_id_for_user(request.user)
-    p = request.POST
-    location = p['location']
-    try:
-        response = peer_grading_service().show_calibration_essay(location, grader_id)
-        return HttpResponse(response, mimetype="application/json")
-    except GradingServiceError:
-        log.exception("Error from grading service. server url: {0}, location: {0}"
-                      .format(peer_grading_service().url, location))
-        return json.dumps({'success': False,
-                           'error': 'Could not connect to grading service'})
-    # if we can't parse the rubric into HTML,
-    except etree.XMLSyntaxError:
-        log.exception("Cannot parse rubric string. Raw string: {0}"
-                      .format(rubric))
-        return json.dumps({'success': False,
-                           'error': 'Error displaying submission'})
-
-
-def save_calibration_essay(request, course_id):
-    """
-    Saves the grader's grade of a given calibration.
-
-    Input:
-        The request should have the following keys:
-            location - problem location
-            submission_id - id associated with this submission
-            submission_key - submission key given for validation purposes
-            score - the grade that was given to the submission
-            feedback - the feedback from the student
-    Returns
-        A json object with the following keys:
-        success: bool indicating whether the save was a success
-        error: if there was an error in the submission, this is the error message
-        actual_score: the score that the instructor gave to this calibration essay
-
-    """
-    _check_post(request)
-
-    required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
-    success, message = _check_required(request, required)
-    if not success:
-        return _err_response(message)
-    grader_id = unique_id_for_user(request.user)
-    p = request.POST
-    location = p['location']
-    calibration_essay_id = p['submission_id']
-    submission_key = p['submission_key']
-    score = p['score']
-    feedback = p['feedback']
-    rubric_scores = p.getlist('rubric_scores[]')
-
-    try:
-        response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id,
-                                                                 submission_key, score, feedback, rubric_scores)
-        return HttpResponse(response, mimetype="application/json")
-    except GradingServiceError:
-        log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
-        return _err_response('Could not connect to grading service')
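Note: the Django view functions deleted above all share one request-validation pattern — require a POST, check that the expected keys are present, and answer with a JSON error payload otherwise — and the rewritten tests further down assert the same "Missing required keys" error strings against the new PeerGradingModule. A minimal standalone sketch of that pattern (plain dicts stand in for request.POST; the helper names here are illustrative, not part of this change):

    import json

    def check_required(post_data, required):
        # Mirrors _check_required above: report any expected keys that are absent.
        missing = required - set(post_data.keys())
        if missing:
            return False, "Missing required keys: {0}".format(', '.join(missing))
        return True, ""

    def err_response(msg):
        # The real views wrap this payload in an HttpResponse with mimetype="application/json".
        return json.dumps({'success': False, 'error': msg})

    success, message = check_required({'location': 'i4x://org/course/problem/demo'},
                                      set(['location', 'submission_id']))
    if not success:
        print err_response(message)   # {"success": false, "error": "Missing required keys: submission_id"}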
diff --git a/lms/djangoapps/open_ended_grading/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py
index 4e776b688b..d8bee99ac7 100644
--- a/lms/djangoapps/open_ended_grading/staff_grading_service.py
+++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py
@@ -7,8 +7,7 @@ import logging
 import requests
 from requests.exceptions import RequestException, ConnectionError, HTTPError
 import sys
-from grading_service import GradingService
-from grading_service import GradingServiceError
+from xmodule.grading_service_module import GradingService, GradingServiceError
 
 from django.conf import settings
 from django.http import HttpResponse, Http404
@@ -22,8 +21,6 @@ from mitxmako.shortcuts import render_to_string
 
 log = logging.getLogger(__name__)
 
-
-
 class MockStaffGradingService(object):
     """
     A simple mockup of a staff grading service, testing.
@@ -64,6 +61,7 @@ class StaffGradingService(GradingService):
     Interface to staff grading backend.
     """
     def __init__(self, config):
+        config['system'] = ModuleSystem(None,None,None,render_to_string,None)
         super(StaffGradingService, self).__init__(config)
         self.get_next_url = self.url + '/get_next_submission/'
         self.save_grade_url = self.url + '/save_grade/'
diff --git a/lms/djangoapps/open_ended_grading/tests.py b/lms/djangoapps/open_ended_grading/tests.py
index 131fe5ad9f..3ee8352c5c 100644
--- a/lms/djangoapps/open_ended_grading/tests.py
+++ b/lms/djangoapps/open_ended_grading/tests.py
@@ -6,7 +6,7 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open
 from django.test import TestCase
 from open_ended_grading import staff_grading_service
-from open_ended_grading import peer_grading_service
+from xmodule import peer_grading_service, peer_grading_module
 from django.core.urlresolvers import reverse
 from django.contrib.auth.models import Group
@@ -17,10 +17,13 @@ import xmodule.modulestore.django
 from nose import SkipTest
 from mock import patch, Mock
 import json
+from xmodule.x_module import ModuleSystem
+from mitxmako.shortcuts import render_to_string
 import logging
 log = logging.getLogger(__name__)
 from override_settings import override_settings
+from django.http import QueryDict
 
 @override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE)
@@ -98,6 +101,7 @@ class TestStaffGradingService(ct.PageLoader):
                 'submission_id': '123',
                 'location': self.location,
                 'rubric_scores[]': ['1', '2']}
+
         r = self.check_for_post_code(200, url, data)
         d = json.loads(r.content)
         self.assertTrue(d['success'], str(d))
@@ -136,19 +140,21 @@ class TestPeerGradingService(ct.PageLoader):
         self.course_id = "edX/toy/2012_Fall"
         self.toy = modulestore().get_course(self.course_id)
+        location = "i4x://edX/toy/peergrading/init"
 
-        self.mock_service = peer_grading_service.peer_grading_service()
+        self.mock_service = peer_grading_service.MockPeerGradingService()
+        self.system = ModuleSystem(location, None, None, render_to_string, None)
+        self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system)
+        self.peer_module = peer_grading_module.PeerGradingModule(self.system,location,"",self.descriptor)
+        self.peer_module.peer_gs = self.mock_service
 
         self.logout()
 
     def test_get_next_submission_success(self):
-        self.login(self.student, self.password)
-
-        url = reverse('peer_grading_get_next_submission', kwargs={'course_id': self.course_id})
         data = {'location': self.location}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.get_next_submission(data)
+        d = json.loads(r)
         self.assertTrue(d['success'])
         self.assertIsNotNone(d['submission_id'])
         self.assertIsNotNone(d['prompt'])
@@ -156,63 +162,48 @@
         self.assertIsNotNone(d['max_score'])
 
     def test_get_next_submission_missing_location(self):
-        self.login(self.student, self.password)
-        url = reverse('peer_grading_get_next_submission', kwargs={'course_id': self.course_id})
         data = {}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.get_next_submission(data)
+        d = r
         self.assertFalse(d['success'])
         self.assertEqual(d['error'], "Missing required keys: location")
 
     def test_save_grade_success(self):
-        self.login(self.student, self.password)
-        url = reverse('peer_grading_save_grade', kwargs={'course_id': self.course_id})
-        data = {'location': self.location,
-                'submission_id': '1',
-                'submission_key': 'fake key',
-                'score': '2',
-                'feedback': 'This is feedback',
-                'rubric_scores[]': [1, 2],
-                'submission_flagged' : False}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        raise SkipTest()
+        data = 'rubric_scores[]=1|rubric_scores[]=2|location=' + self.location + '|submission_id=1|submission_key=fake key|score=2|feedback=feedback|submission_flagged=False'
+        qdict = QueryDict(data.replace("|","&"))
+        r = self.peer_module.save_grade(qdict)
+        d = r
         self.assertTrue(d['success'])
 
     def test_save_grade_missing_keys(self):
-        self.login(self.student, self.password)
-        url = reverse('peer_grading_save_grade', kwargs={'course_id': self.course_id})
         data = {}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.save_grade(data)
+        d = r
        self.assertFalse(d['success'])
        self.assertTrue(d['error'].find('Missing required keys:') > -1)
 
     def test_is_calibrated_success(self):
-        self.login(self.student, self.password)
-        url = reverse('peer_grading_is_student_calibrated', kwargs={'course_id': self.course_id})
         data = {'location': self.location}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.is_student_calibrated(data)
+        d = json.loads(r)
         self.assertTrue(d['success'])
         self.assertTrue('calibrated' in d)
 
     def test_is_calibrated_failure(self):
-        self.login(self.student, self.password)
-        url = reverse('peer_grading_is_student_calibrated', kwargs={'course_id': self.course_id})
         data = {}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.is_student_calibrated(data)
+        d = r
         self.assertFalse(d['success'])
         self.assertFalse('calibrated' in d)
 
     def test_show_calibration_essay_success(self):
-        self.login(self.student, self.password)
-
-        url = reverse('peer_grading_show_calibration_essay', kwargs={'course_id': self.course_id})
         data = {'location': self.location}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.show_calibration_essay(data)
+        d = json.loads(r)
+        log.debug(d)
+        log.debug(type(d))
         self.assertTrue(d['success'])
         self.assertIsNotNone(d['submission_id'])
         self.assertIsNotNone(d['prompt'])
@@ -220,37 +211,27 @@ class TestPeerGradingService(ct.PageLoader):
         self.assertIsNotNone(d['max_score'])
 
     def test_show_calibration_essay_missing_key(self):
-        self.login(self.student, self.password)
-
-        url = reverse('peer_grading_show_calibration_essay', kwargs={'course_id': self.course_id})
         data = {}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.show_calibration_essay(data)
+        d = r
         self.assertFalse(d['success'])
         self.assertEqual(d['error'], "Missing required keys: location")
 
     def test_save_calibration_essay_success(self):
-        self.login(self.student, self.password)
-        url = reverse('peer_grading_save_calibration_essay', kwargs={'course_id': self.course_id})
-        data = {'location': self.location,
-                'submission_id': '1',
-                'submission_key': 'fake key',
-                'score': '2',
-                'feedback': 'This is feedback',
-                'rubric_scores[]': [1, 2]}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        raise SkipTest()
+        data = 'rubric_scores[]=1|rubric_scores[]=2|location=' + self.location + '|submission_id=1|submission_key=fake key|score=2|feedback=feedback|submission_flagged=False'
+        qdict = QueryDict(data.replace("|","&"))
+        r = self.peer_module.save_calibration_essay(qdict)
+        d = r
         self.assertTrue(d['success'])
         self.assertTrue('actual_score' in d)
 
     def test_save_calibration_essay_missing_keys(self):
-        self.login(self.student, self.password)
-        url = reverse('peer_grading_save_calibration_essay', kwargs={'course_id': self.course_id})
         data = {}
-        r = self.check_for_post_code(200, url, data)
-        d = json.loads(r.content)
+        r = self.peer_module.save_calibration_essay(data)
+        d = r
         self.assertFalse(d['success'])
         self.assertTrue(d['error'].find('Missing required keys:') > -1)
         self.assertFalse('actual_score' in d)
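Note: the reworked tests above no longer post through Django URLs; they call the PeerGradingModule handlers directly and, for the save_* cases, build a QueryDict so that repeated rubric_scores[] keys behave like real submitted form data. A small illustration of that QueryDict behaviour (the query string is made up for the example, and it assumes Django settings are configured, as they are under the LMS test runner):

    from django.http import QueryDict

    qdict = QueryDict('rubric_scores[]=1&rubric_scores[]=2&location=i4x://edX/toy/peergrading/init&score=2')

    qdict['score']                    # -> u'2'  (single-item access returns the last value for the key)
    qdict.getlist('rubric_scores[]')  # -> [u'1', u'2']  (what p.getlist('rubric_scores[]') relies on)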
diff --git a/lms/djangoapps/open_ended_grading/views.py b/lms/djangoapps/open_ended_grading/views.py
index 156bdadddd..af7f930207 100644
--- a/lms/djangoapps/open_ended_grading/views.py
+++ b/lms/djangoapps/open_ended_grading/views.py
@@ -2,6 +2,7 @@
 import logging
 import urllib
+import re
 
 from django.conf import settings
 from django.views.decorators.cache import cache_control
@@ -11,10 +12,8 @@ from django.core.urlresolvers import reverse
 from student.models import unique_id_for_user
 from courseware.courses import get_course_with_access
-from peer_grading_service import PeerGradingService
-from peer_grading_service import MockPeerGradingService
 from controller_query_service import ControllerQueryService
-from grading_service import GradingServiceError
+from xmodule.grading_service_module import GradingServiceError
 import json
 from .staff_grading import StaffGrading
 from student.models import unique_id_for_user
@@ -25,15 +24,11 @@ import open_ended_notifications
 from xmodule.modulestore.django import modulestore
 from xmodule.modulestore import search
-from django.http import HttpResponse, Http404
+from django.http import HttpResponse, Http404, HttpResponseRedirect
 
 log = logging.getLogger(__name__)
 
 template_imports = {'urllib': urllib}
-if settings.MOCK_PEER_GRADING:
-    peer_gs = MockPeerGradingService()
-else:
-    peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE)
 
 controller_url = open_ended_util.get_controller_url()
 controller_qs = ControllerQueryService(controller_url)
@@ -81,66 +76,44 @@ def staff_grading(request, course_id):
         # Checked above
         'staff_access': True, })
-
 @cache_control(no_cache=True, no_store=True, must_revalidate=True)
 def peer_grading(request, course_id):
     '''
     Show a peer grading interface
     '''
-    course = get_course_with_access(request.user, course_id, 'load')
-    # call problem list service
-    success = False
-    error_text = ""
-    problem_list = []
+    course = get_course_with_access(request.user, course_id, 'load')
+    course_id_parts = course.id.split("/")
+    course_id_norun = "/".join(course_id_parts[0:2])
+    pg_location = "i4x://" + course_id_norun + "/peergrading/init"
+
+    base_course_url = reverse('courses')
 
     try:
-        problem_list_json = peer_gs.get_problem_list(course_id, unique_id_for_user(request.user))
-        problem_list_dict = json.loads(problem_list_json)
-        success = problem_list_dict['success']
-        if 'error' in problem_list_dict:
-            error_text = problem_list_dict['error']
+        problem_url_parts = search.path_to_location(modulestore(), course.id, pg_location)
+        problem_url = generate_problem_url(problem_url_parts, base_course_url)
 
-        problem_list = problem_list_dict['problem_list']
+
+        return HttpResponseRedirect(problem_url)
+    except:
+        error_message = "Error with initializing peer grading. Centralized module does not exist. Please contact course staff."
+        log.exception(error_message + "Current course is: {0}".format(course_id))
+        return HttpResponse(error_message)
 
-    except GradingServiceError:
-        error_text = "Error occured while contacting the grading service"
-        success = False
-    # catch error if if the json loads fails
-    except ValueError:
-        error_text = "Could not get problem list"
-        success = False
+def generate_problem_url(problem_url_parts, base_course_url):
+    """
+    From a list of problem url parts generated by search.path_to_location and a base course url, generates a url to a problem
+    @param problem_url_parts: Output of search.path_to_location
+    @param base_course_url: Base url of a given course
+    @return: A path to the problem
+    """
+    problem_url = base_course_url + "/"
+    for z in xrange(0,len(problem_url_parts)):
+        part = problem_url_parts[z]
+        if part is not None:
+            if z==1:
+                problem_url += "courseware/"
+            problem_url += part + "/"
+    return problem_url
 
-    ajax_url = _reverse_with_slash('peer_grading', course_id)
-
-    return render_to_response('peer_grading/peer_grading.html', {
-        'course': course,
-        'course_id': course_id,
-        'ajax_url': ajax_url,
-        'success': success,
-        'problem_list': problem_list,
-        'error_text': error_text,
-        # Checked above
-        'staff_access': False, })
-
-
-@cache_control(no_cache=True, no_store=True, must_revalidate=True)
-def peer_grading_problem(request, course_id):
-    '''
-    Show individual problem interface
-    '''
-    course = get_course_with_access(request.user, course_id, 'load')
-    problem_location = request.GET.get("location")
-
-    ajax_url = _reverse_with_slash('peer_grading', course_id)
-
-    return render_to_response('peer_grading/peer_grading_problem.html', {
-        'view_html': '',
-        'course': course,
-        'problem_location': problem_location,
-        'course_id': course_id,
-        'ajax_url': ajax_url,
-        # Checked above
-        'staff_access': False, })
 
 @cache_control(no_cache=True, no_store=True, must_revalidate=True)
 def student_problem_list(request, course_id):
@@ -156,28 +129,22 @@ def student_problem_list(request, course_id):
     problem_list = []
     base_course_url = reverse('courses')
 
-    try:
-        problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user))
-        problem_list_dict = json.loads(problem_list_json)
-        success = problem_list_dict['success']
-        if 'error' in problem_list_dict:
-            error_text = problem_list_dict['error']
-            problem_list = []
-        else:
-            problem_list = problem_list_dict['problem_list']
+    #try:
+    problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user))
+    problem_list_dict = json.loads(problem_list_json)
+    success = problem_list_dict['success']
+    if 'error' in problem_list_dict:
+        error_text = problem_list_dict['error']
+        problem_list = []
+    else:
+        problem_list = problem_list_dict['problem_list']
 
-    for i in xrange(0,len(problem_list)):
-        problem_url_parts = search.path_to_location(modulestore(), course.id, problem_list[i]['location'])
-        problem_url = base_course_url + "/"
-        for z in xrange(0,len(problem_url_parts)):
-            part = problem_url_parts[z]
-            if part is not None:
-                if z==1:
-                    problem_url += "courseware/"
-                problem_url += part + "/"
-
-        problem_list[i].update({'actual_url' : problem_url})
+    for i in xrange(0,len(problem_list)):
+        problem_url_parts = search.path_to_location(modulestore(), course.id, problem_list[i]['location'])
+        problem_url = generate_problem_url(problem_url_parts, base_course_url)
+        problem_list[i].update({'actual_url' : problem_url})
+    """
     except GradingServiceError:
         error_text = "Error occured while contacting the grading service"
         success = False
@@ -185,6 +152,7 @@
     except ValueError:
         error_text = "Could not get problem list"
         success = False
+    """
 
     ajax_url = _reverse_with_slash('open_ended_problems', course_id)
 
@@ -231,16 +199,17 @@ def flagged_problem_list(request, course_id):
         success = False
 
     ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_id)
-
-    return render_to_response('open_ended_problems/open_ended_flagged_problems.html', {
-        'course': course,
-        'course_id': course_id,
-        'ajax_url': ajax_url,
-        'success': success,
-        'problem_list': problem_list,
-        'error_text': error_text,
-        # Checked above
-        'staff_access': True, })
+    context = {
+        'course': course,
+        'course_id': course_id,
+        'ajax_url': ajax_url,
+        'success': success,
+        'problem_list': problem_list,
+        'error_text': error_text,
+        # Checked above
+        'staff_access': True,
+    }
+    return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
 
 @cache_control(no_cache=True, no_store=True, must_revalidate=True)
 def combined_notifications(request, course_id):
@@ -322,7 +291,7 @@ def take_action_on_flags(request, course_id):
         response = controller_qs.take_action_on_flags(course_id, student_id, submission_id, action_type)
         return HttpResponse(response, mimetype="application/json")
     except GradingServiceError:
-        log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
+        log.exception("Error saving calibration grade, submission_id: {0}, submission_key: {1}, grader_id: {2}".format(submission_id, submission_key, grader_id))
         return _err_response('Could not connect to grading service')
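Note: the new generate_problem_url helper above simply joins the pieces returned by search.path_to_location, slipping a courseware/ segment in after the course id. A quick illustration with hypothetical path parts (and assuming reverse('courses') resolves to '/courses'):

    parts = ("edX/toy/2012_Fall", "Overview", "Welcome", None)   # made-up output of path_to_location
    generate_problem_url(parts, "/courses")
    # -> '/courses/edX/toy/2012_Fall/courseware/Overview/Welcome/'
    #    index 0 is the course id, index 1 triggers the extra 'courseware/' segment,
    #    and None entries (an unset position) are skipped.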
diff --git a/lms/envs/common.py b/lms/envs/common.py
index bf85d1692a..18fe4d2f58 100644
--- a/lms/envs/common.py
+++ b/lms/envs/common.py
@@ -419,7 +419,6 @@ main_vendor_js = [
 discussion_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/discussion/**/*.coffee'))
 staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.coffee'))
-peer_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static','coffee/src/peer_grading/**/*.coffee'))
 open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static','coffee/src/open_ended/**/*.coffee'))
 
 PIPELINE_CSS = {
@@ -451,7 +450,7 @@ PIPELINE_JS = {
         'source_filenames': sorted(
             set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.coffee') +
                 rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.coffee')) -
-            set(courseware_js + discussion_js + staff_grading_js + peer_grading_js + open_ended_js)
+            set(courseware_js + discussion_js + staff_grading_js + open_ended_js)
         ) + [
             'js/form.ext.js',
             'js/my_courses_dropdown.js',
@@ -481,10 +480,6 @@ PIPELINE_JS = {
         'source_filenames': staff_grading_js,
         'output_filename': 'js/staff_grading.js'
     },
-    'peer_grading' : {
-        'source_filenames': peer_grading_js,
-        'output_filename': 'js/peer_grading.js'
-    },
     'open_ended' : {
         'source_filenames': open_ended_js,
         'output_filename': 'js/open_ended.js'
diff --git a/lms/static/coffee/src/peer_grading/peer_grading.coffee b/lms/static/coffee/src/peer_grading/peer_grading.coffee
deleted file mode 100644
index ed79ba9c71..0000000000
--- a/lms/static/coffee/src/peer_grading/peer_grading.coffee
+++ /dev/null
@@ -1,27 +0,0 @@
-# This is a simple class that just hides the error container
-# and message container when they are empty
-# Can (and should be) expanded upon when our problem list
-# becomes more sophisticated
-class PeerGrading
-  constructor: () ->
-    @error_container = $('.error-container')
-    @error_container.toggle(not @error_container.is(':empty'))
-
-    @message_container = $('.message-container')
-    @message_container.toggle(not @message_container.is(':empty'))
-
-    @problem_list = $('.problem-list')
-    @construct_progress_bar()
-
-  construct_progress_bar: () =>
-    problems = @problem_list.find('tr').next()
-    problems.each( (index, element) =>
-      problem = $(element)
-      progress_bar = problem.find('.progress-bar')
-      bar_value = parseInt(problem.data('graded'))
-      bar_max = parseInt(problem.data('required')) + bar_value
-      progress_bar.progressbar({value: bar_value, max: bar_max})
-    )
-
-
-$(document).ready(() -> new PeerGrading())
diff --git a/lms/static/sass/course/_staff_grading.scss b/lms/static/sass/course/_staff_grading.scss
index 177bd9e5e2..e3de7c8eab 100644
--- a/lms/static/sass/course/_staff_grading.scss
+++ b/lms/static/sass/course/_staff_grading.scss
@@ -120,7 +120,7 @@ div.peer-grading{
     margin-right:20px;
 
     > div {
-      padding: 10px;
+      padding: 2px;
       margin: 0px;
       background: #eee;
       height: 10em;
diff --git a/lms/templates/peer_grading/peer_grading.html b/lms/templates/peer_grading/peer_grading.html
index bd32b33ec2..d309b4486c 100644
--- a/lms/templates/peer_grading/peer_grading.html
+++ b/lms/templates/peer_grading/peer_grading.html
@@ -1,21 +1,5 @@
-<%inherit file="/main.html" />
-<%block name="bodyclass">${course.css_class}
-<%namespace name='static' file='/static_content.html'/>
-
-<%block name="headextra">
-  <%static:css group='course'/>
-
-
-<%block name="title">${course.number} Peer Grading
-
-<%include file="/courseware/course_navigation.html" args="active_page='peer_grading'" />
-
-<%block name="js_extra">
-  <%static:js group='peer_grading'/>
-
-
-
-
+
+
${error_text}

Peer Grading

Instructions

@@ -38,7 +22,7 @@
 %for problem in problem_list:
-        ${problem['problem_name']}
+        ${problem['problem_name']}
         ${problem['num_graded']}
diff --git a/lms/templates/peer_grading/peer_grading_problem.html b/lms/templates/peer_grading/peer_grading_problem.html
index 04ee7415ec..007fd42c8d 100644
--- a/lms/templates/peer_grading/peer_grading_problem.html
+++ b/lms/templates/peer_grading/peer_grading_problem.html
@@ -1,23 +1,5 @@
-
-<%inherit file="/main.html" />
-<%block name="bodyclass">${course.css_class}
-<%namespace name='static' file='/static_content.html'/>
-
-<%block name="headextra">
-  <%static:css group='course'/>
-
-
-<%block name="title">${course.number} Peer Grading.
-
-<%include file="/courseware/course_navigation.html" args="active_page='peer_grading'" />
-
-<%block name="js_extra">
-  <%static:js group='peer_grading'/>
-
-
-
-
-
+
+
diff --git a/lms/urls.py b/lms/urls.py
index 7b7a70b6f2..e9746c2338 100644
--- a/lms/urls.py
+++ b/lms/urls.py
@@ -268,23 +268,6 @@ if settings.COURSEWARE_ENABLED:
     url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/staff_grading/get_problem_list$',
         'open_ended_grading.staff_grading_service.get_problem_list', name='staff_grading_get_problem_list'),
-
-    # Peer Grading
-    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading$',
-        'open_ended_grading.views.peer_grading', name='peer_grading'),
-    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading/problem$',
-        'open_ended_grading.views.peer_grading_problem', name='peer_grading_problem'),
-    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading/get_next_submission$',
-        'open_ended_grading.peer_grading_service.get_next_submission', name='peer_grading_get_next_submission'),
-    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading/show_calibration_essay$',
-        'open_ended_grading.peer_grading_service.show_calibration_essay', name='peer_grading_show_calibration_essay'),
-    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading/is_student_calibrated$',
-        'open_ended_grading.peer_grading_service.is_student_calibrated', name='peer_grading_is_student_calibrated'),
-    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading/save_grade$',
-        'open_ended_grading.peer_grading_service.save_grade', name='peer_grading_save_grade'),
-    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading/save_calibration_essay$',
-        'open_ended_grading.peer_grading_service.save_calibration_essay', name='peer_grading_save_calibration_essay'),
-
     # Open Ended problem list
     url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/open_ended_problems$',
         'open_ended_grading.views.student_problem_list', name='open_ended_problems'),
@@ -317,6 +300,9 @@ if settings.COURSEWARE_ENABLED:
     # Open Ended Notifications
     url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/open_ended_notifications$',
         'open_ended_grading.views.combined_notifications', name='open_ended_notifications'),
+
+    url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/peer_grading$',
+        'open_ended_grading.views.peer_grading', name='peer_grading'),
 )
 
 # discussion forums live within courseware, so courseware must be enabled first
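Note: with the old per-endpoint AJAX routes removed, the only peer-grading URL left points at open_ended_grading.views.peer_grading, which redirects into courseware at a fixed per-course location. The location string is built exactly as in the views.py hunk above; for the toy course id used in the tests it works out as follows (illustrative values only):

    course_id = "edX/toy/2012_Fall"
    course_id_norun = "/".join(course_id.split("/")[0:2])            # "edX/toy"
    pg_location = "i4x://" + course_id_norun + "/peergrading/init"   # "i4x://edX/toy/peergrading/init"
    # search.path_to_location(...) then locates this module in the course,
    # and generate_problem_url(...) turns the result into the redirect target.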