From e41172d55df9f1a0cb142b6a59625eef59dfa519 Mon Sep 17 00:00:00 2001
From: Victor Shnayder
Date: Sun, 20 Jan 2013 11:50:51 -0500
Subject: [PATCH 026/126] Add start of test framework for capa
---
.../xmodule/xmodule/tests/test_capa_module.py | 60 +++++++++++++++++++
1 file changed, 60 insertions(+)
create mode 100644 common/lib/xmodule/xmodule/tests/test_capa_module.py
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
new file mode 100644
index 0000000000..148fd893ff
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -0,0 +1,60 @@
+import json
+from mock import Mock
+import unittest
+
+from xmodule.capa_module import CapaModule
+from xmodule.modulestore import Location
+from lxml import etree
+
+from . import test_system
+
+class CapaFactory(object):
+ """
+ A helper class to create problem modules with various parameters for testing.
+ """
+
+ sample_problem_xml = """
+
+
+
What is pi, to two decimal places?
+
+
+
+
+
+"""
+
+ num = 0
+ @staticmethod
+ def next_num():
+ CapaFactory.num += 1
+ return CapaFactory.num
+
+ @staticmethod
+ def create():
+ definition = {'data': CapaFactory.sample_problem_xml,}
+ location = Location(["i4x", "edX", "capa_test", "problem",
+ "SampleProblem{0}".format(CapaFactory.next_num())])
+ metadata = {}
+ descriptor = Mock(weight="1")
+ instance_state = None
+
+ module = CapaModule(test_system, location,
+ definition, descriptor,
+ instance_state, None, metadata=metadata)
+
+ return module
+
+
+
+class CapaModuleTest(unittest.TestCase):
+
+ def test_import(self):
+ module = CapaFactory.create()
+ self.assertEqual(module.get_score()['score'], 0)
+
+ other_module = CapaFactory.create()
+ self.assertEqual(module.get_score()['score'], 0)
+ self.assertNotEqual(module.url_name, other_module.url_name,
+ "Factory should be creating unique names for each problem")
+
From 025b074b87b5fc60c712292d541449d0d470152b Mon Sep 17 00:00:00 2001
From: Victor Shnayder
Date: Sun, 20 Jan 2013 12:17:22 -0500
Subject: [PATCH 027/126] Add simple test for showanswer, fix test_system
---
common/lib/xmodule/xmodule/tests/__init__.py | 2 +-
.../xmodule/xmodule/tests/test_capa_module.py | 60 ++++++++++++++++++-
2 files changed, 59 insertions(+), 3 deletions(-)
diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py
index a07f1ddfaf..1f323834a9 100644
--- a/common/lib/xmodule/xmodule/tests/__init__.py
+++ b/common/lib/xmodule/xmodule/tests/__init__.py
@@ -26,7 +26,7 @@ test_system = ModuleSystem(
# "render" to just the context...
render_template=lambda template, context: str(context),
replace_urls=Mock(),
- user=Mock(),
+ user=Mock(is_staff=False),
filestore=Mock(),
debug=True,
xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10},
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index 148fd893ff..7537cb537c 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -1,7 +1,9 @@
import json
from mock import Mock
+from pprint import pprint
import unittest
+
from xmodule.capa_module import CapaModule
from xmodule.modulestore import Location
from lxml import etree
@@ -31,13 +33,59 @@ class CapaFactory(object):
return CapaFactory.num
@staticmethod
- def create():
+ def create(graceperiod=None,
+ due=None,
+ max_attempts=None,
+ showanswer=None,
+ rerandomize=None,
+ force_save_button=None,
+ attempts=None,
+ problem_state=None,
+ ):
+ """
+ All parameters are optional, and are added to the created problem if specified.
+
+ Arguments:
+ graceperiod:
+ due:
+ max_attempts:
+ showanswer:
+ force_save_button:
+ rerandomize: all strings, as specified in the policy for the problem
+
+ problem_state: a dict to to be serialized into the instance_state of the
+ module.
+
+ attempts: also added to instance state. Should be a number.
+ """
definition = {'data': CapaFactory.sample_problem_xml,}
location = Location(["i4x", "edX", "capa_test", "problem",
"SampleProblem{0}".format(CapaFactory.next_num())])
metadata = {}
+ if graceperiod is not None:
+ metadata['graceperiod'] = graceperiod
+ if due is not None:
+ metadata['due'] = due
+ if max_attempts is not None:
+ metadata['attempts'] = max_attempts
+ if showanswer is not None:
+ metadata['showanswer'] = showanswer
+ if force_save_button is not None:
+ metadata['force_save_button'] = force_save_button
+ if rerandomize is not None:
+ metadata['rerandomize'] = rerandomize
+
+
descriptor = Mock(weight="1")
- instance_state = None
+ instance_state_dict = {}
+ if problem_state is not None:
+ instance_state_dict = problem_state
+ if attempts is not None:
+ instance_state_dict['attempts'] = attempts
+ if len(instance_state_dict) > 0:
+ instance_state = json.dumps(instance_state_dict)
+ else:
+ instance_state = None
module = CapaModule(test_system, location,
definition, descriptor,
@@ -58,3 +106,11 @@ class CapaModuleTest(unittest.TestCase):
self.assertNotEqual(module.url_name, other_module.url_name,
"Factory should be creating unique names for each problem")
+ def test_showanswer(self):
+ """
+ Make sure the show answer logic does the right thing.
+ """
+ # default, no due date, showanswer 'closed'
+ problem = CapaFactory.create()
+ pprint(problem.__dict__)
+ self.assertFalse(problem.answer_available())
From ea091a6eb83b09fbc5bafbe4f0f5011b69c8db7b Mon Sep 17 00:00:00 2001
From: Victor Shnayder
Date: Sun, 20 Jan 2013 12:49:05 -0500
Subject: [PATCH 028/126] Add tests for showanswer
---
.../xmodule/xmodule/tests/test_capa_module.py | 68 +++++++++++++++++--
1 file changed, 62 insertions(+), 6 deletions(-)
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index 7537cb537c..506c7faf9f 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -1,9 +1,9 @@
+import datetime
import json
from mock import Mock
from pprint import pprint
import unittest
-
from xmodule.capa_module import CapaModule
from xmodule.modulestore import Location
from lxml import etree
@@ -56,7 +56,7 @@ class CapaFactory(object):
problem_state: a dict to to be serialized into the instance_state of the
module.
- attempts: also added to instance state. Should be a number.
+ attempts: also added to instance state. Will be converted to an int.
"""
definition = {'data': CapaFactory.sample_problem_xml,}
location = Location(["i4x", "edX", "capa_test", "problem",
@@ -81,7 +81,9 @@ class CapaFactory(object):
if problem_state is not None:
instance_state_dict = problem_state
if attempts is not None:
- instance_state_dict['attempts'] = attempts
+ # converting to int here because I keep putting "0" and "1" in the tests
+ # since everything else is a string.
+ instance_state_dict['attempts'] = int(attempts)
if len(instance_state_dict) > 0:
instance_state = json.dumps(instance_state_dict)
else:
@@ -97,6 +99,17 @@ class CapaFactory(object):
class CapaModuleTest(unittest.TestCase):
+
+ def setUp(self):
+ now = datetime.datetime.now()
+ day_delta = datetime.timedelta(days=1)
+ self.yesterday_str = str(now - day_delta)
+ self.today_str = str(now)
+ self.tomorrow_str = str(now + day_delta)
+
+ # in the capa grace period format, not in time delta format
+ self.two_day_delta_str = "2 days"
+
def test_import(self):
module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
@@ -106,11 +119,54 @@ class CapaModuleTest(unittest.TestCase):
self.assertNotEqual(module.url_name, other_module.url_name,
"Factory should be creating unique names for each problem")
- def test_showanswer(self):
+ def test_showanswer_default(self):
"""
Make sure the show answer logic does the right thing.
"""
- # default, no due date, showanswer 'closed'
+ # default, no due date, showanswer 'closed', so problem is open, and show_answer
+ # not visible.
problem = CapaFactory.create()
- pprint(problem.__dict__)
self.assertFalse(problem.answer_available())
+
+
+ def test_showanswer_attempted(self):
+ problem = CapaFactory.create(showanswer='attempted')
+ self.assertFalse(problem.answer_available())
+ problem.attempts = 1
+ self.assertTrue(problem.answer_available())
+
+
+ def test_showanswer_closed(self):
+
+ # can see after attempts used up
+ used_all_attempts = CapaFactory.create(showanswer='closed',
+ max_attempts="1",
+ attempts="1")
+ self.assertTrue(used_all_attempts.answer_available())
+
+
+ # can see after due date
+ after_due_date = CapaFactory.create(showanswer='closed',
+ max_attempts="1",
+ attempts="0",
+ due=self.yesterday_str)
+ self.assertTrue(after_due_date.answer_available())
+
+ # can't see because attempts left
+ attempts_left_open = CapaFactory.create(showanswer='closed',
+ max_attempts="1",
+ attempts="0",
+ due=self.tomorrow_str)
+ self.assertFalse(attempts_left_open.answer_available())
+
+ # Can't see because grace period hasn't expired
+ still_in_grace = CapaFactory.create(showanswer='closed',
+ max_attempts="1",
+ attempts="0",
+ due=self.yesterday_str,
+ graceperiod=self.two_day_delta_str)
+ self.assertFalse(still_in_grace.answer_available())
+
+
+
+
From 6088a926cc0697094c1bd6ae095581895fcc4563 Mon Sep 17 00:00:00 2001
From: Victor Shnayder
Date: Sun, 20 Jan 2013 17:35:03 -0500
Subject: [PATCH 029/126] Add showanswer="past_due" and tests
---
common/lib/xmodule/xmodule/capa_module.py | 35 ++++++++------
.../xmodule/xmodule/tests/test_capa_module.py | 47 ++++++++++++++++++-
2 files changed, 65 insertions(+), 17 deletions(-)
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index f33da6e3a4..6d258e61ed 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -389,38 +389,43 @@ class CapaModule(XModule):
})
return json.dumps(d, cls=ComplexEncoder)
+ def is_past_due(self):
+ """
+ Is it now past this problem's due date, including grace period?
+ """
+ return (self.close_date is not None and
+ datetime.datetime.utcnow() > self.close_date)
+
def closed(self):
''' Is the student still allowed to submit answers? '''
if self.attempts == self.max_attempts:
return True
- if self.close_date is not None and datetime.datetime.utcnow() > self.close_date:
+ if self.is_past_due():
return True
return False
def answer_available(self):
- ''' Is the user allowed to see an answer?
+ '''
+ Is the user allowed to see an answer?
'''
if self.show_answer == '':
return False
-
- if self.show_answer == "never":
+ elif self.show_answer == "never":
return False
-
- # Admins can see the answer, unless the problem explicitly prevents it
- if self.system.user_is_staff:
+ elif self.system.user_is_staff:
+ # This is after the 'never' check because admins can see the answer
+ # unless the problem explicitly prevents it
return True
-
- if self.show_answer == 'attempted':
+ elif self.show_answer == 'attempted':
return self.attempts > 0
-
- if self.show_answer == 'answered':
+ elif self.show_answer == 'answered':
return self.lcp.done
-
- if self.show_answer == 'closed':
+ elif self.show_answer == 'closed':
return self.closed()
-
- if self.show_answer == 'always':
+ elif self.show_answer == 'past_due':
+ return self.is_past_due()
+ elif self.show_answer == 'always':
return True
return False
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index 506c7faf9f..e8f639e3c9 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -138,10 +138,11 @@ class CapaModuleTest(unittest.TestCase):
def test_showanswer_closed(self):
- # can see after attempts used up
+ # can see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='closed',
max_attempts="1",
- attempts="1")
+ attempts="1",
+ due=self.tomorrow_str)
self.assertTrue(used_all_attempts.answer_available())
@@ -152,6 +153,7 @@ class CapaModuleTest(unittest.TestCase):
due=self.yesterday_str)
self.assertTrue(after_due_date.answer_available())
+
# can't see because attempts left
attempts_left_open = CapaFactory.create(showanswer='closed',
max_attempts="1",
@@ -169,4 +171,45 @@ class CapaModuleTest(unittest.TestCase):
+ def test_showanswer_past_due(self):
+ """
+ With showanswer="past_due" should only show answer after the problem is closed
+ for everyone--e.g. after due date + grace period.
+ """
+
+ # can see after attempts used up, even with due date in the future
+ used_all_attempts = CapaFactory.create(showanswer='past_due',
+ max_attempts="1",
+ attempts="1",
+ due=self.tomorrow_str)
+ self.assertFalse(used_all_attempts.answer_available())
+
+
+ # can see after due date
+ past_due_date = CapaFactory.create(showanswer='past_due',
+ max_attempts="1",
+ attempts="0",
+ due=self.yesterday_str)
+ self.assertTrue(past_due_date.answer_available())
+
+
+ # can't see because attempts left
+ attempts_left_open = CapaFactory.create(showanswer='past_due',
+ max_attempts="1",
+ attempts="0",
+ due=self.tomorrow_str)
+ self.assertFalse(attempts_left_open.answer_available())
+
+ # Can't see because grace period hasn't expired, even though have no more
+ # attempts.
+ still_in_grace = CapaFactory.create(showanswer='past_due',
+ max_attempts="1",
+ attempts="1",
+ due=self.yesterday_str,
+ graceperiod=self.two_day_delta_str)
+ self.assertFalse(still_in_grace.answer_available())
+
+
+
+
From f3f509da3b7a63b9d5a14939c02f9a9780104337 Mon Sep 17 00:00:00 2001
From: Vik Paruchuri
Date: Thu, 31 Jan 2013 12:45:48 -0500
Subject: [PATCH 030/126] Fix input area styling
---
.../xmodule/xmodule/css/combinedopenended/display.scss | 5 +++--
.../xmodule/js/src/combinedopenended/display.coffee | 2 +-
lms/static/coffee/src/open_ended/open_ended.coffee | 9 +++++----
3 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss
index 41896e6173..38fd6ba01c 100644
--- a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss
+++ b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss
@@ -442,12 +442,13 @@ section.open-ended-child {
margin: 10px;
}
- span.short-form-response {
- padding: 9px;
+ div.short-form-response {
background: #F6F6F6;
border: 1px solid #ddd;
border-top: 0;
margin-bottom: 20px;
+ overflow-y: auto;
+ height: 200px;
@include clearfix;
}
diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
index 2aabd35771..89954deb23 100644
--- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
@@ -351,5 +351,5 @@ class @CombinedOpenEnded
answer_id = @answer_area.attr('id')
answer_val = @answer_area.val()
new_text = ''
- new_text = "#{answer_val}"
+ new_text = "
')
+ blah = "blah"
gentle_alert: (msg) =>
if $('.message-container').length
From 52f3e9daafa96ee5a589e79e09aaa5611d58c229 Mon Sep 17 00:00:00 2001
From: Vik Paruchuri
Date: Thu, 31 Jan 2013 18:05:57 -0500
Subject: [PATCH 031/126] Start moving peer grading to xmodule
---
.../js/src/peergrading/peer_grading.coffee | 27 +
.../peergrading/peer_grading_problem.coffee | 478 ++++++++++++++++++
.../xmodule/xmodule/peer_grading_module.py | 439 ++++++++++++++++
.../xmodule/xmodule/peer_grading_service.py | 256 ++++++++++
4 files changed, 1200 insertions(+)
create mode 100644 common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
create mode 100644 common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
create mode 100644 common/lib/xmodule/xmodule/peer_grading_module.py
create mode 100644 common/lib/xmodule/xmodule/peer_grading_service.py
diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
new file mode 100644
index 0000000000..ed79ba9c71
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
@@ -0,0 +1,27 @@
+# This is a simple class that just hides the error container
+# and message container when they are empty
+# Can (and should be) expanded upon when our problem list
+# becomes more sophisticated
+class PeerGrading
+ constructor: () ->
+ @error_container = $('.error-container')
+ @error_container.toggle(not @error_container.is(':empty'))
+
+ @message_container = $('.message-container')
+ @message_container.toggle(not @message_container.is(':empty'))
+
+ @problem_list = $('.problem-list')
+ @construct_progress_bar()
+
+ construct_progress_bar: () =>
+ problems = @problem_list.find('tr').next()
+ problems.each( (index, element) =>
+ problem = $(element)
+ progress_bar = problem.find('.progress-bar')
+ bar_value = parseInt(problem.data('graded'))
+ bar_max = parseInt(problem.data('required')) + bar_value
+ progress_bar.progressbar({value: bar_value, max: bar_max})
+ )
+
+
+$(document).ready(() -> new PeerGrading())
diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
new file mode 100644
index 0000000000..ab16b34d12
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
@@ -0,0 +1,478 @@
+##################################
+#
+# This is the JS that renders the peer grading problem page.
+# Fetches the correct problem and/or calibration essay
+# and sends back the grades
+#
+# Should not be run when we don't have a location to send back
+# to the server
+#
+# PeerGradingProblemBackend -
+# makes all the ajax requests and provides a mock interface
+# for testing purposes
+#
+# PeerGradingProblem -
+# handles the rendering and user interactions with the interface
+#
+##################################
+class PeerGradingProblemBackend
+ constructor: (ajax_url, mock_backend) ->
+ @mock_backend = mock_backend
+ @ajax_url = ajax_url
+ @mock_cnt = 0
+
+ post: (cmd, data, callback) ->
+ if @mock_backend
+ callback(@mock(cmd, data))
+ else
+ # if this post request fails, the error callback will catch it
+ $.post(@ajax_url + cmd, data, callback)
+ .error => callback({success: false, error: "Error occured while performing this operation"})
+
+ mock: (cmd, data) ->
+ if cmd == 'is_student_calibrated'
+ # change to test each version
+ response =
+ success: true
+ calibrated: @mock_cnt >= 2
+ else if cmd == 'show_calibration_essay'
+ #response =
+ # success: false
+ # error: "There was an error"
+ @mock_cnt++
+ response =
+ success: true
+ submission_id: 1
+ submission_key: 'abcd'
+ student_response: '''
+ Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
+
+The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
+ '''
+ prompt: '''
+
S11E3: Metal Bands
+
Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
+
* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
+
This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
+ '''
+ rubric: '''
+
Purpose
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Organization
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ '''
+ max_score: 4
+ else if cmd == 'get_next_submission'
+ response =
+ success: true
+ submission_id: 1
+ submission_key: 'abcd'
+ student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa.
+
+Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum.
+
+Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. '''
+ prompt: '''
+
S11E3: Metal Bands
+
Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
+
* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
+
This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
+ '''
+ rubric: '''
+
Purpose
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Organization
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ '''
+ max_score: 4
+ else if cmd == 'save_calibration_essay'
+ response =
+ success: true
+ actual_score: 2
+ else if cmd == 'save_grade'
+ response =
+ success: true
+
+ return response
+
+
+class PeerGradingProblem
+ constructor: (backend) ->
+ @prompt_wrapper = $('.prompt-wrapper')
+ @backend = backend
+
+
+ # get the location of the problem
+ @location = $('.peer-grading').data('location')
+ # prevent this code from trying to run
+ # when we don't have a location
+ if(!@location)
+ return
+
+ # get the other elements we want to fill in
+ @submission_container = $('.submission-container')
+ @prompt_container = $('.prompt-container')
+ @rubric_container = $('.rubric-container')
+ @flag_student_container = $('.flag-student-container')
+ @calibration_panel = $('.calibration-panel')
+ @grading_panel = $('.grading-panel')
+ @content_panel = $('.content-panel')
+ @grading_message = $('.grading-message')
+ @grading_message.hide()
+
+ @grading_wrapper =$('.grading-wrapper')
+ @calibration_feedback_panel = $('.calibration-feedback')
+ @interstitial_page = $('.interstitial-page')
+ @interstitial_page.hide()
+
+ @error_container = $('.error-container')
+
+ @submission_key_input = $("input[name='submission-key']")
+ @essay_id_input = $("input[name='essay-id']")
+ @feedback_area = $('.feedback-area')
+
+ @score_selection_container = $('.score-selection-container')
+ @rubric_selection_container = $('.rubric-selection-container')
+ @grade = null
+ @calibration = null
+
+ @submit_button = $('.submit-button')
+ @action_button = $('.action-button')
+ @calibration_feedback_button = $('.calibration-feedback-button')
+ @interstitial_page_button = $('.interstitial-page-button')
+ @flag_student_checkbox = $('.flag-checkbox')
+
+ Collapsible.setCollapsibles(@content_panel)
+
+ # Set up the click event handlers
+ @action_button.click -> history.back()
+ @calibration_feedback_button.click =>
+ @calibration_feedback_panel.hide()
+ @grading_wrapper.show()
+ @is_calibrated_check()
+
+ @interstitial_page_button.click =>
+ @interstitial_page.hide()
+ @is_calibrated_check()
+
+ @is_calibrated_check()
+
+
+ ##########
+ #
+ # Ajax calls to the backend
+ #
+ ##########
+ is_calibrated_check: () =>
+ @backend.post('is_student_calibrated', {location: @location}, @calibration_check_callback)
+
+ fetch_calibration_essay: () =>
+ @backend.post('show_calibration_essay', {location: @location}, @render_calibration)
+
+ fetch_submission_essay: () =>
+ @backend.post('get_next_submission', {location: @location}, @render_submission)
+
+ # finds the scores for each rubric category
+ get_score_list: () =>
+ # find the number of categories:
+ num_categories = $('table.rubric tr').length
+
+ score_lst = []
+ # get the score for each one
+ for i in [0..(num_categories-1)]
+ score = $("input[name='score-selection-#{i}']:checked").val()
+ score_lst.push(score)
+
+ return score_lst
+
+ construct_data: () ->
+ data =
+ rubric_scores: @get_score_list()
+ score: @grade
+ location: @location
+ submission_id: @essay_id_input.val()
+ submission_key: @submission_key_input.val()
+ feedback: @feedback_area.val()
+ submission_flagged: @flag_student_checkbox.is(':checked')
+ return data
+
+
+ submit_calibration_essay: ()=>
+ data = @construct_data()
+ @backend.post('save_calibration_essay', data, @calibration_callback)
+
+ submit_grade: () =>
+ data = @construct_data()
+ @backend.post('save_grade', data, @submission_callback)
+
+
+ ##########
+ #
+ # Callbacks for various events
+ #
+ ##########
+
+ # called after we perform an is_student_calibrated check
+ calibration_check_callback: (response) =>
+ if response.success
+ # if we haven't been calibrating before
+ if response.calibrated and (@calibration == null or @calibration == false)
+ @calibration = false
+ @fetch_submission_essay()
+ # If we were calibrating before and no longer need to,
+ # show the interstitial page
+ else if response.calibrated and @calibration == true
+ @calibration = false
+ @render_interstitial_page()
+ else
+ @calibration = true
+ @fetch_calibration_essay()
+ else if response.error
+ @render_error(response.error)
+ else
+ @render_error("Error contacting the grading service")
+
+
+ # called after we submit a calibration score
+ calibration_callback: (response) =>
+ if response.success
+ @render_calibration_feedback(response)
+ else if response.error
+ @render_error(response.error)
+ else
+ @render_error("Error saving calibration score")
+
+ # called after we submit a submission score
+ submission_callback: (response) =>
+ if response.success
+ @is_calibrated_check()
+ @grading_message.fadeIn()
+ @grading_message.html("
Grade sent successfully.
")
+ else
+ if response.error
+ @render_error(response.error)
+ else
+ @render_error("Error occurred while submitting grade")
+
+ # called after a grade is selected on the interface
+ graded_callback: (event) =>
+ @grade = $("input[name='grade-selection']:checked").val()
+ if @grade == undefined
+ return
+ # check to see whether or not any categories have not been scored
+ num_categories = $('table.rubric tr').length
+ for i in [0..(num_categories-1)]
+ score = $("input[name='score-selection-#{i}']:checked").val()
+ if score == undefined
+ return
+ # show button if we have scores for all categories
+ @show_submit_button()
+
+
+
+ ##########
+ #
+ # Rendering methods and helpers
+ #
+ ##########
+ # renders a calibration essay
+ render_calibration: (response) =>
+ if response.success
+
+ # load in all the data
+ @submission_container.html("
Training Essay
")
+ @render_submission_data(response)
+ # TODO: indicate that we're in calibration mode
+ @calibration_panel.addClass('current-state')
+ @grading_panel.removeClass('current-state')
+
+ # Display the right text
+ # both versions of the text are written into the template itself
+ # we only need to show/hide the correct ones at the correct time
+ @calibration_panel.find('.calibration-text').show()
+ @grading_panel.find('.calibration-text').show()
+ @calibration_panel.find('.grading-text').hide()
+ @grading_panel.find('.grading-text').hide()
+ @flag_student_container.hide()
+
+ @submit_button.unbind('click')
+ @submit_button.click @submit_calibration_essay
+
+ else if response.error
+ @render_error(response.error)
+ else
+ @render_error("An error occurred while retrieving the next calibration essay")
+
+ # Renders a student submission to be graded
+ render_submission: (response) =>
+ if response.success
+ @submit_button.hide()
+ @submission_container.html("
Submitted Essay
")
+ @render_submission_data(response)
+
+ @calibration_panel.removeClass('current-state')
+ @grading_panel.addClass('current-state')
+
+ # Display the correct text
+ # both versions of the text are written into the template itself
+ # we only need to show/hide the correct ones at the correct time
+ @calibration_panel.find('.calibration-text').hide()
+ @grading_panel.find('.calibration-text').hide()
+ @calibration_panel.find('.grading-text').show()
+ @grading_panel.find('.grading-text').show()
+ @flag_student_container.show()
+
+ @submit_button.unbind('click')
+ @submit_button.click @submit_grade
+ else if response.error
+ @render_error(response.error)
+ else
+ @render_error("An error occured when retrieving the next submission.")
+
+
+ make_paragraphs: (text) ->
+ paragraph_split = text.split(/\n\s*\n/)
+ new_text = ''
+ for paragraph in paragraph_split
+ new_text += "
From c1583dbba2861434fb37635d031f7b2b7a61c50b Mon Sep 17 00:00:00 2001
From: Vik Paruchuri
Date: Thu, 31 Jan 2013 19:57:35 -0500
Subject: [PATCH 039/126] Properly load javascript, fix templates to work with
xmodule, modify AJAX handlers
---
.../js/src/peergrading/peer_grading.coffee | 14 +-
.../peergrading/peer_grading_problem.coffee | 229 +++++++++---------
.../xmodule/xmodule/peer_grading_module.py | 73 +++---
.../xmodule/xmodule/peer_grading_service.py | 2 +
lms/templates/peer_grading/peer_grading.html | 2 +-
.../peer_grading/peer_grading_problem.html | 2 +-
6 files changed, 159 insertions(+), 163 deletions(-)
diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
index 113f5e02a6..b8196838f3 100644
--- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
@@ -2,11 +2,11 @@
# and message container when they are empty
# Can (and should be) expanded upon when our problem list
# becomes more sophisticated
-class PeerGrading
- constructor: () ->
+class @PeerGrading
+ constructor: (element) ->
@peer_grading_container = $('.peer-grading')
@peer_grading_outer_container = $('.peer-grading-container')
- @ajax_url = peer_grading_container.data('ajax-url')
+ @ajax_url = @peer_grading_container.data('ajax-url')
@error_container = $('.error-container')
@error_container.toggle(not @error_container.is(':empty'))
@@ -14,7 +14,7 @@ class PeerGrading
@message_container.toggle(not @message_container.is(':empty'))
@problem_button = $('.problem-button')
- @problem_button.click show_results
+ @problem_button.click @show_results
@problem_list = $('.problem-list')
@construct_progress_bar()
@@ -35,7 +35,7 @@ class PeerGrading
$.postWithPrefix "#{@ajax_url}problem", data, (response) =>
if response.success
@peer_grading_outer_container.after(response.html).remove()
+ backend = new PeerGradingProblemBackend(@ajax_url, false)
+ new PeerGradingProblem(backend)
else
- @gentle_alert response.error
-
-$(document).ready(() -> new PeerGrading())
+ @gentle_alert response.error
\ No newline at end of file
diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
index ab16b34d12..ee98905cda 100644
--- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
@@ -7,7 +7,7 @@
# Should not be run when we don't have a location to send back
# to the server
#
-# PeerGradingProblemBackend -
+# PeerGradingProblemBackend -
# makes all the ajax requests and provides a mock interface
# for testing purposes
#
@@ -15,7 +15,7 @@
# handles the rendering and user interactions with the interface
#
##################################
-class PeerGradingProblemBackend
+class @PeerGradingProblemBackend
constructor: (ajax_url, mock_backend) ->
@mock_backend = mock_backend
@ajax_url = ajax_url
@@ -32,141 +32,140 @@ class PeerGradingProblemBackend
mock: (cmd, data) ->
if cmd == 'is_student_calibrated'
# change to test each version
- response =
- success: true
+ response =
+ success: true
calibrated: @mock_cnt >= 2
else if cmd == 'show_calibration_essay'
- #response =
+ #response =
# success: false
# error: "There was an error"
@mock_cnt++
- response =
+ response =
success: true
submission_id: 1
submission_key: 'abcd'
student_response: '''
- Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
+ Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
-The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
- '''
+ The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
+ '''
prompt: '''
-
S11E3: Metal Bands
-
Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
-
* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
-
This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
- '''
+
S11E3: Metal Bands
+
Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
+
* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
+
This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
+ '''
rubric: '''
-
Purpose
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Organization
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- '''
+
Purpose
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Organization
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ '''
max_score: 4
else if cmd == 'get_next_submission'
- response =
+ response =
success: true
submission_id: 1
submission_key: 'abcd'
student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa.
-Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum.
+ Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum.
-Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. '''
+ Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. '''
prompt: '''
-
S11E3: Metal Bands
-
Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
-
* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
-
This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
- '''
+
S11E3: Metal Bands
+
Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
+
* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
+
This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
+ '''
rubric: '''
-
Purpose
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Organization
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- '''
+
Purpose
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Organization
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ '''
max_score: 4
else if cmd == 'save_calibration_essay'
- response =
+ response =
success: true
actual_score: 2
else if cmd == 'save_grade'
- response =
+ response =
success: true
return response
-
-class PeerGradingProblem
+class @PeerGradingProblem
constructor: (backend) ->
@prompt_wrapper = $('.prompt-wrapper')
@backend = backend
-
+
# get the location of the problem
@location = $('.peer-grading').data('location')
- # prevent this code from trying to run
+ # prevent this code from trying to run
# when we don't have a location
if(!@location)
return
@@ -208,7 +207,7 @@ class PeerGradingProblem
# Set up the click event handlers
@action_button.click -> history.back()
- @calibration_feedback_button.click =>
+ @calibration_feedback_button.click =>
@calibration_feedback_panel.hide()
@grading_wrapper.show()
@is_calibrated_check()
@@ -266,7 +265,7 @@ class PeerGradingProblem
submit_grade: () =>
data = @construct_data()
@backend.post('save_grade', data, @submission_callback)
-
+
##########
#
@@ -301,7 +300,7 @@ class PeerGradingProblem
@render_calibration_feedback(response)
else if response.error
@render_error(response.error)
- else
+ else
@render_error("Error saving calibration score")
# called after we submit a submission score
@@ -330,8 +329,8 @@ class PeerGradingProblem
# show button if we have scores for all categories
@show_submit_button()
-
-
+
+
##########
#
# Rendering methods and helpers
@@ -344,7 +343,7 @@ class PeerGradingProblem
# load in all the data
@submission_container.html("
Training Essay
")
@render_submission_data(response)
- # TODO: indicate that we're in calibration mode
+ # TODO: indicate that we're in calibration mode
@calibration_panel.addClass('current-state')
@grading_panel.removeClass('current-state')
@@ -428,12 +427,12 @@ class PeerGradingProblem
if score == actual_score
calibration_wrapper.append("
Congratulations! Your score matches the actual score!
")
else
- calibration_wrapper.append("
Please try to understand the grading critera better to be more accurate next time.
")
+ calibration_wrapper.append("
Please try to understand the grading critera better to be more accurate next time.
")
# disable score selection and submission from the grading interface
$("input[name='score-selection']").attr('disabled', true)
@submit_button.hide()
-
+
render_interstitial_page: () =>
@content_panel.hide()
@interstitial_page.show()
@@ -449,7 +448,7 @@ class PeerGradingProblem
@submit_button.show()
setup_score_selection: (max_score) =>
-
+
# first, get rid of all the old inputs, if any.
@score_selection_container.html("""
Overall Score
@@ -460,7 +459,7 @@ class PeerGradingProblem
for score in [0..max_score]
id = 'score-' + score
label = """"""
-
+
input = """
""" # " fix broken parsing in emacs
@@ -470,9 +469,7 @@ class PeerGradingProblem
$("input[name='score-selection']").change @graded_callback
$("input[name='grade-selection']").change @graded_callback
-
-
-mock_backend = false
-ajax_url = $('.peer-grading').data('ajax_url')
-backend = new PeerGradingProblemBackend(ajax_url, mock_backend)
-$(document).ready(() -> new PeerGradingProblem(backend))
+#mock_backend = false
+#ajax_url = $('.peer-grading').data('ajax_url')
+#backend = new PeerGradingProblemBackend(ajax_url, mock_backend)
+#$(document).ready(() -> new PeerGradingProblem(backend))
diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py
index c5a08e0812..be09751e29 100644
--- a/common/lib/xmodule/xmodule/peer_grading_module.py
+++ b/common/lib/xmodule/xmodule/peer_grading_module.py
@@ -68,7 +68,6 @@ class PeerGradingModule(XModule):
system.set('location', location)
self.system = system
self.peer_gs = peer_grading_service()
- log.debug(self.system)
self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION)
if isinstance(self.use_for_single_location, basestring):
@@ -108,7 +107,7 @@ class PeerGradingModule(XModule):
Needs to be implemented by child modules. Handles AJAX events.
@return:
"""
-
+ log.debug(get)
handlers = {
'get_next_submission': self.get_next_submission,
'show_calibration_essay': self.show_calibration_essay,
@@ -123,6 +122,8 @@ class PeerGradingModule(XModule):
d = handlers[dispatch](get)
+ log.debug(d)
+
return json.dumps(d, cls=ComplexEncoder)
def get_progress(self):
@@ -149,14 +150,12 @@ class PeerGradingModule(XModule):
'error': if success is False, will have an error message with more info.
"""
- _check_post(request)
required = set(['location'])
- success, message = _check_required(request, required)
+ success, message = self._check_required(get, required)
if not success:
return _err_response(message)
- grader_id = unique_id_for_user(request.user)
- p = request.POST
- location = p['location']
+ grader_id = self.system.anonymous_student_id
+ location = get['location']
try:
response = self.peer_gs.get_next_submission(location, grader_id)
@@ -183,20 +182,20 @@ class PeerGradingModule(XModule):
success: bool indicating whether the save was a success
error: if there was an error in the submission, this is the error message
"""
- _check_post(request)
+
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged'])
- success, message = _check_required(request, required)
+ success, message = self._check_required(get, required)
if not success:
return _err_response(message)
- grader_id = unique_id_for_user(request.user)
- p = request.POST
- location = p['location']
- submission_id = p['submission_id']
- score = p['score']
- feedback = p['feedback']
- submission_key = p['submission_key']
- rubric_scores = p.getlist('rubric_scores[]')
- submission_flagged = p['submission_flagged']
+ grader_id = self.system.anonymous_student_id
+
+ location = get['location']
+ submission_id = get['submission_id']
+ score = get['score']
+ feedback = get['feedback']
+ submission_key = get['submission_key']
+ rubric_scores = get['rubric_scores']
+ submission_flagged = get['submission_flagged']
try:
response = self.peer_gs.save_grade(location, grader_id, submission_id,
score, feedback, submission_key, rubric_scores, submission_flagged)
@@ -227,14 +226,14 @@ class PeerGradingModule(XModule):
total_calibrated_on_so_far - the number of calibration essays for this problem
that this grader has graded
"""
- _check_post(request)
+
required = set(['location'])
- success, message = _check_required(request, required)
+ success, message = self._check_required(get, required)
if not success:
return _err_response(message)
- grader_id = unique_id_for_user(request.user)
- p = request.POST
- location = p['location']
+ grader_id = self.system.anonymous_student_id
+
+ location = get['location']
try:
response = self.peer_gs.is_student_calibrated(location, grader_id)
@@ -268,16 +267,15 @@ class PeerGradingModule(XModule):
'error': if success is False, will have an error message with more info.
"""
- _check_post(request)
required = set(['location'])
- success, message = _check_required(request, required)
+ success, message = self._check_required(get, required)
if not success:
return _err_response(message)
- grader_id = unique_id_for_user(request.user)
- p = request.POST
- location = p['location']
+ grader_id = self.system.anonymous_student_id
+
+ location = get['location']
try:
response = self.peer_gs.show_calibration_essay(location, grader_id)
return HttpResponse(response, mimetype="application/json")
@@ -311,20 +309,19 @@ class PeerGradingModule(XModule):
actual_score: the score that the instructor gave to this calibration essay
"""
- _check_post(request)
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
- success, message = _check_required(request, required)
+ success, message = self._check_required(get, required)
if not success:
return _err_response(message)
- grader_id = unique_id_for_user(request.user)
- p = request.POST
- location = p['location']
- calibration_essay_id = p['submission_id']
- submission_key = p['submission_key']
- score = p['score']
- feedback = p['feedback']
- rubric_scores = p.getlist('rubric_scores[]')
+ grader_id = self.system.anonymous_student_id
+
+ location = get['location']
+ calibration_essay_id = get['submission_id']
+ submission_key = get['submission_key']
+ score = get['score']
+ feedback = get['feedback']
+ rubric_scores = get['rubric_scores']
try:
response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id,
diff --git a/common/lib/xmodule/xmodule/peer_grading_service.py b/common/lib/xmodule/xmodule/peer_grading_service.py
index 172a981a96..a8e74dd3cc 100644
--- a/common/lib/xmodule/xmodule/peer_grading_service.py
+++ b/common/lib/xmodule/xmodule/peer_grading_service.py
@@ -48,6 +48,7 @@ class PeerGradingService():
'rubric_scores': rubric_scores,
'rubric_scores_complete': True,
'submission_flagged' : submission_flagged}
+ log.debug(data)
return self.post(self.save_grade_url, data)
def is_student_calibrated(self, problem_location, grader_id):
@@ -69,6 +70,7 @@ class PeerGradingService():
'feedback': feedback,
'rubric_scores[]': rubric_scores,
'rubric_scores_complete': True}
+ log.debug(data)
return self.post(self.save_calibration_essay_url, data)
def get_problem_list(self, course_id, grader_id):
diff --git a/lms/templates/peer_grading/peer_grading.html b/lms/templates/peer_grading/peer_grading.html
index 99ef288e5f..1dd74d74e4 100644
--- a/lms/templates/peer_grading/peer_grading.html
+++ b/lms/templates/peer_grading/peer_grading.html
@@ -1,5 +1,5 @@
-