Merge pull request #10845 from edx/kill-ora1

Remove ORA1: Main Pull Request
Sarina Canelake
2015-12-11 17:31:41 -05:00
145 changed files with 130 additions and 19037 deletions

View File

@@ -698,7 +698,7 @@ class MiscCourseTests(ContentStoreTestCase):
self.check_components_on_page(
ADVANCED_COMPONENT_TYPES,
['Word cloud', 'Annotation', 'Text Annotation', 'Video Annotation', 'Image Annotation',
-'Open Response Assessment', 'Peer Grading Interface', 'split_test'],
+'split_test'],
)
@ddt.data('/Fake/asset/displayname', '\\Fake\\asset\\displayname')

View File

@@ -771,7 +771,7 @@ class CourseMetadataEditingTest(CourseTestCase):
{
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
"advanced_modules": {"value": ['combinedopenended']},
"advanced_modules": {"value": ['notes']},
},
user=self.user
)
@@ -781,7 +781,7 @@ class CourseMetadataEditingTest(CourseTestCase):
# Tab gets tested in test_advanced_settings_munge_tabs
self.assertIn('advanced_modules', test_model, 'Missing advanced_modules')
-self.assertEqual(test_model['advanced_modules']['value'], ['combinedopenended'], 'advanced_module is not updated')
+self.assertEqual(test_model['advanced_modules']['value'], ['notes'], 'advanced_module is not updated')
def test_validate_from_json_wrong_inputs(self):
# input incorrectly formatted data
@@ -905,48 +905,21 @@ class CourseMetadataEditingTest(CourseTestCase):
"""
Test that adding and removing specific advanced components adds and removes tabs.
"""
open_ended_tab = {"type": "open_ended", "name": "Open Ended Panel"}
peer_grading_tab = {"type": "peer_grading", "name": "Peer grading"}
# First ensure that none of the tabs are visible
self.assertNotIn(open_ended_tab, self.course.tabs)
self.assertNotIn(peer_grading_tab, self.course.tabs)
self.assertNotIn(self.notes_tab, self.course.tabs)
# Now add the "combinedopenended" component and verify that the tab has been added
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended"]}
})
course = modulestore().get_course(self.course.id)
self.assertIn(open_ended_tab, course.tabs)
self.assertIn(peer_grading_tab, course.tabs)
self.assertNotIn(self.notes_tab, course.tabs)
# Now enable student notes and verify that the "My Notes" tab has also been added
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended", "notes"]}
})
course = modulestore().get_course(self.course.id)
self.assertIn(open_ended_tab, course.tabs)
self.assertIn(peer_grading_tab, course.tabs)
self.assertIn(self.notes_tab, course.tabs)
# Now remove the "combinedopenended" component and verify that the tab is gone
# Now enable student notes and verify that the "My Notes" tab has been added
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["notes"]}
})
course = modulestore().get_course(self.course.id)
self.assertNotIn(open_ended_tab, course.tabs)
self.assertNotIn(peer_grading_tab, course.tabs)
self.assertIn(self.notes_tab, course.tabs)
# Finally disable student notes and verify that the "My Notes" tab is gone
# Disable student notes and verify that the "My Notes" tab is gone
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": [""]}
})
course = modulestore().get_course(self.course.id)
self.assertNotIn(open_ended_tab, course.tabs)
self.assertNotIn(peer_grading_tab, course.tabs)
self.assertNotIn(self.notes_tab, course.tabs)
def test_advanced_components_munge_tabs_validation_failure(self):

View File

@@ -219,26 +219,6 @@ class ContentStoreImportTest(SignalDisconnectTestMixin, ModuleStoreTestCase):
conditional_module.show_tag_list
)
-def test_rewrite_reference(self):
-module_store = modulestore()
-target_id = module_store.make_course_key('testX', 'peergrading_copy', 'copy_run')
-import_course_from_xml(
-module_store,
-self.user.id,
-TEST_DATA_DIR,
-['open_ended'],
-target_id=target_id,
-create_if_not_present=True
-)
-peergrading_module = module_store.get_item(
-target_id.make_usage_key('peergrading', 'PeerGradingLinked')
-)
-self.assertIsNotNone(peergrading_module)
-self.assertEqual(
-target_id.make_usage_key('combinedopenended', 'SampleQuestion'),
-peergrading_module.link_to_location
-)
def test_rewrite_reference_value_dict_published(self):
"""
Test rewriting references in ReferenceValueDict, specifically with published content.

View File

@@ -30,11 +30,11 @@ from student.auth import has_course_author_access
from django.utils.translation import ugettext as _
from models.settings.course_grading import CourseGradingModel
-__all__ = ['OPEN_ENDED_COMPONENT_TYPES',
-'ADVANCED_COMPONENT_POLICY_KEY',
-'container_handler',
-'component_handler'
-]
+__all__ = [
+'ADVANCED_COMPONENT_POLICY_KEY',
+'container_handler',
+'component_handler'
+]
log = logging.getLogger(__name__)
@@ -43,7 +43,6 @@ COMPONENT_TYPES = ['discussion', 'html', 'problem', 'video']
# Constants for determining if these components should be enabled for this course
SPLIT_TEST_COMPONENT_TYPE = 'split_test'
OPEN_ENDED_COMPONENT_TYPES = ["combinedopenended", "peergrading"]
NOTE_COMPONENT_TYPES = ['notes']
if settings.FEATURES.get('ALLOW_ALL_ADVANCED_COMPONENTS'):

View File

@@ -10,6 +10,7 @@ import pytz
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.test.utils import override_settings
from django.utils.translation import ugettext as _
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
@@ -440,6 +441,7 @@ class TestCourseOutline(CourseTestCase):
info['block_types_enabled'],
any(component in advanced_modules for component in deprecated_block_types)
)
self.assertItemsEqual(info['blocks'], expected_blocks)
self.assertEqual(
info['advance_settings_url'],
@@ -455,27 +457,29 @@ class TestCourseOutline(CourseTestCase):
"""
Verify deprecated warning info for single deprecated feature.
"""
-block_types = settings.DEPRECATED_BLOCK_TYPES
-course_module = modulestore().get_item(self.course.location)
-self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish)
-info = _deprecated_blocks_info(course_module, block_types)
-self._verify_deprecated_info(
-course_module.id,
-course_module.advanced_modules,
-info,
-block_types
-)
+block_types = ['notes']
+with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
+course_module = modulestore().get_item(self.course.location)
+self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish)
+info = _deprecated_blocks_info(course_module, block_types)
+self._verify_deprecated_info(
+course_module.id,
+course_module.advanced_modules,
+info,
+block_types
+)
def test_verify_deprecated_warning_message_with_multiple_features(self):
"""
Verify deprecated warning info for multiple deprecated features.
"""
-block_types = ['peergrading', 'combinedopenended', 'openassessment']
-course_module = modulestore().get_item(self.course.location)
-self._create_test_data(course_module, create_blocks=True, block_types=block_types)
+block_types = ['notes', 'lti']
+with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
+course_module = modulestore().get_item(self.course.location)
+self._create_test_data(course_module, create_blocks=True, block_types=block_types)
-info = _deprecated_blocks_info(course_module, block_types)
-self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types)
+info = _deprecated_blocks_info(course_module, block_types)
+self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types)
@ddt.data(
{'delete_vertical': True},
@@ -492,7 +496,7 @@ class TestCourseOutline(CourseTestCase):
un-published block(s). This behavior should be same if we delete
unpublished vertical or problem.
"""
-block_types = ['peergrading']
+block_types = ['notes']
course_module = modulestore().get_item(self.course.location)
vertical1 = ItemFactory.create(
@@ -500,8 +504,8 @@ class TestCourseOutline(CourseTestCase):
)
problem1 = ItemFactory.create(
parent_location=vertical1.location,
-category='peergrading',
-display_name='peergrading problem in vert1',
+category='notes',
+display_name='notes problem in vert1',
publish_item=False
)
@@ -515,8 +519,8 @@ class TestCourseOutline(CourseTestCase):
)
ItemFactory.create(
parent_location=vertical2.location,
-category='peergrading',
-display_name='peergrading problem in vert2',
+category='notes',
+display_name='notes problem in vert2',
publish_item=True
)
# At this point CourseStructure will contain both the above
@@ -526,8 +530,8 @@ class TestCourseOutline(CourseTestCase):
self.assertItemsEqual(
info['blocks'],
[
-[reverse_usage_url('container_handler', vertical1.location), 'peergrading problem in vert1'],
-[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']
+[reverse_usage_url('container_handler', vertical1.location), 'notes problem in vert1'],
+[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']
]
)
@@ -542,7 +546,7 @@ class TestCourseOutline(CourseTestCase):
# There shouldn't be any info present about un-published vertical1
self.assertEqual(
info['blocks'],
-[[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']]
+[[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']]
)

View File

@@ -1388,28 +1388,28 @@ class TestComponentTemplates(CourseTestCase):
Test the handling of advanced problem templates.
"""
problem_templates = self.get_templates_of_type('problem')
ora_template = self.get_template(problem_templates, u'Peer Assessment')
self.assertIsNotNone(ora_template)
self.assertEqual(ora_template.get('category'), 'openassessment')
self.assertIsNone(ora_template.get('boilerplate_name', None))
circuit_template = self.get_template(problem_templates, u'Circuit Schematic Builder')
self.assertIsNotNone(circuit_template)
self.assertEqual(circuit_template.get('category'), 'problem')
self.assertEqual(circuit_template.get('boilerplate_name'), 'circuitschematic.yaml')
-@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"])
-def test_ora1_no_advance_component_button(self):
+@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"])
+def test_deprecated_no_advance_component_button(self):
"""
-Test that there will be no `Advanced` button on unit page if `combinedopenended` and `peergrading` are
-deprecated provided that there are only 'combinedopenended', 'peergrading' modules in `Advanced Module List`
+Test that there is no `Advanced` button on the unit page when the only
+modules in `Advanced Module List` are deprecated.
"""
-self.course.advanced_modules.extend(['combinedopenended', 'peergrading'])
+self.course.advanced_modules.extend(['poll', 'survey'])
templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates]
self.assertNotIn('Advanced', button_names)
-@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"])
-def test_cannot_create_ora1_problems(self):
+@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"])
+def test_cannot_create_deprecated_problems(self):
"""
-Test that we can't create ORA1 problems if `combinedopenended` and `peergrading` are deprecated
+Test that we can't create problems whose component types are deprecated
"""
-self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading'])
+self.course.advanced_modules.extend(['annotatable', 'poll', 'survey'])
templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates]
self.assertIn('Advanced', button_names)
@@ -1418,17 +1418,17 @@ class TestComponentTemplates(CourseTestCase):
self.assertEqual(template_display_names, ['Annotation'])
@patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', [])
-def test_create_ora1_problems(self):
+def test_create_non_deprecated_problems(self):
"""
-Test that we can create ORA1 problems if `combinedopenended` and `peergrading` are not deprecated
+Test that we can create problems whose component types are not deprecated
"""
-self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading'])
+self.course.advanced_modules.extend(['annotatable', 'poll', 'survey'])
templates = get_component_templates(self.course)
button_names = [template['display_name'] for template in templates]
self.assertIn('Advanced', button_names)
self.assertEqual(len(templates[0]['templates']), 3)
template_display_names = [template['display_name'] for template in templates[0]['templates']]
-self.assertEqual(template_display_names, ['Annotation', 'Open Response Assessment', 'Peer Grading Interface'])
+self.assertEqual(template_display_names, ['Annotation', 'Poll', 'Survey'])
@ddt.ddt

View File

@@ -81,14 +81,6 @@
}
}
},
"OPEN_ENDED_GRADING_INTERFACE": {
"grading_controller": "grading_controller",
"password": "password",
"peer_grading": "peer_grading",
"staff_grading": "staff_grading",
"url": "http://localhost:18060/",
"username": "lms"
},
"DJFS": {
"type": "s3fs",
"bucket": "test",

View File

@@ -104,5 +104,9 @@
"THEME_NAME": "",
"TIME_ZONE": "America/New_York",
"WIKI_ENABLED": true,
"OAUTH_OIDC_ISSUER": "https://www.example.com/oauth2"
"OAUTH_OIDC_ISSUER": "https://www.example.com/oauth2",
"DEPRECATED_BLOCK_TYPES": [
"poll",
"survey"
]
}

View File

@@ -96,6 +96,9 @@ FEATURES['ENABLE_VIDEO_BUMPER'] = True # Enable video bumper in Studio settings
# Enable partner support link in Studio footer
FEATURES['PARTNER_SUPPORT_EMAIL'] = 'partner-support@example.com'
+# Disable some block types to test block deprecation logic
+DEPRECATED_BLOCK_TYPES = ['poll', 'survey']
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True

View File

@@ -1016,8 +1016,6 @@ ADVANCED_COMPONENT_TYPES = [
'rate', # Allows up-down voting of course content. See https://github.com/pmitros/RateXBlock
'split_test',
-'combinedopenended',
-'peergrading',
'notes',
'schoolyourself_review',
'schoolyourself_lesson',

View File

@@ -1,97 +0,0 @@
<div class="wrapper-comp-editor" id="editor-tab">
<section class="combinedopenended-editor editor">
<div class="row">
%if enable_markdown:
<div class="editor-bar">
<ul class="format-buttons">
<li><a href="#" class="prompt-button" data-tooltip="Prompt"><span
class="combinedopenended-editor-icon fa fa-quote-left"></span></a></li>
<li><a href="#" class="rubric-button" data-tooltip="Rubric"><span
class="combinedopenended-editor-icon fa fa-table"></span></a></li>
<li><a href="#" class="tasks-button" data-tooltip="Tasks"><span
class="combinedopenended-editor-icon fa fa-sitemap"></span></a></li>
</ul>
<ul class="editor-tabs">
<li><a href="#" class="xml-tab advanced-toggle" data-tab="xml">Advanced Editor</a></li>
<li><a href="#" class="cheatsheet-toggle" data-tooltip="Toggle Cheatsheet">?</a></li>
</ul>
</div>
<textarea class="markdown-box">${markdown | h}</textarea>
%endif
<textarea class="xml-box" rows="8" cols="40">${data | h}</textarea>
</div>
</section>
<script type="text/template" id="open-ended-template">
<openended %min_max_string%>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "%grading_config%", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</script>
<script type="text/template" id="simple-editor-open-ended-cheatsheet">
<article class="simple-editor-open-ended-cheatsheet">
<div class="cheatsheet-wrapper">
<div class="row">
<h6>Prompt</h6>
<div class="col prompt">
</div>
<div class="col">
<pre><code>
[prompt]
Why is the sky blue?
[prompt]
</code></pre>
</div>
<div class="col">
<p>The student will respond to the prompt. The prompt can contain any html tags, such as paragraph tags and header tags.</p>
</div>
</div>
<div class="row">
<h6>Rubric</h6>
<div class="col sample rubric"><!DOCTYPE html>
</div>
<div class="col">
<pre><code>
[rubric]
+ Color Identification
- Incorrect
- Correct
+ Grammar
- Poor
- Acceptable
- Superb
[rubric]
</code></pre>
</div>
<div class="col">
<p>The rubric is used for feedback and self-assessment. The rubric can have as many categories (+) and options (-) as desired. </p>
<p>The total score for the problem will be the sum of all the points possible on the rubric. The options will be numbered sequentially from zero in each category, so each category will be worth as many points as its number of options minus one. </p>
</div>
</div>
<div class="row">
<h6>Tasks</h6>
<div class="col sample tasks">
</div>
<div class="col">
<pre><code>
[tasks]
(Self), ({1-3}AI), ({2-3}Peer)
[tasks]
</code></pre>
</div>
<div class="col">
<p>The tasks define what feedback the student will get from the problem.</p>
<p>Each task is defined with parentheses around it. Brackets (ie {2-3} above), specify the minimum and maximum score needed to attempt the given task.</p>
<p>In the example above, the student will first be asked to self-assess. If they give themselves greater than or equal to a 1/3 and less than or equal to a 3/3 on the problem, then they will be moved to AI assessment. If they score themselves a 2/3 or 3/3 on AI assessment, they will move to peer assessment.</p>
<p>Students will be given feedback from each task, and their final score for a given attempt of the problem will be their score last task that is completed.</p>
</div>
</div>
</div>
</article>
</script>
</div>
<%include file="metadata-edit.html" />

View File

@@ -1,535 +0,0 @@
"""
Stub implementation of ORA service.
This is an extremely simple version of the service, with most
business logic removed. In particular, the stub:
1) Provides an infinite number of peer and calibration essays,
with dummy data.
2) Simulates a set number of pending submissions for each student;
grades submitted by one student are not used for any other student.
3) Ignores the scores/feedback students submit.
4) Ignores problem location: an essay graded for *any* problem is graded
for *every* problem.
Basically, the stub tracks only the *number* of peer/calibration essays
submitted by each student.
"""
import json
import pkg_resources
from .http import StubHttpRequestHandler, StubHttpService, require_params
class StudentState(object):
"""
Store state about the student that the stub
ORA implementation needs to keep track of.
"""
INITIAL_ESSAYS_AVAILABLE = 3
NUM_ESSAYS_REQUIRED = 1
NUM_CALIBRATION_REQUIRED = 1
def __init__(self):
self.num_graded = 0
self.num_calibrated = 0
def grade_peer_essay(self):
self.num_graded += 1
def grade_calibration_essay(self):
self.num_calibrated += 1
@property
def num_pending(self):
return max(self.INITIAL_ESSAYS_AVAILABLE - self.num_graded, 0)
@property
def num_required(self):
return max(self.NUM_ESSAYS_REQUIRED - self.num_graded, 0)
@property
def is_calibrated(self):
return self.num_calibrated >= self.NUM_CALIBRATION_REQUIRED
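# Illustrative trace of the counters above (an editor's sketch, not part of
# the original stub): a fresh StudentState reports num_pending == 3
# (INITIAL_ESSAYS_AVAILABLE) and num_required == 1 (NUM_ESSAYS_REQUIRED).
# After one grade_peer_essay() call, num_pending drops to 2 and num_required
# to 0; is_calibrated stays False until grade_calibration_essay() has been
# called NUM_CALIBRATION_REQUIRED (here 1) times.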
class StubOraHandler(StubHttpRequestHandler):
"""
Handler for ORA requests.
"""
GET_URL_HANDLERS = {
'/peer_grading/get_next_submission': '_get_next_submission',
'/peer_grading/is_student_calibrated': '_is_student_calibrated',
'/peer_grading/show_calibration_essay': '_show_calibration_essay',
'/peer_grading/get_notifications': '_get_notifications',
'/peer_grading/get_data_for_location': '_get_data_for_location',
'/peer_grading/get_problem_list': '_get_problem_list',
}
POST_URL_HANDLERS = {
'/peer_grading/save_grade': '_save_grade',
'/peer_grading/save_calibration_essay': '_save_calibration_essay',
# Test-specific, used by the XQueue stub to register a new submission,
# which we use to discover valid problem locations in the LMS
'/test/register_submission': '_register_submission'
}
def do_GET(self):
"""
Handle GET methods to the ORA API stub.
"""
self._send_handler_response('GET')
def do_POST(self):
"""
Handle POST methods to the ORA API stub.
"""
self._send_handler_response('POST')
def _send_handler_response(self, method):
"""
Delegate response to handler methods.
If no handler defined, send a 404 response.
"""
# Choose the list of handlers based on the HTTP method
if method == 'GET':
handler_list = self.GET_URL_HANDLERS
elif method == 'POST':
handler_list = self.POST_URL_HANDLERS
else:
self.log_error('Unrecognized method "{method}"'.format(method=method))
return
# Check the path (without querystring params) against our list of handlers
handler_name = handler_list.get(self.path_only)
if handler_name is not None:
handler = getattr(self, handler_name, None)
else:
handler = None
# Delegate to the handler to send a response
if handler is not None:
handler()
# If we don't have a handler for this URL and/or HTTP method,
# respond with a 404. This is the same behavior as the ORA API.
else:
self.send_response(404)
@require_params('GET', 'student_id', 'problem_id')
def _is_student_calibrated(self):
"""
Query whether the student has completed enough calibration
essays to begin peer grading.
Method: GET
Params:
- student_id
- problem_id
Result (JSON):
- success (bool)
- total_calibrated_on_so_far (int)
- calibrated (bool)
"""
student = self._student('GET')
if student is None:
self._error_response()
else:
self._success_response({
'total_calibrated_on_so_far': student.num_calibrated,
'calibrated': student.is_calibrated
})
@require_params('GET', 'student_id', 'problem_id')
def _show_calibration_essay(self):
"""
Retrieve a calibration essay for the student to grade.
Method: GET
Params:
- student_id
- problem_id
Result (JSON):
- success (bool)
- submission_id (str)
- submission_key (str)
- student_response (str)
- prompt (str)
- rubric (str)
- max_score (int)
"""
self._success_response({
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
@require_params('GET', 'student_id', 'course_id')
def _get_notifications(self):
"""
Query counts of submitted, required, graded, and available peer essays
for a particular student.
Method: GET
Params:
- student_id
- course_id
Result (JSON):
- success (bool)
- student_sub_count (int)
- count_required (int)
- count_graded (int)
- count_available (int)
"""
student = self._student('GET')
if student is None:
self._error_response()
else:
self._success_response({
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_required': student.num_required,
'count_graded': student.num_graded,
'count_available': student.num_pending
})
@require_params('GET', 'student_id', 'location')
def _get_data_for_location(self):
"""
Query counts of submitted, required, graded, and available peer essays
for a problem location.
This will send an error response if the problem has not
been registered at the given `location`. This allows us
to ignore problems that are self- or ai-graded.
Method: GET
Params:
- student_id
- location
Result (JSON):
- success (bool)
- student_sub_count (int)
- count_required (int)
- count_graded (int)
- count_available (int)
"""
student = self._student('GET')
location = self.get_params.get('location')
# Do not return data if we're missing the student param
# or the problem has not yet been registered.
if student is None or location not in self.server.problems:
self._error_response()
else:
self._success_response({
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_required': student.num_required,
'count_graded': student.num_graded,
'count_available': student.num_pending
})
@require_params('GET', 'grader_id', 'location')
def _get_next_submission(self):
"""
Retrieve the next submission for the student to peer-grade.
Method: GET
Params:
- grader_id
- location
Result (JSON):
- success (bool)
- submission_id (str)
- submission_key (str)
- student_response (str)
- prompt (str, HTML)
- rubric (str, XML)
- max_score (int)
"""
self._success_response({
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
@require_params('GET', 'course_id')
def _get_problem_list(self):
"""
Retrieve the list of problems available for peer grading.
Method: GET
Params:
- course_id
Result (JSON):
- success (bool)
- problem_list (list)
where `problem_list` is a list of dictionaries with keys:
- location (str)
- problem_name (str)
- num_graded (int)
- num_pending (int)
- num_required (int)
"""
self._success_response({'problem_list': self.server.problem_list})
@require_params('POST', 'grader_id', 'location', 'submission_id', 'score', 'feedback', 'submission_key')
def _save_grade(self):
"""
Save a score and feedback for an essay the student has graded.
Method: POST
Params:
- grader_id
- location
- submission_id
- score
- feedback
- submission_key
Result (JSON):
- success (bool)
"""
student = self._student('POST', key='grader_id')
if student is None:
self._error_response()
else:
# Update the number of essays the student has graded
student.grade_peer_essay()
return self._success_response({})
@require_params('POST', 'student_id', 'location', 'calibration_essay_id', 'score', 'feedback', 'submission_key')
def _save_calibration_essay(self):
"""
Save a score and feedback for a calibration essay the student has graded.
Returns the scores/feedback that the instructor gave for the essay.
Method: POST
Params:
- student_id
- location
- calibration_essay_id
- score
- feedback
- submission_key
Result (JSON):
- success (bool)
- message (str)
- actual_score (int)
- actual_rubric (str, XML)
- actual_feedback (str)
"""
student = self._student('POST')
if student is None:
self._error_response()
else:
# Increment the student calibration count
student.grade_calibration_essay()
self._success_response({
'message': self.server.DUMMY_DATA['message'],
'actual_score': self.server.DUMMY_DATA['actual_score'],
'actual_rubric': self.server.DUMMY_DATA['actual_rubric'],
'actual_feedback': self.server.DUMMY_DATA['actual_feedback']
})
@require_params('POST', 'grader_payload')
def _register_submission(self):
"""
Test-specific method to register a new submission.
This is used by `get_problem_list` to return valid locations in the LMS courseware.
In tests, this end-point gets called by the XQueue stub when it receives new submissions,
much like ORA discovers locations when students submit peer-graded problems to the XQueue.
Since the LMS sends *all* open-ended problems to the XQueue (including self- and ai-graded),
we have to ignore everything except peer-graded problems. We do so by looking
for the text 'peer' in the problem's name. This is a little bit of a hack,
but it makes the implementation much simpler.
Method: POST
Params:
- grader_payload (JSON dict)
Result: Empty
The only keys we use in `grader_payload` are 'location' and 'problem_id'.
"""
# Since this is a required param, we know it is in the post dict
try:
payload = json.loads(self.post_dict['grader_payload'])
except ValueError:
self.log_message(
"Could not decode grader payload as JSON: '{0}'".format(
self.post_dict['grader_payload']))
self.send_response(400)
else:
location = payload.get('location')
name = payload.get('problem_id')
if location is not None and name is not None:
if "peer" in name.lower():
self.server.register_problem(location, name)
self.send_response(200)
else:
self.log_message(
"Problem '{0}' does not have 'peer' in its name. Ignoring...".format(name)
)
self.send_response(200)
else:
self.log_message(
"Grader payload should contain 'location' and 'problem_id' keys: {0}".format(payload)
)
self.send_response(400)
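# Example exchange (illustrative values only, not from the original source):
# the XQueue stub POSTs
# grader_payload='{"location": "i4x://demo/loc", "problem_id": "Peer Problem"}'
# to /test/register_submission; because the name contains "peer", the stub
# registers the location, and get_problem_list will then include it.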
def _student(self, method, key='student_id'):
"""
Return the `StudentState` instance for the student ID given
in the request parameters.
`method` is the HTTP request method (either "GET" or "POST")
and `key` is the parameter key.
"""
if method == 'GET':
student_id = self.get_params.get(key)
elif method == 'POST':
student_id = self.post_dict.get(key)
else:
self.log_error("Unrecognized method '{method}'".format(method=method))
return None
if student_id is None:
self.log_error("Could not get student ID from parameters")
return None
return self.server.student_state(student_id)
def _success_response(self, response_dict):
"""
Send a success response.
`response_dict` is a Python dictionary to JSON-encode.
"""
response_dict['success'] = True
response_dict['version'] = 1
self.send_response(
200, content=json.dumps(response_dict),
headers={'Content-type': 'application/json'}
)
def _error_response(self):
"""
Send an error response.
"""
response_dict = {'success': False, 'version': 1}
self.send_response(
400, content=json.dumps(response_dict),
headers={'Content-type': 'application/json'}
)
class StubOraService(StubHttpService):
"""
Stub ORA service.
"""
HANDLER_CLASS = StubOraHandler
DUMMY_DATA = {
'submission_id': 1,
'submission_key': 'test key',
'student_response': 'Test response',
'prompt': 'Test prompt',
'rubric': pkg_resources.resource_string(__name__, "data/ora_rubric.xml"),
'max_score': 2,
'message': 'Successfully saved calibration record.',
'actual_score': 2,
'actual_rubric': pkg_resources.resource_string(__name__, "data/ora_graded_rubric.xml"),
'actual_feedback': 'Great job!',
'student_sub_count': 1,
'problem_name': 'test problem',
'problem_list_num_graded': 1,
'problem_list_num_pending': 1,
'problem_list_num_required': 0,
}
def __init__(self, *args, **kwargs):
"""
Initialize student submission state.
"""
super(StubOraService, self).__init__(*args, **kwargs)
# Create a dict to map student IDs to their state
self._students = dict()
# By default, no problems are available for peer grading
# You can add to this list using the `/test/register_submission` HTTP end-point
# This is a dict mapping problem locations to problem names
self.problems = dict()
def student_state(self, student_id):
"""
Return the `StudentState` (named tuple) for the student
with ID `student_id`. The student state can be modified by the caller.
"""
# Create the student state if it does not already exist
if student_id not in self._students:
student = StudentState()
self._students[student_id] = student
# Retrieve the student state
return self._students[student_id]
@property
def problem_list(self):
"""
Return a list of problems available for peer grading.
"""
return [{
'location': location, 'problem_name': name,
'num_graded': self.DUMMY_DATA['problem_list_num_graded'],
'num_pending': self.DUMMY_DATA['problem_list_num_pending'],
'num_required': self.DUMMY_DATA['problem_list_num_required']
} for location, name in self.problems.items()]
def register_problem(self, location, name):
"""
Register a new problem with `location` and `name` for peer grading.
"""
self.problems[location] = name
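# A minimal usage sketch (an editor's addition, not part of the stub itself;
# it leans on the ephemeral `port` attribute and `shutdown()` behavior that
# the unit tests in test_ora.py below exercise):
if __name__ == "__main__":
    import requests

    server = StubOraService()
    try:
        # Query peer-grading notification counts for a dummy student.
        response = requests.get(
            "http://127.0.0.1:{0}/peer_grading/get_notifications".format(server.port),
            params={"student_id": "1234", "course_id": "demo course"},
        )
        print(response.json())  # e.g. {'success': True, 'version': 1, 'count_graded': 0, ...}
    finally:
        server.shutdown()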

View File

@@ -7,7 +7,6 @@ import logging
from .comments import StubCommentsService
from .xqueue import StubXQueueService
from .youtube import StubYouTubeService
-from .ora import StubOraService
from .lti import StubLtiService
from .video_source import VideoSourceHttpService
from .edxnotes import StubEdxNotesService
@@ -19,7 +18,6 @@ USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM [CONFIG_KEY=CONFIG_V
SERVICES = {
'xqueue': StubXQueueService,
'youtube': StubYouTubeService,
-'ora': StubOraService,
'comments': StubCommentsService,
'lti': StubLtiService,
'video': VideoSourceHttpService,

View File

@@ -1,282 +0,0 @@
"""
Unit tests for stub ORA implementation.
"""
import unittest
import requests
import json
from ..ora import StubOraService, StudentState
class StubOraServiceTest(unittest.TestCase):
def setUp(self):
"""
Start the stub server.
"""
super(StubOraServiceTest, self).setUp()
self.server = StubOraService()
self.addCleanup(self.server.shutdown)
def test_calibration(self):
# Ensure that we use the same student ID throughout
student_id = '1234'
# Initially, student should not be calibrated
response = requests.get(
self._peer_url('is_student_calibrated'),
params={'student_id': student_id, 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'total_calibrated_on_so_far': 0,
'calibrated': False
})
# Retrieve a calibration essay
response = requests.get(
self._peer_url('show_calibration_essay'),
params={'student_id': student_id, 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
# Grade the calibration essay
response = requests.post(
self._peer_url('save_calibration_essay'),
data={
'student_id': student_id,
'location': 'test location',
'calibration_essay_id': 1,
'score': 2,
'submission_key': 'key',
'feedback': 'Good job!'
}
)
self._assert_response(response, {
'version': 1, 'success': True,
'message': self.server.DUMMY_DATA['message'],
'actual_score': self.server.DUMMY_DATA['actual_score'],
'actual_rubric': self.server.DUMMY_DATA['actual_rubric'],
'actual_feedback': self.server.DUMMY_DATA['actual_feedback']
})
# Now the student should be calibrated
response = requests.get(
self._peer_url('is_student_calibrated'),
params={'student_id': student_id, 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'total_calibrated_on_so_far': 1,
'calibrated': True
})
# But a student with a different ID should NOT be calibrated.
response = requests.get(
self._peer_url('is_student_calibrated'),
params={'student_id': 'another', 'problem_id': '5678'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'total_calibrated_on_so_far': 0,
'calibrated': False
})
def test_grade_peers(self):
# Ensure a consistent student ID
student_id = '1234'
# Check initial number of submissions
# Should be none graded and 1 required
self._assert_num_graded(student_id, None, 0, 1)
# Register a problem that DOES have "peer" in the name
self._register_problem('test_location', 'Peer Assessed Problem')
# Retrieve the next submission
response = requests.get(
self._peer_url('get_next_submission'),
params={'grader_id': student_id, 'location': 'test_location'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
# Grade the submission
response = requests.post(
self._peer_url('save_grade'),
data={
'location': 'test_location',
'grader_id': student_id,
'submission_id': 1,
'score': 2,
'feedback': 'Good job!',
'submission_key': 'key'
}
)
self._assert_response(response, {'version': 1, 'success': True})
# Check final number of submissions
# Should be one graded and none required
self._assert_num_graded(student_id, 'test_location', 1, 0)
# Grade the next submission
response = requests.post(
self._peer_url('save_grade'),
data={
'location': 'test_location',
'grader_id': student_id,
'submission_id': 1,
'score': 2,
'feedback': 'Good job!',
'submission_key': 'key'
}
)
self._assert_response(response, {'version': 1, 'success': True})
# Check final number of submissions
# Should be two graded and none required
self._assert_num_graded(student_id, 'test_location', 2, 0)
def test_problem_list(self):
self._register_problem('test_location', 'Peer Grading Problem')
# The problem list returns dummy counts which are not updated
# The location we use is ignored by the LMS, and we ignore it in the stub,
# so we use a dummy value there too.
response = requests.get(
self._peer_url('get_problem_list'),
params={'course_id': 'test course'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'problem_list': [{
'location': 'test_location',
'problem_name': 'Peer Grading Problem',
'num_graded': self.server.DUMMY_DATA['problem_list_num_graded'],
'num_pending': self.server.DUMMY_DATA['problem_list_num_pending'],
'num_required': self.server.DUMMY_DATA['problem_list_num_required']
}]
})
def test_ignore_non_peer_problem(self):
# Register a problem that does NOT have "peer" in the name
self._register_problem('test_location', 'Self Assessed Problem')
# Expect that the problem list is empty
response = requests.get(
self._peer_url('get_problem_list'),
params={'course_id': 'test course'}
)
self._assert_response(
response,
{'version': 1, 'success': True, 'problem_list': []}
)
# Expect that no data is available for the problem location
response = requests.get(
self._peer_url('get_data_for_location'),
params={'location': 'test_location', 'student_id': 'test'}
)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {'version': 1, 'success': False})
def test_empty_problem_list(self):
# Without configuring any problem location, should return an empty list
response = requests.get(
self._peer_url('get_problem_list'),
params={'course_id': 'test course'}
)
self._assert_response(response, {'version': 1, 'success': True, 'problem_list': []})
def _peer_url(self, path):
"""
Construct a URL to the stub ORA peer-grading service.
"""
return "http://127.0.0.1:{port}/peer_grading/{path}/".format(
port=self.server.port, path=path
)
def _register_problem(self, location, name):
"""
Configure the stub to use a particular problem location
The actual implementation discovers problem locations by submission
to the XQueue; we do something similar by having the XQueue stub
register submitted locations with the ORA stub.
"""
grader_payload = json.dumps({'location': location, 'problem_id': name})
url = "http://127.0.0.1:{port}/test/register_submission".format(port=self.server.port)
response = requests.post(url, data={'grader_payload': grader_payload})
self.assertTrue(response.ok)
def _assert_response(self, response, expected_json):
"""
Assert that the `response` was successful and contained
`expected_json` (dict) as its content.
"""
self.assertTrue(response.ok)
self.assertEqual(response.json(), expected_json)
def _assert_num_graded(self, student_id, location, num_graded, num_required):
"""
ORA provides two distinct ways to get the submitted/graded counts.
Here we check both of them to ensure that the number that we've graded
is consistently `num_graded`.
"""
# Unlike the actual ORA service,
# we keep track of counts on a per-student basis.
# This means that every user starts with N essays to grade,
# and as they grade essays, that number decreases.
# We do NOT simulate students adding more essays to the queue,
# and essays that the current student submits are NOT graded
# by other students.
num_pending = StudentState.INITIAL_ESSAYS_AVAILABLE - num_graded
# Notifications
response = requests.get(
self._peer_url('get_notifications'),
params={'student_id': student_id, 'course_id': 'test course'}
)
self._assert_response(response, {
'version': 1, 'success': True,
'count_required': num_required,
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_graded': num_graded,
'count_available': num_pending
})
# Location data
if location is not None:
response = requests.get(
self._peer_url('get_data_for_location'),
params={'location': location, 'student_id': student_id}
)
self._assert_response(response, {
'version': 1, 'success': True,
'count_required': num_required,
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_graded': num_graded,
'count_available': num_pending
})

View File

@@ -115,19 +115,6 @@ class StubXQueueServiceTest(unittest.TestCase):
self.assertFalse(self.post.called)
self.assertTrue(logger.error.called)
-def test_register_submission_url(self):
-# Configure the XQueue stub to notify another service
-# when it receives a submission.
-register_url = 'http://127.0.0.1:8000/register_submission'
-self.server.config['register_submission_url'] = register_url
-callback_url = 'http://127.0.0.1:8000/test_callback'
-submission = json.dumps({'grader_payload': 'test payload'})
-self._post_submission(callback_url, 'test_queuekey', 'test_queue', submission)
-# Check that a notification was sent
-self.post.assert_any_call(register_url, data={'grader_payload': u'test payload'})
def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
"""
Post a submission to the stub XQueue implementation.

View File

@@ -39,7 +39,8 @@ class StubXQueueHandler(StubHttpRequestHandler):
if self._is_grade_request():
# If configured, send the grader payload to other services.
-self._register_submission(self.post_dict['xqueue_body'])
+# TODO TNL-3906
+# self._register_submission(self.post_dict['xqueue_body'])
try:
xqueue_header = json.loads(self.post_dict['xqueue_header'])

View File

@@ -3,7 +3,6 @@ from setuptools import setup, find_packages
XMODULES = [
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SequenceDescriptor",
"combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
"conditional = xmodule.conditional_module:ConditionalDescriptor",
"course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor",
@@ -12,7 +11,6 @@ XMODULES = [
"image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"library_content = xmodule.library_content_module:LibraryContentDescriptor",
"error = xmodule.error_module:ErrorDescriptor",
"peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
"poll_question = xmodule.poll_module:PollDescriptor",
"problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor",

View File

@@ -1,550 +0,0 @@
"""
ORA1. Deprecated.
"""
import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.raw_module import RawDescriptor
from .x_module import XModule, module_attr
from xblock.fields import Integer, Scope, String, List, Float, Boolean
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor
from xmodule.validation import StudioValidation, StudioValidationMessage
from collections import namedtuple
from .fields import Date, Timedelta
import textwrap
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
V1_SETTINGS_ATTRIBUTES = [
"display_name",
"max_attempts",
"graded",
"accept_file_upload",
"skip_spelling_checks",
"due",
"graceperiod",
"weight",
"min_to_calibrate",
"max_to_calibrate",
"peer_grader_count",
"required_peer_grading",
"peer_grade_finished_submissions_when_none_pending",
]
V1_STUDENT_ATTRIBUTES = [
"current_task_number",
"task_states",
"state",
"student_attempts",
"ready_to_reset",
"old_task_states",
]
V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES
VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes'])
VERSION_TUPLES = {
1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES,
V1_STUDENT_ATTRIBUTES),
}
DEFAULT_VERSION = 1
DEFAULT_DATA = textwrap.dedent("""\
<combinedopenended>
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/></task>
<task>
<openended min_score_to_attempt="4" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
<task>
<openended min_score_to_attempt="9" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
""")
class VersionInteger(Integer):
"""
A model type that converts from strings to integers when reading from json.
Also does error checking to see if version is correct or not.
"""
def from_json(self, value):
try:
value = int(value)
if value not in VERSION_TUPLES:
version_error_string = "Could not find version {0}, using version {1} instead"
log.error(version_error_string.format(value, DEFAULT_VERSION))
value = DEFAULT_VERSION
except:
value = DEFAULT_VERSION
return value
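# Behavior sketch (editor's note): from_json("1") -> 1; an unrecognized
# version such as from_json("7") logs an error and falls back to
# DEFAULT_VERSION; a non-numeric value like from_json("abc") is likewise
# coerced to DEFAULT_VERSION by the bare except clause.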
class CombinedOpenEndedFields(object):
display_name = String(
display_name=_("Display Name"),
help=_("This name appears in the horizontal navigation at the top of the page."),
default=_("Open Response Assessment"),
scope=Scope.settings
)
current_task_number = Integer(
help=_("Current task that the student is on."),
default=0,
scope=Scope.user_state
)
old_task_states = List(
help=_("A list of lists of state dictionaries for student states that are saved. "
"This field is only populated if the instructor changes tasks after "
"the module is created and students have attempted it (for example, if a self assessed problem is "
"changed to self and peer assessed)."),
scope=Scope.user_state,
)
task_states = List(
help=_("List of state dictionaries of each task within this module."),
scope=Scope.user_state
)
state = String(
help=_("Which step within the current task that the student is on."),
default="initial",
scope=Scope.user_state
)
graded = Boolean(
display_name=_("Graded"),
help=_("Defines whether the student gets credit for this problem. Credit is based on peer grades of this problem."),
default=False,
scope=Scope.settings
)
student_attempts = Integer(
help=_("Number of attempts taken by the student on this problem"),
default=0,
scope=Scope.user_state
)
ready_to_reset = Boolean(
help=_("If the problem is ready to be reset or not."),
default=False,
scope=Scope.user_state
)
max_attempts = Integer(
display_name=_("Maximum Attempts"),
help=_("The number of times the student can try to answer this problem."),
default=1,
scope=Scope.settings,
values={"min": 1}
)
accept_file_upload = Boolean(
display_name=_("Allow File Uploads"),
help=_("Whether or not the student can submit files as a response."),
default=False,
scope=Scope.settings
)
skip_spelling_checks = Boolean(
display_name=_("Disable Quality Filter"),
help=_("If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed."),
default=False,
scope=Scope.settings
)
due = Date(
help=_("Date that this problem is due by"),
scope=Scope.settings
)
graceperiod = Timedelta(
help=_("Amount of time after the due date that submissions will be accepted"),
scope=Scope.settings
)
version = VersionInteger(
help=_("Current version number"),
default=DEFAULT_VERSION,
scope=Scope.settings)
data = String(
help=_("XML data for the problem"),
scope=Scope.content,
default=DEFAULT_DATA)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."),
scope=Scope.settings,
values={"min": 0, "step": ".1"},
default=1
)
min_to_calibrate = Integer(
display_name=_("Minimum Peer Grading Calibrations"),
help=_("The minimum number of calibration essays each student will need to complete for peer grading."),
default=3,
scope=Scope.settings,
values={"min": 1, "max": 20, "step": "1"}
)
max_to_calibrate = Integer(
display_name=_("Maximum Peer Grading Calibrations"),
help=_("The maximum number of calibration essays each student will need to complete for peer grading."),
default=6,
scope=Scope.settings,
values={"min": 1, "max": 20, "step": "1"}
)
peer_grader_count = Integer(
display_name=_("Peer Graders per Response"),
help=_("The number of peers who will grade each submission."),
default=3,
scope=Scope.settings,
values={"min": 1, "step": "1", "max": 5}
)
required_peer_grading = Integer(
display_name=_("Required Peer Grading"),
help=_("The number of other students each student making a submission will have to grade."),
default=3,
scope=Scope.settings,
values={"min": 1, "step": "1", "max": 5}
)
peer_grade_finished_submissions_when_none_pending = Boolean(
display_name=_('Allow "overgrading" of peer submissions'),
help=_(
"EXPERIMENTAL FEATURE. Allow students to peer grade submissions that already have the requisite number of graders, "
"but ONLY WHEN all submissions they are eligible to grade already have enough graders. "
"This is intended for use when settings for `Required Peer Grading` > `Peer Graders per Response`"
),
default=False,
scope=Scope.settings,
)
markdown = String(
help=_("Markdown source of this module"),
default=textwrap.dedent("""\
[prompt]
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
[prompt]
[rubric]
+ Ideas
- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
+ Content
- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
- Includes little information and few or no details. Explores only one or two facets of the topic.
- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
+ Organization
- Ideas organized illogically, transitions weak, and response difficult to follow.
- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
+ Style
- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
+ Voice
- Demonstrates language and tone that may be inappropriate to task and reader.
- Demonstrates an attempt to adjust language and tone to task and reader.
- Demonstrates effective adjustment of language and tone to task and reader.
[rubric]
[tasks]
(Self), ({4-12}AI), ({9-12}Peer)
[tasks]
"""),
scope=Scope.settings
)
class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
It transitions between problems, and supports arbitrary ordering.
Each combined open ended module contains one or multiple "child" modules.
Child modules track their own state, and can transition between states. They also implement get_html and
handle_ajax.
The combined open ended module transitions between child modules as appropriate, tracks its own state, and passes
ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem)
ajax actions implemented by all children are:
'save_answer' -- Saves the student answer
'save_assessment' -- Saves the student assessment (or external grader assessment)
'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc)
ajax actions implemented by combined open ended module are:
'reset' -- resets the whole combined open ended module and returns to the first child module
'next_problem' -- moves to the next child module
'get_results' -- gets results from a given child module
Types of children. Task is synonymous with child module, so each combined open ended module
incorporates multiple children (tasks):
openendedmodule
selfassessmentmodule
CombinedOpenEndedModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done'
icon_class = 'problem'
js = {
'coffee': [
resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
js_module_name = "CombinedOpenEnded"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, *args, **kwargs):
"""
The definition file should have one or more task blocks, a rubric block, and a prompt block.
See DEFAULT_DATA for a sample.
"""
super(CombinedOpenEndedModule, self).__init__(*args, **kwargs)
self.system.set('location', self.location)
if self.task_states is None:
self.task_states = []
if self.old_task_states is None:
self.old_task_states = []
version_tuple = VERSION_TUPLES[self.version]
self.student_attributes = version_tuple.student_attributes
self.settings_attributes = version_tuple.settings_attributes
attributes = self.student_attributes + self.settings_attributes
static_data = {}
instance_state = {k: getattr(self, k) for k in attributes}
self.child_descriptor = version_tuple.descriptor(self.system)
self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system)
self.child_module = version_tuple.module(self.system, self.location, self.child_definition, self.child_descriptor,
instance_state=instance_state, static_data=static_data,
attributes=attributes)
self.save_instance_data()
def get_html(self):
self.save_instance_data()
return_value = self.child_module.get_html()
return return_value
def handle_ajax(self, dispatch, data):
self.save_instance_data()
return_value = self.child_module.handle_ajax(dispatch, data)
self.save_instance_data()
return return_value
def get_instance_state(self):
return self.child_module.get_instance_state()
def get_score(self):
return self.child_module.get_score()
def max_score(self):
return self.child_module.max_score()
def get_progress(self):
return self.child_module.get_progress()
@property
def due_date(self):
return self.child_module.due_date
def save_instance_data(self):
for attribute in self.student_attributes:
setattr(self, attribute, getattr(self.child_module, attribute))
def validate(self):
"""
Returns a validation message and its type (error or warning).
Error messages take priority over warning messages.
"""
return self.descriptor.validate()
class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
"""
Module for adding combined open ended questions
"""
mako_template = "widgets/open-ended-edit.html"
module_class = CombinedOpenEndedModule
has_score = True
always_recalculate_grades = True
template_dir_name = "combinedopenended"
# Specify whether or not to pass in the S3 interface
needs_s3_interface = True
# Specify whether or not to pass in the open ended interface
needs_open_ended_interface = True
js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/edit.coffee')]}
js_module_name = "OpenEndedMarkdownEditingDescriptor"
css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/combinedopenended/edit.scss')]}
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
}
def get_context(self):
_context = RawDescriptor.get_context(self)
_context.update({'markdown': self.markdown,
'enable_markdown': self.markdown is not None})
return _context
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(CombinedOpenEndedDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod,
CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version])
return non_editable_fields
# Proxy to CombinedOpenEndedModule so that external callers don't have to know if they're working
# with a module or a descriptor
child_module = module_attr('child_module')
def validate(self):
"""
Validates the state of this instance. This overrides the general XBlock method
and also asks the superclass to validate.
"""
validation = super(CombinedOpenEndedDescriptor, self).validate()
validation = StudioValidation.copy(validation)
i18n_service = self.runtime.service(self, "i18n")
validation.summary = StudioValidationMessage(
StudioValidationMessage.ERROR,
i18n_service.ugettext(
"ORA1 is no longer supported. To use this assessment, "
"replace this ORA1 component with an ORA2 component."
)
)
return validation
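The file above leans on one pattern worth calling out: the parent module owns only the transitions ('reset', 'next_problem') and state syncing, while every other ajax action is forwarded to the current child. A minimal sketch of that delegation, with invented names (an illustration of the docstring's contract, not the shipped code):

class ParentModuleSketch(object):
    """Hypothetical illustration of the parent/child ajax delegation above."""
    PARENT_ACTIONS = ('reset', 'next_problem')

    def __init__(self, child, student_attributes):
        self.child = child
        self.student_attributes = student_attributes

    def handle_ajax(self, dispatch, data):
        # Sync student state before and after, as CombinedOpenEndedModule does.
        self.save_instance_data()
        if dispatch in self.PARENT_ACTIONS:
            response = getattr(self, dispatch)(data)  # handled by the parent
        else:
            response = self.child.handle_ajax(dispatch, data)  # delegated
        self.save_instance_data()
        return response

    def save_instance_data(self):
        # Copy student-visible state (answers, task states, ...) off the child.
        for attribute in self.student_attributes:
            setattr(self, attribute, getattr(self.child, attribute, None))

    def reset(self, data):
        raise NotImplementedError  # would return to the first child module

    def next_problem(self, data):
        raise NotImplementedError  # would advance to the next child module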

View File

@@ -1,993 +0,0 @@
// lms - xmodule - combinedopenended
// ====================
h2 {
margin-top: 0;
margin-bottom: ($baseline*0.75);
&.problem-header {
section.staff {
margin-top: ($baseline*1.5);
font-size: 80%;
}
}
@media print {
display: block;
width: auto;
border-right: 0;
}
}
// Problem Header
div.name{
padding-bottom: ($baseline*0.75);
h2 {
display: inline;
}
.progress-container {
display: inline;
float: right;
padding-top: 3px;
}
}
.inline-error {
color: darken($error-color, 10%);
}
section.combined-open-ended {
@include clearfix();
.written-feedback {
position: relative;
margin: 0;
height: 150px;
border: 1px solid lightgray;
padding: ($baseline/4);
resize: vertical;
width: 99%;
overflow: auto;
.del {
text-decoration: line-through;
background-color: #ffc3c3;
}
.ins {
background-color: #c3ffc3;
}
}
}
div.problemwrapper {
border: 1px solid lightgray;
border-radius: ($baseline/2);
.status-bar {
background-color: #eee;
border-radius: ($baseline/2) ($baseline/2) 0 0;
border-bottom: 1px solid lightgray;
.statustable {
width: 100%;
padding: $baseline;
}
.status-container {
display: table-cell;
text-align: center;
.status-elements {
border-radius: ($baseline/4);
border: 1px solid lightgray;
}
}
.problemtype-container {
padding: ($baseline/2);
width: 60%;
}
.problemtype{
padding: ($baseline/2);
}
.assessments-container {
float: right;
padding: ($baseline/2) $baseline ($baseline/2) ($baseline/2);
.assessment-text {
display: inline-block;
display: table-cell;
padding-right: ($baseline/2);
}
}
}
.item-container {
padding-bottom: ($baseline/2);
margin: 15px;
}
.result-container {
float: left;
width: 100%;
position: relative;
}
}
section.legend-container {
margin: 15px;
border-radius: ($baseline/4);
.legenditem {
display: inline;
padding: ($baseline/2);
width: 20%;
background-color: #eee;
font-size: .9em;
}
}
section.combined-open-ended-status {
vertical-align: middle;
.statusitem {
display: table-cell;
padding: ($baseline/2);
width: 30px;
border-right: 1px solid lightgray;
background-color: #eee;
color: #2c2c2c;
font-size: .9em;
&:first-child {
border-radius: ($baseline/4) 0 0 ($baseline/4);
}
&:last-child {
border-right: 0;
border-radius: 0 ($baseline/4) ($baseline/4) 0;
}
&:only-child {
border-radius: ($baseline/4);
}
.show-results {
margin-top: .3em;
text-align:right;
}
.show-results-button {
font: 1em monospace;
}
}
.statusitem-current {
background-color: $white;
color: #222;
}
span {
&.unanswered {
display: inline-block;
position: relative;
float: right;
width: 14px;
height: 14px;
background: url('#{$static-path}/images/unanswered-icon.png') center center no-repeat;
}
&.correct {
display: inline-block;
position: relative;
float: right;
width: 25px;
height: 20px;
background: url('#{$static-path}/images/correct-icon.png') center center no-repeat;
}
&.incorrect {
display: inline-block;
position: relative;
float: right;
width: 20px;
height: 20px;
background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat;
}
}
.icon-caret-right {
display: inline-block;
margin-right: ($baseline/4);
vertical-align: baseline;
}
}
// Problem Section Controls
.visibility-control, .visibility-control-prompt {
display: block;
width: 100%;
height: 40px;
.inner {
float: left;
margin-top: $baseline;
width: 85%;
height: 5px;
border-top: 1px dotted #ddd;
}
}
.section-header {
display: block;
float: right;
padding-top: ($baseline/2);
width: 15%;
text-align: center;
font-size: .9em;
}
// Rubric Styling
.wrapper-score-selection {
display: table-cell;
padding: 0 ($baseline/2);
width: 20px;
vertical-align: middle;
}
.wrappable {
display: table-cell;
padding: ($baseline/4);
}
.rubric-list-item {
margin-bottom: ($baseline/10);
padding: ($baseline/2);
&:hover, &:focus {
background-color: #eee;
}
.rubric-label-selected{
border-radius: ($baseline/4);
background-color: #eee;
}
}
span.rubric-category {
display: block;
margin-bottom: ($baseline/2);
padding-top: ($baseline/2);
width: 100%;
border-bottom: 1px solid lightgray;
font-size: 1.1em;
}
div.combined-rubric-container {
margin: 15px;
padding-top: ($baseline/2);
padding-bottom: ($baseline/4);
ul.rubric-list {
margin: 0 $baseline ($baseline/2) $baseline;
padding: 0;
list-style-type: none;
li {
&.rubric-list-item {
margin-bottom: ($baseline/10);
padding: ($baseline/2);
}
}
}
h4 {
padding-top: ($baseline/2);
}
span.rubric-category {
display: block;
width: 100%;
border-bottom: 1px solid lightgray;
font-weight: bold;
font-size: .9em;
}
label.choicegroup_correct {
&:before {
margin-right: ($baseline*0.75);
content: url('#{$static-path}/images/correct-icon.png');
}
}
label.choicegroup_partialcorrect {
&:before {
margin-right: ($baseline*0.75);
content: url('#{$static-path}/images/partially-correct-icon.png');
}
}
label.choicegroup_incorrect {
&:before {
margin-right: ($baseline*0.75);
content: url('#{$static-path}/images/incorrect-icon.png');
}
}
div.written-feedback {
background: $gray-l6;
padding: ($baseline/4);
}
}
div.result-container {
padding-top: ($baseline/2);
padding-bottom: ($baseline/4);
.evaluation {
p {
margin-bottom: 1px;
}
}
.feedback-on-feedback {
height: 100px;
margin-right: 0;
}
.evaluation-response {
margin-bottom: ($baseline/10);
header {
a {
font-size: .85em;
}
}
}
.evaluation-scoring {
.scoring-list {
margin-left: 3px;
list-style-type: none;
li {
display:inline;
margin-left: 0;
&:first-child {
margin-left: 0;
}
label {
font-size: .9em;
}
}
}
}
.submit-message-container {
margin: ($baseline/2) 0;
}
.external-grader-message {
margin-bottom: ($baseline/4);
section {
padding-left: $baseline;
background-color: #fafafa;
color: #2c2c2c;
font-family: monospace;
font-size: 1em;
padding-top: ($baseline/2);
padding-bottom: 30px;
header {
font-size: 1.4em;
}
.shortform {
font-weight: bold;
}
.longform {
padding: 0;
margin: 0;
.result-errors {
margin: ($baseline/4);
padding: ($baseline/2) ($baseline/2) ($baseline/2) ($baseline*2);
background: url('#{$static-path}/images/incorrect-icon.png') center left no-repeat;
li {
color: #B00;
}
}
.result-output {
margin: ($baseline/4);
padding: $baseline 0 ($baseline*0.75) ($baseline*2.5);
border-top: 1px solid #ddd;
border-left: 20px solid #fafafa;
h4 {
font-size: 1em;
font-family: monospace;
}
dl {
margin: 0;
}
dt {
margin-top: $baseline;
}
dd {
margin-left: 24pt;
}
}
.markup-text{
margin: ($baseline/4);
padding: $baseline 0 ($baseline*0.75) ($baseline*2.5);
border-top: 1px solid #ddd;
border-left: 20px solid #fafafa;
bs {
color: #bb0000;
}
bg {
color: #bda046;
}
}
}
}
}
.rubric-result-container {
padding: ($baseline/10);
margin: 0;
display: inline;
.rubric-result {
font-size: .9em;
padding: ($baseline/10);
display: inline-table;
}
}
}
div.rubric {
ul.rubric-list{
margin: 0 $baseline ($baseline/2) $baseline;
padding: 0;
list-style: none;
list-style-type: none;
li {
&.rubric-list-item {
margin-bottom: ($baseline/10);
padding: ($baseline/2);
border-radius: ($baseline/4);
&:hover, &:focus {
background-color: #eee;
}
.wrapper-score-selection {
display: table-cell;
padding: 0 ($baseline/2);
width: 20px;
vertical-align: middle;
}
.wrappable {
display: table-cell;
padding: ($baseline/4);
}
}
}
}
span.rubric-category {
display: block;
width: 100%;
border-bottom: 1px solid lightgray;
font-weight: bold;
font-size: .9em;
}
}
section.open-ended-child {
@media print {
display: block;
padding: 0;
width: auto;
canvas, img {
page-break-inside: avoid;
}
}
.inline {
display: inline;
}
ol.enumerate {
li {
&:before {
display: block;
visibility: hidden;
height: 0;
content: " ";
}
}
}
.solution-span {
> span {
position: relative;
display: block;
margin: $baseline 0;
padding: 9px 15px $baseline;
border: 1px solid #ddd;
border-radius: 3px;
background: $white;
box-shadow: inset 0 0 0 1px #eee;
&:empty {
display: none;
}
}
}
p {
&.answer {
margin-top: -2px;
}
&.status {
margin: 8px 0 0 ($baseline/2);
text-indent: -9999px;
}
}
div.unanswered {
p.status {
display: inline-block;
width: 14px;
height: 14px;
background: url('#{$static-path}/images/unanswered-icon.png') center center no-repeat;
}
}
div.correct, div.ui-icon-check {
p.status {
display: inline-block;
width: 25px;
height: 20px;
background: url('#{$static-path}/images/correct-icon.png') center center no-repeat;
}
input {
border-color: green;
}
}
div.processing {
p.status {
display: inline-block;
width: 20px;
height: 20px;
background: url('#{$static-path}/images/spinner.gif') center center no-repeat;
}
input {
border-color: #aaa;
}
}
div.incorrect, div.ui-icon-close {
p.status {
display: inline-block;
width: 20px;
height: 20px;
background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat;
text-indent: -9999px;
}
input {
border-color: red;
}
}
> span {
display: block;
margin-bottom: lh(0.5);
}
p.answer {
display: inline-block;
margin-bottom: 0;
margin-left: ($baseline/2);
&:before {
content: "Answer: ";
font-weight: bold;
display: inline;
}
&:empty {
&:before {
display: none;
}
}
}
span {
&.unanswered, &.ui-icon-bullet {
display: inline-block;
position: relative;
top: 4px;
width: 14px;
height: 14px;
background: url('#{$static-path}/images/unanswered-icon.png') center center no-repeat;
}
&.processing, &.ui-icon-processing {
display: inline-block;
position: relative;
top: 6px;
width: 25px;
height: 20px;
background: url('#{$static-path}/images/spinner.gif') center center no-repeat;
}
&.correct, &.ui-icon-check {
display: inline-block;
position: relative;
top: 6px;
width: 25px;
height: 20px;
background: url('#{$static-path}/images/correct-icon.png') center center no-repeat;
}
&.incorrect, &.ui-icon-close {
display: inline-block;
position: relative;
top: 6px;
width: 20px;
height: 20px;
background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat;
}
}
.reload {
float:right;
margin: ($baseline/2);
}
div.short-form-response {
@include clearfix();
overflow-y: auto;
margin-bottom: 0;
padding: ($baseline/2);
min-height: 20px;
height: auto;
border: 1px solid #ddd;
background: $gray-l6;
}
.grader-status {
@include clearfix();
margin: ($baseline/2) 0;
padding: ($baseline/2);
border-radius: 5px;
background: $gray-l6;
span {
display: block;
float: left;
overflow: hidden;
margin: -7px 7px 0 0;
text-indent: -9999px;
}
.grading {
margin: 0 7px 0 0;
padding-left: 25px;
background: url('#{$static-path}/images/info-icon.png') left center no-repeat;
text-indent: 0;
}
p {
float: left;
margin-bottom: 0;
line-height: 20px;
}
&.file {
margin-top: $baseline;
padding: $baseline 0 0 0;
border: 0;
border-top: 1px solid #eee;
background: $white;
p.debug {
display: none;
}
input {
float: left;
}
}
}
form.option-input {
margin: -($baseline/2) 0 $baseline;
padding-bottom: $baseline;
select {
margin-right: flex-gutter();
}
}
ul {
margin-bottom: lh();
margin-left: 0.75em;
margin-left: 0.75rem;
}
ul.rubric-list{
margin: 0;
padding: 0;
list-style-type: none;
list-style: none;
li {
&.rubric-list-item {
margin-bottom: 0;
padding: 0;
border-radius: ($baseline/4);
}
}
}
ol {
margin-bottom: lh();
margin-left: .75em;
margin-left: .75rem;
list-style: decimal outside none;
}
dl {
line-height: 1.4em;
}
dl dt {
font-weight: bold;
}
dl dd {
margin-bottom: 0;
}
dd {
margin-left: .5em;
margin-left: .5rem;
}
li {
margin-bottom: 0;
padding: 0;
&:last-child {
margin-bottom: 0;
}
}
p {
margin-bottom: lh();
}
hr {
float: none;
clear: both;
margin: 0 0 .75rem;
width: 100%;
height: 1px;
border: none;
background: #ddd;
color: #ddd;
}
.hidden {
display: none;
visibility: hidden;
}
#{$all-text-inputs} {
display: inline;
width: auto;
}
div.action {
margin-top: $baseline;
input.save {
@extend .blue-button !optional;
}
.submission_feedback {
display: inline-block;
margin: 8px 0 0 ($baseline/2);
color: #666;
font-style: italic;
-webkit-font-smoothing: antialiased;
}
}
.detailed-solution {
> p:first-child {
color: #aaa;
text-transform: uppercase;
font-weight: bold;
font-style: normal;
font-size: 0.9em;
}
p:last-child {
margin-bottom: 0;
}
}
div.open-ended-alert,
.save_message {
margin-top: ($baseline/2);
margin-bottom: ($baseline/4);
padding: 8px 12px;
border: 1px solid #ebe8bf;
border-radius: 3px;
background: #fffcdd;
font-size: 0.9em;
}
div.capa_reset {
margin-top: ($baseline/2);
margin-bottom: ($baseline/2);
padding: 25px;
border: 1px solid $error-color;
border-radius: 3px;
background-color: lighten($error-color, 25%);
font-size: 1em;
}
.capa_reset > h2 {
color: #aa0000;
}
.capa_reset li {
font-size: 0.9em;
}
.assessment-container {
margin: ($baseline*2) 0 ($baseline*1.5) 0;
.scoring-container {
p {
margin-bottom: 1em;
}
label {
display: inline-block;
margin: ($baseline/2);
padding: ($baseline/4);
min-width: 50px;
background-color: $gray-l3;
font-size: 1.5em;
}
input[type=radio]:checked + label {
background: #666;
color: white;
}
input[class='grade-selection'] {
display: none;
}
}
}
div.prompt {
background-color: white;
}
h4 {
padding: $baseline/2 0;
}
}
//OE Tool Area Styling
.oe-tools {
display: inline-block;
width: 100%;
border-radius: 5px;
.oe-tools-label, .oe-tools-scores-label {
display: inline-block;
padding: $baseline/2;
vertical-align: middle;
font-size: 0.8em;
}
.rubric-button {
padding: 8px $baseline/4;
}
.rubric-previous-button {
margin-right: $baseline/4;
}
.rubric-next-button {
margin-left: $baseline/4;
}
.next-step-button {
margin: $baseline/2;
}
.reset-button {
vertical-align: middle;
}
}
// Staff Grading
.problem-list-container {
margin: $baseline/2;
.instructions {
padding-bottom: $baseline/2;
}
}
.staff-grading {
.breadcrumbs {
padding: ($baseline/10);
background-color: $gray-l6;
border-radius: 5px;
margin-bottom: ($baseline/2);
}
.prompt-wrapper {
padding-top: ($baseline/2);
.meta-info-wrapper {
padding: ($baseline/2);
border-radius: 5px;
}
}
}
section.peer-grading-container{
div.peer-grading{
section.calibration-feedback {
padding: $baseline;
}
}
}
div.staff-info{
background-color: #eee;
border-radius: 10px;
border-bottom: 1px solid lightgray;
padding: ($baseline/2);
margin: ($baseline/2) 0 ($baseline/2) 0;
}

View File

@@ -1,105 +0,0 @@
.editor-bar {
.editor-tabs {
.advanced-toggle {
@include white-button;
height: auto;
margin-top: -1px;
padding: 3px 9px;
font-size: 12px;
&.current {
border: 1px solid $lightGrey !important;
border-radius: 3px !important;
background: $lightGrey !important;
color: $darkGrey !important;
pointer-events: none;
cursor: none;
&:hover, &:focus {
box-shadow: 0 0 0 0 !important;
}
}
}
.cheatsheet-toggle {
width: 21px;
height: 21px;
padding: 0;
margin: 0 ($baseline/4) 0 ($baseline*0.75);
border-radius: 22px;
border: 1px solid #a5aaaf;
background: #e5ecf3;
font-size: 13px;
font-weight: 700;
color: #565d64;
text-align: center;
}
}
}
.simple-editor-open-ended-cheatsheet {
position: absolute;
top: 0;
left: 100%;
width: 0;
border-radius: 0 3px 3px 0;
@include linear-gradient(left, $shadow-l1, $transparent 4px);
background-color: $white;
overflow: hidden;
@include transition(width .3s linear 0s);
&.shown {
width: 20%;
height: 100%;
overflow-y: scroll;
}
.cheatsheet-wrapper {
padding: 10%;
}
h6 {
margin-bottom: 7px;
font-size: 15px;
font-weight: 700;
}
.row {
@include clearfix();
padding-bottom: 5px !important;
margin-bottom: 10px !important;
border-bottom: 1px solid #ddd !important;
&:last-child {
border-bottom: none !important;
margin-bottom: 0 !important;
}
}
.col {
float: left;
&.sample {
width: 60px;
margin-right: 30px;
}
}
pre {
font-size: 12px;
line-height: 18px;
}
code {
padding: 0;
background: none;
}
}
.combinedopenended-editor-icon {
display: inline-block;
vertical-align: middle;
color: #333;
}

View File

@@ -1,128 +0,0 @@
<section class="course-content">
<section class="xblock xblock-student_view xmodule_display xmodule_CombinedOpenEndedModule" data-type="CombinedOpenEnded">
<section id="combined-open-ended" class="combined-open-ended" data-ajax-url="/courses/MITx/6.002x/2012_Fall/modx/i4x://MITx/6.002x/combinedopenended/CombinedOE" data-allow_reset="False" data-state="assessing" data-task-count="2" data-task-number="1">
<h2>Problem 1</h2>
<div class="status-container">
<h4>Status</h4>
<div class="status-elements">
<section id="combined-open-ended-status" class="combined-open-ended-status">
<div class="statusitem" data-status-number="0">
Step 1 (Problem complete) : 1 / 1
<span class="correct" id="status"></span>
</div>
<div class="statusitem statusitem-current" data-status-number="1">
Step 2 (Being scored) : None / 1
<span class="grading" id="status"></span>
</div>
</section>
</div>
</div>
<div class="item-container">
<h4>Problem</h4>
<div class="problem-container">
<div class="item">
<section id="openended_open_ended" class="open-ended-child" data-state="assessing" data-child-type="openended">
<div class="error">
</div>
<div class="prompt">
Some prompt.
</div>
<textarea rows="30" cols="80" name="answer" class="answer short-form-response" id="input_open_ended" disabled="disabled">
Test submission. Yaaaaaaay!
</textarea>
<div class="message-wrapper"></div>
<div class="grader-status">
<span class="grading" id="status_open_ended">Submitted for grading.</span>
</div>
<input type="button" value="Submit assessment" class="submit-button" name="show" style="display: none;">
<input name="skip" class="skip-button" type="button" value="Skip Post-Assessment" style="display: none;">
<div class="open-ended-action"></div>
<span id="answer_open_ended"></span>
</section>
</div>
</div>
<div class="oe-tools response-tools">
<span class="oe-tools-label"></span>
<input type="button" value="Reset" class="reset-button" name="reset" style="display: none;">
</div>
<input type="button" value="Next Step" class="next-step-button" name="reset" style="display: none;">
</div>
<a name="results">
<div class="result-container">
</div>
</a>
</section>
<a name="results">
</a>
</section>
<a name="results">
</a>
<div>
<a name="results">
</a>
<a href="https://github.com/MITx/content-mit-6002x/tree/master/combinedopenended/CombinedOE.xml">
Edit
</a> /
<a href="#i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa-modal" onclick="javascript:getlog('i4x_MITx_6_002x_combinedopenended_CombinedOE', {
'location': 'i4x://MITx/6.002x/combinedopenended/CombinedOE',
'xqa_key': 'KUBrWtK3RAaBALLbccHrXeD3RHOpmZ2A',
'category': 'CombinedOpenEndedModule',
'user': 'blah'
})" id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_log">QA</a>
</div>
<div>
<a href="#i4x_MITx_6_002x_combinedopenended_CombinedOE_debug" id="i4x_MITx_6_002x_combinedopenended_CombinedOE_trig">
Staff Debug Info
</a>
</div>
<section id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa-modal" class="modal xqa-modal" style="width:80%; left:20%; height:80%; overflow:auto">
<div class="inner-wrapper">
<header>
<h2>edX Content Quality Assessment</h2>
</header>
<form id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_form" class="xqa_form">
<label>Comment</label>
<input id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_entry" type="text" placeholder="comment">
<label>Tag</label>
<span style="color:black;vertical-align: -10pt">Optional tag (eg "done" or "broken"):&nbsp; </span>
<input id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_tag" type="text" placeholder="tag" style="width:80px;display:inline">
<div class="submit">
<button name="submit" type="submit">Add comment</button>
</div>
<hr>
<div id="i4x_MITx_6_002x_combinedopenended_CombinedOE_xqa_log_data"></div>
</form>
</div>
</section>
<section class="modal staff-modal" id="i4x_MITx_6_002x_combinedopenended_CombinedOE_debug" style="width:80%; left:20%; height:80%; overflow:auto;">
<div class="inner-wrapper" style="color:black">
<header>
<h2>Staff Debug</h2>
</header>
<div class="staff_info" style="display:block">
is_released = <font color="red">Yes!</font>
location = i4x://MITx/6.002x/combinedopenended/CombinedOE
github = <a href="https://github.com/MITx/content-mit-6002x/tree/master/combinedopenended/CombinedOE.xml">https://github.com/MITx/content-mit-6002x/tree/master/combinedopenended/CombinedOE.xml</a>
definition = <pre>None</pre>
metadata = {
"showanswer": "attempted",
"display_name": "Problem 1",
"graceperiod": "1 day 12 hours 59 minutes 59 seconds",
"xqa_key": "KUBrWtK3RAaBALLbccHrXeD3RHOpmZ2A",
"rerandomize": "never",
"start": "2012-09-05T12:00",
"attempts": "10000",
"data_dir": "content-mit-6002x",
"max_score": "1"
}
category = CombinedOpenEndedModule
</div>
</div>
</section>
<div id="i4x_MITx_6_002x_combinedopenended_CombinedOE_setup"></div>
</section>

View File

@@ -1,6 +0,0 @@
<section class="combinedopenended-editor editor">
<div class="row">
<textarea class="markdown-box">markdown</textarea>
<textarea class="xml-box" rows="8" cols="40">xml</textarea>
</div>
</section>

View File

@@ -1,5 +0,0 @@
<section class="combinedopenended-editor editor">
<div class="row">
<textarea class="xml-box" rows="8" cols="40">xml only</textarea>
</div>
</section>

View File

@@ -1,321 +0,0 @@
<section id="combined-open-ended" class="combined-open-ended" data-location="i4x://test/2323/combinedopenended/b893eedec151441f8644187266ccce00" data-ajax-url="/courses/test/2323/Test2/modx/i4x://test/2323/combinedopenended/b893eedec151441f8644187266ccce00" data-allow_reset="False" data-state="initial" data-task-count="1" data-task-number="1" data-accept-file-upload="False">
<div class="name">
<h2>Open Response Assessment</h2>
<div class="progress-container">
</div>
</div>
<div class="problemwrapper">
<div class="status-bar">
<table class="statustable">
<tbody><tr>
<td class="problemtype-container">
<div class="problemtype">
Open Response
</div>
</td>
<td class="assessments-container">
<div class="assessment-text">
Assessments:
</div>
<div class="status-container">
<div class="status-elements">
<section id="combined-open-ended-status" class="combined-open-ended-status">
<div class="statusitem statusitem-current" data-status-number="0">
Peer
</div>
</section>
</div>
</div>
</td>
</tr>
</tbody></table>
</div>
<div class="item-container">
<div class="visibility-control visibility-control-prompt">
<div class="inner">
</div>
<a href="" class="question-header">Show Question</a>
</div>
<div class="problem-container">
<div class="item">
<section id="openended_open_ended" class="open-ended-child" data-state="post_assessment" data-child-type="openended">
<div class="error"></div>
<div class="prompt open" style="display: none;">
<h3>Censorship in the Libraries</h3><p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p><p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</div>
<div class="visibility-control visibility-control-response">
<div class="inner">
</div>
<span class="section-header section-header-response">Response</span>
</div>
<div class="answer short-form-response" id="input_open_ended"></div>
<div class="message-wrapper"></div>
<div class="grader-status">
</div>
<div class="file-upload"></div>
<input type="button" value="Submit post-assessment" class="submit-button" name="show" style="display: none;">
<input name="skip" class="skip-button" type="button" value="Skip Post-Assessment" style="display: none;">
<div class="open-ended-action"></div>
<span id="answer_open_ended"></span>
</section>
</div>
</div>
<div class="oe-tools response-tools">
<span class="oe-tools-label"></span>
<input type="button" value="Reset" class="reset-button" name="reset" style="display: inline-block;">
</div>
</div>
<div class="combined-rubric-container" data-status="shown" data-number="0" style="">
<div class="visibility-control visibility-control-rubric">
<div class="inner">
</div>
<span class="section-header section-header-rubric">Submitted Rubric</span>
</div>
<div class="rubric-header">
<button class="rubric-collapse" href="#">Show Full Rubric</button>
Scored rubric from grader 1
</div>
<div class="rubric">
<span class="rubric-category">
Ideas
</span> <br>
<ul class="rubric-list">
<li class="rubric-list-item">
<div class="rubric-label">
<label class="choicegroup_incorrect wrapper-score-selection"></label>
<span class="wrappable"> 0 points :
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</span>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 1 points :
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</span>
</label>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 2 points :
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</span>
</label>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 3 points :
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</span>
</label>
</div>
</li>
</ul>
<span class="rubric-category">
Content
</span> <br>
<ul class="rubric-list">
<li class="rubric-list-item">
<div class="rubric-label">
<label class="choicegroup_incorrect wrapper-score-selection"></label>
<span class="wrappable"> 0 points :
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</span>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 1 points :
Includes little information and few or no details. Explores only one or two facets of the topic.
</span>
</label>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 2 points :
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</span>
</label>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 3 points :
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</span>
</label>
</div>
</li>
</ul>
<span class="rubric-category">
Organization
</span> <br>
<ul class="rubric-list">
<li class="rubric-list-item">
<div class="rubric-label">
<label class="choicegroup_incorrect wrapper-score-selection"></label>
<span class="wrappable"> 0 points :
Ideas organized illogically, transitions weak, and response difficult to follow.
</span>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 1 points :
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</span>
</label>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 2 points :
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</span>
</label>
</div>
</li>
</ul>
<span class="rubric-category">
Style
</span> <br>
<ul class="rubric-list">
<li class="rubric-list-item">
<div class="rubric-label">
<label class="choicegroup_incorrect wrapper-score-selection"></label>
<span class="wrappable"> 0 points :
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</span>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 1 points :
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</span>
</label>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 2 points :
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</span>
</label>
</div>
</li>
</ul>
<span class="rubric-category">
Voice
</span> <br>
<ul class="rubric-list">
<li class="rubric-list-item">
<div class="rubric-label">
<label class="choicegroup_incorrect wrapper-score-selection"></label>
<span class="wrappable"> 0 points :
Demonstrates language and tone that may be inappropriate to task and reader.
</span>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 1 points :
Demonstrates an attempt to adjust language and tone to task and reader.
</span>
</label>
</div>
</li>
<li class="rubric-list-item rubric-info-item" style="display: none;">
<div class="rubric-label">
<label class="rubric-elements-info">
<span class="wrapper-score-selection"> </span>
<span class="wrappable"> 2 points :
Demonstrates effective adjustment of language and tone to task and reader.
</span>
</label>
</div>
</li>
</ul>
</div>
<div class="written-feedback">
</div>
</div>
<input type="button" value="Next Step" class="next-step-button" name="reset" style="display: none;">
<section class="legend-container">
</section>
<div class="result-container">
</div>
</div>
</section>

View File

@@ -1,170 +0,0 @@
describe 'Rubric', ->
beforeEach ->
spyOn Logger, 'log'
# load up some fixtures
loadFixtures 'rubric.html'
jasmine.Clock.useMock()
@element = $('.combined-open-ended')
@location = @element.data('location')
describe 'constructor', ->
beforeEach ->
@rub = new Rubric @element
it 'rubric should properly grab the element', ->
expect(@rub.el).toEqual @element
describe 'initialize', ->
beforeEach ->
@rub = new Rubric @element
@rub.initialize @location
it 'rubric correctly sets location', ->
expect($(@rub.rubric_sel).data('location')).toEqual @location
it 'rubric correctly read', ->
expect(@rub.categories.length).toEqual 5
describe 'CombinedOpenEnded', ->
beforeEach ->
spyOn Logger, 'log'
# load up some fixtures
loadFixtures 'combined-open-ended.html'
jasmine.Clock.useMock()
@element = $('.course-content')
describe 'constructor', ->
beforeEach ->
spyOn(Collapsible, 'setCollapsibles')
@combined = new CombinedOpenEnded @element
it 'set the element', ->
expect(@combined.el).toEqual @element
it 'get the correct values from data fields', ->
expect(@combined.ajax_url).toEqual '/courses/MITx/6.002x/2012_Fall/modx/i4x://MITx/6.002x/combinedopenended/CombinedOE'
expect(@combined.state).toEqual 'assessing'
expect(@combined.task_count).toEqual 2
expect(@combined.task_number).toEqual 1
it 'subelements are made collapsible', ->
expect(Collapsible.setCollapsibles).toHaveBeenCalled()
describe 'poll', ->
# We will store default window.setTimeout() function here.
oldSetTimeout = null
beforeEach =>
# setup the spies
@combined = new CombinedOpenEnded @element
spyOn(@combined, 'reload').andCallFake -> return 0
# Store original window.setTimeout() function. If we do not do this, then
# all other tests that rely on code which uses window.setTimeout()
# function might (and probably will) fail.
oldSetTimeout = window.setTimeout
# Redefine window.setTimeout() function as a spy.
window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5
afterEach =>
# Reset the default window.setTimeout() function. If we do not do this,
# then all other tests that rely on code which uses window.setTimeout()
# function might (and probably will) fail.
window.setTimeout = oldSetTimeout
it 'polls at the correct intervals', =>
fakeResponseContinue = state: 'not done'
spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseContinue)
@combined.poll()
expect(window.setTimeout).toHaveBeenCalledWith(@combined.poll, 10000)
expect(window.queuePollerID).toBe(5)
xit 'polling stops properly', =>
fakeResponseDone = state: "done"
spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseDone)
@combined.poll()
expect(window.queuePollerID).toBeUndefined()
expect(window.setTimeout).not.toHaveBeenCalled()
describe 'rebind', ->
# We will store default window.setTimeout() function here.
oldSetTimeout = null
beforeEach ->
@combined = new CombinedOpenEnded @element
spyOn(@combined, 'queueing').andCallFake -> return 0
spyOn(@combined, 'skip_post_assessment').andCallFake -> return 0
# Store original window.setTimeout() function. If we do not do this, then
# all other tests that rely on code which uses window.setTimeout()
# function might (and probably will) fail.
oldSetTimeout = window.setTimeout
# Redefine window.setTimeout() function as a spy.
window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5
afterEach =>
# Reset the default window.setTimeout() function. If we do not do this,
# then all other tests that rely on code which uses window.setTimeout()
# function might (and probably will) fail.
window.setTimeout = oldSetTimeout
it 'when our child is in an assessing state', ->
@combined.child_state = 'assessing'
@combined.rebind()
expect(@combined.answer_area.attr("disabled")).toBe("disabled")
expect(@combined.submit_button.val()).toBe("Submit assessment")
expect(@combined.queueing).toHaveBeenCalled()
it 'when our child state is initial', ->
@combined.child_state = 'initial'
@combined.rebind()
expect(@combined.answer_area.attr("disabled")).toBeUndefined()
expect(@combined.submit_button.val()).toBe("Submit")
it 'when our child state is post_assessment', ->
@combined.child_state = 'post_assessment'
@combined.rebind()
expect(@combined.answer_area.attr("disabled")).toBe("disabled")
expect(@combined.submit_button.val()).toBe("Submit post-assessment")
it 'when our child state is done', ->
spyOn(@combined, 'next_problem').andCallFake ->
@combined.child_state = 'done'
@combined.rebind()
expect(@combined.answer_area.attr("disabled")).toBe("disabled")
expect(@combined.next_problem_button).toBe(":visible")
describe 'next_problem', ->
beforeEach ->
@combined = new CombinedOpenEnded @element
@combined.child_state = 'done'
it 'handling a successful call', ->
fakeResponse =
success: true
html: "dummy html"
allow_reset: false
spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse)
spyOn(@combined, 'reinitialize')
spyOn(@combined, 'rebind')
@combined.next_problem()
expect($.postWithPrefix).toHaveBeenCalled()
expect(@combined.reinitialize).toHaveBeenCalledWith(@combined.element)
expect(@combined.rebind).toHaveBeenCalled()
expect(@combined.answer_area.val()).toBe('')
expect(@combined.child_state).toBe('initial')
it 'handling an unsuccessful call', ->
fakeResponse =
success: false
error: 'This is an error'
spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse)
@combined.next_problem()
expect(@combined.errors_area.html()).toBe(fakeResponse.error)
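The two poll tests above pin down a small contract: while the server reports that grading is still in progress, re-arm a 10-second timer; once it reports 'done', stop and leave no poller behind. The same contract as a hedged Python sketch (hypothetical names; the code under test is the CoffeeScript display module):

POLL_INTERVAL_MS = 10000  # the interval the first test asserts on

def poll(get_state, schedule):
    """Re-arm the timer until the grading queue reports 'done'.

    get_state() posts to the server and returns the current state;
    schedule(fn, ms) stands in for window.setTimeout.
    """
    if get_state() != 'done':
        schedule(lambda: poll(get_state, schedule), POLL_INTERVAL_MS)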

View File

@@ -1,139 +0,0 @@
describe 'OpenEndedMarkdownEditingDescriptor', ->
describe 'save stores the correct data', ->
it 'saves markdown from markdown editor', ->
loadFixtures 'combinedopenended-with-markdown.html'
@descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
saveResult = @descriptor.save()
expect(saveResult.metadata.markdown).toEqual('markdown')
expect(saveResult.data).toEqual('<combinedopenended>\nmarkdown\n</combinedopenended>')
it 'clears markdown when xml editor is selected', ->
loadFixtures 'combinedopenended-with-markdown.html'
@descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
@descriptor.createXMLEditor('replace with markdown')
saveResult = @descriptor.save()
expect(saveResult.nullout).toEqual(['markdown'])
expect(saveResult.data).toEqual('replace with markdown')
it 'saves xml from the xml editor', ->
loadFixtures 'combinedopenended-without-markdown.html'
@descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
saveResult = @descriptor.save()
expect(saveResult.nullout).toEqual(['markdown'])
expect(saveResult.data).toEqual('xml only')
describe 'advanced editor opens correctly', ->
it 'click on advanced editor should work', ->
loadFixtures 'combinedopenended-with-markdown.html'
@descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor'))
spyOn(@descriptor, 'confirmConversionToXml').andReturn(true)
expect(@descriptor.confirmConversionToXml).not.toHaveBeenCalled()
e = jasmine.createSpyObj('e', [ 'preventDefault' ])
@descriptor.onShowXMLButton(e)
expect(e.preventDefault).toHaveBeenCalled()
expect(@descriptor.confirmConversionToXml).toHaveBeenCalled()
expect($('.editor-bar').length).toEqual(0)
describe 'insertPrompt', ->
it 'inserts the template if selection is empty', ->
revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt('')
expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.promptTemplate)
it 'recognizes html in the prompt', ->
revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt('[prompt]<h1>Hello</h1>[prompt]')
expect(revisedSelection).toEqual('[prompt]<h1>Hello</h1>[prompt]')
describe 'insertRubric', ->
it 'inserts the template if selection is empty', ->
revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('')
expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.rubricTemplate)
it 'recognizes a proper rubric', ->
revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('[rubric]\n+1\n-1\n-2\n[rubric]')
expect(revisedSelection).toEqual('[rubric]\n+1\n-1\n-2\n[rubric]')
describe 'insertTasks', ->
it 'inserts the template if selection is empty', ->
revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('')
expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.tasksTemplate)
it 'recognizes a proper task string', ->
revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('[tasks](Self)[tasks]')
expect(revisedSelection).toEqual('[tasks](Self)[tasks]')
describe 'markdownToXml', ->
# test default templates
it 'converts prompt to xml', ->
data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[prompt]
<h1>Prompt!</h1>
This is my super awesome prompt.
[prompt]
""")
data = data.replace(/[\t\n\s]/gmi,'')
expect(data).toEqual("""
<combinedopenended>
<prompt>
<h1>Prompt!</h1>
This is my super awesome prompt.
</prompt>
</combinedopenended>
""".replace(/[\t\n\s]/gmi,''))
it 'converts rubric to xml', ->
data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[rubric]
+ 1
-1
-2
+ 2
-1
-2
+3
-1
-2
-3
[rubric]
""")
data = data.replace(/[\t\n\s]/gmi,'')
expect(data).toEqual("""
<combinedopenended>
<rubric>
<rubric>
<category>
<description>1</description>
<option>1</option>
<option>2</option>
</category>
<category>
<description>2</description>
<option>1</option>
<option>2</option>
</category>
<category>
<description>3</description>
<option>1</option>
<option>2</option>
<option>3</option>
</category>
</rubric>
</rubric>
</combinedopenended>
""".replace(/[\t\n\s]/gmi,''))
it 'converts tasks to xml', ->
data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[tasks]
(Self), ({1-2}AI), ({1-4}AI), ({1-2}Peer)
[tasks]
""")
data = data.replace(/[\t\n\s]/gmi,'')
equality_list = """
<combinedopenended>
<task>
<selfassessment/>
</task>
<task>
<openended min_score_to_attempt="1" max_score_to_attempt="2">ml_grading.conf</openended>
</task>
<task>
<openended min_score_to_attempt="1" max_score_to_attempt="4">ml_grading.conf</openended>
</task>
<task>
<openended min_score_to_attempt="1" max_score_to_attempt="2">peer_grading.conf</openended>
</task>
</combinedopenended>
"""
expect(data).toEqual(equality_list.replace(/[\t\n\s]/gmi,''))
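The rubric test above fixes the markdown grammar: inside a [rubric] block, each '+ description' line opens a category, each following '- option' line appends an option to it, and the categories are wrapped in a doubled <rubric> element. A hedged Python sketch of that mapping (hypothetical name; the shipped converter was the CoffeeScript markdownToXml under test):

def rubric_to_xml(rubric_lines):
    """Sketch of the [rubric] mapping: '+' opens a category, '-' adds an option."""
    categories = []
    for line in rubric_lines:
        line = line.strip()
        if line.startswith('+'):
            categories.append({'description': line[1:].strip(), 'options': []})
        elif line.startswith('-') and categories:
            categories[-1]['options'].append(line[1:].strip())
    parts = ['<rubric><rubric>']
    for category in categories:
        parts.append('<category><description>%s</description>' % category['description'])
        parts.extend('<option>%s</option>' % option for option in category['options'])
        parts.append('</category>')
    parts.append('</rubric></rubric>')
    return ''.join(parts)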

View File

@@ -1,725 +0,0 @@
class @Rubric
rubric_category_sel: '.rubric-category'
rubric_sel: '.rubric'
constructor: (el) ->
@el = el
initialize: (location) =>
@$(@rubric_sel).data("location", location)
@$('input[class="score-selection"]').change @tracking_callback
# set up the hotkeys
$(window).unbind('keydown', @keypress_callback)
$(window).keydown @keypress_callback
# display the 'current' carat
@categories = @$(@rubric_category_sel)
@category = @$(@categories.first())
@category_index = 0
# locally scoped jquery.
$: (selector) ->
$(selector, @el)
keypress_callback: (event) =>
# don't try to do this when user is typing in a text input
if @$(event.target).is('input, textarea')
return
# for when we select via top row
if event.which >= 48 and event.which <= 57
selected = event.which - 48
# for when we select via numpad
else if event.which >= 96 and event.which <= 105
selected = event.which - 96
# we don't want to do anything since we haven't pressed a number
else
return
# if we actually have a current category (not past the end)
if(@category_index <= @categories.length)
# find the valid selections for this category
inputs = @$("input[name='score-selection-#{@category_index}']")
max_score = inputs.length - 1
if selected > max_score or selected < 0
return
inputs.filter("input[value=#{selected}]").click()
@category_index++
@category = @$(@categories[@category_index])
tracking_callback: (event) =>
target_selection = @$(event.target).val()
# chop off the beginning of the name so that we can get the number of the category
category = @$(event.target).data("category")
location = @$(@rubric_sel).data('location')
# probably want the original problem location as well
data = {location: location, selection: target_selection, category: category}
Logger.log 'rubric_select', data
# finds the scores for each rubric category
get_score_list: () =>
# find the number of categories:
num_categories = @$(@rubric_category_sel).length
score_lst = []
# get the score for each one
for i in [0..(num_categories-1)]
score = @$("input[name='score-selection-#{i}']:checked").val()
score_lst.push(score)
return score_lst
get_total_score: () =>
score_lst = @get_score_list()
tot = 0
for score in score_lst
tot += parseInt(score)
return tot
check_complete: () =>
# check to see whether or not any categories have not been scored
num_categories = @$(@rubric_category_sel).length
for i in [0..(num_categories-1)]
score = @$("input[name='score-selection-#{i}']:checked").val()
if score == undefined
return false
return true
class @CombinedOpenEnded
wrapper_sel: 'section.xmodule_CombinedOpenEndedModule'
coe_sel: 'section.combined-open-ended'
reset_button_sel: '.reset-button'
next_step_sel: '.next-step-button'
question_header_sel: '.question-header'
submit_evaluation_sel: '.submit-evaluation-button'
result_container_sel: 'div.result-container'
combined_rubric_sel: '.combined-rubric-container'
open_ended_child_sel: 'section.open-ended-child'
error_sel: '.error'
answer_area_sel: 'textarea.answer'
answer_area_div_sel : 'div.answer'
prompt_sel: '.prompt'
rubric_wrapper_sel: '.rubric-wrapper'
hint_wrapper_sel: '.hint-wrapper'
message_wrapper_sel: '.message-wrapper'
submit_button_sel: '.submit-button'
skip_button_sel: '.skip-button'
file_upload_sel: '.file-upload'
file_upload_box_sel: '.file-upload-box'
file_upload_preview_sel: '.file-upload-preview'
fof_sel: 'textarea.feedback-on-feedback'
sub_id_sel: 'input.submission_id'
grader_id_sel: 'input.grader_id'
grader_status_sel: '.grader-status'
info_rubric_elements_sel: '.rubric-info-item'
rubric_collapse_sel: '.rubric-collapse'
next_rubric_sel: '.rubric-next-button'
previous_rubric_sel: '.rubric-previous-button'
oe_alert_sel: '.open-ended-alert'
save_button_sel: '.save-button'
constructor: (el) ->
@el=el
@$el = $(el)
@reinitialize(el)
$(window).keydown @keydown_handler
$(window).keyup @keyup_handler
# locally scoped jquery.
$: (selector) ->
$(selector, @el)
reinitialize: () ->
@has_been_reset = false
@wrapper=@$(@wrapper_sel)
@coe = @$(@coe_sel)
@ajax_url = @coe.data('ajax-url')
@get_html()
@coe = @$(@coe_sel)
#Get data from combinedopenended
@allow_reset = @coe.data('allow_reset')
@id = @coe.data('id')
@state = @coe.data('state')
@task_count = @coe.data('task-count')
@task_number = @coe.data('task-number')
@accept_file_upload = @coe.data('accept-file-upload')
@location = @coe.data('location')
# set up handlers for click tracking
@rub = new Rubric(@coe)
@rub.initialize(@location)
@is_ctrl = false
#Setup reset
@reset_button = @$(@reset_button_sel)
@reset_button.click @confirm_reset
#Setup next problem
@next_problem_button = @$(@next_step_sel)
@next_problem_button.click @next_problem
@question_header = @$(@question_header_sel)
@question_header.click @collapse_question
# valid states: 'initial', 'assessing', 'post_assessment', 'done'
Collapsible.setCollapsibles(@$el)
@submit_evaluation_button = @$(@submit_evaluation_sel)
@submit_evaluation_button.click @message_post
@results_container = @$(@result_container_sel)
@combined_rubric_container = @$(@combined_rubric_sel)
# Where to put the rubric once we load it
@oe = @$(@open_ended_child_sel)
@errors_area = @$(@oe).find(@error_sel)
@answer_area = @$(@oe).find(@answer_area_sel)
@prompt_container = @$(@oe).find(@prompt_sel)
@rubric_wrapper = @$(@oe).find(@rubric_wrapper_sel)
@hint_wrapper = @$(@oe).find(@hint_wrapper_sel)
@message_wrapper = @$(@oe).find(@message_wrapper_sel)
@submit_button = @$(@oe).find(@submit_button_sel)
@save_button = @$(@oe).find(@save_button_sel)
@child_state = @oe.data('state')
@child_type = @oe.data('child-type')
if @child_type=="openended"
@skip_button = @$(@oe).find(@skip_button_sel)
@skip_button.click @skip_post_assessment
@file_upload_area = @$(@oe).find(@file_upload_sel)
@can_upload_files = false
@open_ended_child= @$(@oe).find(@open_ended_child_sel)
@out_of_sync_message = 'The problem state got out of sync. Try reloading the page.'
if @task_number>1
@prompt_hide()
else if @task_number==1 and @child_state!='initial'
@prompt_hide()
@find_assessment_elements()
@find_hint_elements()
@rebind()
get_html_callback: (response) =>
@coe.replaceWith(response.html)
get_html: () =>
url = "#{@ajax_url}/get_html"
$.ajaxWithPrefix({
type: 'POST',
url: url,
data: {},
success: @get_html_callback,
async:false
});
show_combined_rubric_current: () =>
data = {}
$.postWithPrefix "#{@ajax_url}/get_combined_rubric", data, (response) =>
if response.success
@combined_rubric_container.after(response.html).remove()
@combined_rubric_container= @$(@combined_rubric_sel)
@toggle_rubric("")
@rubric_collapse = @$(@rubric_collapse_sel)
@rubric_collapse.click @toggle_rubric
@hide_rubrics()
@$(@previous_rubric_sel).click @previous_rubric
@$(@next_rubric_sel).click @next_rubric
if response.hide_reset
@reset_button.hide()
message_post: (event)=>
external_grader_message=$(event.target).parent().parent().parent()
evaluation_scoring = $(event.target).parent()
fd = new FormData()
feedback = @$(evaluation_scoring).find(@fof_sel)[0].value
submission_id = @$(external_grader_message).find(@sub_id_sel)[0].value
grader_id = @$(external_grader_message).find(@grader_id_sel)[0].value
score = @$(evaluation_scoring).find("input:radio[name='evaluation-score']:checked").val()
fd.append('feedback', feedback)
fd.append('submission_id', submission_id)
fd.append('grader_id', grader_id)
if(!score)
###
Translators: A "rating" is a score a student gives to indicate how well
they feel they were graded on this problem
###
@gentle_alert gettext "You need to pick a rating before you can submit."
return
else
fd.append('score', score)
settings =
type: "POST"
data: fd
processData: false
contentType: false
success: (response) =>
@gentle_alert response.msg
@$('section.evaluation').slideToggle()
@message_wrapper.html(response.message_html)
$.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings)
rebind: () =>
# rebind to the appropriate function for the current state
@submit_button.unbind('click')
@submit_button.show()
@save_button.unbind('click')
@save_button.hide()
@reset_button.hide()
@hide_file_upload()
@next_problem_button.hide()
@hint_area.attr('disabled', false)
if @task_number==1 and @child_state=='assessing'
@prompt_hide()
if @child_state == 'done'
@rubric_wrapper.hide()
if @child_type=="openended"
@skip_button.hide()
if @allow_reset=="True"
@show_combined_rubric_current()
@reset_button.show()
@submit_button.hide()
@answer_area.attr("disabled", true)
@replace_text_inputs()
@hint_area.attr('disabled', true)
if @task_number<@task_count
###
Translators: this message appears when transitioning between openended grading
types (i.e. self assessment to peer assessment). Sometimes, if a student
did not perform well at one step, they cannot move on to the next one.
###
@gentle_alert gettext "Your score did not meet the criteria to move to the next step."
else if @child_state == 'initial'
@answer_area.attr("disabled", false)
@submit_button.prop('value', gettext 'Submit')
@submit_button.click @confirm_save_answer
@setup_file_upload()
@save_button.click @store_answer
@save_button.show()
else if @child_state == 'assessing'
@answer_area.attr("disabled", true)
@replace_text_inputs()
@hide_file_upload()
###
Translators: one clicks this button after one has finished filling out the grading
form for an openended assessment
###
@submit_button.prop('value', gettext 'Submit assessment')
@submit_button.click @save_assessment
@submit_button.attr("disabled",true)
if @child_type == "openended"
@submit_button.hide()
@queueing()
@grader_status = @$(@grader_status_sel)
@grader_status.html("<span class='grading'>" + (gettext "Your response has been submitted. Please check back later for your grade.") + "</span>")
else if @child_type == "selfassessment"
@setup_score_selection()
else if @child_state == 'post_assessment'
if @child_type=="openended"
@skip_button.show()
@skip_post_assessment()
@answer_area.attr("disabled", true)
@replace_text_inputs()
###
Translators: this button is clicked to submit a student's rating of
an evaluator's assessment
###
@submit_button.prop('value', gettext 'Submit post-assessment')
if @child_type=="selfassessment"
@submit_button.click @save_hint
else
@submit_button.click @message_post
else if @child_state == 'done'
@show_combined_rubric_current()
@rubric_wrapper.hide()
@answer_area.attr("disabled", true)
@replace_text_inputs()
@hint_area.attr('disabled', true)
@submit_button.hide()
if @child_type=="openended"
@skip_button.hide()
if @task_number<@task_count
@next_problem_button.show()
else
@reset_button.show()
find_assessment_elements: ->
@assessment = @$('input[name="grade-selection"]')
find_hint_elements: ->
@hint_area = @$('textarea.post_assessment')
store_answer: (event) =>
event.preventDefault()
if @child_state == 'initial'
data = {'student_answer' : @answer_area.val()}
@save_button.attr("disabled",true)
$.postWithPrefix "#{@ajax_url}/store_answer", data, (response) =>
if response.success
@gentle_alert(gettext "Answer saved, but not yet submitted.")
else
@errors_area.html(response.error)
@save_button.attr("disabled",false)
else
@errors_area.html(@out_of_sync_message)
replace_answer: (response) =>
if response.success
@rubric_wrapper.html(response.rubric_html)
@rubric_wrapper.show()
@rub = new Rubric(@coe)
@rub.initialize(@location)
@child_state = 'assessing'
@find_assessment_elements()
@answer_area.val(response.student_response)
@rebind()
answer_area_div = @$(@answer_area_div_sel)
answer_area_div.html(response.student_response)
else
@submit_button.show()
@submit_button.attr('disabled', false)
@gentle_alert response.error
confirm_save_answer: (event) =>
###
Translators: This string appears in a confirmation box after one tries to submit
an openended problem
###
confirmation_text = gettext 'Please confirm that you wish to submit your work. You will not be able to make any changes after submitting.'
accessible_confirm confirmation_text, =>
@save_answer(event)
save_answer: (event) =>
@$el.find(@oe_alert_sel).remove()
@submit_button.attr("disabled",true)
@submit_button.hide()
event.preventDefault()
@answer_area.attr("disabled", true)
max_filesize = 2*1000*1000 #2MB
if @child_state == 'initial'
files = ""
valid_files_attached = false
if @can_upload_files == true
files = @$(@file_upload_box_sel)[0].files[0]
if files != undefined
valid_files_attached = true
if files.size > max_filesize
files = ""
# Don't submit the file in the case of it being too large, deal with the error locally.
@submit_button.show()
@submit_button.attr('disabled', false)
@gentle_alert gettext "You are trying to upload a file that is too large for our system. Please choose a file under 2MB or paste a link to it into the answer box."
return
fd = new FormData()
fd.append('student_answer', @answer_area.val())
fd.append('student_file', files)
fd.append('valid_files_attached', valid_files_attached)
settings =
type: "POST"
data: fd
processData: false
contentType: false
async: false
success: (response) =>
@replace_answer(response)
$.ajaxWithPrefix("#{@ajax_url}/save_answer",settings)
else
@errors_area.html(@out_of_sync_message)
keydown_handler: (event) =>
# Previously, responses were submitted when hitting enter. Add in a modifier that ensures that ctrl+enter is needed.
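# (Key code 17 is Ctrl and 13 is Enter; @is_ctrl tracks whether Ctrl is currently held down.)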
if event.which == 17 && @is_ctrl==false
@is_ctrl=true
else if @is_ctrl==true && event.which == 13 && @child_state == 'assessing' && @rub.check_complete()
@save_assessment(event)
keyup_handler: (event) =>
# Handle keyup event when ctrl key is released
if event.which == 17 && @is_ctrl==true
@is_ctrl=false
save_assessment: (event) =>
@submit_button.attr("disabled",true)
@submit_button.hide()
event.preventDefault()
if @child_state == 'assessing' && @rub.check_complete()
checked_assessment = @rub.get_total_score()
score_list = @rub.get_score_list()
data = {'assessment' : checked_assessment, 'score_list' : score_list}
$.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
if response.success
@child_state = response.state
if @child_state == 'post_assessment'
@hint_wrapper.html(response.hint_html)
@find_hint_elements()
else if @child_state == 'done'
@rubric_wrapper.hide()
@rebind()
else
@gentle_alert response.error
else
@errors_area.html(@out_of_sync_message)
save_hint: (event) =>
event.preventDefault()
if @child_state == 'post_assessment'
data = {'hint' : @hint_area.val()}
$.postWithPrefix "#{@ajax_url}/save_post_assessment", data, (response) =>
if response.success
@message_wrapper.html(response.message_html)
@child_state = 'done'
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html(@out_of_sync_message)
skip_post_assessment: =>
if @child_state == 'post_assessment'
$.postWithPrefix "#{@ajax_url}/skip_post_assessment", {}, (response) =>
if response.success
@child_state = 'done'
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html(@out_of_sync_message)
confirm_reset: (event) =>
message = gettext 'Are you sure you want to remove your previous response to this question?'
accessible_confirm message, =>
@reset(event)
reset: (event) =>
event.preventDefault()
if @child_state == 'done' or @allow_reset=="True"
$.postWithPrefix "#{@ajax_url}/reset", {}, (response) =>
if response.success
@answer_area.val('')
@rubric_wrapper.html('')
@hint_wrapper.html('')
@message_wrapper.html('')
@child_state = 'initial'
@coe.after(response.html).remove()
@allow_reset="False"
@reinitialize(@element)
@has_been_reset = true
@rebind()
@reset_button.hide()
else
@errors_area.html(response.error)
else
@errors_area.html(@out_of_sync_message)
next_problem: =>
if @child_state == 'done'
$.postWithPrefix "#{@ajax_url}/next_problem", {}, (response) =>
if response.success
@answer_area.val('')
@rubric_wrapper.html('')
@hint_wrapper.html('')
@message_wrapper.html('')
@child_state = 'initial'
@coe.after(response.html).remove()
@reinitialize(@element)
@rebind()
@next_problem_button.hide()
if !response.allow_reset
@gentle_alert gettext "Moved to next step."
else
###
Translators: this message appears when transitioning between openended grading
types (i.e. self assessment to peer assessment). Sometimes, if a student
did not perform well at one step, they cannot move on to the next one.
###
@gentle_alert gettext "Your score did not meet the criteria to move to the next step."
@show_combined_rubric_current()
else
@errors_area.html(response.error)
else
@errors_area.html(@out_of_sync_message)
gentle_alert: (msg) =>
if @$el.find(@oe_alert_sel).length
@$el.find(@oe_alert_sel).remove()
alert_elem = "<div class='open-ended-alert' role='alert'>" + msg + "</div>"
@$el.find('.open-ended-action').after(alert_elem)
@$el.find(@oe_alert_sel).css(opacity: 0).animate(opacity: 1, 700)
queueing: =>
if @child_state=="assessing" and @child_type=="openended"
if window.queuePollerID # Only one poller 'thread' per Problem
window.clearTimeout(window.queuePollerID)
window.queuePollerID = window.setTimeout(@poll, 10000)
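# poll (below) re-arms itself every 10 seconds and stops once the server reports
# a "done" or "post_assessment" state, at which point the problem is reloaded.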
poll: =>
$.postWithPrefix "#{@ajax_url}/check_for_score", (response) =>
if response.state == "done" or response.state=="post_assessment"
delete window.queuePollerID
@reload()
else
window.queuePollerID = window.setTimeout(@poll, 10000)
setup_file_upload: =>
if @accept_file_upload == "True"
if window.File and window.FileReader and window.FileList and window.Blob
@can_upload_files = true
@file_upload_area.html('<input type="file" class="file-upload-box"><img class="file-upload-preview" src="#" alt="Uploaded image" />')
@file_upload_area.show()
@$(@file_upload_preview_sel).hide()
@$(@file_upload_box_sel).change @preview_image
else
@gentle_alert gettext 'File uploads are required for this question, but are not supported in your browser. Try the newest version of Google Chrome. Alternatively, if you have uploaded the image to another website, you can paste a link to it into the answer box.'
hide_file_upload: =>
if @accept_file_upload == "True"
@file_upload_area.hide()
replace_text_inputs: =>
answer_class = @answer_area.attr('class')
answer_id = @answer_area.attr('id')
answer_val = @answer_area.val()
new_text = "<div class='#{answer_class}' id='#{answer_id}'>#{answer_val}</div>"
@answer_area.replaceWith(new_text)
# wrap this so that it can be mocked
reload: ->
@reinitialize()
collapse_question: (event) =>
@prompt_container.slideToggle()
@prompt_container.toggleClass('open')
if @prompt_container.hasClass('open')
###
Translators: "Show Question" is some text that, when clicked, shows a question's
content that had been hidden
###
new_text = gettext "Show Question"
Logger.log 'oe_show_question', {location: @location}
else
###
Translators: "Hide Question" is some text that, when clicked, hides a question's
content
###
Logger.log 'oe_hide_question', {location: @location}
new_text = gettext "Hide Question"
@question_header.text(new_text)
return false
hide_rubrics: () =>
rubrics = @$(@combined_rubric_sel)
for rub in rubrics
if @$(rub).data('status')=="shown"
@$(rub).show()
else
@$(rub).hide()
next_rubric: =>
@shift_rubric(1)
return false
previous_rubric: =>
@shift_rubric(-1)
return false
shift_rubric: (i) =>
rubrics = @$(@combined_rubric_sel)
number = 0
for rub in rubrics
if @$(rub).data('status')=="shown"
number = @$(rub).data('number')
@$(rub).data('status','hidden')
if i==1 and number < rubrics.length - 1
number = number + i
if i==-1 and number>0
number = number + i
@$(rubrics[number]).data('status', 'shown')
@hide_rubrics()
prompt_show: () =>
if @prompt_container.is(":hidden")==true
@prompt_container.slideToggle()
@prompt_container.toggleClass('open')
@question_header.text(gettext "Hide Question")
prompt_hide: () =>
if @prompt_container.is(":visible")==true
@prompt_container.slideToggle()
@prompt_container.toggleClass('open')
@question_header.text(gettext "Show Question")
log_feedback_click: (event) ->
target = @$(event.target)
if target.hasClass('see-full-feedback')
Logger.log 'oe_show_full_feedback', {}
else if target.hasClass('respond-to-feedback')
Logger.log 'oe_show_respond_to_feedback', {}
else
link_text = target.text()
generated_event_type = link_text.toLowerCase().replace(" ","_")
Logger.log "oe_" + generated_event_type, {}
log_feedback_selection: (event) ->
target_selection = @$(event.target).val()
Logger.log 'oe_feedback_response_selected', {value: target_selection}
remove_attribute: (name) =>
if @$(@file_upload_preview_sel).attr(name)
@$(@file_upload_preview_sel)[0].removeAttribute(name)
preview_image: () =>
if @$(@file_upload_box_sel)[0].files && @$(@file_upload_box_sel)[0].files[0]
reader = new FileReader()
reader.onload = (e) =>
max_dim = 150
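# Scale the preview so that its larger dimension ends up at max_dim pixels,
# preserving the image's aspect ratio.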
@remove_attribute('src')
@remove_attribute('height')
@remove_attribute('width')
@$(@file_upload_preview_sel).attr('src', e.target.result)
height_px = @$(@file_upload_preview_sel)[0].height
width_px = @$(@file_upload_preview_sel)[0].width
scale_factor = 0
if height_px>width_px
scale_factor = height_px/max_dim
else
scale_factor = width_px/max_dim
@$(@file_upload_preview_sel)[0].width = width_px/scale_factor
@$(@file_upload_preview_sel)[0].height = height_px/scale_factor
@$(@file_upload_preview_sel).show()
reader.readAsDataURL(@$(@file_upload_box_sel)[0].files[0])
toggle_rubric: (event) =>
info_rubric_elements = @$(@info_rubric_elements_sel)
info_rubric_elements.slideToggle()
return false
setup_score_selection: () =>
@$("input[class='score-selection']").change @graded_callback
graded_callback: () =>
if @rub.check_complete()
@submit_button.attr("disabled",false)
@submit_button.show()

View File

@@ -1,304 +0,0 @@
class @OpenEndedMarkdownEditingDescriptor extends XModule.Descriptor
# TODO really, these templates should come from or also feed the cheatsheet
@rubricTemplate : """
[rubric]
+ Ideas
- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
+ Content
- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
- Includes little information and few or no details. Explores only one or two facets of the topic.
- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
+ Organization
- Ideas organized illogically, transitions weak, and response difficult to follow.
- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
+ Style
- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
+ Voice
- Demonstrates language and tone that may be inappropriate to task and reader.
- Demonstrates an attempt to adjust language and tone to task and reader.
- Demonstrates effective adjustment of language and tone to task and reader.
[rubric]
"""
@tasksTemplate: "[tasks]\n(Self), ({4-12}AI), ({9-12}Peer)\n[tasks]\n"
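# Each parenthesized entry in the [tasks] block becomes one <task> in the generated
# XML: "Self" maps to <selfassessment/>, while "AI" and "Peer" map to <openended>
# blocks with the ml_grading.conf and peer_grading.conf configs respectively. An
# optional {min-max} prefix becomes min_score_to_attempt/max_score_to_attempt
# attributes (see markdownToXml below).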
@promptTemplate: """
[prompt]\n
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
[prompt]\n
"""
constructor: (element) ->
@element = element
if $(".markdown-box", @element).length != 0
@markdown_editor = CodeMirror.fromTextArea($(".markdown-box", element)[0], {
lineWrapping: true
mode: null
})
@setCurrentEditor(@markdown_editor)
selection = @markdown_editor.getSelection()
# Auto-add the needed template if it isn't already there.
if(@markdown_editor.getValue() == "")
@markdown_editor.setValue(OpenEndedMarkdownEditingDescriptor.promptTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.rubricTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.tasksTemplate)
# Add listeners for toolbar buttons (only present for markdown editor)
@element.on('click', '.xml-tab', @onShowXMLButton)
@element.on('click', '.format-buttons a', @onToolbarButton)
@element.on('click', '.cheatsheet-toggle', @toggleCheatsheet)
# Hide the XML text area
$(@element.find('.xml-box')).hide()
else
@createXMLEditor()
@alertTaskRubricModification()
###
Creates the XML Editor and sets it as the current editor. If text is passed in,
it will replace the text present in the HTML template.
text: optional argument to override the text passed in via the HTML template
###
createXMLEditor: (text) ->
@xml_editor = CodeMirror.fromTextArea($(".xml-box", @element)[0], {
mode: "xml"
lineNumbers: true
lineWrapping: true
})
if text
@xml_editor.setValue(text)
@setCurrentEditor(@xml_editor)
$(@xml_editor.getWrapperElement()).toggleClass("CodeMirror-advanced");
# Need to refresh to get line numbers to display properly.
@xml_editor.refresh()
###
User has clicked to show the XML editor. Before XML editor is swapped in,
the user will need to confirm the one-way conversion.
###
onShowXMLButton: (e) =>
e.preventDefault();
if @cheatsheet && @cheatsheet.hasClass('shown')
@cheatsheet.toggleClass('shown')
@toggleCheatsheetVisibility()
if @confirmConversionToXml()
@createXMLEditor(OpenEndedMarkdownEditingDescriptor.markdownToXml(@markdown_editor.getValue()))
# Put cursor position to 0.
@xml_editor.setCursor(0)
# Hide markdown-specific toolbar buttons
$(@element.find('.editor-bar')).hide()
alertTaskRubricModification: ->
return alert("Before you edit, please note that if you alter the tasks block or the rubric block of this question after students have submitted responses, it may result in their responses and grades being deleted! Use caution when altering problems that have already been released to students.")
###
Have the user confirm the one-way conversion to XML.
Returns true if the user clicked OK, else false.
###
confirmConversionToXml: ->
# TODO: use something besides a JavaScript confirm dialog?
return confirm("If you use the Advanced Editor, this problem will be converted to XML and you will not be able to return to the Simple Editor Interface.\n\nProceed to the Advanced Editor and convert this problem to XML?")
###
Event listener for toolbar buttons (only possible when markdown editor is visible).
###
onToolbarButton: (e) =>
e.preventDefault();
selection = @markdown_editor.getSelection()
revisedSelection = null
switch $(e.currentTarget).attr('class')
when "rubric-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric(selection)
when "prompt-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt(selection)
when "tasks-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks(selection)
else # ignore click
if revisedSelection != null
@markdown_editor.replaceSelection(revisedSelection)
@markdown_editor.focus()
###
Event listener for toggling cheatsheet (only possible when markdown editor is visible).
###
toggleCheatsheet: (e) =>
e.preventDefault();
if !$(@markdown_editor.getWrapperElement()).find('.simple-editor-open-ended-cheatsheet')[0]
@cheatsheet = $($('#simple-editor-open-ended-cheatsheet').html())
$(@markdown_editor.getWrapperElement()).append(@cheatsheet)
@toggleCheatsheetVisibility()
setTimeout (=> @cheatsheet.toggleClass('shown')), 10
###
Function to toggle cheatsheet visibility.
###
toggleCheatsheetVisibility: () =>
$('.modal-content').toggleClass('cheatsheet-is-shown')
###
Stores the current editor and hides the one that is not displayed.
###
setCurrentEditor: (editor) ->
if @current_editor
$(@current_editor.getWrapperElement()).hide()
@current_editor = editor
$(@current_editor.getWrapperElement()).show()
$(@current_editor).focus();
###
Called when save is called. Listeners are unregistered because editing the block again will
result in a new instance of the descriptor. Note that this is NOT the case for cancel--
when cancel is called the instance of the descriptor is reused if edit is selected again.
###
save: ->
@element.off('click', '.xml-tab', @onShowXMLButton)
@element.off('click', '.format-buttons a', @onToolbarButton)
@element.off('click', '.cheatsheet-toggle', @toggleCheatsheet)
if @current_editor == @markdown_editor
{
data: OpenEndedMarkdownEditingDescriptor.markdownToXml(@markdown_editor.getValue())
metadata:
markdown: @markdown_editor.getValue()
}
else
{
data: @xml_editor.getValue()
nullout: ['markdown']
}
@insertRubric: (selectedText) ->
return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[rubric]', '[rubric]', OpenEndedMarkdownEditingDescriptor.rubricTemplate)
@insertPrompt: (selectedText) ->
return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[prompt]', '[prompt]', OpenEndedMarkdownEditingDescriptor.promptTemplate)
@insertTasks: (selectedText) ->
return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[tasks]', '[tasks]', OpenEndedMarkdownEditingDescriptor.tasksTemplate)
@insertGenericInput: (selectedText, lineStart, lineEnd, template) ->
if selectedText.length > 0
new_string = selectedText.replace(/^\s+|\s+$/g,'')
if new_string.substring(0,lineStart.length) != lineStart
new_string = lineStart + new_string
if new_string.substring((new_string.length)-lineEnd.length,new_string.length) != lineEnd
new_string = new_string + lineEnd
return new_string
else
return template
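# For example, insertRubric("+ Ideas") wraps the selection to produce
# "[rubric]+ Ideas[rubric]", while insertRubric("") returns the full
# rubricTemplate defined above.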
@markdownToXml: (markdown)->
toXml = `function(markdown) {
function template(template_html,data){
return template_html.replace(/%(\w*)%/g,function(m,key){return data.hasOwnProperty(key)?data[key]:"";});
}
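// e.g. template("<openended %min_max_string%>%grading_config%</openended>",
//              {min_max_string: "", grading_config: "ml_grading.conf"})
// returns "<openended >ml_grading.conf</openended>"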
var xml = markdown;
// group rubrics
xml = xml.replace(/\[rubric\]\n?([^\]]*)\[\/?rubric\]/gmi, function(match, p) {
var groupString = '<rubric>\n<rubric>\n';
var options = p.split('\n');
var category_open = false;
for(var i = 0; i < options.length; i++) {
if(options[i].length > 0) {
var value = options[i].replace(/^\s+|\s+$/g,'');
if (value.charAt(0)=="+") {
if(i>0){
if(category_open==true){
groupString += "</category>\n";
category_open = false;
}
}
groupString += "<category>\n<description>\n";
category_open = true;
text = value.substr(1);
text = text.replace(/^\s+|\s+$/g,'');
groupString += text;
groupString += "\n</description>\n";
} else if (value.charAt(0) == "-") {
groupString += "<option>\n";
text = value.substr(1);
text = text.replace(/^\s+|\s+$/g,'');
groupString += text;
groupString += "\n</option>\n";
}
}
if(i==options.length-1 && category_open == true){
groupString += "\n</category>\n";
}
}
groupString += '</rubric>\n</rubric>\n';
return groupString;
});
// group tasks
xml = xml.replace(/\[tasks\]\n?([^\]]*)\[\/?tasks\]/gmi, function(match, p) {
var open_ended_template = $('#open-ended-template').html();
if(open_ended_template == null) {
open_ended_template = "<openended %min_max_string%>%grading_config%</openended>";
}
var groupString = '';
var options = p.split(",");
for(var i = 0; i < options.length; i++) {
if(options[i].length > 0) {
var value = options[i].replace(/^\s+|\s+$/g,'');
var lower_option = value.toLowerCase();
type = lower_option.match(/(peer|self|ai)/gmi)
if(type != null) {
type = type[0]
var min_max = value.match(/\{\n?([^\]]*)\}/gmi);
var min_max_string = "";
if(min_max!=null) {
min_max = min_max[0].replace(/^{|}/gmi,'');
min_max = min_max.split("-");
min = min_max[0];
max = min_max[1];
min_max_string = 'min_score_to_attempt="' + min + '" max_score_to_attempt="' + max + '" ';
}
groupString += "<task>\n"
if(type=="self") {
groupString +="<selfassessment" + min_max_string + "/>"
} else if (type=="peer") {
config = "peer_grading.conf"
groupString += template(open_ended_template,{min_max_string: min_max_string, grading_config: config});
} else if (type=="ai") {
config = "ml_grading.conf"
groupString += template(open_ended_template,{min_max_string: min_max_string, grading_config: config});
}
groupString += "</task>\n"
}
}
}
return groupString;
});
// replace prompts
xml = xml.replace(/\[prompt\]\n?([^\]]*)\[\/?prompt\]/gmi, function(match, p1) {
var selectString = '<prompt>\n' + p1 + '\n</prompt>';
return selectString;
});
// collapse runs of blank lines
xml = xml.replace(/\n\n\n/g, '\n');
// surround w/ combinedopenended tag
xml = '<combinedopenended>\n' + xml + '\n</combinedopenended>';
return xml;
}
`
return toXml markdown
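###
Illustrative example (whitespace in the real output differs slightly): converting
"[tasks]\n(Self), ({4-12}AI)\n[tasks]" yields roughly

<combinedopenended>
<task>
<selfassessment/>
</task>
<task>
<openended min_score_to_attempt="4" max_score_to_attempt="12" >ml_grading.conf</openended>
</task>
</combinedopenended>
###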

View File

@@ -1,66 +0,0 @@
# This is a simple class that just hides the error container
# and message container when they are empty.
# Can (and should) be expanded upon when our problem list
# becomes more sophisticated.
class @PeerGrading
peer_grading_sel: '.peer-grading'
peer_grading_container_sel: '.peer-grading-container'
error_container_sel: '.error-container'
message_container_sel: '.message-container'
problem_button_sel: '.problem-button'
problem_list_sel: '.problem-list'
progress_bar_sel: '.progress-bar'
constructor: (element) ->
@el = element
@peer_grading_container = @$(@peer_grading_sel)
@use_single_location = @peer_grading_container.data('use-single-location')
@peer_grading_outer_container = @$(@peer_grading_container_sel)
@ajax_url = @peer_grading_container.data('ajax-url')
if @use_single_location.toLowerCase() == "true"
#If the peer grading element is linked to a single location, then activate the backend for that location
@activate_problem()
else
#Otherwise, activate the panel view.
@error_container = @$(@error_container_sel)
@error_container.toggle(not @error_container.is(':empty'))
@message_container = @$(@message_container_sel)
@message_container.toggle(not @message_container.is(':empty'))
@problem_button = @$(@problem_button_sel)
@problem_button.click @show_results
@problem_list = @$(@problem_list_sel)
@construct_progress_bar()
# locally scoped jquery.
$: (selector) ->
$(selector, @el)
construct_progress_bar: () =>
problems = @problem_list.find('tr').next()
problems.each( (index, element) =>
problem = $(element)
progress_bar = problem.find(@progress_bar_sel)
bar_value = parseInt(problem.data('graded'))
bar_max = parseInt(problem.data('required')) + bar_value
progress_bar.progressbar({value: bar_value, max: bar_max})
)
show_results: (event) =>
location_to_fetch = $(event.target).data('location')
data = {'location' : location_to_fetch}
$.postWithPrefix "#{@ajax_url}problem", data, (response) =>
if response.success
@peer_grading_outer_container.after(response.html).remove()
backend = new PeerGradingProblemBackend(@ajax_url, false)
new PeerGradingProblem(backend, @el)
else
@gentle_alert response.error
activate_problem: () =>
backend = new PeerGradingProblemBackend(@ajax_url, false)
new PeerGradingProblem(backend, @el)

View File

@@ -1,615 +0,0 @@
##################################
#
# This is the JS that renders the peer grading problem page.
# Fetches the correct problem and/or calibration essay
# and sends back the grades
#
# Should not be run when we don't have a location to send back
# to the server
#
# PeerGradingProblemBackend -
# makes all the ajax requests and provides a mock interface
# for testing purposes
#
# PeerGradingProblem -
# handles the rendering and user interactions with the interface
#
##################################
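# Typical instantiation, as done by the PeerGrading panel class (passing true
# instead of false for mock_backend exercises the canned responses in mock() below):
#   backend = new PeerGradingProblemBackend(ajax_url, false)
#   new PeerGradingProblem(backend, element)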
class @PeerGradingProblemBackend
constructor: (ajax_url, mock_backend) ->
@mock_backend = mock_backend
@ajax_url = ajax_url
@mock_cnt = 0
post: (cmd, data, callback) ->
if @mock_backend
callback(@mock(cmd, data))
else
# if this post request fails, the error callback will catch it
$.post(@ajax_url + cmd, data, callback)
.error => callback({success: false, error: "Error occurred while performing this operation"})
mock: (cmd, data) ->
if cmd == 'is_student_calibrated'
# change to test each version
response =
success: true
calibrated: @mock_cnt >= 2
else if cmd == 'show_calibration_essay'
#response =
# success: false
# error: "There was an error"
@mock_cnt++
response =
success: true
submission_id: 1
submission_key: 'abcd'
student_response: '''
Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
'''
prompt: '''
<h2>S11E3: Metal Bands</h2>
<p>Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are indisputably metallic in nature.</p>
<p>* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled? </p>
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
'''
rubric: '''
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
'''
max_score: 4
else if cmd == 'get_next_submission'
response =
success: true
submission_id: 1
submission_key: 'abcd'
student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa.
Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum.
Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. '''
prompt: '''
<h2>S11E3: Metal Bands</h2>
<p>Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are indisputably metallic in nature.</p>
<p>* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled? </p>
<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
'''
rubric: '''
<table class="rubric"><tbody><tr><th>Purpose</th>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
</td>
</tr><tr><th>Organization</th>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
</td>
<td>
<input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
</td>
</tr></tbody></table>
'''
max_score: 4
else if cmd == 'save_calibration_essay'
response =
success: true
actual_score: 2
else if cmd == 'save_grade'
response =
success: true
return response
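# Note that the mock backend only reports the student as calibrated after two
# calibration essays have been fetched (mock_cnt >= 2), which exercises the full
# calibration-to-grading transition in tests.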
class @PeerGradingProblem
prompt_wrapper_sel: '.prompt-wrapper'
peer_grading_container_sel: '.peer-grading-container'
submission_container_sel: '.submission-container'
prompt_container_sel: '.prompt-container'
rubric_container_sel: '.rubric-container'
flag_student_container_sel: '.flag-student-container'
calibration_panel_sel: '.calibration-panel'
grading_panel_sel: '.grading-panel'
content_panel_sel: '.content-panel'
grading_message_sel: '.grading-message'
question_header_sel: '.question-header'
flag_submission_confirmation_sel: '.flag-submission-confirmation'
flag_submission_confirmation_button_sel: '.flag-submission-confirmation-button'
flag_submission_removal_button_sel: '.flag-submission-removal-button'
grading_wrapper_sel: '.grading-wrapper'
calibration_feedback_sel: '.calibration-feedback'
interstitial_page_sel: '.interstitial-page'
calibration_interstitial_page_sel: '.calibration-interstitial-page'
error_container_sel: '.error-container'
peer_grading_instructions_sel: '.peer-grading-instructions'
feedback_area_sel: '.feedback-area'
ice_legend_sel: '.ice-legend'
score_selection_container_sel: '.score-selection-container'
rubric_selection_container_sel: '.rubric-selection-container'
submit_button_sel: '.submit-button'
action_button_sel: '.action-button'
calibration_feedback_button_sel: '.calibration-feedback-button'
interstitial_page_button_sel: '.interstitial-page-button'
calibration_interstitial_page_button_sel: '.calibration-interstitial-page-button'
flag_checkbox_sel: '.flag-checkbox'
calibration_text_sel: '.calibration-text'
grading_text_sel: '.grading-text'
calibration_feedback_wrapper_sel: '.calibration-feedback-wrapper'
constructor: (backend, el) ->
@el = el
@prompt_wrapper = $(@prompt_wrapper_sel)
@backend = backend
@is_ctrl = false
@el = $(@peer_grading_container_sel)
# get the location of the problem
@location = $('.peer-grading').data('location')
# prevent this code from trying to run
# when we don't have a location
if(!@location)
return
# get the other elements we want to fill in
@submission_container = @$(@submission_container_sel)
@prompt_container = @$(@prompt_container_sel)
@rubric_container = @$(@rubric_container_sel)
@flag_student_container = @$(@flag_student_container_sel)
@calibration_panel = @$(@calibration_panel_sel)
@grading_panel = @$(@grading_panel_sel)
@content_panel = @$(@content_panel_sel)
@grading_message = @$(@grading_message_sel)
@grading_message.hide()
@question_header = @$(@question_header_sel)
@question_header.click @collapse_question
@flag_submission_confirmation = @$(@flag_submission_confirmation_sel)
@flag_submission_confirmation_button = @$(@flag_submission_confirmation_button_sel)
@flag_submission_removal_button = @$(@flag_submission_removal_button_sel)
@flag_submission_confirmation_button.click @close_dialog_box
@flag_submission_removal_button.click @remove_flag
@grading_wrapper = @$(@grading_wrapper_sel)
@calibration_feedback_panel = @$(@calibration_feedback_sel)
@interstitial_page = @$(@interstitial_page_sel)
@interstitial_page.hide()
@calibration_interstitial_page = @$(@calibration_interstitial_page_sel)
@calibration_interstitial_page.hide()
@error_container = @$(@error_container_sel)
@submission_key_input = $("input[name='submission-key']")
@essay_id_input = @$("input[name='essay-id']")
@peer_grading_instructions = @$(@peer_grading_instructions_sel)
@feedback_area = @$(@feedback_area_sel)
@ice_legend = @$(@ice_legend_sel)
@score_selection_container = @$(@score_selection_container_sel)
@rubric_selection_container = @$(@rubric_selection_container_sel)
@grade = null
@calibration = null
@submit_button = @$(@submit_button_sel)
@action_button = @$(@action_button_sel)
@calibration_feedback_button = @$(@calibration_feedback_button_sel)
@interstitial_page_button = @$(@interstitial_page_button_sel)
@calibration_interstitial_page_button = @$(@calibration_interstitial_page_button_sel)
@flag_student_checkbox = @$(@flag_checkbox_sel)
$(window).keydown @keydown_handler
$(window).keyup @keyup_handler
Collapsible.setCollapsibles(@content_panel)
# Set up the click event handlers
@action_button.click -> history.back()
@calibration_feedback_button.click =>
@calibration_feedback_panel.hide()
@grading_wrapper.show()
@gentle_alert "Calibration essay saved. Fetching the next essay."
@is_calibrated_check()
@interstitial_page_button.click =>
@interstitial_page.hide()
@is_calibrated_check()
@calibration_interstitial_page_button.click =>
@calibration_interstitial_page.hide()
@is_calibrated_check()
@flag_student_checkbox.click =>
@flag_box_checked()
@calibration_feedback_button.hide()
@calibration_feedback_panel.hide()
@error_container.hide()
@flag_submission_confirmation.hide()
if @tracking_changes()
@change_tracker = new TrackChanges(@el)
@is_calibrated_check()
# locally scoped jquery.
$: (selector) ->
$(selector, @el)
##########
#
# Ajax calls to the backend
#
##########
is_calibrated_check: () =>
@backend.post('is_student_calibrated', {location: @location}, @calibration_check_callback)
fetch_calibration_essay: () =>
@backend.post('show_calibration_essay', {location: @location}, @render_calibration)
fetch_submission_essay: () =>
@backend.post('get_next_submission', {location: @location}, @render_submission)
construct_data: () ->
if @tracking_changes()
feedback_content = @feedback_area.html()
else
feedback_content = @feedback_area.val()
data =
rubric_scores: @rub.get_score_list()
score: @rub.get_total_score()
location: @location
submission_id: @essay_id_input.val()
submission_key: @submission_key_input.val()
feedback: feedback_content
submission_flagged: @flag_student_checkbox.is(':checked')
# hardcoding answer_unknown to false
answer_unknown: false
return data
submit_calibration_essay: ()=>
data = @construct_data()
@submit_button.hide()
@backend.post('save_calibration_essay', data, @calibration_callback)
submit_grade: () =>
data = @construct_data()
@submit_button.hide()
@backend.post('save_grade', data, @submission_callback)
##########
#
# Callbacks for various events
#
##########
remove_flag: () =>
@flag_student_checkbox.removeAttr("checked")
@close_dialog_box()
@submit_button.attr('disabled', true)
close_dialog_box: () =>
$(@flag_submission_confirmation_sel).dialog('close')
flag_box_checked: () =>
if @flag_student_checkbox.is(':checked')
@$(@flag_submission_confirmation_sel).dialog({ height: 400, width: 400 })
@submit_button.attr('disabled', false)
# called after we perform an is_student_calibrated check
calibration_check_callback: (response) =>
if response.success
# if we haven't been calibrating before
if response.calibrated and (@calibration == null or @calibration == false)
@calibration = false
@fetch_submission_essay()
# If we were calibrating before and no longer need to,
# show the interstitial page
else if response.calibrated and @calibration == true
@calibration = false
@render_interstitial_page()
else if not response.calibrated and @calibration==null
@calibration=true
@render_calibration_interstitial_page()
else
@calibration = true
@fetch_calibration_essay()
else if response.error
@render_error(response.error)
else
@render_error("Error contacting the grading service")
# called after we submit a calibration score
calibration_callback: (response) =>
if response.success
@render_calibration_feedback(response)
else if response.error
@render_error(response.error)
else
@render_error("Error saving calibration score")
# called after we submit a submission score
submission_callback: (response) =>
if response.success
@is_calibrated_check()
@grading_message.fadeIn()
message = "<p>Successfully saved your feedback. Fetching the next essay."
if response.required_done
message = message + " You have done the required number of peer assessments but may continue grading if you like."
message = message + "</p>"
@grading_message.html(message)
else
if response.error
@render_error(response.error)
else
@render_error("Error occurred while submitting grade")
# called after a grade is selected on the interface
graded_callback: (event) =>
ev = @$(event.target).parent().parent()
ul = ev.parent().parent()
ul.find(".rubric-label-selected").removeClass('rubric-label-selected')
ev.addClass('rubric-label-selected')
# check to see whether or not any categories have not been scored
if @rub.check_complete()
# show button if we have scores for all categories
@grading_message.hide()
@show_submit_button()
@grade = @rub.get_total_score()
keydown_handler: (event) =>
#Previously, responses were submitted when hitting enter. Add in a modifier that ensures that ctrl+enter is needed.
if event.which == 17 && @is_ctrl==false
@is_ctrl=true
else if event.which == 13 && @submit_button.is(':visible') && @is_ctrl==true
if @calibration
@submit_calibration_essay()
else
@submit_grade()
keyup_handler: (event) =>
#Handle keyup event when ctrl key is released
if event.which == 17 && @is_ctrl==true
@is_ctrl=false
##########
#
# Rendering methods and helpers
#
##########
# renders a calibration essay
render_calibration: (response) =>
if response.success
# load in all the data
@submission_container.html("")
@render_submission_data(response)
# TODO: indicate that we're in calibration mode
@calibration_panel.addClass('current-state')
@grading_panel.removeClass('current-state')
# Display the right text
# both versions of the text are written into the template itself
# we only need to show/hide the correct ones at the correct time
@calibration_panel.find(@calibration_text_sel).show()
@grading_panel.find(@calibration_text_sel).show()
@calibration_panel.find(@grading_text_sel).hide()
@grading_panel.find(@grading_text_sel).hide()
@flag_student_container.hide()
@peer_grading_instructions.hide()
@feedback_area.attr('disabled', true)
feedback_text = "Once you are done learning to grade, and are grading your peers' work, you will be asked to share written feedback with them in addition to scoring them."
if @tracking_changes()
@ice_legend.hide()
@feedback_area.attr('contenteditable', false)
@feedback_area.text(feedback_text)
else
@feedback_area.val(feedback_text)
@submit_button.show()
@submit_button.unbind('click')
@submit_button.click @submit_calibration_essay
@submit_button.attr('disabled', true)
@scroll_to_top()
else if response.error
@render_error(response.error)
else
@render_error("An error occurred while retrieving the next calibration essay")
tracking_changes: () =>
return @grading_wrapper.data('track-changes') == true
# Renders a student submission to be graded
render_submission: (response) =>
if response.success
@submit_button.hide()
@submission_container.html("")
@render_submission_data(response)
@calibration_panel.removeClass('current-state')
@grading_panel.addClass('current-state')
# Display the correct text
# both versions of the text are written into the template itself
# we only need to show/hide the correct ones at the correct time
@calibration_panel.find(@calibration_text_sel).hide()
@grading_panel.find(@calibration_text_sel).hide()
@calibration_panel.find(@grading_text_sel).show()
@grading_panel.find(@grading_text_sel).show()
@flag_student_container.show()
@peer_grading_instructions.show()
if @tracking_changes()
@ice_legend.show()
@feedback_area.html(@make_paragraphs(response.student_response))
@change_tracker.rebindTracker()
else
@feedback_area.val("")
@feedback_area.attr('disabled', false)
@flag_student_checkbox.removeAttr("checked")
@submit_button.show()
@submit_button.unbind('click')
@submit_button.click @submit_grade
@submit_button.attr('disabled', true)
@scroll_to_top()
else if response.error
@render_error(response.error)
else
@render_error("An error occurred when retrieving the next submission.")
make_paragraphs: (text) ->
paragraph_split = text.split(/\n\s*\n/)
new_text = ''
for paragraph in paragraph_split
new_text += "<p>#{paragraph}</p>"
return new_text
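# e.g. make_paragraphs("first\n\nsecond") returns "<p>first</p><p>second</p>"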
# render common information between calibration and grading
render_submission_data: (response) =>
@content_panel.show()
@error_container.hide()
@submission_container.append(@make_paragraphs(response.student_response))
@prompt_container.html(response.prompt)
@rubric_selection_container.html(response.rubric)
@submission_key_input.val(response.submission_key)
@essay_id_input.val(response.submission_id)
@setup_score_selection(response.max_score)
@submit_button.hide()
@action_button.hide()
@calibration_feedback_panel.hide()
@rub = new Rubric(@el)
@rub.initialize(@location)
render_calibration_feedback: (response) =>
# display correct grade
@calibration_feedback_panel.slideDown()
calibration_wrapper = @$(@calibration_feedback_wrapper_sel)
calibration_wrapper.html("<p>The score you gave was: #{@grade}. The instructor score is: #{response.actual_score}</p>")
score = parseInt(@grade)
actual_score = parseInt(response.actual_score)
if score == actual_score
calibration_wrapper.append("<p>Your score matches the instructor score!</p>")
else
calibration_wrapper.append("<p>You may want to review the rubric again.</p>")
if response.actual_rubric != undefined
calibration_wrapper.append("<div>Instructor Scored Rubric: #{response.actual_rubric}</div>")
if response.actual_feedback.feedback!=undefined
calibration_wrapper.append("<div>Instructor Feedback: #{response.actual_feedback.feedback}</div>")
# disable score selection and submission from the grading interface
@$("input[name='score-selection']").attr('disabled', true)
@submit_button.hide()
@calibration_feedback_button.show()
render_interstitial_page: () =>
@content_panel.hide()
@grading_message.hide()
@interstitial_page.show()
render_calibration_interstitial_page: () =>
@content_panel.hide()
@action_button.hide()
@calibration_interstitial_page.show()
render_error: (error_message) =>
@error_container.show()
@calibration_feedback_panel.hide()
@error_container.html(error_message)
@content_panel.hide()
@action_button.show()
show_submit_button: () =>
@submit_button.attr('disabled', false)
@submit_button.show()
setup_score_selection: (max_score) =>
# And now hook up an event handler again
@$("input[class='score-selection']").change @graded_callback
gentle_alert: (msg) =>
@grading_message.fadeIn()
@grading_message.html("<p>" + msg + "</p>")
collapse_question: (event) =>
@prompt_container.slideToggle()
@prompt_container.toggleClass('open')
if @question_header.text() == "Hide Question"
new_text = "Show Question"
Logger.log 'oe_hide_question', {location: @location}
else
Logger.log 'oe_show_question', {location: @location}
new_text = "Hide Question"
@question_header.text(new_text)
return false
scroll_to_top: () =>
$('html, body').animate({
scrollTop: $(".peer-grading").offset().top
}, 200)

View File

@@ -210,13 +210,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
'''Make sure the course objects loaded properly'''
courses = self.draft_store.get_courses()
# note, the number of courses expected is really
# 6, but due to a lack of cache flushing between
# test case runs, we will get back 7.
# When we fix the caching issue, we should reduce this
# to 6 and remove the 'treeexport_peer_component' course_id
# from the list below
assert_equals(len(courses), 7)
assert_equals(len(courses), 6)
course_ids = [course.id for course in courses]
for course_key in [
@@ -229,9 +223,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
['edX', 'test_unicode', '2012_Fall'],
['edX', 'toy', '2012_Fall'],
['guestx', 'foo', 'bar'],
# This course below is due to a caching issue in the modulestore
# which is not cleared between test runs. This means the course
# created by a previous test still appears in the results.
['edX', 'treeexport_peer_component', 'export_peer_component'],
]
]:
assert_in(course_key, course_ids)
@@ -263,13 +254,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
assert_in(course_key, course_ids)
courses = self.draft_store.get_courses(org='edX')
# note, the number of courses expected is really
# 5, but due to a lack of cache flushing between
# test case runs, we will get back 6.
# When we fix the caching issue, we should reduce this
# to 5 and remove the 'treeexport_peer_component' course_id
# from the list below
assert_equals(len(courses), 6)
assert_equals(len(courses), 5)
course_ids = [course.id for course in courses]
for course_key in [
@@ -280,9 +265,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
['edX', 'test_import_course', '2012_Fall'],
['edX', 'test_unicode', '2012_Fall'],
['edX', 'toy', '2012_Fall'],
# This course below is due to a caching issue in the modulestore
# which is not cleared between test runs. This means the course
# created by a previous test still appears in the results.
['edX', 'treeexport_peer_component', 'export_peer_component'],
]
]:
assert_in(course_key, course_ids)
@@ -678,57 +660,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase):
self.assertEqual(component.published_on, published_date)
self.assertEqual(component.published_by, published_by)
def test_export_course_with_peer_component(self):
"""
Test export course when link_to_location is given in peer grading interface settings.
"""
name = "export_peer_component"
locations = self._create_test_tree(name)
# Insert the test block directly into the module store
problem_location = Location('edX', 'tree{}'.format(name), name, 'combinedopenended', 'test_peer_problem')
self.draft_store.create_child(
self.dummy_user,
locations["child"],
problem_location.block_type,
block_id=problem_location.block_id
)
interface_location = Location('edX', 'tree{}'.format(name), name, 'peergrading', 'test_peer_interface')
self.draft_store.create_child(
self.dummy_user,
locations["child"],
interface_location.block_type,
block_id=interface_location.block_id
)
self.draft_store._update_single_item(
as_draft(interface_location),
{
'definition.data': {},
'metadata': {
'link_to_location': unicode(problem_location),
'use_for_single_location': True,
},
},
)
component = self.draft_store.get_item(interface_location)
self.assertEqual(unicode(component.link_to_location), unicode(problem_location))
root_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, root_dir)
# export_course_to_xml should work.
export_course_to_xml(
self.draft_store, self.content_store, interface_location.course_key,
root_dir, 'test_export'
)
def test_draft_modulestore_create_child_with_position(self):
"""
This test is designed to hit a specific set of use cases having to do with

View File

@@ -95,27 +95,27 @@ class CountMongoCallsCourseTraversal(TestCase):
# These two lines show the way this traversal *should* be done
# (if you'll eventually access all the fields and load all the definitions anyway).
# 'lazy' does not matter in old Mongo.
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, True, 189),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, True, 189),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, True, 387),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, True, 387),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, True, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, True, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, True, 359),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, True, 359),
# As shown in these two lines: whether or not the XBlock fields are accessed,
# the same number of mongo calls are made in old Mongo for depth=None.
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, False, 189),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, False, 189),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, False, 387),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, False, 387),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, False, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, False, 175),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, False, 359),
(MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, False, 359),
# The line below shows the way this traversal *should* be done
# (if you'll eventually access all the fields and load all the definitions anyway).
(MIXED_SPLIT_MODULESTORE_BUILDER, None, False, True, 4),
(MIXED_SPLIT_MODULESTORE_BUILDER, None, True, True, 41),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, True, 143),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, True, 41),
(MIXED_SPLIT_MODULESTORE_BUILDER, None, True, True, 38),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, True, 131),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, True, 38),
(MIXED_SPLIT_MODULESTORE_BUILDER, None, False, False, 4),
(MIXED_SPLIT_MODULESTORE_BUILDER, None, True, False, 4),
# TODO: The call count below seems like a bug - should be 4?
# Seems to be related to using self.lazy in CachingDescriptorSystem.get_module_data().
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, False, 143),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, False, 131),
(MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, False, 4)
)
@ddt.unpack

View File

@@ -1 +0,0 @@
__author__ = 'vik'

View File

@@ -1,1246 +0,0 @@
import json
import logging
import traceback
from lxml import etree
from xmodule.timeinfo import TimeInfo
from xmodule.capa_module import ComplexEncoder
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
from xmodule.open_ended_grading_classes import self_assessment_module
from xmodule.open_ended_grading_classes import open_ended_module
from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, MockPeerGradingService
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
log = logging.getLogger("edx.courseware")
# Set the default number of max attempts. Should be 1 for production
# Set higher for debugging/testing
# The attempts value specified in the XML definition overrides this.
MAX_ATTEMPTS = 1
# The highest score allowed for the overall xmodule and for each rubric point
MAX_SCORE_ALLOWED = 50
# If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress
# Metadata overrides this.
IS_SCORED = False
# If true, then default behavior is to require a file upload or pasted link from a student for this problem.
# Metadata overrides this.
ACCEPT_FILE_UPLOAD = False
# Contains all reasonable bool and case combinations of True
TRUE_DICT = ["True", True, "TRUE", "true"]
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
HUMAN_TASK_TYPE = {
# Translators: "Self" is used to denote an openended response that is self-graded
'selfassessment': _("Self"),
'openended': "edX",
# Translators: "AI" is used to denote an openended response that is machine-graded
'ml_grading.conf': _("AI"),
# Translators: "Peer" is used to denote an openended response that is peer-graded
'peer_grading.conf': _("Peer"),
}
HUMAN_STATES = {
# Translators: "Not started" is used to communicate to a student that their response
# has not yet been graded
'initial': _("Not started."),
# Translators: "Being scored." is used to communicate to a student that their response
# is in the process of being scored
'assessing': _("Being scored."),
# Translators: "Scoring finished" is used to communicate to a student that their response
# has been scored, but the full scoring process is not yet complete
'intermediate_done': _("Scoring finished."),
# Translators: "Complete" is used to communicate to a student that their
# openended response has been fully scored
'done': _("Complete."),
}
# Default value that controls whether or not to skip basic spelling checks in the controller
# Metadata overrides this
SKIP_BASIC_CHECKS = False
class CombinedOpenEndedV1Module(object):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
It transitions between problems, and support arbitrary ordering.
Each combined open ended module contains one or multiple "child" modules.
Child modules track their own state, and can transition between states. They also implement get_html and
handle_ajax.
The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess
ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem)
ajax actions implemented by all children are:
'save_answer' -- Saves the student answer
'save_assessment' -- Saves the student assessment (or external grader assessment)
'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc)
ajax actions implemented by combined open ended module are:
'reset' -- resets the whole combined open ended module and returns to the first child module
'next_problem' -- moves to the next child module
Types of children. Task is synonymous with child module, so each combined open ended module
incorporates multiple children (tasks):
openendedmodule
selfassessmentmodule
"""
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done'
# Where the templates live for this problem
TEMPLATE_DIR = "combinedopenended"
# hack: included to make this class act enough like an xblock to get i18n
_services_requested = {"i18n": "need"}
_combined_services = _services_requested
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block. See DEFAULT_DATA in combined_open_ended_module for a sample.
"""
self.instance_state = instance_state
self.display_name = instance_state.get('display_name', "Open Ended")
# We need to set the location here so the child modules can use it
system.set('location', location)
self.system = system
# Tells the system which xml definition to load
self.current_task_number = instance_state.get('current_task_number', 0)
# This loads the states of the individual children
self.task_states = instance_state.get('task_states', [])
#This gets any old task states that have been persisted after the instructor changed the tasks.
self.old_task_states = instance_state.get('old_task_states', [])
# Overall state of the combined open ended module
self.state = instance_state.get('state', self.INITIAL)
self.student_attempts = instance_state.get('student_attempts', 0)
self.weight = instance_state.get('weight', 1)
# Allow reset is true if student has failed the criteria to move to the next child task
self.ready_to_reset = instance_state.get('ready_to_reset', False)
self.max_attempts = instance_state.get('max_attempts', MAX_ATTEMPTS)
self.is_scored = instance_state.get('graded', IS_SCORED) in TRUE_DICT
self.accept_file_upload = instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
self.skip_basic_checks = instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
if system.open_ended_grading_interface:
self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system.render_template)
else:
self.peer_gs = MockPeerGradingService()
self.required_peer_grading = instance_state.get('required_peer_grading', 3)
self.peer_grader_count = instance_state.get('peer_grader_count', 3)
self.min_to_calibrate = instance_state.get('min_to_calibrate', 3)
self.max_to_calibrate = instance_state.get('max_to_calibrate', 6)
self.peer_grade_finished_submissions_when_none_pending = instance_state.get(
'peer_grade_finished_submissions_when_none_pending', False
)
due_date = instance_state.get('due', None)
grace_period_string = instance_state.get('graceperiod', None)
try:
self.timeinfo = TimeInfo(due_date, grace_period_string)
except Exception:
log.error("Error parsing due date information in location {0}".format(location))
raise
self.display_due_date = self.timeinfo.display_due_date
self.rubric_renderer = CombinedOpenEndedRubric(system.render_template, True)
rubric_string = stringify_children(definition['rubric'])
self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
# Static data is passed to the child modules to render
self.static_data = {
'max_score': self._max_score,
'max_attempts': self.max_attempts,
'prompt': definition['prompt'],
'rubric': definition['rubric'],
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
'close_date': self.timeinfo.close_date,
's3_interface': self.system.s3_interface,
'skip_basic_checks': self.skip_basic_checks,
'control': {
'required_peer_grading': self.required_peer_grading,
'peer_grader_count': self.peer_grader_count,
'min_to_calibrate': self.min_to_calibrate,
'max_to_calibrate': self.max_to_calibrate,
'peer_grade_finished_submissions_when_none_pending': (
self.peer_grade_finished_submissions_when_none_pending
),
}
}
self.task_xml = definition['task_xml']
self.location = location
self.fix_invalid_state()
self.setup_next_task()
def validate_task_states(self, tasks_xml, task_states):
"""
Check whether the provided task_states are valid for the supplied task_xml.
Returns a list of messages indicating what is invalid about the state.
If the list is empty, then the state is valid
"""
msgs = []
#Loop through each task state and make sure it matches the xml definition
for task_xml, task_state in zip(tasks_xml, task_states):
tag_name = self.get_tag_name(task_xml)
children = self.child_modules()
task_descriptor = children['descriptors'][tag_name](self.system)
task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system)
try:
task = children['modules'][tag_name](
self.system,
self.location,
task_parsed_xml,
task_descriptor,
self.static_data,
instance_state=task_state,
)
#Loop through each attempt of the task and see if it is valid.
for attempt in task.child_history:
if "post_assessment" not in attempt:
continue
post_assessment = attempt['post_assessment']
try:
post_assessment = json.loads(post_assessment)
except ValueError:
#This is okay, the value may or may not be json encoded.
pass
if tag_name == "openended" and isinstance(post_assessment, list):
msgs.append("Type is open ended and post assessment is a list.")
break
elif tag_name == "selfassessment" and not isinstance(post_assessment, list):
msgs.append("Type is self assessment and post assessment is not a list.")
break
#See if we can properly render the task. Will go into the exception clause below if not.
task.get_html(self.system)
except Exception:
#If one task doesn't match, the state is invalid.
msgs.append("Could not parse task with xml {xml!r} and states {state!r}: {err}".format(
xml=task_xml,
state=task_state,
err=traceback.format_exc()
))
break
return msgs
def is_initial_child_state(self, task_child):
"""
Returns true if this is a child task in an initial configuration
"""
task_child = json.loads(task_child)
return (
task_child['child_state'] == self.INITIAL and
task_child['child_history'] == []
)
def is_reset_task_states(self, task_state):
"""
Returns True if this task_state is from something that was just reset
"""
return all(self.is_initial_child_state(child) for child in task_state)
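# Illustration (hypothetical values): a freshly reset child state serializes to
# something like {"child_state": "initial", "child_history": []}, which
# is_initial_child_state() accepts; a task_state list made up entirely of such
# entries is treated as "just reset" by is_reset_task_states().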
def states_sort_key(self, idx_task_states):
"""
Return a key for sorting a list of indexed task_states, by how far the student got
through the tasks, what their highest score was, and then the index of the submission.
"""
idx, task_states = idx_task_states
state_values = {
OpenEndedChild.INITIAL: 0,
OpenEndedChild.ASSESSING: 1,
OpenEndedChild.POST_ASSESSMENT: 2,
OpenEndedChild.DONE: 3
}
if not task_states:
return (0, 0, state_values[OpenEndedChild.INITIAL], idx)
final_task_xml = self.task_xml[-1]
final_child_state_json = task_states[-1]
final_child_state = json.loads(final_child_state_json)
tag_name = self.get_tag_name(final_task_xml)
children = self.child_modules()
task_descriptor = children['descriptors'][tag_name](self.system)
task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(final_task_xml), self.system)
task = children['modules'][tag_name](
self.system,
self.location,
task_parsed_xml,
task_descriptor,
self.static_data,
instance_state=final_child_state_json,
)
scores = task.all_scores()
if scores:
best_score = max(scores)
else:
best_score = 0
return (
len(task_states),
best_score,
state_values.get(final_child_state.get('child_state', OpenEndedChild.INITIAL), 0),
idx
)
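# Sketch of the resulting ordering (hypothetical keys): a submission with two
# completed tasks and a best score of 3 yields a key like (2, 3, 3, idx), which
# sorts above (1, 0, 1, idx) from a submission still assessing its first task.
# Sorting with reverse=True therefore surfaces the furthest, highest-scoring,
# most recent state first.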
def fix_invalid_state(self):
"""
Sometimes a teacher will change the xml definition of a problem in Studio.
This means that the state passed to the module is invalid.
If that is the case, move the current task states into old_task_states and clear task_states.
"""
# If the current task number is greater than the number of tasks in the
# definition, or than the number of stored task states, our state is invalid.
if self.current_task_number > len(self.task_states) or self.current_task_number > len(self.task_xml):
self.current_task_number = max(min(len(self.task_states), len(self.task_xml)) - 1, 0)
#If the length of the task xml is less than the length of the task states, state is invalid
if len(self.task_xml) < len(self.task_states):
self.current_task_number = len(self.task_xml) - 1
self.task_states = self.task_states[:len(self.task_xml)]
if not self.old_task_states and not self.task_states:
# No validation needed when a student first looks at the problem
return
# Pick out of self.task_states and self.old_task_states the state that is
# a) valid for the current task definition
# b) not the result of a reset due to not having a valid task state
# c) has the highest total score
# d) is the most recent (if the other two conditions are met)
valid_states = [
task_states
for task_states
in self.old_task_states + [self.task_states]
if (
len(self.validate_task_states(self.task_xml, task_states)) == 0 and
not self.is_reset_task_states(task_states)
)
]
# If there are no valid states, don't try and use an old state
if len(valid_states) == 0:
# If this isn't an initial task state, then reset to an initial state
if not self.is_reset_task_states(self.task_states):
self.reset_task_state('\n'.join(self.validate_task_states(self.task_xml, self.task_states)))
return
sorted_states = sorted(enumerate(valid_states), key=self.states_sort_key, reverse=True)
idx, best_task_states = sorted_states[0]
if best_task_states == self.task_states:
return
log.warning(
"Updating current task state for %s to %r for student with anonymous id %r",
self.system.location,
best_task_states,
self.system.anonymous_student_id
)
self.old_task_states.remove(best_task_states)
self.old_task_states.append(self.task_states)
self.task_states = best_task_states
# The state is ASSESSING unless all of the children are done, or all
# of the children haven't been started yet
children = [json.loads(child) for child in best_task_states]
if all(child['child_state'] == self.DONE for child in children):
self.state = self.DONE
elif all(child['child_state'] == self.INITIAL for child in children):
self.state = self.INITIAL
else:
self.state = self.ASSESSING
# The current task number is the index of the last completed child + 1,
# limited by the number of tasks
last_completed_child = next((i for i, child in reversed(list(enumerate(children))) if child['child_state'] == self.DONE), 0)
self.current_task_number = min(last_completed_child + 1, len(best_task_states) - 1)
def create_task(self, task_state, task_xml):
"""Create task object for given task state and task xml."""
tag_name = self.get_tag_name(task_xml)
children = self.child_modules()
task_descriptor = children['descriptors'][tag_name](self.system)
task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system)
task = children['modules'][tag_name](
self.system,
self.location,
task_parsed_xml,
task_descriptor,
self.static_data,
instance_state=task_state,
)
return task
def get_task_number(self, task_number):
"""Return task object at task_index."""
task_states_count = len(self.task_states)
if task_states_count > 0 and task_number < task_states_count:
task_state = self.task_states[task_number]
task_xml = self.task_xml[task_number]
return self.create_task(task_state, task_xml)
return None
def reset_task_state(self, message=""):
"""
Resets the task states. Moves the current task states into old_task_states, and then sets the task number to 0.
:param message: A message to put in the log.
:return: None
"""
info_message = "Combined open ended user state for user {0} in location {1} was invalid. It has been reset, and you now have a new attempt. {2}".format(self.system.anonymous_student_id, self.location.to_deprecated_string(), message)
self.current_task_number = 0
self.student_attempts = 0
self.old_task_states.append(self.task_states)
self.task_states = []
log.info(info_message)
def get_tag_name(self, xml):
"""
Gets the tag name of a given xml block.
Input: XML string
Output: The name of the root tag
"""
tag = etree.fromstring(xml).tag
return tag
def overwrite_state(self, current_task_state):
"""
Overwrites an instance state and sets the latest response to the current response. This is used
to ensure that the student response is carried over from the first child to the rest.
Input: Task state json string
Output: Task state json string
"""
last_response_data = self.get_last_response(self.current_task_number - 1)
last_response = last_response_data['response']
loaded_task_state = json.loads(current_task_state)
if loaded_task_state['child_state'] == self.INITIAL:
loaded_task_state['child_state'] = self.ASSESSING
loaded_task_state['child_created'] = True
loaded_task_state['child_history'].append({'answer': last_response})
current_task_state = json.dumps(loaded_task_state)
return current_task_state
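# Illustration (hypothetical answer): given a previous response "My essay...",
# overwrite_state() turns an initial child state
# {"child_state": "initial", "child_history": []} into
# {"child_state": "assessing", "child_created": true,
#  "child_history": [{"answer": "My essay..."}]}
# so the student's response carries forward to the next task.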
def child_modules(self):
"""
Returns the constructors associated with the child modules in a dictionary. This makes writing functions
simpler (saves code duplication)
Input: None
Output: A dictionary of dictionaries containing the descriptor functions and module functions
"""
child_modules = {
'openended': open_ended_module.OpenEndedModule,
'selfassessment': self_assessment_module.SelfAssessmentModule,
}
child_descriptors = {
'openended': open_ended_module.OpenEndedDescriptor,
'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
}
children = {
'modules': child_modules,
'descriptors': child_descriptors,
}
return children
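# Minimal usage sketch: look up the classes registered for an "openended" task.
# children = self.child_modules()
# task_class = children['modules']['openended']            # OpenEndedModule
# descriptor_class = children['descriptors']['openended']  # OpenEndedDescriptor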
def setup_next_task(self, reset=False):
"""
Sets up the next task for the module. Creates an instance state if none exists, carries over the answer
from the last instance state to the next if needed.
Input: A boolean indicating whether or not this is being called from the reset function.
Output: Boolean True (not useful right now)
"""
current_task_state = None
if len(self.task_states) > self.current_task_number:
current_task_state = self.task_states[self.current_task_number]
self.current_task_xml = self.task_xml[self.current_task_number]
if self.current_task_number > 0:
self.ready_to_reset = self.check_allow_reset()
if self.ready_to_reset:
self.current_task_number = self.current_task_number - 1
current_task_type = self.get_tag_name(self.current_task_xml)
children = self.child_modules()
child_task_module = children['modules'][current_task_type]
self.current_task_descriptor = children['descriptors'][current_task_type](self.system)
# This is the xml object created from the xml definition of the current task
etree_xml = etree.fromstring(self.current_task_xml)
# This sends the etree_xml object through the descriptor module of the current task, and
# returns the xml parsed by the descriptor
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0:
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor,
self.static_data)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
elif current_task_state is None and self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
last_response = last_response_data['response']
current_task_state = json.dumps({
'child_state': self.ASSESSING,
'version': self.STATE_VERSION,
'max_score': self._max_score,
'child_attempts': 0,
'child_created': True,
'child_history': [{'answer': last_response}],
})
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor,
self.static_data,
instance_state=current_task_state)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
else:
if self.current_task_number > 0 and not reset:
current_task_state = self.overwrite_state(current_task_state)
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor,
self.static_data,
instance_state=current_task_state)
return True
def check_allow_reset(self):
"""
Checks to see if the student has passed the criteria to move to the next module. If not, sets
ready_to_reset to true and halts the student's progress through the tasks.
Input: None
Output: the ready_to_reset attribute of the current module.
"""
if not self.ready_to_reset:
if self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number)
if current_response_data['min_score_to_attempt'] > last_response_data['score'] or\
current_response_data['max_score_to_attempt'] < last_response_data['score']:
self.state = self.DONE
self.ready_to_reset = True
return self.ready_to_reset
def get_context(self):
"""
Generates a context dictionary that is used to render html.
Input: None
Output: A dictionary that can be rendered into the combined open ended template.
"""
task_html = self.get_html_base()
# set context variables and render template
ugettext = self.system.service(self, "i18n").ugettext
context = {
'items': [{'content': task_html}],
'ajax_url': self.system.ajax_url,
'allow_reset': self.ready_to_reset,
'state': self.state,
'task_count': len(self.task_xml),
'task_number': self.current_task_number + 1,
'status': ugettext(self.get_status(False)), # pylint: disable=translation-of-non-string
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
'location': self.location,
'legend_list': LEGEND_LIST,
'human_state': ugettext(HUMAN_STATES.get(self.state, HUMAN_STATES["initial"])), # pylint: disable=translation-of-non-string
'is_staff': self.system.user_is_staff,
}
return context
def get_html(self):
"""
Gets HTML for rendering.
Input: None
Output: rendered html
"""
context = self.get_context()
html = self.system.render_template(
'{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context
)
return html
def get_html_nonsystem(self):
"""
Gets HTML for rendering via AJAX. Does not use system, because system contains some additional
html, which is not appropriate for returning via ajax calls.
Input: None
Output: HTML rendered directly via Mako
"""
context = self.get_context()
html = self.system.render_template(
'{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context
)
return html
def get_html_base(self):
"""
Gets the HTML associated with the current child task
Input: None
Output: Child task HTML
"""
self.update_task_states()
return self.current_task.get_html(self.system)
def get_html_ajax(self, data):
"""
Get HTML in AJAX callback
data - Needed to preserve AJAX structure
Output: Dictionary with html attribute
"""
return {'html': self.get_html()}
def get_current_attributes(self, task_number):
"""
Gets the min and max score to attempt attributes of the specified task.
Input: The number of the task.
Output: The minimum and maximum scores needed to move on to the specified task.
"""
task_xml = self.task_xml[task_number]
etree_xml = etree.fromstring(task_xml)
min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))
max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))
return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt}
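# Illustration (hypothetical task xml): a task gated on the previous score,
# e.g. <task min_score_to_attempt="2" max_score_to_attempt="4">...</task>,
# yields {'min_score_to_attempt': 2, 'max_score_to_attempt': 4}; missing
# attributes fall back to 0 and self._max_score respectively.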
def get_last_response(self, task_number):
"""
Returns data associated with the specified task number, such as the last response, score, etc.
Input: The number of the task.
Output: A dictionary that contains information about the specified task.
"""
last_response = ""
task_state = self.task_states[task_number]
task_xml = self.task_xml[task_number]
task_type = self.get_tag_name(task_xml)
children = self.child_modules()
task_descriptor = children['descriptors'][task_type](self.system)
etree_xml = etree.fromstring(task_xml)
min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))
max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))
task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)
task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,
self.static_data, instance_state=task_state)
last_response = task.latest_answer()
last_score = task.latest_score()
all_scores = task.all_scores()
last_post_assessment = task.latest_post_assessment(self.system)
last_post_feedback = ""
feedback_dicts = [{}]
grader_ids = [0]
submission_ids = [0]
if task_type == "openended":
last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False)
if isinstance(last_post_assessment, list):
eval_list = []
for assess in last_post_assessment:
eval_list.append(task.format_feedback_with_evaluation(self.system, assess))
last_post_evaluation = "".join(eval_list)
else:
last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment)
last_post_assessment = last_post_evaluation
try:
rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', "{}"), self.system)
except Exception:
log.debug("Could not parse rubric data from child history. "
"Likely we have not yet initialized a previous step, so this is perfectly fine.")
rubric_data = {}
rubric_scores = rubric_data.get('rubric_scores')
grader_types = rubric_data.get('grader_types')
feedback_items = rubric_data.get('feedback_items')
feedback_dicts = rubric_data.get('feedback_dicts')
grader_ids = rubric_data.get('grader_ids')
submission_ids = rubric_data.get('submission_ids')
elif task_type == "selfassessment":
rubric_scores = last_post_assessment
grader_types = ['SA']
feedback_items = ['']
last_post_assessment = ""
last_correctness = task.is_last_response_correct()
max_score = task.max_score()
state = task.child_state
if task_type in HUMAN_TASK_TYPE:
human_task_name = HUMAN_TASK_TYPE[task_type]
else:
human_task_name = task_type
if state in task.HUMAN_NAMES:
human_state = task.HUMAN_NAMES[state]
else:
human_state = state
if grader_types is not None and len(grader_types) > 0:
grader_type = grader_types[0]
else:
grader_type = "IN"
grader_types = ["IN"]
if grader_type in HUMAN_GRADER_TYPE:
human_grader_name = HUMAN_GRADER_TYPE[grader_type]
else:
human_grader_name = grader_type
last_response_dict = {
'response': last_response,
'score': last_score,
'all_scores': all_scores,
'post_assessment': last_post_assessment,
'type': task_type,
'max_score': max_score,
'state': state,
'human_state': human_state,
'human_task': human_task_name,
'correct': last_correctness,
'min_score_to_attempt': min_score_to_attempt,
'max_score_to_attempt': max_score_to_attempt,
'rubric_scores': rubric_scores,
'grader_types': grader_types,
'feedback_items': feedback_items,
'grader_type': grader_type,
'human_grader_type': human_grader_name,
'feedback_dicts': feedback_dicts,
'grader_ids': grader_ids,
'submission_ids': submission_ids,
'success': True
}
return last_response_dict
def extract_human_name_from_task(self, task_xml):
"""
Given the xml for a task, pull out the human name for it.
Input: xml string
Output: a human readable task name (e.g. Self Assessment)
"""
tree = etree.fromstring(task_xml)
payload = tree.xpath("/openended/openendedparam/grader_payload")
if len(payload) == 0:
task_name = "selfassessment"
else:
inner_payload = json.loads(payload[0].text)
task_name = inner_payload['grader_settings']
human_task = HUMAN_TASK_TYPE[task_name]
return human_task
def update_task_states(self):
"""
Updates the task state of the combined open ended module with the task state of the current child module.
Input: None
Output: boolean indicating whether or not the task state changed.
"""
changed = False
if not self.ready_to_reset:
self.task_states[self.current_task_number] = self.current_task.get_instance_state()
current_task_state = json.loads(self.task_states[self.current_task_number])
if current_task_state['child_state'] == self.DONE:
self.current_task_number += 1
if self.current_task_number >= (len(self.task_xml)):
self.state = self.DONE
self.current_task_number = len(self.task_xml) - 1
else:
self.state = self.INITIAL
changed = True
self.setup_next_task()
return changed
def update_task_states_ajax(self, return_html):
"""
Runs the update task states function for ajax calls. Currently the same as update_task_states
Input: The html returned by the handle_ajax function of the child
Output: New html that should be rendered
"""
self.update_task_states()
return return_html
def check_if_student_has_done_needed_grading(self):
"""
Checks with the ORA server to see if the student has completed the needed peer grading to be shown their grade.
For example, if a student submits one response, and three peers grade their response, the student
cannot see their grades and feedback unless they reciprocate.
Output:
success - boolean indicator of success
allowed_to_submit - boolean indicator of whether student has done their needed grading or not
error_message - If not success, explains why
"""
student_id = self.system.anonymous_student_id
success = False
allowed_to_submit = True
try:
response = self.peer_gs.get_data_for_location(self.location, student_id)
count_graded = response['count_graded']
count_required = response['count_required']
student_sub_count = response['student_sub_count']
count_available = response['count_available']
success = True
except GradingServiceError:
# This is a dev_facing_error
log.error("Could not contact external open ended graders for location {0} and student {1}".format(
self.location, student_id))
# This is a student_facing_error
error_message = "Could not contact the graders. Please notify course staff."
return success, allowed_to_submit, error_message
except KeyError:
log.error("Invalid response from grading server for location {0} and student {1}".format(self.location, student_id))
error_message = "Received invalid response from the graders. Please notify course staff."
return success, allowed_to_submit, error_message
if count_graded >= count_required or count_available == 0:
error_message = ""
return success, allowed_to_submit, error_message
else:
allowed_to_submit = False
# This is a student_facing_error
error_string = ("<h4>Feedback not available yet</h4>"
"<p>You need to peer grade {0} more submissions in order to see your feedback.</p>"
"<p>You have graded responses from {1} students, and {2} students have graded your submissions. </p>"
"<p>You have made {3} submissions.</p>")
error_message = error_string.format(count_required - count_graded, count_graded, count_required,
student_sub_count)
return success, allowed_to_submit, error_message
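# Worked example (hypothetical counts): with count_required=3, count_graded=1
# and count_available > 0, allowed_to_submit is False and the error message
# tells the student to peer grade 3 - 1 = 2 more submissions before their
# feedback is shown.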
def get_rubric(self, _data):
"""
Gets the results of a given grader via ajax.
Input: AJAX data dictionary
Output: Dictionary to be rendered via ajax that contains the result html.
"""
ugettext = self.system.service(self, "i18n").ugettext
all_responses = []
success, can_see_rubric, error = self.check_if_student_has_done_needed_grading()
if not can_see_rubric:
return {
'html': self.system.render_template(
'{0}/combined_open_ended_hidden_results.html'.format(self.TEMPLATE_DIR),
{'error': error}),
'success': True,
'hide_reset': True
}
contexts = []
rubric_number = self.current_task_number
if self.ready_to_reset:
rubric_number += 1
response = self.get_last_response(rubric_number)
score_length = len(response['grader_types'])
for z in xrange(score_length):
if response['grader_types'][z] in HUMAN_GRADER_TYPE:
try:
feedback = response['feedback_dicts'][z].get('feedback', '')
except TypeError:
return {'success': False}
rubric_scores = [[response['rubric_scores'][z]]]
grader_types = [[response['grader_types'][z]]]
feedback_items = [[response['feedback_items'][z]]]
rubric_html = self.rubric_renderer.render_combined_rubric(
stringify_children(self.static_data['rubric']),
rubric_scores,
grader_types,
feedback_items
)
contexts.append({
'result': rubric_html,
# Translators: "Scored rubric" appears to a user as part of a longer
# string that looks something like: "Scored rubric from grader 1".
# "Scored" is an adjective that modifies the noun "rubric".
# That longer string appears when a user is viewing a graded rubric
# returned from one of the graders of their openended response problem.
'task_name': ugettext('Scored rubric'),
'feedback': feedback
})
context = {
'results': contexts,
}
html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True, 'hide_reset': False}
def get_legend(self, _data):
"""
Gets the legend of grader types via ajax.
Input: AJAX data dictionary
Output: Dictionary to be rendered via ajax that contains the legend html.
"""
context = {
'legend_list': LEGEND_LIST,
}
html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
def handle_ajax(self, dispatch, data):
"""
This is called by courseware.module_render, to handle an AJAX call.
"data" is request.POST.
Returns a json dictionary:
{ 'progress_changed' : True/False,
'progress': 'none'/'in_progress'/'done',
<other request-specific values here > }
"""
handlers = {
'next_problem': self.next_problem,
'reset': self.reset,
'get_combined_rubric': self.get_rubric,
'get_legend': self.get_legend,
'get_last_response': self.get_last_response_ajax,
'get_current_state': self.get_current_state,
'get_html': self.get_html_ajax,
}
if dispatch not in handlers:
return_html = self.current_task.handle_ajax(dispatch, data, self.system)
return self.update_task_states_ajax(return_html)
d = handlers[dispatch](data)
return json.dumps(d, cls=ComplexEncoder)
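# Dispatch sketch: a browser POST to the 'reset' handler flows through
# handle_ajax('reset', request.POST), which calls self.reset(data) and returns
# its dictionary serialized with json.dumps(..., cls=ComplexEncoder). Unknown
# dispatches fall through to the current child task's own handle_ajax.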
def get_current_state(self, data):
"""
Gets the current state of the module.
"""
return self.get_context()
def get_last_response_ajax(self, data):
"""
Get the last response via ajax callback
data - Needed to preserve ajax callback structure
Output: Last response dictionary
"""
return self.get_last_response(self.current_task_number)
def next_problem(self, _data):
"""
Called via ajax to advance to the next problem.
Input: AJAX data request.
Output: Dictionary to be rendered
"""
self.update_task_states()
return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.ready_to_reset}
def reset(self, data):
"""
If resetting is allowed, reset the state of the combined open ended module.
Input: AJAX data dictionary
Output: AJAX dictionary to be rendered
"""
ugettext = self.system.service(self, "i18n").ugettext
if self.state != self.DONE:
if not self.ready_to_reset:
return self.out_of_sync_error(data)
success, can_reset, error = self.check_if_student_has_done_needed_grading()
if not can_reset:
return {'error': error, 'success': False}
if self.student_attempts >= self.max_attempts - 1:
if self.student_attempts == self.max_attempts - 1:
self.student_attempts += 1
return {
'success': False,
# This is a student_facing_error
'error': ugettext(
'You have attempted this question {number_of_student_attempts} times. '
'You are only allowed to attempt it {max_number_of_attempts} times.'
).format(
number_of_student_attempts=self.student_attempts,
max_number_of_attempts=self.max_attempts
)
}
self.student_attempts += 1
self.state = self.INITIAL
self.ready_to_reset = False
for i in xrange(len(self.task_xml)):
self.current_task_number = i
self.setup_next_task(reset=True)
self.current_task.reset(self.system)
self.task_states[self.current_task_number] = self.current_task.get_instance_state()
self.current_task_number = 0
self.ready_to_reset = False
self.setup_next_task()
return {'success': True, 'html': self.get_html_nonsystem()}
def get_instance_state(self):
"""
Returns the current instance state. The module can be recreated from the instance state.
Input: None
Output: A dictionary containing the instance state.
"""
state = {
'version': self.STATE_VERSION,
'current_task_number': self.current_task_number,
'state': self.state,
'task_states': self.task_states,
'student_attempts': self.student_attempts,
'ready_to_reset': self.ready_to_reset,
}
return json.dumps(state)
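# Illustration (hypothetical values): the serialized instance state looks like
# {"version": 1, "current_task_number": 1, "state": "assessing",
#  "task_states": ["<child json>", "<child json>"], "student_attempts": 0,
#  "ready_to_reset": false}
# and is sufficient to recreate the module on the next request.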
def get_status(self, render_via_ajax):
"""
Gets the status panel to be displayed at the top right.
Input: render_via_ajax -- boolean indicating whether the status is being rendered via an ajax call
Output: The status html to be rendered
"""
ugettext = self.system.service(self, "i18n").ugettext
status_list = []
current_task_human_name = ""
for i in xrange(len(self.task_xml)):
human_task_name = self.extract_human_name_from_task(self.task_xml[i])
human_task_name = ugettext(human_task_name) # pylint: disable=translation-of-non-string
# Extract the name of the current task for screen readers.
if self.current_task_number == i:
current_task_human_name = human_task_name
task_data = {
'task_number': i + 1,
'human_task': human_task_name,
'current': self.current_task_number == i
}
status_list.append(task_data)
context = {
'status_list': status_list,
'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
'legend_list': LEGEND_LIST,
'render_via_ajax': render_via_ajax,
'current_task_human_name': current_task_human_name,
}
status_html = self.system.render_template(
"{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context
)
return status_html
def check_if_done_and_scored(self):
"""
Checks if the object is currently in a finished state (either student didn't meet criteria to move
to next step, in which case they are in the allow_reset state, or they are done with the question
entirely, in which case they will be in the self.DONE state), and if it is scored or not.
@return: Boolean corresponding to the above.
"""
return (self.state == self.DONE or self.ready_to_reset) and self.is_scored
def get_weight(self):
"""
Return the weight of the problem. The old default weight was None, so set to 1 in that case.
Output - int weight
"""
weight = self.weight
if weight is None:
weight = 1
return weight
def get_score(self):
"""
Score the student received on the problem, or None if there is no
score.
Returns:
dictionary
{'score': integer, from 0 to get_max_score(),
'total': get_max_score()}
"""
max_score = None
score = None
#The old default was None, so set to 1 if it is the old default weight
weight = self.get_weight()
if self.is_scored:
# Finds the maximum score of all student attempts and keeps it.
score_mat = []
for i in xrange(len(self.task_states)):
# For each task, extract all student scores on that task (each attempt for each task)
last_response = self.get_last_response(i)
score = last_response.get('all_scores', None)
if score is not None:
# Convert none scores and weight scores properly
for j in xrange(len(score)):
if score[j] is None:
score[j] = 0
score[j] *= float(weight)
score_mat.append(score)
if len(score_mat) > 0:
# Currently, assume that the final step is the correct one, and that those are the final scores.
# This will change in the future, which is why the machinery above exists to extract all scores on all steps
scores = score_mat[-1]
score = max(scores)
else:
score = 0
if self._max_score is not None:
# Weight the max score if it is not None
max_score = self._max_score * float(weight)
else:
# Without a max_score, we cannot have a score!
score = None
score_dict = {
'score': score,
'total': max_score,
}
return score_dict
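# Worked example (hypothetical numbers): with weight=2, a best raw score of 3
# on the final task, and _max_score=5, get_score() returns
# {'score': 6.0, 'total': 10.0}.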
def max_score(self):
"""
Maximum score possible in this module. Returns the max score if finished, None if not.
"""
max_score = None
if self.check_if_done_and_scored():
max_score = self._max_score
return max_score
def get_progress(self):
"""
Generate a progress object. Progress objects represent how far the
student has gone in this module. Must be implemented to get correct
progress tracking behavior in nested modules like sequence and
vertical. This behavior is consistent with capa.
If the module is unscored, return None (consistent with capa).
"""
d = self.get_score()
if d['total'] > 0 and self.is_scored:
try:
return Progress(d['score'], d['total'])
except (TypeError, ValueError):
log.exception("Got bad progress")
return None
return None
def out_of_sync_error(self, data, msg=''):
"""
Return a dict with an out-of-sync error message, and also log it.
"""
ugettext = self.system.service(self, "i18n").ugettext
#This is a dev_facing_error
log.warning(
"Combined module state out sync. state: %r, data: %r. %s",
self.state,
data,
msg
)
#This is a student_facing_error
return {
'success': False,
'error': ugettext('The problem state got out-of-sync. Please try reloading the page.')
}
@classmethod
def service_declaration(cls, service_name):
"""
This classmethod is copied from XBlock's service_declaration.
It is included to make this class act enough like an XBlock
to get i18n working on it.
This is currently only used for i18n, and will return "need"
in that case.
Arguments:
service_name (string): the name of the service requested.
Returns:
One of "need", "want", or None.
"""
declaration = cls._combined_services.get(service_name)
return declaration
class CombinedOpenEndedV1Descriptor(object):
"""
Module for adding combined open ended questions
"""
mako_template = "widgets/html-edit.html"
module_class = CombinedOpenEndedV1Module
filename_extension = "xml"
has_score = True
def __init__(self, system):
self.system = system
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Pull out the individual tasks, the rubric, and the prompt, and parse
Returns:
{
'rubric': 'some-html',
'prompt': 'some-html',
'task_xml': list of xml strings,
}
"""
expected_children = ['task', 'rubric', 'prompt']
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
# This is a staff_facing_error
raise ValueError(
u"Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. {1}".format(
child, xml_object))
def parse_task(k):
"""Assumes that xml_object has child k"""
return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(len(xml_object.xpath(k)))]
def parse(k):
"""Assumes that xml_object has child k"""
return xml_object.xpath(k)[0]
return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}
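# Illustration (hypothetical definition): the minimal xml this parser accepts
# looks like
# <combinedopenended>
#   <prompt>Write an essay about...</prompt>
#   <rubric>...</rubric>
#   <task>...</task>
# </combinedopenended>
# definition_from_xml() returns the prompt and rubric nodes plus a list
# containing the one stringified task.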
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('combinedopenended')
def add_child(k):
child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k])
child_node = etree.fromstring(child_str)
elt.append(child_node)
for child in ['task']:
add_child(child)
return elt

View File

@@ -1,366 +0,0 @@
import logging
from lxml import etree
log = logging.getLogger(__name__)
GRADER_TYPE_IMAGE_DICT = {
'SA': '/static/images/self_assessment_icon.png',
'PE': '/static/images/peer_grading_icon.png',
'ML': '/static/images/ml_grading_icon.png',
'IN': '/static/images/peer_grading_icon.png',
'BC': '/static/images/ml_grading_icon.png',
}
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
HUMAN_GRADER_TYPE = {
# Translators: "Self-Assessment" refers to the self-assessed mode of openended evaluation
'SA': _('Self-Assessment'),
# Translators: "Peer-Assessment" refers to the peer-assessed mode of openended evaluation
'PE': _('Peer-Assessment'),
# Translators: "Instructor-Assessment" refers to the instructor-assessed mode of openended evaluation
'IN': _('Instructor-Assessment'),
# Translators: "AI-Assessment" refers to the machine-graded mode of openended evaluation
'ML': _('AI-Assessment'),
# Translators: "AI-Assessment" refers to the machine-graded mode of openended evaluation
'BC': _('AI-Assessment'),
}
DO_NOT_DISPLAY = ['BC', 'IN']
LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys()
if k not in DO_NOT_DISPLAY]
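# With the dictionaries above, LEGEND_LIST holds the three displayed grader
# types (Self-Assessment, Peer-Assessment and AI-Assessment), each paired with
# its icon; 'BC' and 'IN' are filtered out via DO_NOT_DISPLAY.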
class RubricParsingError(Exception):
def __init__(self, msg):
self.msg = msg
class CombinedOpenEndedRubric(object):
TEMPLATE_DIR = "combinedopenended/openended"
def __init__(self, render_template, view_only=False):
self.has_score = False
self.view_only = view_only
self.render_template = render_template
def render_rubric(self, rubric_xml, score_list=None):
'''
render_rubric: takes in an xml string and outputs the corresponding
html for that xml, given the type of rubric we're generating
Input:
rubric_xml: a string that has not yet been parsed into xml and that
represents this particular rubric
Output:
html: the html that corresponds to the xml given
'''
success = False
try:
rubric_categories = self.extract_categories(rubric_xml)
if score_list and len(score_list) == len(rubric_categories):
for i in xrange(len(rubric_categories)):
category = rubric_categories[i]
for j in xrange(len(category['options'])):
if score_list[i] == j:
rubric_categories[i]['options'][j]['selected'] = True
rubric_scores = [cat['score'] for cat in rubric_categories]
max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
max_score = max(max_scores)
rubric_template = '{0}/open_ended_rubric.html'.format(self.TEMPLATE_DIR)
if self.view_only:
rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR)
html = self.render_template(
rubric_template,
{
'categories': rubric_categories,
'has_score': self.has_score,
'view_only': self.view_only,
'max_score': max_score,
'combined_rubric': False,
}
)
success = True
except Exception:
#This is a staff_facing_error
error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(
rubric_xml)
log.exception(error_message)
raise RubricParsingError(error_message)
return {'success': success, 'html': html, 'rubric_scores': rubric_scores}
def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed):
rubric_dict = self.render_rubric(rubric_string)
success = rubric_dict['success']
rubric_feedback = rubric_dict['html']
if not success:
#This is a staff_facing_error
error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(
rubric_string, location.to_deprecated_string())
log.error(error_message)
raise RubricParsingError(error_message)
rubric_categories = self.extract_categories(rubric_string)
total = 0
for category in rubric_categories:
total = total + len(category['options']) - 1
if len(category['options']) > (max_score_allowed + 1):
#This is a staff_facing_error
error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}. Contact the learning sciences group for assistance.".format(
len(category['options']), max_score_allowed)
log.error(error_message)
raise RubricParsingError(error_message)
return int(total)
def extract_categories(self, element):
'''
Construct a list of categories such that the structure looks like:
[ { category: "Category 1 Name",
options: [{text: "Option 1 Name", points: 0}, {text:"Option 2 Name", points: 5}]
},
{ category: "Category 2 Name",
options: [{text: "Option 1 Name", points: 0},
{text: "Option 2 Name", points: 1},
{text: "Option 3 Name", points: 2]}]
'''
if isinstance(element, basestring):
element = etree.fromstring(element)
categories = []
for category in element:
if category.tag != 'category':
#This is a staff_facing_error
raise RubricParsingError(
"[extract_categories] Expected a <category> tag: got {0} instead. Contact the learning sciences group for assistance.".format(
category.tag))
else:
categories.append(self.extract_category(category))
return categories
def extract_category(self, category):
'''
construct an individual category
{category: "Category 1 Name",
options: [{text: "Option 1 text", points: 1},
{text: "Option 2 text", points: 2}]}
all sorting and auto-point generation occur in this function
'''
descriptionxml = category[0]
optionsxml = category[1:]
scorexml = category[1]
score = None
if scorexml.tag == 'score':
score_text = scorexml.text
optionsxml = category[2:]
score = int(score_text)
self.has_score = True
# if we are missing the score tag and we are expecting one
elif self.has_score:
#This is a staff_facing_error
raise RubricParsingError(
"[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(
descriptionxml.text))
# parse description
if descriptionxml.tag != 'description':
#This is a staff_facing_error
raise RubricParsingError(
"[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(
descriptionxml.tag))
description = descriptionxml.text
cur_points = 0
options = []
autonumbering = True
# parse options
for option in optionsxml:
if option.tag != 'option':
#This is a staff_facing_error
raise RubricParsingError(
"[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(
option.tag))
else:
pointstr = option.get("points")
if pointstr:
autonumbering = False
# try to parse this into an int
try:
points = int(pointstr)
except ValueError:
#This is a staff_facing_error
raise RubricParsingError(
"[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(
pointstr))
elif autonumbering:
# use the generated one if we're in the right mode
points = cur_points
cur_points = cur_points + 1
else:
raise Exception(
"[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
selected = score == points
options.append({'text': option.text, 'points': points, 'selected': selected})
# sort and check for duplicates
options = sorted(options, key=lambda option: option['points'])
CombinedOpenEndedRubric.validate_options(options)
return {'description': description, 'options': options, 'score': score}
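# Illustration (hypothetical category): the shape extract_category() parses.
# <category>
#   <description>Clarity</description>
#   <option points="0">Unclear</option>
#   <option points="1">Mostly clear</option>
#   <option points="2">Very clear</option>
# </category>
# An optional <score> element directly after the description marks the selected
# option; omitting the points attributes auto-numbers the options 0, 1, 2.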
def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types):
success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types,
feedback_types)
#Get all the categories in the rubric
rubric_categories = self.extract_categories(rubric_xml)
#Get a list of max scores, each entry belonging to a rubric category
max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
actual_scores = []
#Get the highest possible score across all categories
max_score = max(max_scores)
#Loop through each category
for i, category in enumerate(rubric_categories):
#Loop through each option in the category
for j in xrange(len(category['options'])):
#Initialize an empty grader types list
rubric_categories[i]['options'][j]['grader_types'] = []
#Score tuples are a flat data structure with (category, option, grader_type_list) for selected graders
for tup in score_tuples:
if tup[1] == i and tup[2] == j:
for grader_type in tup[3]:
#Set the rubric grader type to the tuple grader types
rubric_categories[i]['options'][j]['grader_types'].append(grader_type)
#Grab the score and add it to the actual scores. j will be the score for the selected
#grader type
if len(actual_scores) <= i:
#Initialize a new list in the list of lists
actual_scores.append([j])
else:
#If a list in the list of lists for this position exists, append to it
actual_scores[i] += [j]
actual_scores = [sum(i) / len(i) for i in actual_scores]
correct = []
#Define if the student is "correct" (1) "incorrect" (0) or "partially correct" (.5)
for (i, a) in enumerate(actual_scores):
if int(a) == max_scores[i]:
correct.append(1)
elif int(a) == 0:
correct.append(0)
else:
correct.append(.5)
html = self.render_template(
'{0}/open_ended_combined_rubric.html'.format(self.TEMPLATE_DIR),
{
'categories': rubric_categories,
'max_scores': max_scores,
'correct': correct,
'has_score': True,
'view_only': True,
'max_score': max_score,
'combined_rubric': True,
'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
'human_grader_types': HUMAN_GRADER_TYPE,
}
)
return html
@staticmethod
def validate_options(options):
'''
Validates a set of options. This can and should be extended to filter out other bad edge cases
'''
if len(options) == 0:
#This is a staff_facing_error
raise RubricParsingError(
"[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
if len(options) == 1:
return
prev = options[0]['points']
for option in options[1:]:
if prev == option['points']:
#This is a staff_facing_error
raise RubricParsingError(
"[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
else:
prev = option['points']
@staticmethod
def reformat_scores_for_rendering(scores, score_types, feedback_types):
"""
Takes in a list of rubric scores, the types of those scores, and feedback associated with them
Outputs a reformatted list of score tuples (count, rubric category, rubric score, [graders that gave this score], [feedback types])
@param scores:
@param score_types:
@param feedback_types:
@return:
"""
success = False
if len(scores) == 0:
#This is a dev_facing_error
log.error("Score length is 0 when trying to reformat rubric scores for rendering.")
return success, ""
if len(scores) != len(score_types) or len(feedback_types) != len(scores):
#This is a dev_facing_error
log.error("Length mismatches when trying to reformat rubric scores for rendering. "
"Scores: {0}, Score Types: {1} Feedback Types: {2}".format(scores, score_types, feedback_types))
return success, ""
score_lists = []
score_type_list = []
feedback_type_list = []
for i in xrange(len(scores)):
score_cont_list = scores[i]
for j in xrange(len(score_cont_list)):
score_list = score_cont_list[j]
score_lists.append(score_list)
score_type_list.append(score_types[i][j])
feedback_type_list.append(feedback_types[i][j])
score_list_len = len(score_lists[0])
for score_list in score_lists:
if len(score_list) != score_list_len:
return success, ""
score_tuples = []
for i in xrange(len(score_lists)):
for j in xrange(len(score_lists[i])):
score_tuple = [1, j, score_lists[i][j], [], []]
score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, score_tuple)
score_tuples[tup_ind][0] += 1
score_tuples[tup_ind][3].append(score_type_list[i])
score_tuples[tup_ind][4].append(feedback_type_list[i])
success = True
return success, score_tuples
@staticmethod
def check_for_tuple_matches(tuples, score_tuple):
"""
Checks to see if a tuple in a list of tuples matches score_tuple.
If there is no match, creates a new tuple matching score_tuple and appends it.
@param tuples: list of tuples
@param score_tuple: tuple to match
@return: a new list of tuples, and the index of the tuple that matches score_tuple
"""
category = score_tuple[1]
score = score_tuple[2]
tup_ind = -1
for ind in xrange(len(tuples)):
if tuples[ind][1] == category and tuples[ind][2] == score:
tup_ind = ind
break
if tup_ind == -1:
tuples.append([0, category, score, [], []])
tup_ind = len(tuples) - 1
return tuples, tup_ind
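# Illustration (hypothetical values): matching is on (category, score). Given
# tuples = [[2, 0, 1, ['PE'], ['Good work']]] and a score_tuple with category 0
# and score 1, index 0 is returned unchanged; any other (category, score) pair
# first appends a fresh [0, category, score, [], []] entry.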

View File

@@ -1,182 +0,0 @@
import dogstats_wrapper as dog_stats_api
import logging
from .grading_service_module import GradingService
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to controller query backend.
"""
METRIC_NAME = 'edxapp.open_ended_grading.controller_query_service'
def __init__(self, config, render_template):
config['render_template'] = render_template
super(ControllerQueryService, self).__init__(config)
self.url = config['url'] + config['grading_controller']
self.login_url = self.url + '/login/'
self.check_eta_url = self.url + '/get_submission_eta/'
self.combined_notifications_url = self.url + '/combined_notifications/'
self.grading_status_list_url = self.url + '/get_grading_status_list/'
self.flagged_problem_list_url = self.url + '/get_flagged_problem_list/'
self.take_action_on_flags_url = self.url + '/take_action_on_flags/'
def check_for_eta(self, location):
params = {
'location': location,
}
data = self.get(self.check_eta_url, params)
self._record_result('check_for_eta', data)
dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), data.get('eta', 0))
return data
def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed):
params = {
'student_id': student_id,
'course_id': course_id.to_deprecated_string(),
'user_is_staff': user_is_staff,
'last_time_viewed': last_time_viewed,
}
log.debug(self.combined_notifications_url)
data = self.get(self.combined_notifications_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'user_is_staff:{}'.format(user_is_staff)]
tags.extend(
u'{}:{}'.format(key, value)
for key, value in data.items()
if key not in ('success', 'version', 'error')
)
self._record_result('check_combined_notifications', data, tags)
return data
def get_grading_status_list(self, course_id, student_id):
params = {
'student_id': student_id,
'course_id': course_id.to_deprecated_string(),
}
data = self.get(self.grading_status_list_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
self._record_result('get_grading_status_list', data, tags)
dog_stats_api.histogram(
self._metric_name('get_grading_status_list.length'),
len(data.get('problem_list', [])),
tags=tags
)
return data
def get_flagged_problem_list(self, course_id):
params = {
'course_id': course_id.to_deprecated_string(),
}
data = self.get(self.flagged_problem_list_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
self._record_result('get_flagged_problem_list', data, tags)
dog_stats_api.histogram(
self._metric_name('get_flagged_problem_list.length'),
len(data.get('flagged_submissions', []))
)
return data
def take_action_on_flags(self, course_id, student_id, submission_id, action_type):
params = {
'course_id': course_id.to_deprecated_string(),
'student_id': student_id,
'submission_id': submission_id,
'action_type': action_type
}
data = self.post(self.take_action_on_flags_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'action_type:{}'.format(action_type)]
self._record_result('take_action_on_flags', data, tags)
return data
class MockControllerQueryService(object):
"""
Mock controller query service for testing
"""
def __init__(self, config, render_template):
pass
def check_for_eta(self, *args, **kwargs):
"""
Mock later if needed. Stub function for now.
@param params:
@return:
"""
pass
def check_combined_notifications(self, *args, **kwargs):
combined_notifications = {
"flagged_submissions_exist": False,
"version": 1,
"new_student_grading_to_view": False,
"success": True,
"staff_needs_to_grade": False,
"student_needs_to_peer_grade": True,
"overall_need_to_check": True
}
return combined_notifications
def get_grading_status_list(self, *args, **kwargs):
grading_status_list = {
"version": 1,
"problem_list": [
{
"problem_name": "Science Question -- Machine Assessed",
"grader_type": "NA",
"eta_available": True,
"state": "Waiting to be Graded",
"eta": 259200,
"location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML"
}, {
"problem_name": "Humanities Question -- Peer Assessed",
"grader_type": "NA",
"eta_available": True,
"state": "Waiting to be Graded",
"eta": 259200,
"location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer"
}
],
"success": True
}
return grading_status_list
def get_flagged_problem_list(self, *args, **kwargs):
flagged_problem_list = {
"version": 1,
"success": False,
"error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall"
}
return flagged_problem_list
def take_action_on_flags(self, *args, **kwargs):
"""
Mock later if needed. Stub function for now.
@param params:
@return:
"""
pass
def convert_seconds_to_human_readable(seconds):
if seconds < 60:
human_string = "{0} seconds".format(seconds)
elif seconds < 60 * 60:
human_string = "{0} minutes".format(round(seconds / 60, 1))
elif seconds < (24 * 60 * 60):
human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
else:
human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))
return human_string
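# Usage sketch: convert_seconds_to_human_readable(90) -> "1.5 minutes" and
# convert_seconds_to_human_readable(259200) -> "3.0 days"; the eta values
# returned by the grading controller are expressed in seconds.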

View File

@@ -1,162 +0,0 @@
# This class gives a common interface for logging into the grading controller
import logging
import requests
import dogstats_wrapper as dog_stats_api
from lxml import etree
from requests.exceptions import RequestException, ConnectionError, HTTPError
from .combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
log = logging.getLogger(__name__)
class GradingServiceError(Exception):
"""
Exception for grading service. Shown when Open Response Assessment servers cannot be reached.
"""
pass
class GradingService(object):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
self.username = config['username']
self.password = config['password']
self.session = requests.Session()
self.render_template = config['render_template']
def _login(self):
"""
Log into the staff grading service.
Raises requests.exceptions.HTTPError if something goes wrong.
Returns the decoded json dict of the response.
"""
response = self.session.post(self.login_url,
{'username': self.username,
'password': self.password, })
response.raise_for_status()
return response.json()
def _metric_name(self, suffix):
"""
Return a metric name for datadog, using `self.METRIC_NAME` as
a prefix, and `suffix` as the suffix.
Arguments:
suffix (str): The metric suffix to use.
"""
return '{}.{}'.format(self.METRIC_NAME, suffix)
def _record_result(self, action, data, tags=None):
"""
Log results from an API call to an ORA service to datadog.
Arguments:
action (str): The ORA action being recorded.
data (dict): The data returned from the ORA service. Should contain the key 'success'.
tags (list): A list of tags to attach to the logged metric.
"""
if tags is None:
tags = []
tags.append(u'result:{}'.format(data.get('success', False)))
tags.append(u'action:{}'.format(action))
dog_stats_api.increment(self._metric_name('request.count'), tags=tags)
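        # For example, a successful get_problem_list call would increment
        # <METRIC_NAME>.request.count with tags like
        # [u'course_id:MITx/oe101x/2012_Fall', u'result:True',
        #  u'action:get_problem_list'] (the course id here is illustrative).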
def post(self, url, data, allow_redirects=False):
"""
Make a post request to the grading controller. Returns the parsed json results of that request.
"""
try:
op = lambda: self.session.post(url, data=data,
allow_redirects=allow_redirects)
response_json = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError, ValueError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)
log.error(error_string)
raise GradingServiceError(error_string)
return response_json
def get(self, url, params, allow_redirects=False):
"""
Make a get request to the grading controller. Returns the parsed json results of that request.
"""
op = lambda: self.session.get(url,
allow_redirects=allow_redirects,
params=params)
try:
response_json = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError, ValueError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
#This is a dev_facing_error
error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)
log.error(error_string)
raise GradingServiceError(error_string)
return response_json
def _try_with_login(self, operation):
"""
Call operation(), which should return a requests response object. If
the request fails with a 'login_required' error, call _login() and try
the operation again.
Returns the result of operation(). Does not catch exceptions.
"""
response = operation()
resp_json = response.json()
if (resp_json
and resp_json.get('success') is False
and resp_json.get('error') == 'login_required'):
# apparently we aren't logged in. Try to fix that.
r = self._login()
if r and not r.get('success'):
log.warning("Couldn't log into ORA backend. Response: %s",
r)
# try again
response = operation()
response.raise_for_status()
resp_json = response.json()
return resp_json
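    # Sketch of the retry protocol implemented by _try_with_login above
    # (shape inferred from the checks in that method): an expired session is
    # signalled by a response body of
    #   {"success": false, "error": "login_required"}
    # after which the service logs in again and replays the operation once.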
def _render_rubric(self, response, view_only=False):
"""
Given an HTTP Response json with the key 'rubric', render out the html
required to display the rubric and put it back into the response
returns the updated response as a dictionary that can be serialized later
"""
try:
if 'rubric' in response:
rubric = response['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.render_template, view_only)
rubric_dict = rubric_renderer.render_rubric(rubric)
success = rubric_dict['success']
rubric_html = rubric_dict['html']
response['rubric'] = rubric_html
return response
# if we can't parse the rubric into HTML,
except (etree.XMLSyntaxError, RubricParsingError):
#This is a dev_facing_error
log.exception("Cannot parse rubric string. Raw string: {0}".format(response['rubric']))
return {'success': False,
'error': 'Error displaying submission'}
except ValueError:
#This is a dev_facing_error
log.exception("Error parsing response: {0}".format(response))
return {'success': False,
'error': "Error displaying submission"}

View File

@@ -1,899 +0,0 @@
"""
A Self Assessment module that allows students to write open-ended responses,
submit, then see a rubric and rate themselves. Persists student supplied
hints, answers, and assessment judgment (currently only correct/incorrect).
Parses xml definition file--see below for exact format.
"""
import json
import logging
from lxml import etree
import capa.xqueue_interface as xqueue_interface
from xmodule.capa_module import ComplexEncoder
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
from capa.util import *
import openendedchild
from numpy import median
from datetime import datetime
from pytz import UTC
from .combined_open_ended_rubric import CombinedOpenEndedRubric
log = logging.getLogger("edx.courseware")
class OpenEndedModule(openendedchild.OpenEndedChild):
"""
The open ended module supports all external open ended grader problems.
Sample XML file:
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
"""
TEMPLATE_DIR = "combinedopenended/openended"
def setup_response(self, system, location, definition, descriptor):
"""
Sets up the response type.
@param system: Modulesystem object
@param location: The location of the problem
@param definition: The xml definition of the problem
@param descriptor: The OpenEndedDescriptor associated with this
@return: None
"""
oeparam = definition['oeparam']
self.url = definition.get('url', None)
self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE)
self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
# This is needed to attach feedback to specific responses later
self.submission_id = None
self.grader_id = None
error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance."
if oeparam is None:
# This is a staff_facing_error
raise ValueError(error_message.format('oeparam'))
if self.child_prompt is None:
raise ValueError(error_message.format('prompt'))
if self.child_rubric is None:
raise ValueError(error_message.format('rubric'))
self._parse(oeparam, self.child_prompt, self.child_rubric, system)
# If there are multiple tasks (like self-assessment followed by ai), once
        # the status of the first task is set to DONE, setup_next_task() will
# create the OpenEndedChild with parameter child_created=True so that the
# submission can be sent to the grader. Keep trying each time this module
# is loaded until it succeeds.
if self.child_created is True and self.child_state == self.ASSESSING:
success, message = self.send_to_grader(self.latest_answer(), system)
if success:
self.child_created = False
def _parse(self, oeparam, prompt, rubric, system):
'''
Parse OpenEndedResponse XML:
self.initial_display
self.payload - dict containing keys --
'grader' : path to grader settings file, 'problem_id' : id of the problem
self.answer - What to display when show answer is clicked
'''
# Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
prompt_string = stringify_children(prompt)
rubric_string = stringify_children(rubric)
self.child_prompt = prompt_string
self.child_rubric = rubric_string
grader_payload = oeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
# Update grader payload with student id. If grader payload not json, error.
try:
parsed_grader_payload = json.loads(grader_payload)
# NOTE: self.system.location is valid because the capa_module
# __init__ adds it (easiest way to get problem location into
# response types)
        except (TypeError, ValueError):
            # This is a dev_facing_error
            log.exception(
                "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
                    grader_payload))
            # Re-raise so setup fails fast; otherwise the update() call below
            # would hit a NameError on the unparsed payload.
            raise
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
parsed_grader_payload.update({
'location': self.location_string,
'course_id': system.course_id.to_deprecated_string(),
'prompt': prompt_string,
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
'problem_id': self.display_name,
'skip_basic_checks': self.skip_basic_checks,
'control': json.dumps(self.control),
})
updated_grader_payload = json.dumps(parsed_grader_payload)
self.payload = {'grader_payload': updated_grader_payload}
def skip_post_assessment(self, _data, system):
"""
Ajax function that allows one to skip the post assessment phase
@param data: AJAX dictionary
@param system: ModuleSystem
@return: Success indicator
"""
self.child_state = self.DONE
return {'success': True}
def message_post(self, data, system):
"""
Handles a student message post (a reaction to the grade they received from an open ended grader type)
Returns a boolean success/fail and an error message
"""
event_info = dict()
event_info['problem_id'] = self.location_string
event_info['student_id'] = system.anonymous_student_id
event_info['survey_responses'] = data
_ = self.system.service(self, "i18n").ugettext
survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
# This is a student_facing_error
return {
'success': False,
# Translators: 'tag' is one of 'feedback', 'submission_id',
# 'grader_id', or 'score'. They are categories that a student
# responds to when filling out a post-assessment survey
# of his or her grade from an openended problem.
'msg': _("Could not find needed tag {tag_name} in the "
"survey responses. Please try submitting "
"again.").format(tag_name=tag)
}
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score'])
        except (ValueError, TypeError):
# This is a dev_facing_error
error_message = (
"Could not parse submission id, grader id, "
"or feedback from message_post ajax call. "
"Here is the message data: {0}".format(survey_responses)
)
log.exception(error_message)
# This is a student_facing_error
return {
'success': False,
'msg': _(
"There was an error saving your feedback. Please "
"contact course staff."
)
}
xqueue = system.get('xqueue')
if xqueue is None:
return {'success': False, 'msg': _("Couldn't submit feedback.")}
qinterface = xqueue['interface']
qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +
anonymous_student_id +
str(len(self.child_history)))
xheader = xqueue_interface.make_xheader(
lms_callback_url=xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.message_queue_name
)
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
contents = {
'feedback': feedback,
'submission_id': submission_id,
'grader_id': grader_id,
'score': score,
'student_info': json.dumps(student_info),
}
error, error_message = qinterface.send_to_queue(
header=xheader,
body=json.dumps(contents)
)
# Convert error to a success value
success = True
message = _("Successfully saved your feedback.")
if error:
success = False
message = _("Unable to save your feedback. Please try again later.")
log.error("Unable to send feedback to grader. location: {0}, error_message: {1}".format(
self.location_string, error_message
))
else:
self.child_state = self.DONE
# This is a student_facing_message
return {'success': success, 'msg': message}
def send_to_grader(self, submission, system):
"""
Send a given submission to the grader, via the xqueue
@param submission: The student submission to send to the grader
@param system: Modulesystem
        @return: (success, message) tuple -- success flag and user-facing message
"""
# Prepare xqueue request
#------------------------------------------------------------
xqueue = system.get('xqueue')
if xqueue is None:
return False
qinterface = xqueue['interface']
qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
# Generate header
queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +
anonymous_student_id +
str(len(self.child_history)))
xheader = xqueue_interface.make_xheader(
lms_callback_url=xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.queue_name
)
contents = self.payload.copy()
# Metadata related to the student submission revealed to the external grader
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
# Update contents with student response and student info
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
'max_score': self.max_score(),
})
# Submit request. When successful, 'msg' is the prior length of the queue
error, error_message = qinterface.send_to_queue(
header=xheader,
body=json.dumps(contents)
)
# State associated with the queueing request
queuestate = {
'key': queuekey,
'time': qtime,
}
_ = self.system.service(self, "i18n").ugettext
success = True
message = _("Successfully saved your submission.")
if error:
success = False
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
message = _('Unable to submit your submission to the grader. Please try again later.')
log.error("Unable to submit to grader. location: {0}, error_message: {1}".format(
self.location_string, error_message
))
return (success, message)
def _update_score(self, score_msg, queuekey, system):
"""
Called by xqueue to update the score
@param score_msg: The message from xqueue
@param queuekey: The key sent by xqueue
@param system: Modulesystem
@return: Boolean True (not useful currently)
"""
_ = self.system.service(self, "i18n").ugettext
new_score_msg = self._parse_score_msg(score_msg, system)
if not new_score_msg['valid']:
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
new_score_msg['feedback'] = _('Invalid grader reply. Please contact the course staff.')
# self.child_history is initialized as []. record_latest_score() and record_latest_post_assessment()
# operate on self.child_history[-1]. Thus we have to make sure child_history is not [].
# Handle at this level instead of in record_*() because this is a good place to reduce the number of conditions
# and also keep the persistent state from changing.
if self.child_history:
self.record_latest_score(new_score_msg['score'])
self.record_latest_post_assessment(score_msg)
self.child_state = self.POST_ASSESSMENT
else:
log.error(
"Trying to update score without existing studentmodule child_history:\n"
" location: {location}\n"
" score: {score}\n"
" grader_ids: {grader_ids}\n"
" submission_ids: {submission_ids}".format(
location=self.location_string,
score=new_score_msg['score'],
grader_ids=new_score_msg['grader_ids'],
submission_ids=new_score_msg['submission_ids'],
)
)
return True
def get_answers(self):
"""
Gets and shows the answer for this problem.
@return: Answer html
"""
anshtml = '<span class="openended-answer"><pre><code>{0}</code></pre></span>'.format(self.answer)
return {self.answer_id: anshtml}
def get_initial_display(self):
"""
Gets and shows the initial display for the input box.
@return: Initial display html
"""
return {self.answer_id: self.initial_display}
def _convert_longform_feedback_to_html(self, response_items):
"""
Take in a dictionary, and return html strings for display to student.
Input:
response_items: Dictionary with keys success, feedback.
if success is True, feedback should be a dictionary, with keys for
types of feedback, and the corresponding feedback values.
if success is False, feedback is actually an error string.
NOTE: this will need to change when we integrate peer grading, because
that will have more complex feedback.
Output:
            String -- html that can be displayed to the student.
"""
# We want to display available feedback in a particular order.
# This dictionary specifies which goes first--lower first.
priorities = {
# These go at the start of the feedback
'spelling': 0,
'grammar': 1,
# needs to be after all the other feedback
'markup_text': 3
}
do_not_render = ['topicality', 'prompt-overlap']
default_priority = 2
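        # Illustrative ordering: feedback keys {'grammar', 'spelling',
        # 'content', 'markup_text'} sort as spelling (0), grammar (1),
        # content (2, the default), markup_text (3); 'topicality' and
        # 'prompt-overlap' are dropped before rendering.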
def get_priority(elt):
"""
Args:
elt: a tuple of feedback-type, feedback
Returns:
the priority for this feedback type
"""
return priorities.get(elt[0], default_priority)
def encode_values(feedback_type, value):
feedback_type = str(feedback_type).encode('ascii', 'ignore')
if not isinstance(value, basestring):
value = str(value)
value = value.encode('ascii', 'ignore')
return feedback_type, value
def format_feedback(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = u"""
<div class="{feedback_type}">
{value}
</div>
""".format(feedback_type=feedback_type, value=value)
return feedback
def format_feedback_hidden(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<input class="{feedback_type}" type="hidden" value="{value}" />
""".format(feedback_type=feedback_type, value=value)
return feedback
# TODO (vshnayder): design and document the details of this format so
# that we can do proper escaping here (e.g. are the graders allowed to
# include HTML?)
_ = self.system.service(self, "i18n").ugettext
for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
if tag not in response_items:
# This is a student_facing_error
return format_feedback(
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
'errors', _('Error getting feedback from grader.')
)
feedback_items = response_items['feedback']
try:
feedback = json.loads(feedback_items)
except (TypeError, ValueError):
# This is a dev_facing_error
log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items))
# This is a student_facing_error
return format_feedback(
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
'errors', _('Error getting feedback from grader.')
)
if response_items['success']:
if len(feedback) == 0:
# This is a student_facing_error
return format_feedback(
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
'errors', _('No feedback available from grader.')
)
for tag in do_not_render:
if tag in feedback:
feedback.pop(tag)
feedback_lst = sorted(feedback.items(), key=get_priority)
feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
else:
# This is a student_facing_error
feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
for feedback_type, value in response_items.items()
if feedback_type in ['submission_id', 'grader_id']]))
return u"\n".join([feedback_list_part1, feedback_list_part2])
def _format_feedback(self, response_items, system):
"""
Input:
Dictionary called feedback. Must contain keys seen below.
Output:
Return error message or feedback template
"""
rubric_feedback = ""
feedback = self._convert_longform_feedback_to_html(response_items)
rubric_scores = []
if response_items['rubric_scores_complete'] is True:
rubric_renderer = CombinedOpenEndedRubric(system.render_template, True)
rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml'])
success = rubric_dict['success']
rubric_feedback = rubric_dict['html']
rubric_scores = rubric_dict['rubric_scores']
if not response_items['success']:
return system.render_template(
"{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
{'errors': feedback}
)
feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), {
'grader_type': response_items['grader_type'],
'score': "{0} / {1}".format(response_items['score'], self.max_score()),
'feedback': feedback,
'rubric_feedback': rubric_feedback
})
return feedback_template, rubric_scores
def _parse_score_msg(self, score_msg, system, join_feedback=True):
"""
Grader reply is a JSON-dump of the following dict
{ 'correct': True/False,
'score': Numeric value (floating point is okay) to assign to answer
'msg': grader_msg
'feedback' : feedback from grader
'grader_type': what type of grader resulted in this score
'grader_id': id of the grader
'submission_id' : id of the submission
'success': whether or not this submission was successful
'rubric_scores': a list of rubric scores
'rubric_scores_complete': boolean if rubric scores are complete
'rubric_xml': the xml of the rubric in string format
}
        Returns a dict with keys:
            valid: Flag indicating valid score_msg format (Boolean)
            score: Points to be assigned (numeric, can be float)
            feedback, rubric_scores, grader_types, feedback_items,
            feedback_dicts, grader_ids, submission_ids: parsed grader data
"""
fail = {
'valid': False,
'score': 0,
'feedback': '',
'rubric_scores': [[0]],
'grader_types': [''],
'feedback_items': [''],
'feedback_dicts': [{}],
'grader_ids': [0],
'submission_ids': [0],
}
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
# This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
log.error(error_message)
fail['feedback'] = error_message
return fail
if not isinstance(score_result, dict):
# This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
log.error(error_message)
fail['feedback'] = error_message
return fail
if not score_result:
return fail
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result:
# This is a dev_facing_error
error_message = ("External open ended grader message is missing required tag: {0}"
.format(tag))
log.error(error_message)
fail['feedback'] = error_message
return fail
# This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
rubric_scores = []
grader_types = []
feedback_dicts = []
grader_ids = []
submission_ids = []
for i in xrange(len(score_result['score'])):
new_score_result = {
'score': score_result['score'][i],
'feedback': score_result['feedback'][i],
'grader_type': score_result['grader_type'],
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
'rubric_scores_complete': score_result['rubric_scores_complete'][i],
'rubric_xml': score_result['rubric_xml'][i],
}
feedback_template, rubric_score = self._format_feedback(new_score_result, system)
feedback_items.append(feedback_template)
rubric_scores.append(rubric_score)
grader_types.append(score_result['grader_type'])
try:
feedback_dict = json.loads(score_result['feedback'][i])
except Exception:
feedback_dict = score_result['feedback'][i]
feedback_dicts.append(feedback_dict)
grader_ids.append(score_result['grader_id'][i])
submission_ids.append(score_result['submission_id'])
if join_feedback:
feedback = "".join(feedback_items)
else:
feedback = feedback_items
score = int(round(median(score_result['score'])))
else:
# This is for instructor and ML grading
feedback, rubric_score = self._format_feedback(score_result, system)
score = score_result['score']
rubric_scores = [rubric_score]
grader_types = [score_result['grader_type']]
feedback_items = [feedback]
try:
feedback_dict = json.loads(score_result['feedback'])
except Exception:
feedback_dict = score_result.get('feedback', '')
feedback_dicts = [feedback_dict]
grader_ids = [score_result['grader_id']]
submission_ids = [score_result['submission_id']]
self.submission_id = score_result['submission_id']
self.grader_id = score_result['grader_id']
return {
'valid': True,
'score': score,
'feedback': feedback,
'rubric_scores': rubric_scores,
'grader_types': grader_types,
'feedback_items': feedback_items,
'feedback_dicts': feedback_dicts,
'grader_ids': grader_ids,
'submission_ids': submission_ids,
}
def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
"""
Gets the latest feedback, parses, and returns
@param short_feedback: If the long feedback is wanted or not
@return: Returns formatted feedback
"""
if not self.child_history:
return ""
feedback_dict = self._parse_score_msg(
self.child_history[-1].get('post_assessment', "{}"),
system,
join_feedback=join_feedback
)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
short_feedback = self._convert_longform_feedback_to_html(
json.loads(self.child_history[-1].get('post_assessment', "")))
return short_feedback if feedback_dict['valid'] else ''
def format_feedback_with_evaluation(self, system, feedback):
"""
Renders a given html feedback into an evaluation template
@param feedback: HTML feedback
@return: Rendered html
"""
context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50}
html = system.render_template('{0}/open_ended_evaluation.html'.format(self.TEMPLATE_DIR), context)
return html
def handle_ajax(self, dispatch, data, system):
'''
This is called by courseware.module_render, to handle an AJAX call.
"data" is request.POST.
Returns a json dictionary:
{ 'progress_changed' : True/False,
'progress' : 'none'/'in_progress'/'done',
<other request-specific values here > }
'''
handlers = {
'save_answer': self.save_answer,
'score_update': self.update_score,
'save_post_assessment': self.message_post,
'skip_post_assessment': self.skip_post_assessment,
'check_for_score': self.check_for_score,
'store_answer': self.store_answer,
}
_ = self.system.service(self, "i18n").ugettext
if dispatch not in handlers:
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
# This is a dev_facing_error
return json.dumps(
{'error': _('Error handling action. Please try again.'), 'success': False}
)
before = self.get_progress()
d = handlers[dispatch](data, system)
after = self.get_progress()
d.update({
'progress_changed': after != before,
'progress_status': Progress.to_js_status_str(after),
})
return json.dumps(d, cls=ComplexEncoder)
def check_for_score(self, _data, system):
"""
Checks to see if a score has been received yet.
@param data: AJAX dictionary
@param system: Modulesystem (needed to align with other ajax functions)
@return: Returns the current state
"""
state = self.child_state
return {'state': state}
def save_answer(self, data, system):
"""
Saves a student answer
@param data: AJAX dictionary
@param system: modulesystem
@return: Success indicator
"""
# Once we close the problem, we should not allow students
# to save answers
error_message = ""
closed, msg = self.check_if_closed()
if closed:
return msg
if self.child_state != self.INITIAL:
return self.out_of_sync_error(data)
message = "Successfully saved your submission."
# add new history element with answer and empty score and hint.
success, error_message, data = self.append_file_link_to_student_answer(data)
if not success:
message = error_message
else:
data['student_answer'] = OpenEndedModule.sanitize_html(data['student_answer'])
success, error_message = self.send_to_grader(data['student_answer'], system)
if not success:
message = error_message
# Store the answer instead
self.store_answer(data, system)
else:
self.new_history_entry(data['student_answer'])
self.change_state(self.ASSESSING)
return {
'success': success,
'error': message,
'student_response': data['student_answer'].replace("\n", "<br/>")
}
def update_score(self, data, system):
"""
Updates the current score via ajax. Called by xqueue.
Input: AJAX data dictionary, modulesystem
Output: None
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
# TODO: Remove need for cmap
self._update_score(score_msg, queuekey, system)
return dict() # No AJAX return is needed
def get_html(self, system):
"""
Gets the HTML for this problem and renders it
Input: Modulesystem object
Output: Rendered HTML
"""
_ = self.system.service(self, "i18n").ugettext
# set context variables and render template
eta_string = None
if self.child_state != self.INITIAL:
post_assessment = self.latest_post_assessment(system)
score = self.latest_score()
correct = 'correct' if self.is_submission_correct(score) else 'incorrect'
if self.child_state == self.ASSESSING:
# Translators: this string appears once an openended response
# is submitted but before it has been graded
eta_string = _("Your response has been submitted. Please check back later for your grade.")
else:
post_assessment = ""
correct = ""
previous_answer = self.get_display_answer()
# Use the module name as a unique id to pass to the template.
try:
module_id = self.system.location.name
except AttributeError:
# In cases where we don't have a system or a location, use a fallback.
module_id = "open_ended"
context = {
'prompt': self.child_prompt,
'previous_answer': previous_answer,
'state': self.child_state,
'allow_reset': self._allow_reset(),
'rows': 30,
'cols': 80,
'module_id': module_id,
'msg': post_assessment,
'child_type': 'openended',
'correct': correct,
'accept_file_upload': self.accept_file_upload,
'eta_message': eta_string,
}
html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
return html
def latest_score(self):
"""None if not available"""
if not self.child_history:
return None
return self.score_for_attempt(-1)
def all_scores(self):
"""None if not available"""
if not self.child_history:
return None
return [self.score_for_attempt(index) for index in xrange(len(self.child_history))]
def score_for_attempt(self, index):
"""
        Return the sum of rubric scores for ML grading; otherwise return attempt["score"].
"""
attempt = self.child_history[index]
score = attempt.get('score')
post_assessment_data = self._parse_score_msg(attempt.get('post_assessment', "{}"), self.system)
grader_types = post_assessment_data.get('grader_types')
        # According to _parse_score_msg, ML grading should produce exactly one grader type.
if len(grader_types) == 1 and grader_types[0] == 'ML':
rubric_scores = post_assessment_data.get("rubric_scores")
# Similarly there should be only one list of rubric scores.
if len(rubric_scores) == 1:
rubric_scores_sum = sum(rubric_scores[0])
log.debug("""Score normalized for location={loc}, old_score={old_score},
new_score={new_score}, rubric_score={rubric_score}""".format(
loc=self.location_string,
old_score=score,
new_score=rubric_scores_sum,
rubric_score=rubric_scores
))
return rubric_scores_sum
return score
class OpenEndedDescriptor(object):
"""
Module for adding open ended response questions to courses
"""
mako_template = "widgets/html-edit.html"
module_class = OpenEndedModule
filename_extension = "xml"
has_score = True
def __init__(self, system):
self.system = system
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Pull out the open ended parameters into a dictionary.
Returns:
{
'oeparam': 'some-html'
}
"""
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
# This is a staff_facing_error
raise ValueError(
u"Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse(k):
"""Assumes that xml_object has child k"""
return xml_object.xpath(k)[0]
return {
'oeparam': parse('openendedparam')
}
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('openended')
def add_child(k):
child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k])
child_node = etree.fromstring(child_str)
elt.append(child_node)
for child in ['openendedparam']:
add_child(child)
return elt

View File

@@ -1,577 +0,0 @@
"""
ORA1. Deprecated.
"""
from datetime import datetime
import json
import logging
import re
import bleach
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from html5lib.tokenizer import HTMLTokenizer
from pytz import UTC
from xmodule.progress import Progress
import capa.xqueue_interface as xqueue_interface
from capa.util import *
from .peer_grading_service import PeerGradingService, MockPeerGradingService
import controller_query_service
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
# Set the default number of max attempts. Should be 1 for production
# Set higher for debugging/testing
# attempts specified in xml definition overrides this.
MAX_ATTEMPTS = 1
# Set maximum available number of points.
# Overridden by max_score specified in xml.
MAX_SCORE = 1
def upload_to_s3(file_to_upload, keyname, s3_interface):
'''
Upload file to S3 using provided keyname.
Returns:
public_url: URL to access uploaded file
'''
conn = S3Connection(s3_interface['access_key'], s3_interface['secret_access_key'])
bucketname = str(s3_interface['storage_bucket_name'])
bucket = conn.lookup(bucketname.lower())
if not bucket:
bucket = conn.create_bucket(bucketname.lower())
k = Key(bucket)
k.key = keyname
k.set_metadata('filename', file_to_upload.name)
k.set_contents_from_file(file_to_upload)
k.set_acl("public-read")
public_url = k.generate_url(60 * 60 * 24 * 365) # URL timeout in seconds.
return public_url
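# Hedged usage sketch (the bucket name and credentials are illustrative; boto
# and a configured S3_INTERFACE are assumed):
#
#   s3_interface = {
#       'access_key': 'AKIA...',
#       'secret_access_key': '...',
#       'storage_bucket_name': 'ora-uploads',
#   }
#   with open('essay.png', 'rb') as upload:
#       url = upload_to_s3(upload, 'essay.png', s3_interface)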
# Used by sanitize_html
ALLOWED_HTML_ATTRS = {
'*': ['id', 'class', 'height', 'width', 'alt'],
'a': ['href', 'title', 'rel', 'target'],
'embed': ['src'],
'iframe': ['src'],
'img': ['src'],
}
class OpenEndedChild(object):
"""
States:
initial (prompt, textbox shown)
|
assessing (read-only textbox, rubric + assessment input shown for self assessment, response queued for open ended)
|
post_assessment (read-only textbox, read-only rubric and assessment, hint input box shown)
|
done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows
a reset button that goes back to initial state. Saves previous
submissions too.)
"""
DEFAULT_QUEUE = 'open-ended'
DEFAULT_MESSAGE_QUEUE = 'open-ended-message'
max_inputfields = 1
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
POST_ASSESSMENT = 'post_assessment'
DONE = 'done'
# This is used to tell students where they are at in the module
HUMAN_NAMES = {
# Translators: "Not started" communicates to a student that their response
# has not yet been graded
'initial': _('Not started'),
# Translators: "In progress" communicates to a student that their response
# is currently in the grading process
'assessing': _('In progress'),
# Translators: "Done" communicates to a student that their response
# has been fully graded
'post_assessment': _('Done'),
'done': _('Done'),
}
# included to make this act enough like an xblock to get i18n
_services_requested = {"i18n": "need"}
_combined_services = _services_requested
def __init__(self, system, location, definition, descriptor, static_data,
instance_state=None, shared_state=None, **kwargs):
# Load instance state
if instance_state is not None:
try:
instance_state = json.loads(instance_state)
            except (TypeError, ValueError):
                log.error(
                    "Could not load instance state for open ended; resetting it to empty. State: {0}".format(instance_state))
                instance_state = {}
else:
instance_state = {}
# History is a list of tuples of (answer, score, hint), where hint may be
# None for any element, and score and hint can be None for the last (current)
# element.
# Scores are on scale from 0 to max_score
self.child_history = instance_state.get('child_history', [])
self.child_state = instance_state.get('child_state', self.INITIAL)
self.child_created = instance_state.get('child_created', False)
self.child_attempts = instance_state.get('child_attempts', 0)
self.stored_answer = instance_state.get('stored_answer', None)
self.max_attempts = static_data['max_attempts']
self.child_prompt = static_data['prompt']
self.child_rubric = static_data['rubric']
self.display_name = static_data['display_name']
self.accept_file_upload = static_data['accept_file_upload']
self.close_date = static_data['close_date']
self.s3_interface = static_data['s3_interface']
self.skip_basic_checks = static_data['skip_basic_checks']
self._max_score = static_data['max_score']
self.control = static_data['control']
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
if system.open_ended_grading_interface:
self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system.render_template)
self.controller_qs = controller_query_service.ControllerQueryService(
system.open_ended_grading_interface, system.render_template
)
else:
self.peer_gs = MockPeerGradingService()
self.controller_qs = None
self.system = system
self.location_string = location
try:
self.location_string = self.location_string.to_deprecated_string()
        except AttributeError:
            # The location was already a plain string with no to_deprecated_string().
            pass
self.setup_response(system, location, definition, descriptor)
def setup_response(self, system, location, definition, descriptor):
"""
Needs to be implemented by the inheritors of this module. Sets up additional fields used by the child modules.
@param system: Modulesystem
@param location: Module location
@param definition: XML definition
@param descriptor: Descriptor of the module
@return: None
"""
pass
def closed(self):
if self.close_date is not None and datetime.now(UTC) > self.close_date:
return True
return False
def check_if_closed(self):
if self.closed():
return True, {
'success': False,
# This is a student_facing_error
'error': 'The problem close date has passed, and this problem is now closed.'
}
elif self.child_attempts > self.max_attempts:
return True, {
'success': False,
# This is a student_facing_error
'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(
self.child_attempts, self.max_attempts
)
}
else:
return False, {}
def latest_answer(self):
"""Empty string if not available"""
if not self.child_history:
return ""
return self.child_history[-1].get('answer', "")
def latest_score(self):
"""None if not available"""
if not self.child_history:
return None
return self.child_history[-1].get('score')
def all_scores(self):
"""None if not available"""
if not self.child_history:
return None
return [child_hist.get('score') for child_hist in self.child_history]
def latest_post_assessment(self, system):
"""Empty string if not available"""
if not self.child_history:
return ""
return self.child_history[-1].get('post_assessment', "")
@staticmethod
def sanitize_html(answer):
"""
Take a student response and sanitize the HTML to prevent malicious script injection
or other unwanted content.
answer - any string
return - a cleaned version of the string
"""
clean_html = bleach.clean(answer,
tags=['embed', 'iframe', 'a', 'img', 'br'],
attributes=ALLOWED_HTML_ATTRS,
strip=True)
autolinked = bleach.linkify(clean_html,
callbacks=[bleach.callbacks.target_blank],
skip_pre=True,
tokenizer=HTMLTokenizer)
return OpenEndedChild.replace_newlines(autolinked)
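    # Hedged example of the pipeline above:
    #   sanitize_html('<script>bad()</script>Line one\nhttp://example.com')
    # strips the disallowed <script> tag, autolinks the bare URL with
    # target="_blank", and rewrites the newline as <br/> via
    # replace_newlines(). Exact output depends on the installed bleach
    # version.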
@staticmethod
def replace_newlines(html):
"""
Replaces "\n" newlines with <br/>
"""
retv = re.sub(r'</p>$', '', re.sub(r'^<p>', '', html))
return re.sub("\n", "<br/>", retv)
def new_history_entry(self, answer):
"""
Adds a new entry to the history dictionary
@param answer: The student supplied answer
@return: None
"""
answer = OpenEndedChild.sanitize_html(answer)
self.child_history.append({'answer': answer})
self.stored_answer = None
def record_latest_score(self, score):
"""Assumes that state is right, so we're adding a score to the latest
history element"""
self.child_history[-1]['score'] = score
def record_latest_post_assessment(self, post_assessment):
"""Assumes that state is right, so we're adding a score to the latest
history element"""
self.child_history[-1]['post_assessment'] = post_assessment
def change_state(self, new_state):
"""
A centralized place for state changes--allows for hooks. If the
current state matches the old state, don't run any hooks.
"""
if self.child_state == new_state:
return
self.child_state = new_state
if self.child_state == self.DONE:
self.child_attempts += 1
def get_instance_state(self):
"""
Get the current score and state
"""
state = {
'version': self.STATE_VERSION,
'child_history': self.child_history,
'child_state': self.child_state,
'max_score': self._max_score,
'child_attempts': self.child_attempts,
'child_created': self.child_created,
'stored_answer': self.stored_answer,
}
return json.dumps(state)
def _allow_reset(self):
"""Can the module be reset?"""
return self.child_state == self.DONE and self.child_attempts < self.max_attempts
def max_score(self):
"""
Return max_score
"""
return self._max_score
def get_score(self):
"""
Returns the last score in the list
"""
score = self.latest_score()
return {'score': score if score is not None else 0,
'total': self._max_score}
def reset(self, system):
"""
If resetting is allowed, reset the state.
Returns {'success': bool, 'error': msg}
(error only present if not success)
"""
self.change_state(self.INITIAL)
return {'success': True}
def get_display_answer(self):
latest = self.latest_answer()
if self.child_state == self.INITIAL:
if self.stored_answer is not None:
previous_answer = self.stored_answer
elif latest is not None and len(latest) > 0:
previous_answer = latest
else:
previous_answer = ""
previous_answer = previous_answer.replace("<br/>", "\n").replace("<br>", "\n")
else:
if latest is not None and len(latest) > 0:
previous_answer = latest
else:
previous_answer = ""
previous_answer = previous_answer.replace("\n", "<br/>")
return previous_answer
def store_answer(self, data, system):
if self.child_state != self.INITIAL:
# We can only store an answer if the problem has not moved into the assessment phase.
return self.out_of_sync_error(data)
self.stored_answer = data['student_answer']
return {'success': True}
def get_progress(self):
'''
For now, just return last score / max_score
'''
if self._max_score > 0:
try:
return Progress(int(self.get_score()['score']), int(self._max_score))
except Exception as err:
# This is a dev_facing_error
log.exception("Got bad progress from open ended child module. Max Score: {0}".format(self._max_score))
return None
return None
def out_of_sync_error(self, data, msg=''):
"""
return dict out-of-sync error message, and also log.
"""
# This is a dev_facing_error
log.warning("Open ended child state out sync. state: %r, data: %r. %s",
self.child_state, data, msg)
# This is a student_facing_error
return {'success': False,
'error': 'The problem state got out-of-sync. Please try reloading the page.'}
def get_html(self):
"""
Needs to be implemented by inheritors. Renders the HTML that students see.
@return:
"""
pass
def handle_ajax(self):
"""
Needs to be implemented by child modules. Handles AJAX events.
@return:
"""
pass
def is_submission_correct(self, score):
"""
Checks to see if a given score makes the answer correct. Very naive right now (>66% is correct)
@param score: Numeric score.
@return: Boolean correct.
"""
correct = False
if isinstance(score, (int, long, float, complex)):
score_ratio = int(score) / float(self.max_score())
correct = (score_ratio >= 0.66)
return correct
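    # Worked example of the naive threshold above: with max_score() == 4, a
    # score of 3 gives 3 / 4.0 = 0.75 >= 0.66 ("correct"), while a score of 2
    # gives 0.5 ("incorrect").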
def is_last_response_correct(self):
"""
Checks to see if the last response in the module is correct.
@return: 'correct' if correct, otherwise 'incorrect'
"""
score = self.get_score()['score']
correctness = 'correct' if self.is_submission_correct(score) else 'incorrect'
return correctness
def upload_file_to_s3(self, file_data):
"""
Uploads a file to S3.
file_data: InMemoryUploadedFileObject that responds to read() and seek().
@return: A URL corresponding to the uploaded object.
"""
file_key = file_data.name + datetime.now(UTC).strftime(
xqueue_interface.dateformat
)
file_data.seek(0)
s3_public_url = upload_to_s3(
file_data, file_key, self.s3_interface
)
return s3_public_url
def check_for_file_and_upload(self, data):
"""
Checks to see if a file was passed back by the student. If so, it will be uploaded to S3.
@param data: AJAX post dictionary containing keys student_file and valid_files_attached.
@return: has_file_to_upload, whether or not a file was in the data dictionary,
and image_tag, the html needed to create a link to the uploaded file.
"""
has_file_to_upload = False
image_tag = ""
# Ensure that a valid file was uploaded.
if 'valid_files_attached' in data and \
data['valid_files_attached'] in ['true', '1', True] and \
data['student_file'] is not None and \
len(data['student_file']) > 0:
has_file_to_upload = True
student_file = data['student_file'][0]
# Upload the file to S3 and generate html to embed a link.
s3_public_url = self.upload_file_to_s3(student_file)
image_tag = self.generate_file_link_html_from_url(s3_public_url, student_file.name)
return has_file_to_upload, image_tag
def generate_file_link_html_from_url(self, s3_public_url, file_name):
"""
Create an html link to a given URL.
@param s3_public_url: URL of the file.
@param file_name: Name of the file.
        @return: An html anchor tag linking to the given URL.
"""
image_link = """
<a href="{0}" target="_blank">{1}</a>
""".format(s3_public_url, file_name)
return image_link
def append_file_link_to_student_answer(self, data):
"""
Adds a file to a student answer after uploading it to S3.
@param data: AJAX data containing keys student_answer, valid_files_attached, and student_file.
@return: Boolean success, and updated AJAX data dictionary.
"""
_ = self.system.service(self, "i18n").ugettext
error_message = ""
if not self.accept_file_upload:
# If the question does not accept file uploads, do not do anything
return True, error_message, data
try:
# Try to upload the file to S3.
has_file_to_upload, image_tag = self.check_for_file_and_upload(data)
data['student_answer'] += image_tag
success = True
if not has_file_to_upload:
# If there is no file to upload, probably the student has embedded the link in the answer text
success, data['student_answer'] = self.check_for_url_in_text(data['student_answer'])
# If success is False, we have not found a link, and no file was attached.
# Show error to student.
if success is False:
error_message = _(
"We could not find a file in your submission. "
"Please try choosing a file or pasting a URL to your "
"file into the answer box."
)
except Exception:
# In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely
# a config issue (development vs deployment).
log.exception("Student AJAX post to combined open ended xmodule indicated that it contained a file, "
"but the image was not able to be uploaded to S3. This could indicate a configuration "
"issue with this deployment and the S3_INTERFACE setting.")
success = False
error_message = _(
"We are having trouble saving your file. Please try another "
"file or paste a URL to your file into the answer box."
)
return success, error_message, data
def check_for_url_in_text(self, string):
"""
Checks for urls in a string.
@param string: Arbitrary string.
@return: Boolean success, and the edited string.
"""
has_link = False
# Find all links in the string.
links = re.findall(r'(https?://\S+)', string)
if len(links) > 0:
has_link = True
            # Autolink by wrapping links in anchor tags. Use plain string
            # replacement: URLs often contain regex metacharacters, so passing
            # them to re.sub as a pattern is unsafe.
            for link in links:
                string = string.replace(link, self.generate_file_link_html_from_url(link, link))
return has_link, string
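    # Hedged example: check_for_url_in_text("see http://example.com/essay")
    # returns (True, <string with the URL wrapped in the anchor markup built
    # by generate_file_link_html_from_url()>); a string without links comes
    # back unchanged with has_link False.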
def get_eta(self):
if self.controller_qs:
response = self.controller_qs.check_for_eta(self.location_string)
else:
return ""
success = response['success']
if isinstance(success, basestring):
success = (success.lower() == "true")
if success:
eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
eta_string = "Please check back for your response in at most {0}.".format(eta)
else:
eta_string = ""
return eta_string
@classmethod
def service_declaration(cls, service_name):
"""
This classmethod is copied from XBlock's service_declaration.
It is included to make this class act enough like an XBlock
to get i18n working on it.
This is currently only used for i18n, and will return "need"
in that case.
Arguments:
service_name (string): the name of the service requested.
Returns:
One of "need", "want", or None.
"""
declaration = cls._combined_services.get(service_name)
return declaration

View File

@@ -1,168 +0,0 @@
import logging
import dogstats_wrapper as dog_stats_api
from .grading_service_module import GradingService
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
class PeerGradingService(GradingService):
"""
Interface with the grading controller for peer grading
"""
METRIC_NAME = 'edxapp.open_ended_grading.peer_grading_service'
def __init__(self, config, render_template):
config['render_template'] = render_template
super(PeerGradingService, self).__init__(config)
self.url = config['url'] + config['peer_grading']
self.login_url = self.url + '/login/'
self.get_next_submission_url = self.url + '/get_next_submission/'
self.save_grade_url = self.url + '/save_grade/'
self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
self.get_problem_list_url = self.url + '/get_problem_list/'
self.get_notifications_url = self.url + '/get_notifications/'
self.get_data_for_location_url = self.url + '/get_data_for_location/'
def get_data_for_location(self, problem_location, student_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'location': problem_location, 'student_id': student_id}
result = self.get(self.get_data_for_location_url, params)
self._record_result('get_data_for_location', result)
for key in result.keys():
if key in ('success', 'error', 'version'):
continue
dog_stats_api.histogram(
self._metric_name('get_data_for_location.{}'.format(key)),
result[key],
)
return result
def get_next_submission(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
result = self._render_rubric(self.get(
self.get_next_submission_url,
{
'location': problem_location,
'grader_id': grader_id
}
))
self._record_result('get_next_submission', result)
return result
def save_grade(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_grade_url, data)
self._record_result('save_grade', result)
return result
def is_student_calibrated(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self.get(self.is_student_calibrated_url, params)
self._record_result(
'is_student_calibrated',
result,
tags=['calibrated:{}'.format(result.get('calibrated'))]
)
return result
def show_calibration_essay(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self._render_rubric(self.get(self.show_calibration_essay_url, params))
self._record_result('show_calibration_essay', result)
return result
def save_calibration_essay(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_calibration_essay_url, data)
        self._record_result('save_calibration_essay', result)
return result
def get_problem_list(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_problem_list_url, params)
if 'problem_list' in result:
for problem in result['problem_list']:
problem['location'] = course_id.make_usage_key_from_deprecated_string(problem['location'])
self._record_result('get_problem_list', result)
dog_stats_api.histogram(
self._metric_name('get_problem_list.result.length'),
len(result.get('problem_list', [])),
)
return result
def get_notifications(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_notifications_url, params)
self._record_result(
'get_notifications',
result,
tags=['needs_to_peer_grade:{}'.format(result.get('student_needs_to_peer_grade'))]
)
return result
class MockPeerGradingService(object):
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
"""
def get_next_submission(self, problem_location, grader_id):
return {
'success': True,
'submission_id': 1,
'submission_key': "",
'student_response': 'Sample student response.',
'prompt': 'Sample submission prompt.',
'rubric': 'Placeholder text for the full rubric.',
'max_score': 4
}
def save_grade(self, **kwargs):
return {'success': True}
def is_student_calibrated(self, problem_location, grader_id):
return {'success': True, 'calibrated': True}
def show_calibration_essay(self, problem_location, grader_id):
return {'success': True,
'submission_id': 1,
'submission_key': '',
'student_response': 'Sample student response.',
'prompt': 'Sample submission prompt.',
'rubric': 'Placeholder text for the full rubric.',
'max_score': 4}
def save_calibration_essay(self, **kwargs):
return {'success': True, 'actual_score': 2}
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
]}
def get_data_for_location(self, problem_location, student_id):
return {
"version": 1,
"count_graded": 3,
"count_required": 3,
"success": True,
"student_sub_count": 1,
'submissions_available': 0,
}

View File

@@ -1,339 +0,0 @@
import json
import logging
from lxml import etree
from xmodule.capa_module import ComplexEncoder
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
import openendedchild
from .combined_open_ended_rubric import CombinedOpenEndedRubric
log = logging.getLogger("edx.courseware")
class SelfAssessmentModule(openendedchild.OpenEndedChild):
"""
A Self Assessment module that allows students to write open-ended responses,
submit, then see a rubric and rate themselves. Persists student supplied
hints, answers, and assessment judgment (currently only correct/incorrect).
Parses xml definition file--see below for exact format.
Sample XML format:
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
        Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
"""
TEMPLATE_DIR = "combinedopenended/selfassessment"
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
REQUEST_HINT = 'request_hint'
DONE = 'done'
def setup_response(self, system, location, definition, descriptor):
"""
Sets up the module
@param system: Modulesystem
@param location: location, to let the module know where it is.
@param definition: XML definition of the module.
@param descriptor: SelfAssessmentDescriptor
@return: None
"""
self.child_prompt = stringify_children(self.child_prompt)
self.child_rubric = stringify_children(self.child_rubric)
def get_html(self, system):
"""
Gets context and renders HTML that represents the module
@param system: Modulesystem
@return: Rendered HTML
"""
# set context variables and render template
previous_answer = self.get_display_answer()
# Use the module name as a unique id to pass to the template.
try:
module_id = self.system.location.name
except AttributeError:
# In cases where we don't have a system or a location, use a fallback.
module_id = "self_assessment"
context = {
'prompt': self.child_prompt,
'previous_answer': previous_answer,
'ajax_url': system.ajax_url,
'initial_rubric': self.get_rubric_html(system),
'state': self.child_state,
'allow_reset': self._allow_reset(),
'child_type': 'selfassessment',
'accept_file_upload': self.accept_file_upload,
'module_id': module_id,
}
html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context)
return html
def handle_ajax(self, dispatch, data, system):
"""
This is called by courseware.module_render, to handle an AJAX call.
"data" is request.POST.
Returns a json dictionary:
{ 'progress_changed' : True/False,
'progress': 'none'/'in_progress'/'done',
<other request-specific values here > }
"""
handlers = {
'save_answer': self.save_answer,
'save_assessment': self.save_assessment,
'save_post_assessment': self.save_hint,
'store_answer': self.store_answer,
}
if dispatch not in handlers:
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
# This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](data, system)
after = self.get_progress()
d.update({
'progress_changed': after != before,
'progress_status': Progress.to_js_status_str(after),
})
return json.dumps(d, cls=ComplexEncoder)
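    # Illustrative example (values assumed, not from a real run): a successful
    # 'save_assessment' dispatch would serialize to JSON such as
    #   {"success": true, "state": "done", "allow_reset": false,
    #    "progress_changed": true, "progress_status": "done"}
    # where 'progress_changed' and 'progress_status' are merged in by
    # handle_ajax itself and the rest comes from the individual handler.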
def get_rubric_html(self, system):
"""
Return the appropriate version of the rubric, based on the state.
"""
if self.child_state == self.INITIAL:
return ''
rubric_renderer = CombinedOpenEndedRubric(system.render_template, False)
rubric_dict = rubric_renderer.render_rubric(self.child_rubric)
success = rubric_dict['success']
rubric_html = rubric_dict['html']
# we'll render it
context = {
'rubric': rubric_html,
'max_score': self._max_score,
}
if self.child_state == self.ASSESSING:
context['read_only'] = False
elif self.child_state in (self.POST_ASSESSMENT, self.DONE):
context['read_only'] = True
else:
# This is a dev_facing_error
raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state))
return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context)
def get_hint_html(self, system):
"""
Return the appropriate version of the hint view, based on state.
"""
if self.child_state in (self.INITIAL, self.ASSESSING):
return ''
if self.child_state == self.DONE:
# display the previous hint
latest = self.latest_post_assessment(system)
hint = latest if latest is not None else ''
else:
hint = ''
context = {'hint': hint}
if self.child_state == self.POST_ASSESSMENT:
context['read_only'] = False
elif self.child_state == self.DONE:
context['read_only'] = True
else:
# This is a dev_facing_error
raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state))
return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
def save_answer(self, data, system):
"""
After the answer is submitted, show the rubric.
Args:
data: the request dictionary passed to the ajax request. Should contain
a key 'student_answer'
Returns:
Dictionary with keys 'success' and either 'error' (if not success),
or 'rubric_html' (if success).
"""
# Check to see if this problem is closed
closed, msg = self.check_if_closed()
if closed:
return msg
if self.child_state != self.INITIAL:
return self.out_of_sync_error(data)
error_message = ""
# add new history element with answer and empty score and hint.
success, error_message, data = self.append_file_link_to_student_answer(data)
if success:
data['student_answer'] = SelfAssessmentModule.sanitize_html(data['student_answer'])
self.new_history_entry(data['student_answer'])
self.change_state(self.ASSESSING)
return {
'success': success,
'rubric_html': self.get_rubric_html(system),
'error': error_message,
'student_response': data['student_answer'].replace("\n", "<br/>"),
}
def save_assessment(self, data, _system):
"""
Save the assessment. If the student said they're right, don't ask for a
hint, and go straight to the done state. Otherwise, do ask for a hint.
Returns a dict { 'success': bool, 'state': state,
'hint_html': hint_html OR 'message_html': html and 'allow_reset',
'error': error-msg},
with 'error' only present if 'success' is False, and 'hint_html' or
'message_html' only if success is true
:param data: A `webob.multidict.MultiDict` containing the keys
assessment: The sum of assessment scores
score_list[]: A multivalue key containing all the individual scores
"""
closed, msg = self.check_if_closed()
if closed:
return msg
if self.child_state != self.ASSESSING:
return self.out_of_sync_error(data)
try:
score = int(data.get('assessment'))
score_list = [int(x) for x in data.getall('score_list[]')]
except (ValueError, TypeError):
# This is a dev_facing_error
log.error("Non-integer score value passed to save_assessment, or no score list present.")
# This is a student_facing_error
_ = self.system.service(self, "i18n").ugettext
return {
'success': False,
'error': _("Error saving your score. Please notify course staff.")
}
# Record score as assessment and rubric scores as post assessment
self.record_latest_score(score)
self.record_latest_post_assessment(json.dumps(score_list))
d = {'success': True, }
self.change_state(self.DONE)
d['allow_reset'] = self._allow_reset()
d['state'] = self.child_state
return d
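    # Worked example (hypothetical values): a webob MultiDict carrying
    # assessment=3 plus score_list[]=1 and score_list[]=2 parses to score=3 and
    # score_list=[1, 2]; the score is recorded, the rubric scores are stored as
    # the post-assessment, and the state moves to DONE.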
def save_hint(self, data, _system):
'''
Not used currently, as hints have been removed from the system.
Save the hint.
Returns a dict { 'success': bool,
'message_html': message_html,
'error': error-msg,
'allow_reset': bool},
with the error key only present if success is False and message_html
only if True.
'''
if self.child_state != self.POST_ASSESSMENT:
# Note: because we only ask for hints on wrong answers, we may not have
# the same number of hints and answers.
return self.out_of_sync_error(data)
self.record_latest_post_assessment(data['hint'])
self.change_state(self.DONE)
return {
'success': True,
'message_html': '',
'allow_reset': self._allow_reset(),
}
def latest_post_assessment(self, system):
latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
try:
rubric_scores = json.loads(latest_post_assessment)
except (ValueError, TypeError):
    # Fall back to an empty list if the stored value is not valid JSON.
    rubric_scores = []
return [rubric_scores]
class SelfAssessmentDescriptor(object):
"""
Module for adding self assessment questions to courses
"""
mako_template = "widgets/html-edit.html"
module_class = SelfAssessmentModule
filename_extension = "xml"
has_score = True
def __init__(self, system):
self.system = system
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Pull out the rubric, prompt, and submitmessage into a dictionary.
Returns:
{
'submitmessage': 'some-html'
'hintprompt': 'some-html'
}
"""
expected_children = []
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
# This is a staff_facing_error
raise ValueError(
u"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse(k):
"""Assumes that xml_object has child k"""
return stringify_children(xml_object.xpath(k)[0])
return {}
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('selfassessment')
def add_child(k):
child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=getattr(self, k))
child_node = etree.fromstring(child_str)
elt.append(child_node)
for child in []:
add_child(child)
return elt
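For reference, a minimal self-contained sketch of the lxml round-trip that
definition_to_xml performs (the tag and message below are illustrative, not
taken from a real course):

    from lxml import etree

    elt = etree.Element('selfassessment')
    child = etree.fromstring(u'<submitmessage>Save Successful.</submitmessage>')
    elt.append(child)
    print(etree.tostring(elt))
    # <selfassessment><submitmessage>Save Successful.</submitmessage></selfassessment>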

View File

@@ -1,744 +0,0 @@
"""
ORA1. Deprecated.
"""
import json
import logging
from datetime import datetime
from django.utils.timezone import UTC
from lxml import etree
from pkg_resources import resource_string
from xblock.fields import Dict, String, Scope, Boolean, Float, Reference
from xmodule.capa_module import ComplexEncoder
from xmodule.fields import Date, Timedelta
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.raw_module import RawDescriptor
from xmodule.timeinfo import TimeInfo
from xmodule.x_module import XModule, module_attr
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, MockPeerGradingService
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
from xmodule.validation import StudioValidation, StudioValidationMessage
from open_ended_grading_classes import combined_open_ended_rubric
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff."
MAX_ALLOWED_FEEDBACK_LENGTH = 5000
class PeerGradingFields(object):
use_for_single_location = Boolean(
display_name=_("Show Single Problem"),
help=_('When True, only the single problem specified by "Link to Problem Location" is shown. '
'When False, a panel is displayed with all problems available for peer grading.'),
default=False,
scope=Scope.settings
)
link_to_location = Reference(
display_name=_("Link to Problem Location"),
help=_('The location of the problem being graded. Only used when "Show Single Problem" is True.'),
default="",
scope=Scope.settings
)
graded = Boolean(
display_name=_("Graded"),
help=_('Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.'),
default=False,
scope=Scope.settings
)
due = Date(
help=_("Due date that should be displayed."),
scope=Scope.settings)
graceperiod = Timedelta(
help=_("Amount of grace to give on the due date."),
scope=Scope.settings
)
student_data_for_location = Dict(
help=_("Student data for a given peer grading problem."),
scope=Scope.user_state
)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."),
scope=Scope.settings, values={"min": 0, "step": ".1"},
default=1
)
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_("Peer Grading Interface")
)
data = String(
help=_("Html contents to display for this module"),
default='<peergrading></peergrading>',
scope=Scope.content
)
class InvalidLinkLocation(Exception):
"""
Exception for the case in which a peer grading module tries to link to an invalid location.
"""
pass
class PeerGradingModule(PeerGradingFields, XModule):
"""
PeerGradingModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
_VERSION = 1
js = {
'coffee': [
resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'),
resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
js_module_name = "PeerGrading"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, *args, **kwargs):
super(PeerGradingModule, self).__init__(*args, **kwargs)
# Copy this to a new variable so that we can edit it if needed.
# We need to edit it if the linked module cannot be found, so
# we can revert to panel mode.
self.use_for_single_location_local = self.use_for_single_location
# We need to set the location here so the child modules can use it.
self.runtime.set('location', self.location)
if self.runtime.open_ended_grading_interface:
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system.render_template)
else:
self.peer_gs = MockPeerGradingService()
if self.use_for_single_location_local:
linked_descriptors = self.descriptor.get_required_module_descriptors()
if len(linked_descriptors) == 0:
error_msg = "Peer grading module {0} is trying to use single problem mode without "
"a location specified.".format(self.location)
log.error(error_msg)
# Change module over to panel mode from single problem mode.
self.use_for_single_location_local = False
else:
self.linked_problem = self.system.get_module(linked_descriptors[0])
try:
self.timeinfo = TimeInfo(self.due, self.graceperiod)
except Exception:
log.error("Error parsing due date information in location {0}".format(self.location))
raise
self.display_due_date = self.timeinfo.display_due_date
try:
self.student_data_for_location = json.loads(self.student_data_for_location)
except Exception: # pylint: disable=broad-except
# OK with this broad exception because we just want to continue on any error
pass
@property
def ajax_url(self):
"""
Returns the `ajax_url` from the system, ensuring it ends with a trailing '/'.
"""
ajax_url = self.system.ajax_url
if not ajax_url.endswith("/"):
ajax_url += "/"
return ajax_url
def closed(self):
return self._closed(self.timeinfo)
def _closed(self, timeinfo):
if timeinfo.close_date is not None and datetime.now(UTC()) > timeinfo.close_date:
return True
return False
def _err_response(self, msg):
"""
Return a HttpResponse with a json dump with success=False, and the given error message.
"""
return {'success': False, 'error': msg}
def _check_required(self, data, required):
actual = set(data.keys())
missing = required - actual
if len(missing) > 0:
return False, "Missing required keys: {0}".format(', '.join(missing))
else:
return True, ""
def get_html(self):
"""
Renders the HTML that students see: the closed view, the problem-list panel,
or a single linked problem, depending on state and configuration.
@return: Rendered HTML
"""
if self.closed():
return self.peer_grading_closed()
if not self.use_for_single_location_local:
return self.peer_grading()
else:
# b/c handle_ajax expects serialized data payload and directly calls peer_grading
return self.peer_grading_problem({'location': self.link_to_location.to_deprecated_string()})['html']
def handle_ajax(self, dispatch, data):
"""
Handles AJAX events dispatched from the client.
@return: JSON-serialized response dictionary
"""
handlers = {
'get_next_submission': self.get_next_submission,
'show_calibration_essay': self.show_calibration_essay,
'is_student_calibrated': self.is_student_calibrated,
'save_grade': self.save_grade,
'save_calibration_essay': self.save_calibration_essay,
'problem': self.peer_grading_problem,
}
if dispatch not in handlers:
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
# This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
data_dict = handlers[dispatch](data)
return json.dumps(data_dict, cls=ComplexEncoder)
def query_data_for_location(self, location):
student_id = self.system.anonymous_student_id
success = False
response = {}
try:
response = self.peer_gs.get_data_for_location(location, student_id)
_count_graded = response['count_graded']
_count_required = response['count_required']
success = True
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error getting location data from controller for location %s, student %s", location, student_id)
return success, response
def get_progress(self):
pass
def get_score(self):
max_score = None
score = None
weight = self.weight
# The old default was None, so set the weight to 1 if it is still the old default.
if weight is None:
weight = 1
score_dict = {
'score': score,
'total': max_score,
}
if not self.use_for_single_location_local or not self.graded:
return score_dict
try:
count_graded = self.student_data_for_location['count_graded']
count_required = self.student_data_for_location['count_required']
except (KeyError, TypeError):
success, response = self.query_data_for_location(self.link_to_location)
if not success:
log.exception(
"No instance data found and could not get data from controller for loc {0} student {1}".format(
self.system.location.to_deprecated_string(), self.system.anonymous_student_id
))
return None
count_graded = response['count_graded']
count_required = response['count_required']
if count_required > 0 and count_graded >= count_required:
# Ensures that once a student receives a final score for peer grading, that it does not change.
self.student_data_for_location = response
score = int(count_graded >= count_required and count_graded > 0) * float(weight)
total = float(weight)
score_dict['score'] = score
score_dict['total'] = total
return score_dict
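    # Worked example (assumed values): with weight=1, count_required=3, and
    # count_graded=3, score = int(True) * float(1) = 1.0 and total = 1.0;
    # with count_graded=2 the condition is False, so score = 0.0.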
def max_score(self):
''' Maximum score. Two notes:
* This is generic; in abstract, a problem could be 3/5 points on one
randomization, and 5/7 on another
'''
max_grade = None
if self.use_for_single_location_local and self.graded:
max_grade = self.weight
return max_grade
def get_next_submission(self, data):
"""
Makes a call to the grading controller for the next essay that should be graded
Returns a json dict with the following keys:
'success': bool
'submission_id': a unique identifier for the submission, to be passed back
with the grade.
'submission': the submission, rendered as read-only html for grading
'rubric': the rubric, also rendered as html.
'submission_key': a key associated with the submission for validation reasons
'error': if success is False, will have an error message with more info.
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.get_next_submission(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error getting next submission. server url: %s location: %s, grader_id: %s", self.peer_gs.url, location, grader_id)
# This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
def save_grade(self, data):
"""
Saves the grade of a given submission.
Input:
The request should have the following keys:
location - problem location
submission_id - id associated with this submission
submission_key - submission key given for validation purposes
score - the grade that was given to the submission
feedback - the feedback from the student
Returns
A json object with the following keys:
success: bool indicating whether the save was a success
error: if there was an error in the submission, this is the error message
"""
required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown']
if data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]:
required.append("rubric_scores[]")
success, message = self._check_required(data, set(required))
if not success:
return self._err_response(message)
success, message = self._check_feedback_length(data)
if not success:
return self._err_response(message)
data_dict = {k: data.get(k) for k in required}
if 'rubric_scores[]' in required:
data_dict['rubric_scores'] = data.getall('rubric_scores[]')
data_dict['grader_id'] = self.system.anonymous_student_id
try:
response = self.peer_gs.save_grade(**data_dict)
success, location_data = self.query_data_for_location(data_dict['location'])
# Don't check `success` here: if the save_grade call above had failed, it
# would have raised the same GradingServiceError that makes success False.
response.update({'required_done': False})
if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded']) >= int(location_data['count_required']):
response['required_done'] = True
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error saving grade to open ended grading service. server url: %s", self.peer_gs.url)
# This is a student_facing_error
return {
'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def is_student_calibrated(self, data):
"""
Calls the grading controller to see if the given student is calibrated
on the given problem
Input:
In the request, we need the following arguments:
location - problem location
Returns:
Json object with the following keys
success - bool indicating whether or not the call was successful
calibrated - true if the grader has fully calibrated and can now move on to grading
- false if the grader is still working on calibration problems
total_calibrated_on_so_far - the number of calibration essays for this problem
that this grader has graded
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.is_student_calibrated(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error from open ended grading service. server url: %s, grader_id: %s, location: %s", self.peer_gs.url, grader_id, location)
# This is a student_facing_error
return {
'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def show_calibration_essay(self, data):
"""
Fetch the next calibration essay from the grading controller and return it
Inputs:
In the request
location - problem location
Returns:
A json dict with the following keys
'success': bool
'submission_id': a unique identifier for the submission, to be passed back
with the grade.
'submission': the submission, rendered as read-only html for grading
'rubric': the rubric, also rendered as html.
'submission_key': a key associated with the submission for validation reasons
'error': if success is False, will have an error message with more info.
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.show_calibration_essay(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error from open ended grading service. server url: %s, location: %s", self.peer_gs.url, location)
# This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
# This is a dev_facing_error
log.exception("Cannot parse rubric string.")
# This is a student_facing_error
return {'success': False,
'error': 'Error displaying submission. Please notify course staff.'}
def save_calibration_essay(self, data):
"""
Saves the grader's grade of a given calibration.
Input:
The request should have the following keys:
location - problem location
submission_id - id associated with this submission
submission_key - submission key given for validation purposes
score - the grade that was given to the submission
feedback - the feedback from the student
Returns
A json object with the following keys:
success: bool indicating whether the save was a success
error: if there was an error in the submission, this is the error message
actual_score: the score that the instructor gave to this calibration essay
"""
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
data_dict = {k: data.get(k) for k in required}
data_dict['rubric_scores'] = data.getall('rubric_scores[]')
data_dict['student_id'] = self.system.anonymous_student_id
data_dict['calibration_essay_id'] = data_dict['submission_id']
try:
response = self.peer_gs.save_calibration_essay(**data_dict)
if 'actual_rubric' in response:
rubric_renderer = combined_open_ended_rubric.CombinedOpenEndedRubric(self.system.render_template, True)
response['actual_rubric'] = rubric_renderer.render_rubric(response['actual_rubric'])['html']
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error saving calibration grade")
# This is a student_facing_error
return self._err_response('There was an error saving your score. Please notify course staff.')
def peer_grading_closed(self):
'''
Show the Peer grading closed template
'''
html = self.system.render_template('peer_grading/peer_grading_closed.html', {
'use_for_single_location': self.use_for_single_location_local
})
return html
def _find_corresponding_module_for_location(self, location):
"""
Find the peer grading module that exists at the given location.
"""
try:
return self.descriptor.system.load_item(location)
except ItemNotFoundError:
# The linked problem doesn't exist.
log.error("Problem {0} does not exist in this course.".format(location))
raise
except NoPathToItem:
# The linked problem does not have a path to it (ie is in a draft or other strange state).
log.error("Cannot find a path to problem {0} in this course.".format(location))
raise
def peer_grading(self, _data=None):
'''
Show a peer grading interface
'''
# call problem list service
success = False
error_text = ""
problem_list = []
try:
problem_list_dict = self.peer_gs.get_problem_list(self.course_id, self.system.anonymous_student_id)
success = problem_list_dict['success']
if 'error' in problem_list_dict:
error_text = problem_list_dict['error']
problem_list = problem_list_dict['problem_list']
except GradingServiceError:
# This is a student_facing_error
error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR
log.error(error_text)
success = False
# catch the error if the json load fails
except ValueError:
# This is a student_facing_error
error_text = "Could not get list of problems to peer grade. Please notify course staff."
log.error(error_text)
success = False
except Exception:
log.exception("Could not contact peer grading service.")
success = False
good_problem_list = []
for problem in problem_list:
problem_location = problem['location']
try:
descriptor = self._find_corresponding_module_for_location(problem_location)
except (NoPathToItem, ItemNotFoundError):
continue
if descriptor:
problem['due'] = descriptor.due
grace_period = descriptor.graceperiod
try:
problem_timeinfo = TimeInfo(problem['due'], grace_period)
except Exception:
log.error("Malformed due date or grace period string for location {0}".format(problem_location))
raise
if self._closed(problem_timeinfo):
problem['closed'] = True
else:
problem['closed'] = False
else:
# if we can't find the due date, assume that it doesn't have one
problem['due'] = None
problem['closed'] = False
good_problem_list.append(problem)
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading.html', {
'ajax_url': ajax_url,
'success': success,
'problem_list': good_problem_list,
'error_text': error_text,
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location_local,
})
return html
def peer_grading_problem(self, data=None):
'''
Show individual problem interface
'''
if data is None or data.get('location') is None:
if not self.use_for_single_location_local:
# This is an error case: the module must be configured to use a single
# location in order to be called without GET parameters.
# This is a dev_facing_error
log.error(
"Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
return {'html': "", 'success': False}
problem_location = self.link_to_location
elif data.get('location') is not None:
problem_location = self.course_id.make_usage_key_from_deprecated_string(data.get('location'))
self._find_corresponding_module_for_location(problem_location)
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading_problem.html', {
'view_html': '',
'problem_location': problem_location,
'course_id': self.course_id,
'ajax_url': ajax_url,
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location_local,
})
return {'html': html, 'success': True}
def get_instance_state(self):
"""
Returns the current instance state. The module can be recreated from the instance state.
Input: None
Output: A dictionary containing the instance state.
"""
state = {
'student_data_for_location': self.student_data_for_location,
}
return json.dumps(state)
def _check_feedback_length(self, data):
feedback = data.get("feedback")
if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH:
return False, "Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
else:
return True, ""
def validate(self):
"""
Returns either an error or a warning validation message, along with its type.
Error messages take priority over warnings.
"""
return self.descriptor.validate()
class PeerGradingDescriptor(PeerGradingFields, RawDescriptor):
"""
Module for adding peer grading questions
"""
mako_template = "widgets/raw-edit.html"
module_class = PeerGradingModule
filename_extension = "xml"
has_score = True
always_recalculate_grades = True
# Specify whether or not to pass in the open ended interface
needs_open_ended_interface = True
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
'due_data': 'due'
}
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([PeerGradingFields.due, PeerGradingFields.graceperiod])
return non_editable_fields
def get_required_module_descriptors(self):
"""
Returns a list of XModuleDescriptor instances upon which this module depends, but are
not children of this module.
"""
# If use_for_single_location is True, this is linked to an open ended problem.
if self.use_for_single_location:
# Try to load the linked module.
# If we can't load it, return empty list to avoid exceptions on progress page.
try:
linked_module = self.system.load_item(self.link_to_location)
return [linked_module]
except (NoPathToItem, ItemNotFoundError):
error_message = ("Cannot find the combined open ended module "
"at location {0} being linked to from peer "
"grading module {1}").format(self.link_to_location, self.location)
log.error(error_message)
return []
else:
return []
# Proxy to PeerGradingModule so that external callers don't have to know if they're working
# with a module or a descriptor
closed = module_attr('closed')
get_instance_state = module_attr('get_instance_state')
get_next_submission = module_attr('get_next_submission')
graded = module_attr('graded')
is_student_calibrated = module_attr('is_student_calibrated')
peer_grading = module_attr('peer_grading')
peer_grading_closed = module_attr('peer_grading_closed')
peer_grading_problem = module_attr('peer_grading_problem')
peer_gs = module_attr('peer_gs')
query_data_for_location = module_attr('query_data_for_location')
save_calibration_essay = module_attr('save_calibration_essay')
save_grade = module_attr('save_grade')
show_calibration_essay = module_attr('show_calibration_essay')
use_for_single_location_local = module_attr('use_for_single_location_local')
_find_corresponding_module_for_location = module_attr('_find_corresponding_module_for_location')
def validate(self):
"""
Validates the state of this instance. This is the override of the general XBlock method,
and it will also ask its superclass to validate.
"""
validation = super(PeerGradingDescriptor, self).validate()
validation = StudioValidation.copy(validation)
i18n_service = self.runtime.service(self, "i18n")
validation.summary = StudioValidationMessage(
StudioValidationMessage.ERROR,
i18n_service.ugettext(
"ORA1 is no longer supported. To use this assessment, "
"replace this ORA1 component with an ORA2 component."
)
)
return validation
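For reference, a minimal sketch of the proxy pattern behind the module_attr
assignments above, assuming a hypothetical `_xmodule` attribute that holds the
bound module (the real helper lives in xmodule.x_module and resolves the module
through the runtime):

    class module_attr(object):
        """Descriptor forwarding attribute access to the wrapped module."""
        def __init__(self, name):
            self.name = name

        def __get__(self, instance, owner):
            # `instance._xmodule` is an assumption for this sketch only.
            return getattr(instance._xmodule, self.name)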

View File

@@ -41,16 +41,6 @@ MODULE_DIR = path(__file__).dirname()
DATA_DIR = MODULE_DIR.parent.parent.parent.parent / "test" / "data"
open_ended_grading_interface = {
'url': 'blah/',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller',
}
class TestModuleSystem(ModuleSystem): # pylint: disable=abstract-method
"""
ModuleSystem for testing
@@ -150,7 +140,6 @@ def get_test_system(course_id=SlashSeparatedCourseKey('org', 'course', 'run')):
},
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
anonymous_student_id='student',
open_ended_grading_interface=open_ended_grading_interface,
course_id=course_id,
error_descriptor_class=ErrorDescriptor,
get_user_role=Mock(name='get_test_system.get_user_role', is_staff=False),

View File

@@ -1,1607 +0,0 @@
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
OpenEndedChild
OpenEndedModule
"""
import json
import logging
import unittest
from datetime import datetime
from lxml import etree
from lxml.html import fragment_fromstring
from mock import Mock, MagicMock, patch
from pytz import UTC
from webob.multidict import MultiDict
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
from xmodule.combined_open_ended_module import CombinedOpenEndedModule
from opaque_keys.edx.locations import Location
from xmodule.tests import get_test_system, test_util_open_ended
from xmodule.progress import Progress
from xmodule.validation import StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW
from xmodule.tests.test_util_open_ended import (
DummyModulestore, TEST_STATE_SA_IN,
MOCK_INSTANCE_STATE, TEST_STATE_SA, TEST_STATE_AI, TEST_STATE_AI2, TEST_STATE_AI2_INVALID,
TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile, INSTANCE_INCONSISTENT_STATE,
INSTANCE_INCONSISTENT_STATE2, INSTANCE_INCONSISTENT_STATE3, INSTANCE_INCONSISTENT_STATE4,
INSTANCE_INCONSISTENT_STATE5
)
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
import capa.xqueue_interface as xqueue_interface
log = logging.getLogger(__name__)
ORG = 'edX'
COURSE = 'open_ended' # name of directory with course data
class OpenEndedChildTest(unittest.TestCase):
"""
Test the open ended child class
"""
location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion")
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question.
It either fails to address the question or does so in a limited way,
with no evidence of higher-order thinking.
</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': "",
'open_ended_grading_interface': {},
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
definition = Mock()
descriptor = Mock()
def setUp(self):
super(OpenEndedChildTest, self).setUp()
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.openendedchild = OpenEndedChild(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, "")
def test_latest_score_empty(self):
answer = self.openendedchild.latest_score()
self.assertEqual(answer, None)
def test_latest_post_assessment_empty(self):
answer = self.openendedchild.latest_post_assessment(self.test_system)
self.assertEqual(answer, "")
def test_new_history_entry(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, new_answer)
new_answer = "Newer Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(new_answer, answer)
def test_record_latest_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
new_score = 3
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 3)
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 4)
def test_record_latest_post_assessment(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
post_assessment = "Post assessment"
self.openendedchild.record_latest_post_assessment(post_assessment)
self.assertEqual(post_assessment,
self.openendedchild.latest_post_assessment(self.test_system))
def test_get_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], 0)
self.assertEqual(score['total'], self.static_data['max_score'])
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], new_score)
self.assertEqual(score['total'], self.static_data['max_score'])
def test_reset(self):
self.openendedchild.reset(self.test_system)
state = json.loads(self.openendedchild.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedChild.INITIAL)
def test_is_last_response_correct(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(self.static_data['max_score'])
self.assertEqual(self.openendedchild.is_last_response_correct(),
'correct')
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(0)
self.assertEqual(self.openendedchild.is_last_response_correct(),
'incorrect')
class OpenEndedModuleTest(unittest.TestCase):
"""
Test the open ended module class
"""
location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion")
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = etree.XML('''<rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question.
It either fails to address the question or does so in a limited way,
with no evidence of higher-order thinking.
</option>
</category>
</rubric>''')
max_score = 4
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
''')
definition = {'oeparam': oeparam}
descriptor = Mock()
feedback = {
"success": True,
"feedback": "Grader Feedback"
}
single_score_msg = {
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': json.dumps(feedback),
'grader_type': 'IN',
'grader_id': '1',
'submission_id': '1',
'success': True,
'rubric_scores': [0],
'rubric_scores_complete': True,
'rubric_xml': etree.tostring(rubric)
}
multiple_score_msg = {
'correct': True,
'score': [0, 1],
'msg': 'Grader Message',
'feedback': [json.dumps(feedback), json.dumps(feedback)],
'grader_type': 'PE',
'grader_id': ['1', '2'],
'submission_id': '1',
'success': True,
'rubric_scores': [[0], [0]],
'rubric_scores_complete': [True, True],
'rubric_xml': [etree.tostring(rubric), etree.tostring(rubric)]
}
def setUp(self):
super(OpenEndedModuleTest, self).setUp()
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.test_system.location = self.location
self.mock_xqueue = MagicMock()
self.mock_xqueue.send_to_queue.return_value = (0, "Queued")
def constructed_callback(dispatch="score_update"):
return dispatch
self.test_system.xqueue = {'interface': self.mock_xqueue, 'construct_callback': constructed_callback,
'default_queuename': 'testqueue',
'waittime': 1}
self.openendedmodule = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_message_post(self):
"""Test message_post() sends feedback to xqueue."""
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertTrue(result['success'])
# make sure it's actually sending something we want to the queue
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['feedback'], feedback_post['feedback'])
self.assertEqual(mock_send_to_queue_body_arg['submission_id'], int(feedback_post['submission_id']))
self.assertEqual(mock_send_to_queue_body_arg['grader_id'], int(feedback_post['grader_id']))
self.assertEqual(mock_send_to_queue_body_arg['score'], feedback_post['score'])
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.DONE)
def test_message_post_fail(self):
"""Test message_post() if unable to send feedback to xqueue."""
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertFalse(result['success'])
state = json.loads(self.openendedmodule.get_instance_state())
self.assertNotEqual(state['child_state'], OpenEndedModule.DONE)
def test_send_to_grader(self):
student_response = "This is a student submission"
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertTrue(result)
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['student_response'], student_response)
self.assertEqual(mock_send_to_queue_body_arg['max_score'], self.max_score)
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
def test_send_to_grader_fail(self):
"""Test send_to_grader() if unable to send submission to xqueue."""
student_response = "This is a student submission"
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertFalse(result)
def test_save_answer_fail(self):
"""Test save_answer() if unable to send submission to grader."""
submission = "This is a student submission"
self.openendedmodule.send_to_grader = Mock(return_value=(False, "Failed"))
response = self.openendedmodule.save_answer(
{"student_answer": submission},
get_test_system()
)
self.assertFalse(response['success'])
self.assertNotEqual(self.openendedmodule.latest_answer(), submission)
self.assertEqual(self.openendedmodule.stored_answer, submission)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.INITIAL)
self.assertEqual(state['stored_answer'], submission)
def update_score_single(self):
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.single_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def update_score_multiple(self):
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def test_latest_post_assessment(self):
self.update_score_single()
assessment = self.openendedmodule.latest_post_assessment(self.test_system)
self.assertNotEqual(assessment, '')
# check for errors
self.assertNotIn('errors', assessment)
def test_update_score_single(self):
self.update_score_single()
score = self.openendedmodule.latest_score()
self.assertEqual(score, 4)
def test_update_score_multiple(self):
"""
Tests that a score of [0, 1] gets aggregated to 1 (a change in behavior added by @jbau).
"""
self.update_score_multiple()
score = self.openendedmodule.latest_score()
self.assertEqual(score, 1)
@patch('xmodule.open_ended_grading_classes.open_ended_module.log.error')
def test_update_score_nohistory(self, error_logger):
"""
Tests error handling when there is no child_history
"""
# NOTE that we are not creating any history items
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
error_msg = ("Trying to update score without existing studentmodule child_history:\n"
" location: i4x://edX/sa_test/selfassessment/SampleQuestion\n"
" score: 1\n"
" grader_ids: [u'1', u'2']\n"
" submission_ids: [u'1', u'1']")
self.openendedmodule.update_score(get, self.test_system)
(msg,), _ = error_logger.call_args
self.assertTrue(error_logger.called)
self.assertEqual(msg, error_msg)
def test_open_ended_display(self):
"""
Test storing answer with the open ended module.
"""
# Create a module with no state yet. Important that this start off as a blank slate.
test_module = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
saved_response = "Saved response."
submitted_response = "Submitted response."
# Initially, there will be no stored answer.
self.assertEqual(test_module.stored_answer, None)
# And the initial answer to display will be an empty string.
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
# Mock out the send_to_grader function so it doesn't try to connect to the xqueue.
test_module.send_to_grader = Mock(return_value=(True, "Success"))
# Submit a student response to the question.
test_module.handle_ajax(
"save_answer",
{"student_answer": submitted_response},
get_test_system()
)
# Submitting an answer should clear the stored answer.
self.assertEqual(test_module.stored_answer, None)
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
def test_parse_score_msg(self):
"""
Test _parse_score_msg with empty dict.
"""
assessment = self.openendedmodule._parse_score_msg("{}", self.test_system)
self.assertEqual(assessment.get("valid"), False)
class CombinedOpenEndedModuleTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule
"""
location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question.
It either fails to address the question or does so in a limited way,
with no evidence of higher-order thinking.
</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
metadata = {'attempts': '10', 'max_score': max_score}
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
</openended>'''
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=scope_ids,
)
def setUp(self):
super(CombinedOpenEndedModuleTest, self).setUp()
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
def test_get_tag_name(self):
"""
Test to see if the xml tag name is correct
"""
name = self.combinedoe.get_tag_name("<t>Tag</t>")
self.assertEqual(name, "t")
def test_get_last_response(self):
"""
See if we can parse the last response
"""
response_dict = self.combinedoe.get_last_response(0)
self.assertEqual(response_dict['type'], "selfassessment")
self.assertEqual(response_dict['max_score'], self.max_score)
self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)
def test_create_task(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0])
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1])
self.assertIsInstance(second_task, OpenEndedModule)
def test_get_task_number(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.get_task_number(0)
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.get_task_number(1)
self.assertIsInstance(second_task, OpenEndedModule)
third_task = combinedoe.get_task_number(2)
self.assertIsNone(third_task)
def test_update_task_states(self):
"""
See if we can update the task states properly
"""
changed = self.combinedoe.update_task_states()
self.assertFalse(changed)
current_task = self.combinedoe.current_task
current_task.change_state(CombinedOpenEndedV1Module.DONE)
changed = self.combinedoe.update_task_states()
self.assertTrue(changed)
def test_get_max_score(self):
"""
Try to get the max score of the problem
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
max_score = self.combinedoe.max_score()
self.assertEqual(max_score, 1)
def test_container_get_max_score(self):
"""
See if we can get the max score from the actual xmodule
"""
# The progress view requires that this function be exposed
max_score = self.combinedoe_container.max_score()
self.assertEqual(max_score, None)
def test_container_get_progress(self):
"""
See if we can get the progress from the actual xmodule
"""
progress = self.combinedoe_container.get_progress()
self.assertEqual(progress, None)
def test_get_progress(self):
"""
Test if we can get the correct progress from the combined open ended class
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
progress = self.combinedoe.get_progress()
self.assertIsInstance(progress, Progress)
# progress._a is the score of the xmodule, which is 0 right now.
self.assertEqual(progress._a, 0)
# progress._b is the max_score (which is 1), divided by the weight (which is 1).
self.assertEqual(progress._b, 1)
def test_container_weight(self):
"""
Check the problem weight in the container
"""
weight = self.combinedoe_container.weight
self.assertEqual(weight, 1)
def test_container_child_weight(self):
"""
Test the class to see if it picks up the right weight
"""
weight = self.combinedoe_container.child_module.weight
self.assertEqual(weight, 1)
def test_get_score(self):
"""
See if scoring works
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 0)
self.assertEqual(score_dict['total'], 1)
def test_alternate_orderings(self):
"""
Try multiple ordering of definitions to see if the problem renders different steps correctly.
"""
t1 = self.task_xml1
t2 = self.task_xml2
xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]
for xml in xml_to_test:
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
changed = combinedoe.update_task_states()
self.assertFalse(changed)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA_IN})
def test_get_score_realistic(self):
"""
Try to parse the correct score from a json instance state
"""
instance_state = json.loads(MOCK_INSTANCE_STATE)
rubric = """
<rubric>
<rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question. It either fails to address
the question or does so in a limited way, with no evidence of higher-order thinking.
</option>
<option>
The response is a marginal answer to the question. It may contain some elements of a
proficient response, but it is inaccurate or incomplete.
</option>
<option>
The response is a proficient answer to the question. It is generally correct, although
it may contain minor inaccuracies. There is limited evidence of higher-order thinking.
</option>
<option>The response is correct, complete, and contains evidence of higher-order thinking.</option>
</category>
</rubric>
</rubric>
"""
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(rubric),
'task_xml': [self.task_xml1, self.task_xml2]}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 15.0)
def generate_oe_module(self, task_state, task_number, task_xml):
"""
Return a combined open ended module with the specified parameters
"""
definition = {
'prompt': etree.XML(self.prompt),
'rubric': etree.XML(self.rubric),
'task_xml': task_xml
}
descriptor = Mock(data=definition)
module = Mock(scope_ids=Mock(usage_id='dummy-usage-id'))
instance_state = {'task_states': task_state, 'graded': True}
if task_number is not None:
instance_state.update({'current_task_number': task_number})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
return combinedoe
def ai_state_reset(self, task_state, task_number=None):
"""
See if state is properly reset
"""
combinedoe = self.generate_oe_module(task_state, task_number, [self.task_xml2])
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
if combinedoe.is_scored:
self.assertEqual(score['score'], 0)
else:
self.assertEqual(score['score'], None)
def ai_state_success(self, task_state, task_number=None, iscore=2, tasks=None):
"""
See if state stays the same
"""
if tasks is None:
tasks = [self.task_xml1, self.task_xml2]
combinedoe = self.generate_oe_module(task_state, task_number, tasks)
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
self.assertEqual(int(score['score']), iscore)
def test_ai_state_reset(self):
self.ai_state_reset(TEST_STATE_AI)
def test_ai_state2_reset(self):
self.ai_state_reset(TEST_STATE_AI2)
def test_ai_invalid_state(self):
self.ai_state_reset(TEST_STATE_AI2_INVALID)
def test_ai_state_reset_task_number(self):
self.ai_state_reset(TEST_STATE_AI, task_number=2)
self.ai_state_reset(TEST_STATE_AI, task_number=5)
self.ai_state_reset(TEST_STATE_AI, task_number=1)
self.ai_state_reset(TEST_STATE_AI, task_number=0)
def test_ai_state_success(self):
self.ai_state_success(TEST_STATE_AI)
def test_state_single(self):
self.ai_state_success(TEST_STATE_SINGLE, iscore=12)
def test_state_pe_single(self):
self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2])
def test_deprecation_message(self):
"""
Test the validation message produced for deprecation.
"""
# pylint: disable=no-member
validation = self.combinedoe_container.validate()
deprecation_msg = "ORA1 is no longer supported. To use this assessment, " \
"replace this ORA1 component with an ORA2 component."
self.assertEqual(
validation.summary.text,
deprecation_msg
)
self.assertEqual(validation.summary.type, StudioValidationMessage.ERROR)
class CombinedOpenEndedModuleConsistencyTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule rubric scores consistency.
"""
# location, definition_template, prompt, rubric, max_score, metadata, oeparam, task_xml1, task_xml2
# All these variables are used to construct the xmodule descriptor.
location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>
The response is not a satisfactory answer to the question. It either fails to address the question
or does so in a limited way, with no evidence of higher-order thinking.
</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 10
metadata = {'attempts': '10', 'max_score': max_score}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="10">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>
{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
</grader_payload>
</openendedparam>
</openended>'''
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=scope_ids,
)
def setUp(self):
super(CombinedOpenEndedModuleConsistencyTest, self).setUp()
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE))
def test_get_score(self):
"""
If the grader type is ML, the score should be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_pe_grader(self):
"""
If the grader type is PE, the score should not be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE2))
score_dict = combinedoe.get_score()
self.assertNotEqual(score_dict['score'], 15.0)
def test_get_score_with_different_score_value_in_rubric(self):
"""
If the grader type is ML, the score should be updated from the rubric scores. Aggregate rubric scores = sum([5])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE3))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 25.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_old_task_states(self):
"""
If the grader type is ML and old_task_states are present in the inconsistent instance state, the score
should be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE4))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_score_missing(self):
"""
If the grader type is ML and the score field is missing from the inconsistent instance state, the score
should be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE5))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
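# A minimal sketch of the aggregation rule the consistency tests above pin
# down for ML-graded work: the recovered score is the sum of the per-category
# rubric scores times a per-point weight (5 in these fixtures, so
# sum([3]) * 5 == 15 and sum([5]) * 5 == 25). Illustrative only, not the
# CombinedOpenEndedV1Module implementation.
def aggregate_rubric_score(rubric_scores, point_weight=5):
    """Return the score implied by a list of per-category rubric scores."""
    return sum(rubric_scores) * point_weight

assert aggregate_rubric_score([3]) == 15
assert aggregate_rubric_score([5]) == 25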
class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
"""
Test the student flow in the combined open ended xmodule
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
def construct_callback(dispatch="score_update"):
return dispatch
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
test_system.xqueue['construct_callback'] = construct_callback
return test_system
def setUp(self):
super(OpenEndedModuleXmlTest, self).setUp()
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
return self.get_module_from_location(self.problem_location)
def test_open_ended_load_and_save(self):
"""
See if we can load the module and save an answer
@return:
"""
# Try saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer)
def test_open_ended_flow_reset(self):
"""
Test the flow of the module if we complete the self-assessment step and then reset.
@return:
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("get_html", {})
self._handle_ajax("save_answer", {"student_answer": self.answer})
self._handle_ajax("get_html", {})
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
self._handle_ajax("get_combined_rubric", {})
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render(STUDENT_VIEW).content
self.assertIsInstance(html, basestring)
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "assessing")
self._handle_ajax("reset", {})
self.assertEqual(self._module().current_task_number, 0)
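# The "save_assessment" payloads in these flow tests are built with webob's
# MultiDict so that 'score_list[]' can repeat once per rubric category,
# mirroring an HTML form POST. A self-contained sketch of that pattern:
from webob.multidict import MultiDict

assessment = [0, 1]
payload = MultiDict({'assessment': sum(assessment)})
payload.extend(('score_list[]', val) for val in assessment)
assert payload['assessment'] == 1
assert payload.getall('score_list[]') == [0, 1]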
def test_open_ended_flow_with_xqueue_failure(self):
"""
Test a two-step problem where the student first goes through the self-assessment step and then the
open-ended step, with the xqueue failing on the first submission.
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
mock_xqueue_interface = Mock(
send_to_queue=Mock(return_value=(1, "Not Queued"))
)
# Call handle_ajax on the module with xqueue down
module = self._module()
with patch.dict(module.xmodule_runtime.xqueue, {'interface': mock_xqueue_interface}):
module.handle_ajax("save_assessment", assessment_dict)
self.assertEqual(module.current_task_number, 1)
self.assertTrue(module.child_module.get_task_number(1).child_created)
module.save()
# Check that next time the OpenEndedModule is loaded it calls send_to_grader
with patch.object(OpenEndedModule, 'send_to_grader') as mock_send_to_grader:
mock_send_to_grader.return_value = (False, "Not Queued")
module = self._module().child_module.get_score()
self.assertTrue(mock_send_to_grader.called)
self.assertTrue(self._module().child_module.get_task_number(1).child_created)
# Loading it this time should send submission to xqueue correctly
self.assertFalse(self._module().child_module.get_task_number(1).child_created)
self.assertEqual(self._module().current_task_number, 1)
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render(STUDENT_VIEW)
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({
"spelling": "Spelling: Ok.",
"grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other "
"children have taken off the shelf . but if i have the right to remove that book "
"from the shelf that work i abhor then you also have exactly the same right and "
"so does everyone else . and then we <bg>have no books left</bg> "
"on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author "
"write a persuasive essay to a newspaper reflecting your vies on censorship "
"<bg>in libraries . do</bg> you believe that certain materials , such as books , "
"music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves "
"if they are found <bg>offensive ? support your</bg> position with convincing "
"arguments from your own experience , observations <bg>, and or reading .</bg> "
}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': '''
<rubric>
<category>
<description>Writing Applications</description>
<score>0</score>
<option points='0'>
The essay loses focus, has little information or supporting details, and the
organization makes it difficult to follow.
</option>
<option points='1'>
The essay presents a mostly unified theme, includes sufficient information to convey
the theme, and is generally organized well.
</option>
</category>
<category>
<description> Language Conventions </description>
<score>0</score>
<option points='0'>
The essay demonstrates a reasonable command of proper spelling and grammar.
</option>
<option points='1'>
The essay demonstrates superior command of proper spelling and grammar.
</option>
</category>
</rubric>
''',
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render(STUDENT_VIEW)
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
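# The recovery rule this xqueue-failure test exercises, reduced to a sketch:
# if the submission never queued (child_created stays True), the next load
# re-sends it to the grader, and the flag clears only when that send
# succeeds. Names here are illustrative, not the OpenEndedModule internals.
def still_pending(child_created, send_to_grader):
    """Return the new value of the child_created flag after a load."""
    if not child_created:
        return False
    queued, _msg = send_to_grader()
    return not queued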
def test_open_ended_flow_correct(self):
"""
Test a two-step problem where the student first goes through the self-assessment step and then the
open-ended step.
@return:
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render(STUDENT_VIEW)
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({
"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other "
"children have taken off the shelf . but if i have the right to remove that book "
"from the shelf that work i abhor then you also have exactly the same right and "
"so does everyone else . and then we <bg>have no books left</bg> on the shelf for "
"any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay "
"to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> "
"you believe that certain materials , such as books , music , movies , magazines , "
"<bg>etc . , should be</bg> removed from the shelves if they are found "
"<bg>offensive ? support your</bg> position with convincing arguments from your "
"own experience , observations <bg>, and or reading .</bg> "
}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': '''
<rubric>
<category>
<description>Writing Applications</description>
<score>0</score>
<option points='0'>
The essay loses focus, has little information or supporting details, and
the organization makes it difficult to follow.
</option>
<option points='1'>
The essay presents a mostly unified theme, includes sufficient
information to convey the theme, and is generally organized well.
</option>
</category>
<category>
<description> Language Conventions </description>
<score>0</score>
<option points='0'>
The essay demonstrates a reasonable command of proper spelling and grammar.
</option>
<option points='1'>
The essay demonstrates superior command of proper spelling and grammar.
</option>
</category>
</rubric>
''',
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render(STUDENT_VIEW)
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
"""
Test whether the student is able to reset the problem
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion1Attempt")
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
super(OpenEndedModuleXmlAttemptTest, self).setUp()
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
return self.get_module_from_location(self.problem_location)
def test_reset_fail(self):
"""
Test the flow of the module if we complete the self-assessment step and then reset.
Since the problem only allows one attempt, the reset should fail.
@return:
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render(STUDENT_VIEW).content
self.assertIsInstance(html, basestring)
# Module should now be done
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "done")
# Try to reset, should fail because only 1 attempt is allowed
reset_data = json.loads(self._handle_ajax("reset", {}))
self.assertEqual(reset_data['success'], False)
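# The attempt gate behind that failure, as a sketch: the fixture allows a
# single attempt, so once it is used a reset request returns success=False.
# Illustrative rule only, not the module's code.
def can_reset(attempts, max_attempts):
    return attempts < max_attempts

assert can_reset(1, 1) is False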
class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore):
"""
Test whether the student is able to upload images properly.
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestionImageUpload")
answer_text = "Hello, this is my amazing answer."
file_text = "Hello, this is my amazing file."
file_name = "Student file 1"
answer_link = "http://www.edx.org"
autolink_tag = '<a target="_blank" href='
autolink_tag_swapped = '<a href='
def get_module_system(self, descriptor):
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.s3_interface = test_util_open_ended.S3_INTERFACE
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
super(OpenEndedModuleXmlImageUploadTest, self).setUp()
self.setup_modulestore(COURSE)
def test_file_upload_fail(self):
"""
Test to see if a student submission without a file attached fails.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer
response = module.handle_ajax("save_answer", {"student_answer": self.answer_text})
response = json.loads(response)
self.assertFalse(response['success'])
self.assertIn('error', response)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.S3Connection',
test_util_open_ended.MockS3Connection
)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.Key',
test_util_open_ended.MockS3Key
)
def test_file_upload_success(self):
"""
Test to see if a student submission with a file is handled properly.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer with a file
response = module.handle_ajax("save_answer", {
"student_answer": self.answer_text,
"valid_files_attached": True,
"student_file": [MockUploadedFile(self.file_name, self.file_text)],
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.file_name, response['student_response'])
self.assertTrue(self.autolink_tag in response['student_response'] or
self.autolink_tag_swapped in response['student_response'])
def test_link_submission_success(self):
"""
Students can submit links instead of files. Check that the link is properly handled.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer with a link.
response = module.handle_ajax("save_answer", {
"student_answer": "{0} {1}".format(self.answer_text, self.answer_link)
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.answer_link, response['student_response'])
self.assertTrue(self.autolink_tag in response['student_response'] or
self.autolink_tag_swapped in response['student_response'])
class OpenEndedModuleUtilTest(unittest.TestCase):
"""
Tests for the util functions of OpenEndedModule. Currently just for the html_sanitizer and <br/> inserter
"""
script_dirty = u'<script>alert("xss!")</script>'
script_clean = u'alert("xss!")'
img_dirty = u'<img alt="cats" height="200" onclick="eval()" src="http://example.com/lolcats.jpg" width="200">'
img_clean = u'<img width="200" alt="cats" height="200" src="http://example.com/lolcats.jpg">'
embed_dirty = u'<embed height="200" id="cats" onhover="eval()" src="http://example.com/lolcats.swf" width="200"/>'
embed_clean = u'<embed width="200" height="200" id="cats" src="http://example.com/lolcats.swf">'
iframe_dirty = u'<iframe class="cats" height="200" onerror="eval()" src="http://example.com/lolcats" width="200"/>'
iframe_clean = ur'<iframe (height="200" ?|class="cats" ?|width="200" ?|src="http://example.com/lolcats" ?)+></iframe>'
text = u'I am a \u201c\xfcber student\u201d'
text_lessthan_noencd = u'This used to be broken < by the other parser. 3>5'
text_lessthan_encode = u'This used to be broken &lt; by the other parser. 3&gt;5'
text_linebreaks = u"St\xfcdent submission:\nI like lamp."
text_brs = u"St\xfcdent submission:<br/>I like lamp."
link_text = u'I love going to www.lolcatz.com'
link_atag = u'I love going to <a target="_blank" href="http://www.lolcatz.com">www.lolcatz.com</a>'
def assertHtmlEqual(self, actual, expected):
"""
Assert that two strings represent the same html.
"""
return self._assertHtmlEqual(
fragment_fromstring(actual, create_parent='div'),
fragment_fromstring(expected, create_parent='div')
)
def _assertHtmlEqual(self, actual, expected):
"""
Assert that two HTML ElementTree elements are equal.
"""
self.assertEqual(actual.tag, expected.tag)
self.assertEqual(actual.attrib, expected.attrib)
self.assertEqual(actual.text, expected.text)
self.assertEqual(actual.tail, expected.tail)
self.assertEqual(len(actual), len(expected))
for actual_child, expected_child in zip(actual, expected):
self._assertHtmlEqual(actual_child, expected_child)
def test_script(self):
"""
Basic test for stripping <script>
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.script_dirty), self.script_clean)
def test_img(self):
"""
Basic test for passing through img, but stripping bad attr
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.img_dirty), self.img_clean)
def test_embed(self):
"""
Basic test for passing through embed, but stripping bad attr
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.embed_dirty), self.embed_clean)
def test_iframe(self):
"""
Basic test for passing through iframe, but stripping bad attr
"""
self.assertRegexpMatches(OpenEndedChild.sanitize_html(self.iframe_dirty), self.iframe_clean)
def test_text(self):
"""
Test for passing through text unchanged, including unicode
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text), self.text)
def test_lessthan(self):
"""
Tests that `<` in text context is handled properly
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)
def test_linebreaks(self):
"""
Tests the replace_newlines function
"""
self.assertHtmlEqual(OpenEndedChild.replace_newlines(self.text_linebreaks), self.text_brs)
def test_linkify(self):
"""
Tests that bare links are converted into anchor tags by sanitize_html
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.link_text), self.link_atag)
def test_combined(self):
"""
Tests a combination of inputs
"""
test_input = u"{}\n{}\n{}\n\n{}{}\n{}".format(self.link_text,
self.text,
self.script_dirty,
self.embed_dirty,
self.text_lessthan_noencd,
self.img_dirty)
test_output = u"{}<br/>{}<br/>{}<br/><br/>{}{}<br/>{}".format(self.link_atag,
self.text,
self.script_clean,
self.embed_clean,
self.text_lessthan_encode,
self.img_clean)
self.assertHtmlEqual(OpenEndedChild.sanitize_html(test_input), test_output)
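# A minimal stand-in for the newline replacement test_linebreaks asserts
# (a bare "\n" becomes "<br/>"); this mirrors only the behavior the test
# pins down, not the OpenEndedChild implementation.
def replace_newlines(html):
    return html.replace(u"\n", u"<br/>")

assert replace_newlines(u"line one\nline two") == u"line one<br/>line two"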

View File

@@ -1,493 +0,0 @@
"""
Test cases covering behaviors and workflows of the Peer Grading XBlock
"""
import unittest
import json
import logging
from mock import Mock, patch
from webob.multidict import MultiDict
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from xmodule.tests import get_test_system, get_test_descriptor_system
from xmodule.tests.test_util_open_ended import DummyModulestore
from xmodule.open_ended_grading_classes.peer_grading_service import MockPeerGradingService
from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor, MAX_ALLOWED_FEEDBACK_LENGTH
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.validation import StudioValidationMessage
log = logging.getLogger(__name__)
class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingSample")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
calibrated_dict = {'location': "blah"}
coe_dict = {'location': coe_location.to_deprecated_string()}
save_dict = MultiDict({
'location': "blah",
'submission_id': 1,
'submission_key': "",
'score': 1,
'feedback': "",
'submission_flagged': False,
'answer_unknown': False,
})
save_dict.extend(('rubric_scores[]', val) for val in (0, 1))
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
super(PeerGradingModuleTest, self).setUp()
self.setup_modulestore(self.course_id.course)
self.peer_grading = self.get_module_from_location(self.problem_location)
self.coe = self.get_module_from_location(self.coe_location)
def test_module_closed(self):
"""
Test if peer grading is closed
@return:
"""
closed = self.peer_grading.closed()
self.assertFalse(closed)
def test_get_html(self):
"""
Test to see if the module can be rendered
@return:
"""
_html = self.peer_grading.get_html()
def test_get_data(self):
"""
Try getting data from the external grading service
@return:
"""
success, _data = self.peer_grading.query_data_for_location(self.problem_location)
self.assertTrue(success)
def test_get_score_none(self):
"""
Test getting the score.
"""
score = self.peer_grading.get_score()
# Score should be None.
self.assertIsNone(score['score'])
def test_get_max_score(self):
"""
Test getting the max score
@return:
"""
max_score = self.peer_grading.max_score()
self.assertEquals(max_score, None)
def test_get_next_submission(self):
"""
Test to see if we can get the next mock submission
@return:
"""
success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'})
self.assertEqual(success, True)
def test_save_grade(self):
"""
Test if we can save the grade
@return:
"""
response = self.peer_grading.save_grade(self.save_dict)
self.assertEqual(response['success'], True)
def test_is_student_calibrated(self):
"""
Check to see if the student has calibrated yet
@return:
"""
response = self.peer_grading.is_student_calibrated(self.calibrated_dict)
self.assertTrue(response['success'])
def test_show_calibration_essay(self):
"""
Test showing the calibration essay
@return:
"""
response = self.peer_grading.show_calibration_essay(self.calibrated_dict)
self.assertTrue(response['success'])
def test_save_calibration_essay(self):
"""
Test saving the calibration essay
@return:
"""
response = self.peer_grading.save_calibration_essay(self.save_dict)
self.assertTrue(response['success'])
def test_peer_grading_problem(self):
"""
See if we can render a single problem
@return:
"""
response = self.peer_grading.peer_grading_problem(self.coe_dict)
self.assertTrue(response['success'])
def test___find_corresponding_module_for_location_exceptions(self):
"""
Unit test for the exception cases of __find_corresponding_module_for_location
Mainly for diff coverage
@return:
"""
# pylint: disable=protected-access
with self.assertRaises(ItemNotFoundError):
self.peer_grading._find_corresponding_module_for_location(
Location('org', 'course', 'run', 'category', 'name', 'revision')
)
def test_get_instance_state(self):
"""
Get the instance state dict
@return:
"""
self.peer_grading.get_instance_state()
def test_save_grade_with_long_feedback(self):
"""
Test that save_grade() returns an error message when the feedback is too long.
"""
feedback_fragment = "This is very long feedback."
self.save_dict["feedback"] = feedback_fragment * (
(MAX_ALLOWED_FEEDBACK_LENGTH / len(feedback_fragment) + 1)
)
response = self.peer_grading.save_grade(self.save_dict)
# Should not succeed.
self.assertEqual(response['success'], False)
self.assertEqual(
response['error'],
"Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
)
def test_get_score_success_fails(self):
"""
Test that if query_data_for_location does not succeed, the score is None.
"""
score_dict = self.get_score(False, 0, 0)
# Score dict should be None.
self.assertIsNone(score_dict)
def test_get_score(self):
"""
Test that if the student has graded the required number of submissions,
their score is 1.0.
"""
score_dict = self.get_score(True, 3, 3)
# Score should be 1.0.
self.assertEqual(score_dict["score"], 1.0)
# Testing score after data is stored in student_data_for_location in xmodule.
_score_dict = self.peer_grading.get_score()
# Score should be 1.0.
self.assertEqual(_score_dict["score"], 1.0)
def test_get_score_zero(self):
"""
Test that if the student has graded fewer than the required number of submissions,
their score is 0.0.
"""
score_dict = self.get_score(True, 2, 3)
# Score should be 0.0.
self.assertEqual(score_dict["score"], 0.0)
def get_score(self, success, count_graded, count_required):
"""
Returns the peer-graded score based on the provided graded/required values
"""
self.peer_grading.use_for_single_location_local = True
self.peer_grading.graded = True
# Patch for external grading service.
module_name = 'xmodule.peer_grading_module.PeerGradingModule.query_data_for_location'
with patch(module_name) as mock_query_data_for_location:
mock_query_data_for_location.return_value = (
success,
{"count_graded": count_graded, "count_required": count_required}
)
# Returning score dict.
return self.peer_grading.get_score()
def test_deprecation_message(self):
"""
Test the validation message produced for deprecation.
"""
peer_grading_module = self.peer_grading
validation = peer_grading_module.validate()
self.assertEqual(len(validation.messages), 0)
self.assertEqual(
validation.summary.text,
"ORA1 is no longer supported. To use this assessment, replace this ORA1 component with an ORA2 component."
)
self.assertEqual(validation.summary.type, StudioValidationMessage.ERROR)
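# The three get_score tests above pin down an all-or-nothing scoring rule.
# A minimal sketch of it (illustrative, not PeerGradingModule's code):
def peer_grading_score(success, count_graded, count_required):
    """None when the grading-service query fails; otherwise 1.0 once the
    student has graded the required number of submissions, 0.0 before."""
    if not success:
        return None
    return 1.0 if count_graded >= count_required else 0.0

assert peer_grading_score(False, 0, 0) is None
assert peer_grading_score(True, 3, 3) == 1.0
assert peer_grading_score(True, 2, 3) == 0.0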
class MockPeerGradingServiceProblemList(MockPeerGradingService):
"""
Mock object representing a set of peer-grading problems
"""
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
{
"num_graded": 3,
"num_pending": 681,
"num_required": 3,
"location": course_id.make_usage_key('combinedopenended', 'SampleQuestion'),
"problem_name": "Peer-Graded Essay"
},
]}
class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingScored")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
super(PeerGradingModuleScoredTest, self).setUp()
self.setup_modulestore(self.course_id.course)
def test_metadata_load(self):
peer_grading = self.get_module_from_location(self.problem_location)
self.assertFalse(peer_grading.closed())
def test_problem_list(self):
"""
Test to see if a peer grading problem list can be correctly initialized.
"""
# Initialize peer grading module.
peer_grading = self.get_module_from_location(self.problem_location)
# Ensure that it cannot find any peer grading.
html = peer_grading.peer_grading()
self.assertNotIn("Peer-Graded", html)
# Swap for our mock class, which will find peer grading.
peer_grading.peer_gs = MockPeerGradingServiceProblemList()
html = peer_grading.peer_grading()
self.assertIn("Peer-Graded", html)
class PeerGradingModuleLinkedTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading that is linked to an open ended module.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingLinked")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system.
"""
super(PeerGradingModuleLinkedTest, self).setUp()
self.setup_modulestore(self.course_id.course)
@property
def field_data(self):
"""
Setup the proper field data for a peer grading module.
"""
return DictFieldData({
'data': '<peergrading/>',
'location': self.problem_location,
'use_for_single_location': True,
'link_to_location': self.coe_location.to_deprecated_string(),
'graded': True,
})
@property
def scope_ids(self):
"""
Return the proper scope ids for the peer grading module.
"""
return ScopeIds(None, None, self.problem_location, self.problem_location)
def _create_peer_grading_descriptor_with_linked_problem(self):
"""
Internal helper method to construct a peer grading XBlock
"""
# Initialize the peer grading module.
system = get_test_descriptor_system()
return system.construct_xblock_from_class(
PeerGradingDescriptor,
field_data=self.field_data,
scope_ids=self.scope_ids
)
def _create_peer_grading_with_linked_problem(self, location, valid_linked_descriptor=True):
"""
Create a peer grading problem with a linked location.
"""
# Mock the linked problem descriptor.
linked_descriptor = Mock()
linked_descriptor.location = location
# Mock the peer grading descriptor.
pg_descriptor = Mock()
pg_descriptor.location = self.problem_location
if valid_linked_descriptor:
pg_descriptor.get_required_module_descriptors = lambda: [linked_descriptor, ]
else:
pg_descriptor.get_required_module_descriptors = lambda: []
test_system = self.get_module_system(pg_descriptor)
# Initialize the peer grading module.
peer_grading = PeerGradingModule(
pg_descriptor,
test_system,
self.field_data,
self.scope_ids,
)
return peer_grading
def _get_descriptor_with_invalid_link(self, exception_to_raise):
"""
Ensure that a peer grading descriptor with an invalid link will return an empty list.
"""
# Create a descriptor, and make loading an item throw an error.
descriptor = self._create_peer_grading_descriptor_with_linked_problem()
descriptor.system.load_item = Mock(side_effect=exception_to_raise)
# Ensure that modules is a list of length 0.
modules = descriptor.get_required_module_descriptors()
self.assertIsInstance(modules, list)
self.assertEqual(len(modules), 0)
def test_descriptor_with_nopath(self):
"""
Test that a descriptor which hits a NoPathToItem error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(NoPathToItem)
def test_descriptor_with_item_not_found(self):
"""
Test that a descriptor which hits an ItemNotFound error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(ItemNotFoundError)
def test_invalid_link(self):
"""
Ensure that a peer grading problem with no linked locations stays in panel mode.
"""
# Setup the peer grading module with no linked locations.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)
self.assertFalse(peer_grading.use_for_single_location_local)
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_problem(self):
"""
Ensure that a peer grading problem with a linked location loads properly.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# Ensure that it is properly setup.
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_ajax(self):
"""
Ensure that a peer grading problem with a linked location responds to ajax calls.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# If we specify a location, it will render the problem for that location.
data = peer_grading.handle_ajax('problem', {'location': self.coe_location.to_deprecated_string()})
self.assertTrue(json.loads(data)['success'])
# If we don't specify a location, it should use the linked location.
data = peer_grading.handle_ajax('problem', {})
self.assertTrue(json.loads(data)['success'])
def test_linked_score(self):
"""
Ensure that a peer grading problem with a linked location is properly scored.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
score_dict = peer_grading.get_score()
self.assertEqual(score_dict['score'], 1)
self.assertEqual(score_dict['total'], 1)
def test_get_next_submission(self):
"""
Ensure that a peer grading problem with a linked location can get a submission to score.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
data = peer_grading.handle_ajax('get_next_submission', {'location': self.coe_location})
self.assertEqual(json.loads(data)['submission_id'], 1)

View File

@@ -1,189 +0,0 @@
"""
Test cases covering workflows and behaviors of the Self Assessment feature
"""
from datetime import datetime
import json
import unittest
from mock import Mock, MagicMock
from webob.multidict import MultiDict
from pytz import UTC
from xblock.fields import ScopeIds
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from opaque_keys.edx.locations import Location
from lxml import etree
from . import get_test_system
import test_util_open_ended
class SelfAssessmentTest(unittest.TestCase):
"""
Test cases covering workflows and behaviors of the Self Assessment feature
"""
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
</category>
</rubric></rubric>'''
prompt = etree.XML("<prompt>This is sample prompt text.</prompt>")
definition = {
'rubric': rubric,
'prompt': prompt,
'submitmessage': 'Shall we submit now?',
'hintprompt': 'Consider this...',
}
location = Location("edX", "sa_test", "run", "selfassessment", "SampleQuestion", None)
descriptor = Mock()
def setUp(self):
super(SelfAssessmentTest, self).setUp()
self.static_data = {
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1,
'display_name': "Name",
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
system = get_test_system()
usage_key = system.course_id.make_usage_key('combinedopenended', 'test_loc')
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
system.xmodule_instance = Mock(scope_ids=scope_ids)
self.module = SelfAssessmentModule(
system,
self.location,
self.definition,
self.descriptor,
self.static_data
)
def test_get_html(self):
html = self.module.get_html(self.module.system)
self.assertIn("This is sample prompt text", html)
def test_self_assessment_flow(self):
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
def get_fake_item(name):
"""
Return the canned response value for the specified key.
"""
return responses[name]
def get_data_for_location(self, location, student):
"""
Return a dictionary of grading counts, all zero.
"""
return {
'count_graded': 0,
'count_required': 0,
'student_sub_count': 0,
}
mock_query_dict = MagicMock()
mock_query_dict.__getitem__.side_effect = get_fake_item
mock_query_dict.getall = get_fake_item
self.module.peer_gs.get_data_for_location = get_data_for_location
self.assertEqual(self.module.get_score()['score'], 0)
self.module.save_answer({'student_answer': "I am an answer"},
self.module.system)
self.assertEqual(self.module.child_state, self.module.ASSESSING)
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.child_state, self.module.DONE)
d = self.module.reset({})
self.assertTrue(d['success'])
self.assertEqual(self.module.child_state, self.module.INITIAL)
# if we now assess as right, skip the REQUEST_HINT state
self.module.save_answer({'student_answer': 'answer 4'},
self.module.system)
responses['assessment'] = '1'
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.child_state, self.module.DONE)
def test_self_assessment_display(self):
"""
Test storing an answer with the self assessment module.
"""
# Create a module with no state yet. Important that this start off as a blank slate.
test_module = SelfAssessmentModule(
get_test_system(),
self.location,
self.definition,
self.descriptor,
self.static_data
)
saved_response = "Saved response."
submitted_response = "Submitted response."
# Initially, there will be no stored answer.
self.assertEqual(test_module.stored_answer, None)
# And the initial answer to display will be an empty string.
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
# Submit a student response to the question.
test_module.handle_ajax("save_answer", {"student_answer": submitted_response}, get_test_system())
# Submitting an answer should clear the stored answer.
self.assertEqual(test_module.stored_answer, None)
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
# Mock saving an assessment.
assessment_dict = MultiDict({'assessment': 0, 'score_list[]': 0})
data = test_module.handle_ajax("save_assessment", assessment_dict, get_test_system())
self.assertTrue(json.loads(data)['success'])
# Reset the module so the student can try again.
test_module.reset(get_test_system())
# Confirm that the right response is loaded.
self.assertEqual(test_module.get_display_answer(), submitted_response)
def test_save_assessment_after_closing(self):
"""
Test storing assessment when close date is passed.
"""
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
self.module.save_answer({'student_answer': "I am an answer"}, self.module.system)
self.assertEqual(self.module.child_state, self.module.ASSESSING)
# Set the close date to the current datetime.
self.module.close_date = datetime.now(UTC)
# Save the assessment after the close date has passed.
self.module.save_assessment(responses, self.module.system)
self.assertNotEqual(self.module.child_state, self.module.DONE)
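# A sketch of the child-state machine these self-assessment tests walk
# through. The state and action names mirror the constants asserted above;
# the transition table itself is illustrative, not the module's code. Saving
# an assessment after the close date is refused, so the state stays put.
TRANSITIONS = {
    ('INITIAL', 'save_answer'): 'ASSESSING',
    ('ASSESSING', 'save_assessment'): 'DONE',
    ('DONE', 'reset'): 'INITIAL',
}

def next_state(state, action):
    """Return the next child state; unknown transitions leave it unchanged."""
    return TRANSITIONS.get((state, action), state)

assert next_state('INITIAL', 'save_answer') == 'ASSESSING'
assert next_state('DONE', 'reset') == 'INITIAL'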

File diff suppressed because one or more lines are too long

View File

@@ -31,11 +31,9 @@ from xmodule.x_module import ModuleSystem, XModule, XModuleDescriptor, Descripto
from xmodule.annotatable_module import AnnotatableDescriptor
from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_module import CombinedOpenEndedDescriptor
from xmodule.discussion_module import DiscussionDescriptor
from xmodule.gst_module import GraphicalSliderToolDescriptor
from xmodule.html_module import HtmlDescriptor
from xmodule.peer_grading_module import PeerGradingDescriptor
from xmodule.poll_module import PollDescriptor
from xmodule.word_cloud_module import WordCloudDescriptor
from xmodule.crowdsource_hinter import CrowdsourceHinterDescriptor
@@ -54,11 +52,9 @@ from xmodule.tests import get_test_descriptor_system, get_test_system
LEAF_XMODULES = {
AnnotatableDescriptor: [{}],
CapaDescriptor: [{}],
CombinedOpenEndedDescriptor: [{}],
DiscussionDescriptor: [{}],
GraphicalSliderToolDescriptor: [{}],
HtmlDescriptor: [{}],
PeerGradingDescriptor: [{}],
PollDescriptor: [{'display_name': 'Poll Display Name'}],
WordCloudDescriptor: [{}],
# This is being excluded because it has dependencies on django

View File

@@ -1583,7 +1583,6 @@ class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
replace_urls, descriptor_runtime, user=None, filestore=None,
debug=False, hostname="", xqueue=None, publish=None, node_path="",
anonymous_student_id='', course_id=None,
open_ended_grading_interface=None, s3_interface=None,
cache=None, can_execute_unsafe_code=None, replace_course_urls=None,
replace_jump_to_id_urls=None, error_descriptor_class=None, get_real_user=None,
field_data=None, get_user_role=None, rebind_noauth_module_to_user=None,
@@ -1678,9 +1677,6 @@ class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
if publish:
self.publish = publish
self.open_ended_grading_interface = open_ended_grading_interface
self.s3_interface = s3_interface
self.cache = cache or DoNothingCache()
self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
self.get_python_lib_zip = get_python_lib_zip or (lambda: None)

Binary file not shown.


View File

@@ -1,157 +0,0 @@
"""
Open-ended response in the courseware.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from .rubric import RubricPage
class OpenResponsePage(PageObject):
"""
Open-ended response in the courseware.
"""
url = None
def is_browser_on_page(self):
return self.q(css='div.xmodule_CombinedOpenEndedModule').present
@property
def assessment_type(self):
"""
Return the type of assessment currently active.
Options are "self", "ai", or "peer"
"""
labels = self.q(css='section#combined-open-ended-status>div.statusitem-current').text
if len(labels) < 1:
self.warning("Could not find assessment type label")
return None
# Provide some tolerance to UI changes
label_compare = labels[0].lower().strip()
if 'self' in label_compare:
return 'self'
elif 'ai' in label_compare:
return 'ai'
elif 'peer' in label_compare:
return 'peer'
else:
raise ValueError("Unexpected assessment type: '{0}'".format(label_compare))
@property
def prompt(self):
"""
Return an HTML string representing the essay prompt.
"""
prompt_css = "section.open-ended-child>div.prompt"
prompts = self.q(css=prompt_css).map(lambda el: el.get_attribute('innerHTML').strip()).results
if len(prompts) == 0:
self.warning("Could not find essay prompt on page.")
return ""
elif len(prompts) > 1:
self.warning("Multiple essay prompts found on page; using the first one.")
return prompts[0]
@property
def rubric(self):
"""
Return a `RubricPage` for a self-assessment problem.
If no rubric is available, raises a `BrokenPromise` exception.
"""
rubric = RubricPage(self.browser)
rubric.wait_for_page()
return rubric
@property
def written_feedback(self):
"""
Return the written feedback from the grader (if any).
If no feedback available, returns None.
"""
feedback = self.q(css='div.written-feedback').text
if len(feedback) > 0:
return feedback[0]
else:
return None
@property
def alert_message(self):
"""
Alert message displayed to the user.
"""
alerts = self.q(css="div.open-ended-alert").text
if len(alerts) < 1:
return ""
else:
return alerts[0]
@property
def grader_status(self):
"""
Status message from the grader.
If not present, return an empty string.
"""
status_list = self.q(css='div.grader-status').text
if len(status_list) < 1:
self.warning("No grader status found")
return ""
elif len(status_list) > 1:
self.warning("Multiple grader statuses found; returning the first one")
return status_list[0]
def set_response(self, response_str):
"""
Input a response to the prompt.
"""
input_css = "textarea.short-form-response"
self.q(css=input_css).fill(response_str)
def save_response(self):
"""
Save the response for later submission.
"""
self.q(css='input.save-button').first.click()
EmptyPromise(
lambda: 'save' in self.alert_message.lower(),
"Status message saved"
).fulfill()
def submit_response(self):
"""
Submit a response for grading.
"""
self.q(css='input.submit-button').first.click()
# modal dialog confirmation
self.q(css='button.ok-button').first.click()
# Ensure that the submission completes
self._wait_for_submitted(self.assessment_type)
def _wait_for_submitted(self, assessment_type):
"""
Wait for the submission to complete.
`assessment_type` is either 'self', 'ai', or 'peer'
"""
if assessment_type == 'self':
RubricPage(self.browser).wait_for_page()
elif assessment_type == 'ai' or assessment_type == "peer":
EmptyPromise(
lambda: self.grader_status != 'Unanswered',
"Problem status is no longer 'unanswered'"
).fulfill()
else:
self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
EmptyPromise(lambda: True, "Unrecognized assessment type").fulfill()
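# Illustrative usage of the page object above in a bok-choy test; `browser`
# is assumed to be a selenium WebDriver already on a courseware unit that
# contains an ORA1 problem (setup elided).
page = OpenResponsePage(browser)
page.wait_for_page()
page.set_response("A persuasive essay about censorship in libraries.")
page.save_response()
page.submit_response()
if page.assessment_type == 'self':
    page.rubric.set_scores([0, 1])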

View File

@@ -1,141 +0,0 @@
"""
Rubric for open-ended response problems, including calibration and peer-grading.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
class ScoreMismatchError(Exception):
"""
The provided scores do not match the rubric on the page.
"""
pass
class RubricPage(PageObject):
"""
Rubric for open-ended response problems, including calibration and peer-grading.
"""
url = None
def is_browser_on_page(self):
"""
Return a boolean indicating whether the rubric is available.
"""
return self.q(css='div.rubric').present
@property
def categories(self):
"""
Return a list of categories available in the essay rubric.
Example:
["Writing Applications", "Language Conventions"]
The rubric is not always visible; if it's not available,
this will return an empty list.
"""
return self.q(css='span.rubric-category').text
def set_scores(self, scores):
"""
Set the rubric scores. `scores` is a list of integers
indicating the number of points in each category.
For example, `scores` might be [0, 2, 1] if the student scored
0 points in the first category, 2 points in the second category,
and 1 point in the third category.
If the number of scores does not match the number of categories,
a `ScoreMismatchError` is raised.
"""
# Raise an error if we have the wrong number of scores
num_categories = len(self.categories)
if len(scores) != num_categories:
raise ScoreMismatchError(
"Received {0} scores but there are {1} rubric categories".format(
len(scores), num_categories))
# Set the score for each category
for score_index in range(len(scores)):
# Check that we have enough radio buttons
category_css = "div.rubric>ul.rubric-list:nth-of-type({0})".format(score_index + 1)
num_options = len(self.q(css=category_css + ' input.score-selection').results)
if scores[score_index] > num_options:
raise ScoreMismatchError(
"Tried to select score {0} but there are only {1} options".format(
scores[score_index], num_options))
# Check the radio button at the correct index
else:
input_css = (
category_css +
">li.rubric-list-item:nth-of-type({0}) input.score-selection".format(scores[score_index] + 1)
)
EmptyPromise(lambda: self._select_score_radio_button(input_css), "Score selection failed.").fulfill()
def _select_score_radio_button(self, radio_button_css):
self.q(css=radio_button_css).first.click()
return self.q(css=radio_button_css).selected
@property
def feedback(self):
"""
Return a list of correct/incorrect feedback for each rubric category (e.g. from self-assessment).
Example: ['correct', 'incorrect']
If no feedback is available, returns an empty list.
If feedback could not be interpreted (unexpected CSS class),
the list will contain a `None` item.
"""
# Get the green checkmark / red x labels
# We need to filter out the similar-looking CSS classes
# for the rubric items that are NOT marked correct/incorrect
feedback_css = 'div.rubric-label>label'
labels = [
el_class for el_class in
self.q(css=feedback_css).attrs('class')
if el_class != 'rubric-elements-info'
]
def map_feedback(css_class):
"""
Map CSS classes on the labels to correct/incorrect
"""
if 'choicegroup_incorrect' in css_class:
return 'incorrect'
elif 'choicegroup_correct' in css_class:
return 'correct'
else:
return None
return map(map_feedback, labels)
def submit(self, promise_check_type=None):
"""
Submit the rubric.
`promise_check_type` is either 'self' or 'peer'. If no promise check is required, don't pass a value.
"""
# Wait for the button to become enabled
button_css = 'input.submit-button'
EmptyPromise(
lambda: all(self.q(css=button_css).map(lambda el: not el.get_attribute('disabled')).results),
"Submit button not enabled"
).fulfill()
# Submit the assessment
self.q(css=button_css).first.click()
if promise_check_type == 'self':
# Check if submitted rubric is available
EmptyPromise(
lambda: self.q(css='div.rubric-label>label').present, 'Submitted Rubric not available!'
).fulfill()
elif promise_check_type == 'peer':
# Check if we are ready for peer grading
EmptyPromise(
lambda: self.q(css='input.calibration-feedback-button').present, 'Not ready for peer grading!'
).fulfill()
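# Illustrative usage of RubricPage for a self-assessed problem; `browser` is
# an assumed selenium WebDriver (setup elided). Pass one score per rubric
# category, then submit with the matching promise check.
rubric = RubricPage(browser)
rubric.wait_for_page()
rubric.set_scores([0, 1])
rubric.submit(promise_check_type='self')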

View File

@@ -1,30 +0,0 @@
<combinedopenended max_score="2" markdown="null" max_attempts="1000">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<openended>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>

View File

@@ -1,30 +0,0 @@
<combinedopenended max_score="1" accept_file_upload="False" markdown="null" max_attempts="10000" skip_spelling_checks="False" version="1">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<openended>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>

View File

@@ -1 +0,0 @@
<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>1</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>

View File

@@ -1,24 +0,0 @@
<combinedopenended max_score="1" accept_file_upload="False" markdown="null" max_attempts="1000" skip_spelling_checks="False" version="1">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
</combinedopenended>

View File

@@ -0,0 +1,3 @@
<poll url_name="markdown" question="## This is a test&#10;&#10;&lt;h1&gt;This is only a &amp;gt;&amp;lt;test&lt;/h1&gt;&#10;&#10;* One&#10;* Two&#10;* Three&#10;&#10;1. First&#10;2. Second&#10;3. Third&#10;&#10;We shall find out if markdown is respected.&#10;&#10;&gt; &quot;I have not yet begun to code.&quot;"
feedback="### This is some feedback&#10;&#10;[This is a link](http://www.example.com)&#10;&#10;&lt;a href=&quot;http://www.example.com&quot; target=&quot;_blank&quot;&gt;This is also a link.&lt;/a&gt;&#10;&#10;This is a paragraph with *emphasized* and **bold** text, and **_both_**."
answers='[["long", {"label": "I *feel* like this test will **pass**&lt;code&gt;test&lt;/code&gt;.", "img": null}]]'/>

View File

@@ -1628,9 +1628,9 @@ class DeprecationWarningMessageTest(CourseOutlineTest):
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc('combinedopenended', "Open", data=load_data_str('ora_peer_problem.xml'))
XBlockFixtureDesc('poll', "Poll", data=load_data_str('poll_markdown.xml'))
)
self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('peergrading', 'Peer'))
self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('survey', 'Survey'))
def _verify_deprecation_warning_info(
self,
@@ -1663,56 +1663,56 @@ class DeprecationWarningMessageTest(CourseOutlineTest):
def test_no_deprecation_warning_message_present(self):
"""
Scenario: Verify that deprecation warning message is not shown if ORA1
advance modules are not present and also no ORA1 component exist in
Scenario: Verify that the deprecation warning message is not shown if no deprecated
advanced modules are present and no deprecated components exist in
the course outline.
When I goto course outline
Then I don't see ORA1 deprecated warning
Then I don't see any deprecation warning
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.deprecated_warning_visible)
def test_deprecation_warning_message_present(self):
"""
Scenario: Verify deprecation warning message if ORA1 advance modules
and ORA1 components are present.
Scenario: Verify deprecation warning message if deprecated modules
and components are present.
Given I have ORA1 advance modules present in `Advanced Module List`
And I have created 2 ORA1 components
Given I have "poll" advance modules present in `Advanced Module List`
And I have created 2 poll components
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I see correct ORA1 deprecated warning advance modules remove text
And I see list of ORA1 components with correct display names
Then I see the poll deprecation warning
And I see the correct deprecation warning heading text
And I see the correct advanced modules removal text
And I see the list of poll components with correct display names
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=True,
components_display_name_list=['Open', 'Peer'],
deprecated_modules_list=['peergrading', 'combinedopenended']
components_display_name_list=['Poll', 'Survey'],
deprecated_modules_list=['poll', 'survey']
)
def test_deprecation_warning_with_no_displayname(self):
"""
Scenario: Verify deprecation warning message if ORA1 components are present.
Scenario: Verify deprecation warning message if poll components are present.
Given I have created 1 ORA1 deprecated component
Given I have created 1 deprecated poll component
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I see list of ORA1 components with correct message
Then I see the poll deprecation warning
And I see the correct deprecation warning heading text
And I see the list of poll components with the correct message
"""
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
# Create a deprecated ORA1 component with display_name to be empty and make sure
# Create a deprecated component with an empty display_name and make sure
# the deprecation warning is displayed with the default display name.
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc(category='combinedopenended', display_name="", data=load_data_str('ora_peer_problem.xml'))
XBlockFixtureDesc(category='poll', display_name="", data=load_data_str('poll_markdown.xml'))
)
self.course_outline_page.visit()
@@ -1722,44 +1722,44 @@ class DeprecationWarningMessageTest(CourseOutlineTest):
components_display_name_list=[self.DEFAULT_DISPLAYNAME],
)
def test_warning_with_ora1_advance_modules_only(self):
def test_warning_with_poll_advance_modules_only(self):
"""
Scenario: Verify that deprecation warning message is shown if only
ORA1 advance modules are present and no ORA1 component exist.
poll advanced modules are present and no poll components exist.
Given I have ORA1 advance modules present in `Advanced Module List`
Given I have poll advanced modules present in `Advanced Module List`
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I see correct ORA1 deprecated warning advance modules remove text
And I don't see list of ORA1 components
Then I see the poll deprecation warning
And I see the correct deprecation warning heading text
And I see the correct advanced modules removal text
And I don't see the list of poll components
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=False,
deprecated_modules_list=['peergrading', 'combinedopenended']
deprecated_modules_list=['poll', 'survey']
)
def test_warning_with_ora1_components_only(self):
def test_warning_with_poll_components_only(self):
"""
Scenario: Verify that deprecation warning message is shown if only
ORA1 component exist and no ORA1 advance modules are present.
poll components exist and no poll advanced modules are present.
Given I have created two ORA1 components
Given I have created two poll components
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I don't see ORA1 deprecated warning advance modules remove text
And I see list of ORA1 components with correct display names
Then I see the poll deprecation warning
And I see the correct deprecation warning heading text
And I don't see the advanced modules removal text
And I see the list of poll components with correct display names
"""
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=False,
components_present=True,
components_display_name_list=['Open', 'Peer']
components_display_name_list=['Poll', 'Survey']
)

View File

@@ -1,102 +0,0 @@
"""
Acceptance tests for Studio covering edit/save of the peer grading interface.
"""
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.import_export import ExportCoursePage
from ...pages.studio.component_editor import ComponentEditorView
from ...pages.studio.overview import CourseOutlinePage
from base_studio_test import StudioCourseTest
from ..helpers import load_data_str
class ORAComponentTest(StudioCourseTest):
"""
Tests that edit/save works correctly when link_to_location
is given in the peer grading interface settings.
"""
def setUp(self):
super(ORAComponentTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self.export_page = ExportCoursePage(
self.browser,
self.course_info['org'], self.course_info['number'], self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
"""
Populate the course fixture with a peer-assessed ORA problem and a peer grading module.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'combinedopenended',
"Peer Problem",
data=load_data_str('ora_peer_problem.xml'),
metadata={
'graded': True,
},
),
XBlockFixtureDesc('peergrading', 'Peer Module'),
)
)
)
)
def _go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'):
self.course_outline_page.visit()
subsection = self.course_outline_page.section(section_name).subsection(subsection_name)
return subsection.expand_subsection().unit(unit_name).go_to()
def test_edit_save_and_export(self):
"""
Ensure that edit/save works correctly when link_to_location
is set in the peer grading interface settings.
"""
self.course_outline_page.visit()
unit = self._go_to_unit_page()
peer_problem_location = unit.xblocks[1].locator
# Problem location should contain "combinedopenended".
self.assertIn("combinedopenended", peer_problem_location)
component = unit.xblocks[2]
# Interface component name should be "Peer Module".
self.assertEqual(component.name, "Peer Module")
component.edit()
component_editor = ComponentEditorView(self.browser, component.locator)
component_editor.set_field_value_and_save('Link to Problem Location', peer_problem_location)
# Verify that we can edit component again after saving and link_to_location is present.
component.edit()
location_input_element = component_editor.get_setting_element("Link to Problem Location")
self.assertEqual(
location_input_element.get_attribute('value'),
peer_problem_location
)
def test_verify_ora1_deprecation_message(self):
"""
Scenario: Verify the ORA1 deprecation message on ORA1 components.
Given I have a course with ORA1 components
When I go to the unit page
Then I see a deprecation error message on the ORA1 components.
"""
self.course_outline_page.visit()
unit = self._go_to_unit_page()
for xblock in unit.xblocks:
self.assertTrue(xblock.has_validation_error)
self.assertEqual(
xblock.validation_error_text,
"ORA1 is no longer supported. To use this assessment, "
"replace this ORA1 component with an ORA2 component."
)

View File

@@ -1,392 +0,0 @@
"""
Tests for ORA (Open Response Assessment) through the LMS UI.
"""
import json
from unittest import skip
from bok_choy.promise import Promise, BrokenPromise
from ..pages.lms.peer_confirm import PeerConfirmPage
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.open_response import OpenResponsePage
from ..pages.lms.peer_grade import PeerGradePage
from ..pages.lms.peer_calibrate import PeerCalibratePage
from ..pages.lms.progress import ProgressPage
from ..fixtures.course import XBlockFixtureDesc, CourseFixture
from ..fixtures.xqueue import XQueueResponseFixture
from .helpers import load_data_str, UniqueCourseTest
class OpenResponseTest(UniqueCourseTest):
"""
Tests that interact with ORA (Open Response Assessment) through the LMS UI.
This base class sets up a course with open response problems and defines
some helper functions used in the ORA tests.
"""
# Grade response (dict) to return from the XQueue stub
# in response to our unique submission text.
XQUEUE_GRADE_RESPONSE = None
def setUp(self):
"""
Install a test course with ORA problems.
Always start in the subsection with open response problems.
"""
super(OpenResponseTest, self).setUp()
# Create page objects
self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
self.course_nav = CourseNavPage(self.browser)
self.open_response = OpenResponsePage(self.browser)
self.peer_grade = PeerGradePage(self.browser)
self.peer_calibrate = PeerCalibratePage(self.browser)
self.peer_confirm = PeerConfirmPage(self.browser)
self.progress_page = ProgressPage(self.browser, self.course_id)
# Configure the test course
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
# Create a unique name for the peer assessed problem. This will show up
# in the list of peer problems, which is shared among tests running
# in parallel; it needs to be unique so we can find it.
# It's also important that the problem has "Peer" in the name; otherwise,
# the ORA stub will ignore it.
self.peer_problem_name = "Peer-Assessed {}".format(self.unique_id[0:6])
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc(
'combinedopenended',
'Self-Assessed',
data=load_data_str('ora_self_problem.xml'),
metadata={
'graded': True,
},
),
XBlockFixtureDesc(
'combinedopenended',
'AI-Assessed',
data=load_data_str('ora_ai_problem.xml'),
metadata={
'graded': True,
},
),
XBlockFixtureDesc(
'combinedopenended',
self.peer_problem_name,
data=load_data_str('ora_peer_problem.xml'),
metadata={
'graded': True,
},
),
# This is the interface a student can use to grade his/her peers
XBlockFixtureDesc('peergrading', 'Peer Module'),
)
)
).install()
# Configure the XQueue stub's response for the text we will submit
# The submission text is unique so we can associate each response with a particular test case.
self.submission = "Test submission " + self.unique_id[0:4]
if self.XQUEUE_GRADE_RESPONSE is not None:
XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE).install()
# Log in and navigate to the essay problems
self.auth_page.visit()
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
def submit_essay(self, expected_assessment_type, expected_prompt):
"""
Submit an essay and verify that the problem uses
the `expected_assessment_type` ("self", "ai", or "peer") and
shows the `expected_prompt` (a string).
"""
# Check the assessment type and prompt
self.assertEqual(self.open_response.assessment_type, expected_assessment_type)
self.assertIn(expected_prompt, self.open_response.prompt)
# Enter a submission, which will trigger a pre-defined response from the XQueue stub.
self.open_response.set_response(self.submission)
# Save the response and expect some UI feedback
self.open_response.save_response()
self.assertEqual(
self.open_response.alert_message,
"Answer saved, but not yet submitted."
)
# Submit the response
self.open_response.submit_response()
def get_asynch_feedback(self, assessment_type):
"""
Wait for and retrieve asynchronous feedback
(e.g. from AI, instructor, or peer grading)
`assessment_type` is either "ai" or "peer".
"""
# Because the check function involves fairly complicated actions
# (navigating through several screens), we give it more time to complete
# than the default.
return Promise(
self._check_feedback_func(assessment_type),
'Got feedback for {0} problem'.format(assessment_type),
timeout=600, try_interval=5
).fulfill()
def _check_feedback_func(self, assessment_type):
"""
Navigate away from, then return to, the peer problem to
receive updated feedback.
The returned function returns a tuple `(is_success, rubric_feedback)`:
`is_success` is True iff we have received feedback for the problem;
`rubric_feedback` is a list of "correct" or "incorrect" strings.
"""
if assessment_type == 'ai':
section_name = 'AI-Assessed'
elif assessment_type == 'peer':
section_name = self.peer_problem_name
else:
raise ValueError('Assessment type not recognized. Must be either "ai" or "peer"')
def _inner_check():
self.course_nav.go_to_sequential('Self-Assessed')
self.course_nav.go_to_sequential(section_name)
try:
feedback = self.open_response.rubric.feedback
# Unsuccessful if the rubric hasn't loaded
except BrokenPromise:
return False, None
# Successful if `feedback` is a non-empty list
else:
return bool(feedback), feedback
return _inner_check
class SelfAssessmentTest(OpenResponseTest):
"""
Test ORA self-assessment.
"""
def test_self_assessment(self):
"""
Given I am viewing a self-assessment problem
When I submit an essay and complete a self-assessment rubric
Then I see a scored rubric
And I see my score in the progress page.
"""
# Navigate to the self-assessment problem and submit an essay
self.course_nav.go_to_sequential('Self-Assessed')
self.submit_essay('self', 'Censorship in the Libraries')
# Fill in the rubric and expect that we get feedback
rubric = self.open_response.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit('self')
self.assertEqual(rubric.feedback, ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# The first score is self-assessment, which we've answered, so it's 1/2
# The other scores are AI- and peer-assessment, which we haven't answered so those are 0/2
self.assertEqual(scores, [(1, 2), (0, 2), (0, 2)])
class AIAssessmentTest(OpenResponseTest):
"""
Test ORA AI-assessment.
"""
XQUEUE_GRADE_RESPONSE = {
'score': 1,
'feedback': json.dumps({"spelling": "Ok.", "grammar": "Ok.", "markup_text": "NA"}),
'grader_type': 'BC',
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_scores_complete': True,
'rubric_xml': load_data_str('ora_rubric.xml')
}
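# Note: the grader_type codes used by these stubbed responses are 'BC' for the
# machine-graded (AI) path here, 'IN' for instructor, and 'PE' for peer
# (see the other test classes in this file).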
@skip('Intermittently failing, see ORA-342')
def test_ai_assessment(self):
"""
Given I am viewing an AI-assessment problem that has a trained ML model
When I submit an essay and wait for a response
Then I see a scored rubric
And I see my score in the progress page.
"""
# Navigate to the AI-assessment problem and submit an essay
self.course_nav.go_to_sequential('AI-Assessed')
self.submit_essay('ai', 'Censorship in the Libraries')
# Refresh the page to get the updated feedback
# then verify that we get the feedback sent by our stub XQueue implementation
self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# First score is the self-assessment score, which we haven't answered, so it's 0/2
# Second score is the AI-assessment score, which we have answered, so it's 1/2
# Third score is peer-assessment, which we haven't answered, so it's 0/2
self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class InstructorAssessmentTest(OpenResponseTest):
"""
Test an AI-assessment that has been graded by an instructor.
This runs the same test as the AI-assessment test, except
that the feedback comes from an instructor instead of the machine grader.
From the student's perspective, it should look the same.
"""
XQUEUE_GRADE_RESPONSE = {
'score': 1,
'feedback': json.dumps({"feedback": "Good job!"}),
'grader_type': 'IN',
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_scores_complete': True,
'rubric_xml': load_data_str('ora_rubric.xml')
}
@skip('Intermittently failing, see ORA-342')
def test_instructor_assessment(self):
"""
Given an instructor has graded my submission
When I view my submission
Then I see a scored rubric
And my progress page shows the problem score.
"""
# Navigate to the AI-assessment problem and submit an essay
# We have configured the stub to simulate that this essay will be staff-graded
self.course_nav.go_to_sequential('AI-Assessed')
self.submit_essay('ai', 'Censorship in the Libraries')
# Refresh the page to get the updated feedback
# then verify that we get the feedback sent by our stub XQueue implementation
self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# First score is the self-assessment score, which we haven't answered, so it's 0/2
# Second score is the AI-assessment score, which we have answered, so it's 1/2
# Third score is peer-assessment, which we haven't answered, so it's 0/2
self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class PeerAssessmentTest(OpenResponseTest):
"""
Test ORA peer-assessment, including calibration and giving/receiving scores.
"""
# Unlike other assessment types, peer assessment has multiple scores
XQUEUE_GRADE_RESPONSE = {
'score': [2, 2, 2],
'feedback': [json.dumps({"feedback": ""})] * 3,
'grader_type': 'PE',
'success': True,
'grader_id': [1, 2, 3],
'submission_id': 1,
'rubric_scores_complete': [True, True, True],
'rubric_xml': [load_data_str('ora_rubric.xml')] * 3
}
def test_peer_calibrate_and_grade(self):
"""
Given I am viewing a peer-assessment problem
And the instructor has submitted enough example essays
When I submit acceptable scores for enough calibration essays
Then I am able to peer-grade other students' essays.
Given I have submitted an essay for peer-assessment
And I have peer-graded enough students' essays
And enough other students have scored my essay
Then I can view the scores and written feedback
And I see my score in the progress page.
"""
# Initially, the student should NOT be able to grade peers,
# because he/she hasn't submitted any essays.
self.course_nav.go_to_sequential('Peer Module')
self.assertIn("You currently do not have any peer grading to do", self.peer_calibrate.message)
# Submit an essay
self.course_nav.go_to_sequential(self.peer_problem_name)
self.submit_essay('peer', 'Censorship in the Libraries')
# Need to reload the page to update the peer grading module
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Select the problem to calibrate
self.course_nav.go_to_sequential('Peer Module')
self.assertIn(self.peer_problem_name, self.peer_grade.problem_list)
self.peer_grade.select_problem(self.peer_problem_name)
# Calibrate
self.peer_confirm.start(is_calibrating=True)
rubric = self.peer_calibrate.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit('peer')
self.peer_calibrate.continue_to_grading()
# Grade a peer
self.peer_confirm.start()
rubric = self.peer_grade.rubric
self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
rubric.set_scores([0, 1])
rubric.submit()
# Expect to receive essay feedback
# We receive feedback from all three peers, each of whom
# provides 2 scores (one for each rubric item).
# Written feedback is a dummy value sent by the XQueue stub.
self.course_nav.go_to_sequential(self.peer_problem_name)
self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)
# Verify the progress page
self.progress_page.visit()
scores = self.progress_page.scores('Test Section', 'Test Subsection')
# First score is the self-assessment score, which we haven't answered, so it's 0/2
# Second score is the AI-assessment score, which we haven't answered, so it's 0/2
# Third score is peer-assessment, which we have answered, so it's 2/2
self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)])

View File

@@ -1,7 +0,0 @@
<chapter display_name="New Section 2 - Open Ended">
<sequential url_name="b7ebe0f048e9466e9ef32e7815fb5a93"/>
<sequential url_name="5c33f2c2b3aa45f5bfbf7bf7f9bcb2ff"/>
<sequential url_name="f58fd90cbd794cad881692d3b6e5cdbf"/>
<sequential url_name="345d618ca88944668d86586f83bff338"/>
<sequential url_name="4eadf76912cd436b9d698c8759784d8d"/>
</chapter>

View File

@@ -1,93 +0,0 @@
<combinedopenended accept_file_upload="true" markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Self)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/>
</task>
</combinedopenended>

View File

@@ -1,99 +0,0 @@
<combinedopenended markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Peer)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<openended>
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>

View File

@@ -1,93 +0,0 @@
<combinedopenended markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Self)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/>
</task>
</combinedopenended>

View File

@@ -1,6 +1,5 @@
<course advanced_modules="[&quot;annotatable&quot;, &quot;combinedopenended&quot;, &quot;peergrading&quot;, &quot;lti&quot;, &quot;word_cloud&quot;]" display_name="Manual Smoke Test Course 1" lti_passports="[&quot;ims:12345:secret&quot;]" pdf_textbooks="[{&quot;tab_title&quot;: &quot;An Example Paper&quot;, &quot;id&quot;: &quot;0An_Example_Paper&quot;, &quot;chapters&quot;: [{&quot;url&quot;: &quot;/static/1.pdf&quot;, &quot;title&quot;: &quot;Introduction &quot;}]}]" show_calculator="true" show_chat="true" start="2014-06-26T00:00:00Z">
<course advanced_modules="[&quot;annotatable&quot;, &quot;lti&quot;, &quot;word_cloud&quot;]" display_name="Manual Smoke Test Course 1" lti_passports="[&quot;ims:12345:secret&quot;]" pdf_textbooks="[{&quot;tab_title&quot;: &quot;An Example Paper&quot;, &quot;id&quot;: &quot;0An_Example_Paper&quot;, &quot;chapters&quot;: [{&quot;url&quot;: &quot;/static/1.pdf&quot;, &quot;title&quot;: &quot;Introduction &quot;}]}]" show_calculator="true" show_chat="true" start="2014-06-26T00:00:00Z">
<chapter url_name="a64a6f63f75d430aa71e6ce113c5b4d2"/>
<chapter url_name="d68c2861c10a4c9d92a679b4cfc0f924"/>
<chapter url_name="ab97a6dbfafd48868c36bed4c8c5391d"/>
<chapter url_name="5bb7a5ab824f460580a756a4f347377c"/>
<chapter url_name="ce2fd991d84b4a5ca75350eb8e350627"/>

View File

@@ -1,93 +0,0 @@
<combinedopenended accept_file_upload="true" markdown="[prompt]&#10; &lt;h3&gt;Censorship in the Libraries&lt;/h3&gt;&#10;&#10; &lt;p&gt;'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author&#10; &lt;/p&gt;&#10;&#10; &lt;p&gt;&#10; Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.&#10; &lt;/p&gt;&#10;[prompt]&#10;[rubric]&#10;+ Ideas&#10;- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.&#10;- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.&#10;- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.&#10;- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.&#10;+ Content&#10;- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.&#10;- Includes little information and few or no details. Explores only one or two facets of the topic.&#10;- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.&#10;- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.&#10;+ Organization&#10;- Ideas organized illogically, transitions weak, and response difficult to follow.&#10;- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.&#10;- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.&#10;+ Style&#10;- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.&#10;- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).&#10;- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.&#10;+ Voice&#10;- Demonstrates language and tone that may be inappropriate to task and reader.&#10;- Demonstrates an attempt to adjust language and tone to task and reader.&#10;- Demonstrates effective adjustment of language and tone to task and reader.&#10;[rubric]&#10;[tasks]&#10;(Self)&#10;[tasks]&#10;&#10;">
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/>
</task>
</combinedopenended>

View File

@@ -1,3 +0,0 @@
<vertical display_name="Self Assessment" parent_sequential_url="i4x://ManTestX/ManTest1/sequential/b7ebe0f048e9466e9ef32e7815fb5a93" index_in_children_list="0">
<combinedopenended url_name="ecfe4fa774ff48d089ae84daa1f6cc75"/>
</vertical>

View File

@@ -1 +1 @@
{"course/2014": {"advanced_modules": ["annotatable", "combinedopenended", "peergrading", "lti", "word_cloud"], "show_calculator": true, "display_name": "Manual Smoke Test Course 1", "tabs": [{"type": "courseware", "name": "Courseware"}, {"type": "course_info", "name": "Course Info"}, {"type": "textbooks", "name": "Textbooks"}, {"type": "discussion", "name": "Discussion"}, {"type": "wiki", "name": "Wiki"}, {"type": "progress", "name": "Progress"}, {"type": "pdf_textbooks", "name": "Textbooks"}, {"type": "open_ended", "name": "Open Ended Panel"}], "discussion_topics": {"General": {"id": "i4x-ManTestX-ManTest1-course-2014"}}, "start": "2014-06-26T00:00:00Z", "pdf_textbooks": [{"tab_title": "An Example Paper", "id": "0An_Example_Paper", "chapters": [{"url": "/static/1.pdf", "title": "Introduction "}]}], "lti_passports": ["ims:12345:secret"]}}
{"course/2014": {"advanced_modules": ["annotatable", "lti", "word_cloud"], "show_calculator": true, "display_name": "Manual Smoke Test Course 1", "tabs": [{"type": "courseware", "name": "Courseware"}, {"type": "course_info", "name": "Course Info"}, {"type": "textbooks", "name": "Textbooks"}, {"type": "discussion", "name": "Discussion"}, {"type": "wiki", "name": "Wiki"}, {"type": "progress", "name": "Progress"}, {"type": "pdf_textbooks", "name": "Textbooks"}], "discussion_topics": {"General": {"id": "i4x-ManTestX-ManTest1-course-2014"}}, "start": "2014-06-26T00:00:00Z", "pdf_textbooks": [{"tab_title": "An Example Paper", "id": "0An_Example_Paper", "chapters": [{"url": "/static/1.pdf", "title": "Introduction "}]}], "lti_passports": ["ims:12345:secret"]}}

View File

@@ -1,3 +0,0 @@
<sequential display_name="New Subsection 2.5">
<vertical url_name="4502126328484ed58c87e7ba3b0fa21d"/>
</sequential>

View File

@@ -1,3 +0,0 @@
<sequential display_name="New Subsection 2.2">
<vertical url_name="e34798bf546a4178ab76afe3a5f729af"/>
</sequential>

View File

@@ -1,3 +0,0 @@
<sequential display_name="New Subsection 2.1">
<vertical url_name="5887a034ad17480393c5ebca4b8fd1d4"/>
</sequential>

View File

@@ -1,3 +0,0 @@
<vertical display_name="File Uploads">
<combinedopenended url_name="3b04d935c8d945c3900708279fb24892"/>
</vertical>

View File

@@ -1,3 +0,0 @@
<vertical display_name="Self Assessment">
<combinedopenended url_name="ecfe4fa774ff48d089ae84daa1f6cc75"/>
</vertical>

View File

@@ -1,3 +0,0 @@
<vertical display_name="Peer Assessment">
<combinedopenended url_name="b3aa2db471a9412fbc96302f2e5ea983"/>
</vertical>

View File

@@ -1 +0,0 @@
This is a very simple course, useful for debugging open-ended grading code.

View File

@@ -1,33 +0,0 @@
<combinedopenended attempts="10000" display_name = "Humanities Question -- Machine Assessed">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
<task>
<openended min_score_to_attempt="2" max_score_to_attempt="3">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>

View File

@@ -1,24 +0,0 @@
<combinedopenended attempts="1" display_name = "Humanities Question -- Machine Assessed">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
</combinedopenended>

View File

@@ -1,24 +0,0 @@
<combinedopenended attempts="1" display_name = "Humanities Question -- Machine Assessed" accept_file_upload="True">
<rubric>
<rubric>
<category>
<description>Writing Applications</description>
<option> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option>
<option> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option>
</category>
<category>
<description> Language Conventions </description>
<option> The essay demonstrates a reasonable command of proper spelling and grammar. </option>
<option> The essay demonstrates superior command of proper spelling and grammar.</option>
</category>
</rubric>
</rubric>
<prompt>
<h4>Censorship in the Libraries</h4>
<p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
<p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
</prompt>
<task>
<selfassessment/>
</task>
</combinedopenended>

View File

@@ -1 +0,0 @@
<course org="edX" course="open_ended" url_name="2012_Fall"/>

View File

@@ -1,10 +0,0 @@
<course>
<chapter url_name="Overview">
<combinedopenended url_name="SampleQuestion"/>
<combinedopenended url_name="SampleQuestion1Attempt"/>
<combinedopenended url_name="SampleQuestionImageUpload"/>
<peergrading url_name="PeerGradingSample"/>
<peergrading url_name="PeerGradingScored"/>
<peergrading url_name="PeerGradingLinked"/>
</chapter>
</course>

View File

@@ -1 +0,0 @@
<peergrading is_graded="True" max_grade="1" use_for_single_location="True" link_to_location="i4x://edX/open_ended/combinedopenended/SampleQuestion"/>

View File

@@ -1 +0,0 @@
<peergrading/>

View File

@@ -1 +0,0 @@
<peergrading is_graded="True" max_grade="1" use_for_single_location="False" link_to_location="i4x://edX/open_ended/combinedopenended/SampleQuestion"/>

View File

@@ -1,17 +0,0 @@
{
"course/2012_Fall": {
"graceperiod": "2 days 5 hours 59 minutes 59 seconds",
"start": "2015-07-17T12:00",
"display_name": "Self Assessment Test",
"graded": "true"
},
"chapter/Overview": {
"display_name": "Overview"
},
"combinedopenended/SampleQuestion": {
"display_name": "Sample Question"
},
"peergrading/PeerGradingSample": {
"display_name": "Sample Question"
}
}

View File

@@ -1 +0,0 @@
<course org="edX" course="sa_test" url_name="2012_Fall"/>

View File

@@ -1 +0,0 @@
This is a very simple course, useful for debugging open-ended grading code. It specifically tests that a peer grading module with no path to it in the course is handled properly.

View File

@@ -1 +0,0 @@
<course org="edX" course="open_ended_nopath" url_name="2012_Fall"/>

View File

@@ -1,4 +0,0 @@
<course>
<chapter url_name="Overview">
</chapter>
</course>

View File

@@ -1,11 +0,0 @@
{
"course/2012_Fall": {
"graceperiod": "2 days 5 hours 59 minutes 59 seconds",
"start": "2015-07-17T12:00",
"display_name": "Self Assessment Test",
"graded": "true"
},
"chapter/Overview": {
"display_name": "Overview"
}
}

View File

@@ -1 +0,0 @@
<peergrading display_name="Peer Grading" use_for_single_location="False" is_graded="False"/>

View File

@@ -1,37 +0,0 @@
Feature: LMS.Open ended grading
As a student in an edX course
In order to complete the courseware questions
I want the machine learning grading to be functional
# Commenting these all out right now until we can
# make a reference implementation for a course with
# an open ended grading problem that is always available
#
# Scenario: An answer that is too short is rejected
# Given I navigate to an openended question
# And I enter the answer "z"
# When I press the "Check" button
# And I wait for "8" seconds
# And I see the grader status "Submitted for grading"
# And I press the "Recheck for Feedback" button
# Then I see the red X
# And I see the grader score "0"
# Scenario: An answer with too many spelling errors is rejected
# Given I navigate to an openended question
# And I enter the answer "az"
# When I press the "Check" button
# And I wait for "8" seconds
# And I see the grader status "Submitted for grading"
# And I press the "Recheck for Feedback" button
# Then I see the red X
# And I see the grader score "0"
# When I click the link for full output
# Then I see the spelling grading message "More spelling errors than average."
# Scenario: An answer makes its way to the instructor dashboard
# Given I navigate to an openended question as staff
# When I submit the answer "I love Chemistry."
# And I wait for "8" seconds
# And I visit the staff grading page
# Then my answer is queued for instructor grading

View File

@@ -1,93 +0,0 @@
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import django_url
from nose.tools import assert_equals, assert_in # pylint: disable=no-name-in-module
from logging import getLogger
logger = getLogger(__name__)
@step('I navigate to an openended question$')
def navigate_to_an_openended_question(step):
world.register_by_course_key('MITx/3.091x/2012_Fall')
world.log_in(email='robot@edx.org', password='test')
problem = '/courses/MITx/3.091x/2012_Fall/courseware/Week_10/Polymer_Synthesis/'
world.browser.visit(django_url(problem))
tab_css = 'ol#sequence-list > li > a[data-element="5"]'
world.css_click(tab_css)
@step('I navigate to an openended question as staff$')
def navigate_to_an_openended_question_as_staff(step):
world.register_by_course_key('MITx/3.091x/2012_Fall', True)
world.log_in(email='robot@edx.org', password='test')
problem = '/courses/MITx/3.091x/2012_Fall/courseware/Week_10/Polymer_Synthesis/'
world.browser.visit(django_url(problem))
tab_css = 'ol#sequence-list > li > a[data-element="5"]'
world.css_click(tab_css)
@step(u'I enter the answer "([^"]*)"$')
def enter_the_answer_text(step, text):
world.css_fill('textarea', text)
@step(u'I submit the answer "([^"]*)"$')
def i_submit_the_answer_text(step, text):
world.css_fill('textarea', text)
world.css_click('input.check')
@step('I click the link for full output$')
def click_full_output_link(step):
world.css_click('a.full')
@step(u'I visit the staff grading page$')
def i_visit_the_staff_grading_page(step):
world.click_link('Instructor')
world.click_link('Staff grading')
@step(u'I see the grader message "([^"]*)"$')
def see_grader_message(step, msg):
message_css = 'div.external-grader-message'
assert_in(msg, world.css_text(message_css))
@step(u'I see the grader status "([^"]*)"$')
def see_the_grader_status(step, status):
status_css = 'div.grader-status'
assert_equals(status, world.css_text(status_css))
@step('I see the red X$')
def see_the_red_x(step):
assert world.is_css_present('div.grader-status > span.incorrect')
@step(u'I see the grader score "([^"]*)"$')
def see_the_grader_score(step, score):
score_css = 'div.result-output > p'
score_text = world.css_text(score_css)
assert_equals(score_text, 'Score: %s' % score)
@step('I see the link for full output$')
def see_full_output_link(step):
assert world.is_css_present('a.full')
@step('I see the spelling grading message "([^"]*)"$')
def see_spelling_msg(step, msg):
spelling_msg = world.css_text('div.spelling')
assert_equals('Spelling: %s' % msg, spelling_msg)
@step(u'my answer is queued for instructor grading$')
def answer_is_queued_for_instructor_grading(step):
list_css = 'ul.problem-list > li > a'
actual_msg = world.css_text(list_css)
expected_msg = "(0 graded, 1 pending)"
assert_in(expected_msg, actual_msg)

View File

@@ -24,11 +24,10 @@ from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml
DATA_DIR = settings.COMMON_TEST_DATA_ROOT
XML_COURSE_DIRS = ['toy', 'simple', 'open_ended']
XML_COURSE_DIRS = ['toy', 'simple']
MAPPINGS = {
'edX/toy/2012_Fall': 'xml',
'edX/simple/2012_Fall': 'xml',
'edX/open_ended/2012_Fall': 'xml',
}
TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config(
@@ -92,7 +91,7 @@ class CommandsTestBase(ModuleStoreTestCase):
self.assertEqual(course_ids, dumped_ids)
def test_correct_course_structure_metadata(self):
course_id = unicode(modulestore().make_course_key('edX', 'open_ended', '2012_Fall'))
course_id = unicode(modulestore().make_course_key('edX', 'simple', '2012_Fall'))
args = [course_id]
kwargs = {'modulestore': 'default'}

View File

@@ -412,31 +412,6 @@ def get_module_system_for_user(user, student_data, # TODO # pylint: disable=to
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
}
# This is a hacky way to pass settings to the combined open ended xmodule
# It needs an S3 interface to upload images to S3
# It needs the open ended grading interface in order to get peer grading to be done
# this first checks to see if the descriptor is the correct one, and only sends settings if it is
# Get descriptor metadata fields indicating needs for various settings
needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False)
needs_s3_interface = getattr(descriptor, "needs_s3_interface", False)
# Initialize interfaces to None
open_ended_grading_interface = None
s3_interface = None
# Create interfaces if needed
if needs_open_ended_interface:
open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE
open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING
open_ended_grading_interface['mock_staff_grading'] = settings.MOCK_STAFF_GRADING
if needs_s3_interface:
s3_interface = {
'access_key': getattr(settings, 'AWS_ACCESS_KEY_ID', ''),
'secret_access_key': getattr(settings, 'AWS_SECRET_ACCESS_KEY', ''),
'storage_bucket_name': getattr(settings, 'AWS_STORAGE_BUCKET_NAME', 'openended')
}
def inner_get_module(descriptor):
"""
Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
@@ -725,8 +700,6 @@ def get_module_system_for_user(user, student_data, # TODO # pylint: disable=to
publish=publish,
anonymous_student_id=anonymous_student_id,
course_id=course_id,
open_ended_grading_interface=open_ended_grading_interface,
s3_interface=s3_interface,
cache=cache,
can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
get_python_lib_zip=(lambda: get_python_lib_zip(contentstore, course_id)),

View File

@@ -2055,13 +2055,13 @@ class TestDisabledXBlockTypes(ModuleStoreTestCase):
super(TestDisabledXBlockTypes, self).setUp()
for store in self.store.modulestores:
store.disabled_xblock_types = ('combinedopenended', 'peergrading', 'video')
store.disabled_xblock_types = ('video',)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_item(self, default_ms):
with self.store.default_store(default_ms):
course = CourseFactory()
for block_type in ('peergrading', 'combinedopenended', 'video'):
for block_type in ('video',):
item = ItemFactory(category=block_type, parent=course)
item = self.store.get_item(item.scope_ids.usage_id)
self.assertEqual(item.__class__.__name__, 'RawDescriptorWithMixins')

View File

@@ -67,8 +67,6 @@ from .entrance_exams import (
from courseware.user_state_client import DjangoXBlockUserStateClient
from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from open_ended_grading.views import StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from student.models import UserTestGroup, CourseEnrollment
from student.views import is_course_blocked
from util.cache import cache, cache_if_anonymous
@@ -1126,25 +1124,6 @@ def submission_history(request, course_id, student_username, location):
return render_to_response('courseware/submission_history.html', context)
def notification_image_for_tab(course_tab, user, course):
"""
Returns the notification image path for the given course_tab if applicable, otherwise None.
"""
tab_notification_handlers = {
StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
OpenEndedGradingTab.type: open_ended_notifications.combined_notifications
}
if course_tab.name in tab_notification_handlers:
notifications = tab_notification_handlers[course_tab.name](course, user)
if notifications and notifications['pending_grading']:
return notifications['img_path']
return None
def get_static_tab_contents(request, course, tab):
"""
Returns the contents for the given static tab

View File

@@ -1,133 +0,0 @@
"""
Command to manually re-post open ended submissions to the grader.
"""
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from courseware.courses import get_course
from instructor.utils import get_module_for_student
class Command(BaseCommand):
"""
Command to manually re-post open ended submissions to the grader.
"""
help = ("Usage: openended_post <course_id> <problem_location> <student_ids.txt> <hostname> --dry-run --task-number=<task_number>\n"
"The text file should contain a User.id in each line.")
option_list = BaseCommand.option_list + (
make_option('-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except send the submission to the grader. "),
make_option('--task-number',
type='int', default=0,
help="Task number that needs to be submitted."),
)
def handle(self, *args, **options):
dry_run = options['dry_run']
task_number = options['task_number']
if len(args) == 4:
course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
location = course_id.make_usage_key_from_deprecated_string(args[1])
students_ids = [line.strip() for line in open(args[2])]
hostname = args[3]
else:
print self.help
return
try:
course = get_course(course_id)
except ValueError as err:
print err
return
descriptor = modulestore().get_item(location, depth=0)
if descriptor is None:
print "Location not found in course"
return
if dry_run:
print "Doing a dry run."
students = User.objects.filter(id__in=students_ids).order_by('username')
print "Number of students: {0}".format(students.count())
for student in students:
post_submission_for_student(student, course, location, task_number, dry_run=dry_run, hostname=hostname)
def post_submission_for_student(student, course, location, task_number, dry_run=True, hostname=None):
"""If the student's task child_state is ASSESSING post submission to grader."""
print "{0}:{1}".format(student.id, student.username)
request = DummyRequest()
request.user = student
request.host = hostname
try:
module = get_module_for_student(student, location, request=request, course=course)
if module is None:
print " WARNING: No state found."
return False
latest_task = module.child_module.get_task_number(task_number)
if latest_task is None:
print " WARNING: No task state found."
return False
if not isinstance(latest_task, OpenEndedModule):
print " ERROR: Not an OpenEndedModule task."
return False
latest_task_state = latest_task.child_state
if latest_task_state == OpenEndedChild.INITIAL:
print " WARNING: No submission."
elif latest_task_state == OpenEndedChild.POST_ASSESSMENT or latest_task_state == OpenEndedChild.DONE:
print " WARNING: Submission already graded."
elif latest_task_state == OpenEndedChild.ASSESSING:
latest_answer = latest_task.latest_answer()
if dry_run:
print " Skipped sending submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
else:
latest_task.send_to_grader(latest_answer, latest_task.system)
print " Sent submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
return True
else:
print "WARNING: Invalid task_state: {0}".format(latest_task_state)
except Exception as err: # pylint: disable=broad-except
print err
return False
class DummyRequest(object):
"""Dummy request"""
META = {}
def __init__(self):
self.session = {}
self.user = None
self.host = None
self.secure = True
def get_host(self):
"""Return a default host."""
return self.host
def is_secure(self):
"""Always secure."""
return self.secure
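A hedged sketch of how this now-deleted command could have been driven from code rather than the shell; the course id and problem location are taken from the test fixtures elsewhere in this diff, while the student-id file and hostname are placeholders:

# Sketch only: mirrors the help string above; assumes a configured Django
# environment from the same era (optparse-based management commands).
from django.core.management import call_command

call_command(
    'openended_post',
    'edX/open_ended/2012_Fall',                               # <course_id>
    'i4x://edX/open_ended/combinedopenended/SampleQuestion',  # <problem_location>
    'student_ids.txt',   # placeholder file with one User.id per line
    'lms.example.com',   # placeholder <hostname>
    dry_run=True,        # do everything except send the submission to the grader
    task_number=1,       # the open-ended task within the combinedopenended problem
)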

View File

@@ -1,136 +0,0 @@
"""
Command to get statistics about open ended problems.
"""
import csv
import time
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from courseware.courses import get_course
from courseware.models import StudentModule
from student.models import anonymous_id_for_user, CourseEnrollment
from instructor.utils import get_module_for_student
class Command(BaseCommand):
"""
Command to get statistics about open ended problems.
"""
help = "Usage: openended_stats <course_id> <problem_location> --task-number=<task_number>\n"
option_list = BaseCommand.option_list + (
make_option('--task-number',
type='int', default=0,
help="Task number to get statistics about."),
)
def handle(self, *args, **options):
"""Handler for command."""
task_number = options['task_number']
if len(args) == 2:
course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
usage_key = course_id.make_usage_key_from_deprecated_string(args[1])
else:
print self.help
return
try:
course = get_course(course_id)
except ValueError as err:
print err
return
descriptor = modulestore().get_item(usage_key, depth=0)
if descriptor is None:
print "Location {0} not found in course".format(usage_key)
return
try:
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
print "Total students enrolled in {0}: {1}".format(course_id, enrolled_students.count())
calculate_task_statistics(enrolled_students, course, usage_key, task_number)
except KeyboardInterrupt:
print "\nOperation Cancelled"
def calculate_task_statistics(students, course, location, task_number, write_to_file=True):
"""Print stats of students."""
stats = {
OpenEndedChild.INITIAL: 0,
OpenEndedChild.ASSESSING: 0,
OpenEndedChild.POST_ASSESSMENT: 0,
OpenEndedChild.DONE: 0
}
students_with_saved_answers = []
students_with_ungraded_submissions = [] # pylint: disable=invalid-name
students_with_graded_submissions = [] # pylint: disable=invalid-name
students_with_no_state = []
student_modules = StudentModule.objects.filter(module_state_key=location, student__in=students).order_by('student')
print "Total student modules: {0}".format(student_modules.count())
for index, student_module in enumerate(student_modules):
if index % 100 == 0:
print "--- {0} students processed ---".format(index)
student = student_module.student
print "{0}:{1}".format(student.id, student.username)
module = get_module_for_student(student, location, course=course)
if module is None:
print " WARNING: No state found"
students_with_no_state.append(student)
continue
latest_task = module.child_module.get_task_number(task_number)
if latest_task is None:
print " No task state found"
students_with_no_state.append(student)
continue
task_state = latest_task.child_state
stats[task_state] += 1
print " State: {0}".format(task_state)
if task_state == OpenEndedChild.INITIAL:
if latest_task.stored_answer is not None:
students_with_saved_answers.append(student)
elif task_state == OpenEndedChild.ASSESSING:
students_with_ungraded_submissions.append(student)
elif task_state == OpenEndedChild.POST_ASSESSMENT or task_state == OpenEndedChild.DONE:
students_with_graded_submissions.append(student)
print "----------------------------------"
print "Time: {0}".format(time.strftime("%Y %b %d %H:%M:%S +0000", time.gmtime()))
print "Course: {0}".format(course.id)
print "Location: {0}".format(location)
print "No state: {0}".format(len(students_with_no_state))
print "Initial State: {0}".format(stats[OpenEndedChild.INITIAL] - len(students_with_saved_answers))
print "Saved answers: {0}".format(len(students_with_saved_answers))
print "Submitted answers: {0}".format(stats[OpenEndedChild.ASSESSING])
print "Received grades: {0}".format(stats[OpenEndedChild.POST_ASSESSMENT] + stats[OpenEndedChild.DONE])
print "----------------------------------"
if write_to_file:
filename = "stats.{0}.{1}".format(location.course, location.name)
time_stamp = time.strftime("%Y%m%d-%H%M%S")
with open('{0}.{1}.csv'.format(filename, time_stamp), 'wb') as csv_file:
writer = csv.writer(csv_file, delimiter=' ', quoting=csv.QUOTE_MINIMAL)
for student in students_with_ungraded_submissions:
writer.writerow(("ungraded", student.id, anonymous_id_for_user(student, None), student.username))
for student in students_with_graded_submissions:
writer.writerow(("graded", student.id, anonymous_id_for_user(student, None), student.username))
return stats
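Similarly for the stats command, a hedged sketch using the same fixture identifiers; only --task-number is a real option of the command shown above:

# Sketch only: the command prints per-state counts keyed by OpenEndedChild
# states and also writes a CSV of ungraded/graded submissions
# (write_to_file defaults to True in the code above).
from django.core.management import call_command

call_command(
    'openended_stats',
    'edX/open_ended/2012_Fall',                               # <course_id>
    'i4x://edX/open_ended/combinedopenended/SampleQuestion',  # <problem_location>
    task_number=1,
)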

View File

@@ -1,192 +0,0 @@
"""Test the openended_post management command."""
from datetime import datetime
import json
from mock import patch
from pytz import UTC
from django.conf import settings
from opaque_keys.edx.locations import Location
import capa.xqueue_interface as xqueue_interface
from courseware.courses import get_course_with_access
from courseware.tests.factories import StudentModuleFactory, UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.tests.test_util_open_ended import (
STATE_INITIAL, STATE_ACCESSING, STATE_POST_ASSESSMENT
)
from student.models import anonymous_id_for_user
from instructor.management.commands.openended_post import post_submission_for_student
from instructor.management.commands.openended_stats import calculate_task_statistics
from instructor.utils import get_module_for_student
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class OpenEndedPostTest(ModuleStoreTestCase):
"""Test the openended_post management command."""
def setUp(self):
super(OpenEndedPostTest, self).setUp()
self.user = UserFactory()
store = modulestore()
course_items = import_course_from_xml(store, self.user.id, TEST_DATA_DIR, ['open_ended']) # pylint: disable=maybe-no-member
self.course = course_items[0]
self.course_id = self.course.id
self.problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
self.self_assessment_task_number = 0
self.open_ended_task_number = 1
self.student_on_initial = UserFactory()
self.student_on_accessing = UserFactory()
self.student_on_post_assessment = UserFactory()
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_initial,
grade=0,
max_grade=1,
state=STATE_INITIAL
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_accessing,
grade=0,
max_grade=1,
state=STATE_ACCESSING
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_post_assessment,
grade=0,
max_grade=1,
state=STATE_POST_ASSESSMENT
)
def test_post_submission_for_student_on_initial(self):
course = get_course_with_access(self.student_on_initial, 'load', self.course_id)
dry_run_result = post_submission_for_student(self.student_on_initial, course, self.problem_location, self.open_ended_task_number, dry_run=True)
self.assertFalse(dry_run_result)
result = post_submission_for_student(self.student_on_initial, course, self.problem_location, self.open_ended_task_number, dry_run=False)
self.assertFalse(result)
def test_post_submission_for_student_on_accessing(self):
course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
dry_run_result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.open_ended_task_number, dry_run=True)
self.assertFalse(dry_run_result)
with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
mock_send_to_queue.return_value = (0, "Successfully queued")
module = get_module_for_student(self.student_on_accessing, self.problem_location)
module.child_module.get_task_number(self.open_ended_task_number)
student_response = "Here is an answer."
student_anonymous_id = anonymous_id_for_user(self.student_on_accessing, None)
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.open_ended_task_number, dry_run=False)
self.assertTrue(result)
mock_send_to_queue_body_arg = json.loads(mock_send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['max_score'], 2)
self.assertEqual(mock_send_to_queue_body_arg['student_response'], student_response)
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], student_anonymous_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
def test_post_submission_for_student_on_post_assessment(self):
course = get_course_with_access(self.student_on_post_assessment, 'load', self.course_id)
dry_run_result = post_submission_for_student(self.student_on_post_assessment, course, self.problem_location, self.open_ended_task_number, dry_run=True)
self.assertFalse(dry_run_result)
result = post_submission_for_student(self.student_on_post_assessment, course, self.problem_location, self.open_ended_task_number, dry_run=False)
self.assertFalse(result)
def test_post_submission_for_student_invalid_task(self):
course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.self_assessment_task_number, dry_run=False)
self.assertFalse(result)
out_of_bounds_task_number = 3
result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, out_of_bounds_task_number, dry_run=False)
self.assertFalse(result)
class OpenEndedStatsTest(ModuleStoreTestCase):
"""Test the openended_stats management command."""
def setUp(self):
super(OpenEndedStatsTest, self).setUp()
self.user = UserFactory()
store = modulestore()
course_items = import_course_from_xml(store, self.user.id, TEST_DATA_DIR, ['open_ended']) # pylint: disable=maybe-no-member
self.course = course_items[0]
self.course_id = self.course.id
self.problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
self.task_number = 1
self.invalid_task_number = 3
self.student_on_initial = UserFactory()
self.student_on_accessing = UserFactory()
self.student_on_post_assessment = UserFactory()
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_initial,
grade=0,
max_grade=1,
state=STATE_INITIAL
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_accessing,
grade=0,
max_grade=1,
state=STATE_ACCESSING
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_post_assessment,
grade=0,
max_grade=1,
state=STATE_POST_ASSESSMENT
)
self.students = [self.student_on_initial, self.student_on_accessing, self.student_on_post_assessment]
def test_calculate_task_statistics(self):
course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
stats = calculate_task_statistics(self.students, course, self.problem_location, self.task_number, write_to_file=False)
self.assertEqual(stats[OpenEndedChild.INITIAL], 1)
self.assertEqual(stats[OpenEndedChild.ASSESSING], 1)
self.assertEqual(stats[OpenEndedChild.POST_ASSESSMENT], 1)
self.assertEqual(stats[OpenEndedChild.DONE], 0)
stats = calculate_task_statistics(self.students, course, self.problem_location, self.invalid_task_number, write_to_file=False)
self.assertEqual(stats[OpenEndedChild.INITIAL], 0)
self.assertEqual(stats[OpenEndedChild.ASSESSING], 0)
self.assertEqual(stats[OpenEndedChild.POST_ASSESSMENT], 0)
self.assertEqual(stats[OpenEndedChild.DONE], 0)

Some files were not shown because too many files have changed in this diff