diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py index 828a09f53d..5b48ec8a81 100644 --- a/cms/djangoapps/contentstore/tests/test_contentstore.py +++ b/cms/djangoapps/contentstore/tests/test_contentstore.py @@ -698,7 +698,7 @@ class MiscCourseTests(ContentStoreTestCase): self.check_components_on_page( ADVANCED_COMPONENT_TYPES, ['Word cloud', 'Annotation', 'Text Annotation', 'Video Annotation', 'Image Annotation', - 'Open Response Assessment', 'Peer Grading Interface', 'split_test'], + 'split_test'], ) @ddt.data('/Fake/asset/displayname', '\\Fake\\asset\\displayname') diff --git a/cms/djangoapps/contentstore/tests/test_course_settings.py b/cms/djangoapps/contentstore/tests/test_course_settings.py index 53af73fdb0..f880df7b27 100644 --- a/cms/djangoapps/contentstore/tests/test_course_settings.py +++ b/cms/djangoapps/contentstore/tests/test_course_settings.py @@ -771,7 +771,7 @@ class CourseMetadataEditingTest(CourseTestCase): { "advertised_start": {"value": "start A"}, "days_early_for_beta": {"value": 2}, - "advanced_modules": {"value": ['combinedopenended']}, + "advanced_modules": {"value": ['notes']}, }, user=self.user ) @@ -781,7 +781,7 @@ class CourseMetadataEditingTest(CourseTestCase): # Tab gets tested in test_advanced_settings_munge_tabs self.assertIn('advanced_modules', test_model, 'Missing advanced_modules') - self.assertEqual(test_model['advanced_modules']['value'], ['combinedopenended'], 'advanced_module is not updated') + self.assertEqual(test_model['advanced_modules']['value'], ['notes'], 'advanced_module is not updated') def test_validate_from_json_wrong_inputs(self): # input incorrectly formatted data @@ -905,48 +905,21 @@ class CourseMetadataEditingTest(CourseTestCase): """ Test that adding and removing specific advanced components adds and removes tabs. 
""" - open_ended_tab = {"type": "open_ended", "name": "Open Ended Panel"} - peer_grading_tab = {"type": "peer_grading", "name": "Peer grading"} - # First ensure that none of the tabs are visible - self.assertNotIn(open_ended_tab, self.course.tabs) - self.assertNotIn(peer_grading_tab, self.course.tabs) self.assertNotIn(self.notes_tab, self.course.tabs) - # Now add the "combinedopenended" component and verify that the tab has been added - self.client.ajax_post(self.course_setting_url, { - ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended"]} - }) - course = modulestore().get_course(self.course.id) - self.assertIn(open_ended_tab, course.tabs) - self.assertIn(peer_grading_tab, course.tabs) - self.assertNotIn(self.notes_tab, course.tabs) - - # Now enable student notes and verify that the "My Notes" tab has also been added - self.client.ajax_post(self.course_setting_url, { - ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended", "notes"]} - }) - course = modulestore().get_course(self.course.id) - self.assertIn(open_ended_tab, course.tabs) - self.assertIn(peer_grading_tab, course.tabs) - self.assertIn(self.notes_tab, course.tabs) - - # Now remove the "combinedopenended" component and verify that the tab is gone + # Now enable student notes and verify that the "My Notes" tab has been added self.client.ajax_post(self.course_setting_url, { ADVANCED_COMPONENT_POLICY_KEY: {"value": ["notes"]} }) course = modulestore().get_course(self.course.id) - self.assertNotIn(open_ended_tab, course.tabs) - self.assertNotIn(peer_grading_tab, course.tabs) self.assertIn(self.notes_tab, course.tabs) - # Finally disable student notes and verify that the "My Notes" tab is gone + # Disable student notes and verify that the "My Notes" tab is gone self.client.ajax_post(self.course_setting_url, { ADVANCED_COMPONENT_POLICY_KEY: {"value": [""]} }) course = modulestore().get_course(self.course.id) - self.assertNotIn(open_ended_tab, course.tabs) - self.assertNotIn(peer_grading_tab, course.tabs) self.assertNotIn(self.notes_tab, course.tabs) def test_advanced_components_munge_tabs_validation_failure(self): diff --git a/cms/djangoapps/contentstore/tests/test_import.py b/cms/djangoapps/contentstore/tests/test_import.py index b48a80cf51..a4675632a2 100644 --- a/cms/djangoapps/contentstore/tests/test_import.py +++ b/cms/djangoapps/contentstore/tests/test_import.py @@ -219,26 +219,6 @@ class ContentStoreImportTest(SignalDisconnectTestMixin, ModuleStoreTestCase): conditional_module.show_tag_list ) - def test_rewrite_reference(self): - module_store = modulestore() - target_id = module_store.make_course_key('testX', 'peergrading_copy', 'copy_run') - import_course_from_xml( - module_store, - self.user.id, - TEST_DATA_DIR, - ['open_ended'], - target_id=target_id, - create_if_not_present=True - ) - peergrading_module = module_store.get_item( - target_id.make_usage_key('peergrading', 'PeerGradingLinked') - ) - self.assertIsNotNone(peergrading_module) - self.assertEqual( - target_id.make_usage_key('combinedopenended', 'SampleQuestion'), - peergrading_module.link_to_location - ) - def test_rewrite_reference_value_dict_published(self): """ Test rewriting references in ReferenceValueDict, specifically with published content. 
diff --git a/cms/djangoapps/contentstore/views/component.py b/cms/djangoapps/contentstore/views/component.py index 0cc6011024..05bb06ba50 100644 --- a/cms/djangoapps/contentstore/views/component.py +++ b/cms/djangoapps/contentstore/views/component.py @@ -30,11 +30,11 @@ from student.auth import has_course_author_access from django.utils.translation import ugettext as _ from models.settings.course_grading import CourseGradingModel -__all__ = ['OPEN_ENDED_COMPONENT_TYPES', - 'ADVANCED_COMPONENT_POLICY_KEY', - 'container_handler', - 'component_handler' - ] +__all__ = [ + 'ADVANCED_COMPONENT_POLICY_KEY', + 'container_handler', + 'component_handler' +] log = logging.getLogger(__name__) @@ -43,7 +43,6 @@ COMPONENT_TYPES = ['discussion', 'html', 'problem', 'video'] # Constants for determining if these components should be enabled for this course SPLIT_TEST_COMPONENT_TYPE = 'split_test' -OPEN_ENDED_COMPONENT_TYPES = ["combinedopenended", "peergrading"] NOTE_COMPONENT_TYPES = ['notes'] if settings.FEATURES.get('ALLOW_ALL_ADVANCED_COMPONENTS'): diff --git a/cms/djangoapps/contentstore/views/tests/test_course_index.py b/cms/djangoapps/contentstore/views/tests/test_course_index.py index 0d3e5932b2..b9004217cb 100644 --- a/cms/djangoapps/contentstore/views/tests/test_course_index.py +++ b/cms/djangoapps/contentstore/views/tests/test_course_index.py @@ -10,6 +10,7 @@ import pytz from django.conf import settings from django.core.exceptions import PermissionDenied +from django.test.utils import override_settings from django.utils.translation import ugettext as _ from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError @@ -440,6 +441,7 @@ class TestCourseOutline(CourseTestCase): info['block_types_enabled'], any(component in advanced_modules for component in deprecated_block_types) ) + self.assertItemsEqual(info['blocks'], expected_blocks) self.assertEqual( info['advance_settings_url'], @@ -455,27 +457,29 @@ class TestCourseOutline(CourseTestCase): """ Verify deprecated warning info for single deprecated feature. """ - block_types = settings.DEPRECATED_BLOCK_TYPES - course_module = modulestore().get_item(self.course.location) - self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish) - info = _deprecated_blocks_info(course_module, block_types) - self._verify_deprecated_info( - course_module.id, - course_module.advanced_modules, - info, - block_types - ) + block_types = ['notes'] + with override_settings(DEPRECATED_BLOCK_TYPES=block_types): + course_module = modulestore().get_item(self.course.location) + self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish) + info = _deprecated_blocks_info(course_module, block_types) + self._verify_deprecated_info( + course_module.id, + course_module.advanced_modules, + info, + block_types + ) def test_verify_deprecated_warning_message_with_multiple_features(self): """ Verify deprecated warning info for multiple deprecated features. 
""" - block_types = ['peergrading', 'combinedopenended', 'openassessment'] - course_module = modulestore().get_item(self.course.location) - self._create_test_data(course_module, create_blocks=True, block_types=block_types) + block_types = ['notes', 'lti'] + with override_settings(DEPRECATED_BLOCK_TYPES=block_types): + course_module = modulestore().get_item(self.course.location) + self._create_test_data(course_module, create_blocks=True, block_types=block_types) - info = _deprecated_blocks_info(course_module, block_types) - self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types) + info = _deprecated_blocks_info(course_module, block_types) + self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types) @ddt.data( {'delete_vertical': True}, @@ -492,7 +496,7 @@ class TestCourseOutline(CourseTestCase): un-published block(s). This behavior should be same if we delete unpublished vertical or problem. """ - block_types = ['peergrading'] + block_types = ['notes'] course_module = modulestore().get_item(self.course.location) vertical1 = ItemFactory.create( @@ -500,8 +504,8 @@ class TestCourseOutline(CourseTestCase): ) problem1 = ItemFactory.create( parent_location=vertical1.location, - category='peergrading', - display_name='peergrading problem in vert1', + category='notes', + display_name='notes problem in vert1', publish_item=False ) @@ -515,8 +519,8 @@ class TestCourseOutline(CourseTestCase): ) ItemFactory.create( parent_location=vertical2.location, - category='peergrading', - display_name='peergrading problem in vert2', + category='notes', + display_name='notes problem in vert2', pubish_item=True ) # At this point CourseStructure will contain both the above @@ -526,8 +530,8 @@ class TestCourseOutline(CourseTestCase): self.assertItemsEqual( info['blocks'], [ - [reverse_usage_url('container_handler', vertical1.location), 'peergrading problem in vert1'], - [reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2'] + [reverse_usage_url('container_handler', vertical1.location), 'notes problem in vert1'], + [reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2'] ] ) @@ -542,7 +546,7 @@ class TestCourseOutline(CourseTestCase): # There shouldn't be any info present about un-published vertical1 self.assertEqual( info['blocks'], - [[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']] + [[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']] ) diff --git a/cms/djangoapps/contentstore/views/tests/test_item.py b/cms/djangoapps/contentstore/views/tests/test_item.py index ce923e47d1..6ecc9f97eb 100644 --- a/cms/djangoapps/contentstore/views/tests/test_item.py +++ b/cms/djangoapps/contentstore/views/tests/test_item.py @@ -1388,28 +1388,28 @@ class TestComponentTemplates(CourseTestCase): Test the handling of advanced problem templates. 
""" problem_templates = self.get_templates_of_type('problem') - ora_template = self.get_template(problem_templates, u'Peer Assessment') - self.assertIsNotNone(ora_template) - self.assertEqual(ora_template.get('category'), 'openassessment') - self.assertIsNone(ora_template.get('boilerplate_name', None)) + circuit_template = self.get_template(problem_templates, u'Circuit Schematic Builder') + self.assertIsNotNone(circuit_template) + self.assertEqual(circuit_template.get('category'), 'problem') + self.assertEqual(circuit_template.get('boilerplate_name'), 'circuitschematic.yaml') - @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"]) - def test_ora1_no_advance_component_button(self): + @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"]) + def test_deprecated_no_advance_component_button(self): """ - Test that there will be no `Advanced` button on unit page if `combinedopenended` and `peergrading` are - deprecated provided that there are only 'combinedopenended', 'peergrading' modules in `Advanced Module List` + Test that there will be no `Advanced` button on unit page if units are + deprecated provided that they are the only modules in `Advanced Module List` """ - self.course.advanced_modules.extend(['combinedopenended', 'peergrading']) + self.course.advanced_modules.extend(['poll', 'survey']) templates = get_component_templates(self.course) button_names = [template['display_name'] for template in templates] self.assertNotIn('Advanced', button_names) - @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"]) - def test_cannot_create_ora1_problems(self): + @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["poll", "survey"]) + def test_cannot_create_deprecated_problems(self): """ - Test that we can't create ORA1 problems if `combinedopenended` and `peergrading` are deprecated + Test that we can't create problems if they are deprecated """ - self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading']) + self.course.advanced_modules.extend(['annotatable', 'poll', 'survey']) templates = get_component_templates(self.course) button_names = [template['display_name'] for template in templates] self.assertIn('Advanced', button_names) @@ -1418,17 +1418,17 @@ class TestComponentTemplates(CourseTestCase): self.assertEqual(template_display_names, ['Annotation']) @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', []) - def test_create_ora1_problems(self): + def test_create_non_deprecated_problems(self): """ - Test that we can create ORA1 problems if `combinedopenended` and `peergrading` are not deprecated + Test that we can create problems if they are not deprecated """ - self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading']) + self.course.advanced_modules.extend(['annotatable', 'poll', 'survey']) templates = get_component_templates(self.course) button_names = [template['display_name'] for template in templates] self.assertIn('Advanced', button_names) self.assertEqual(len(templates[0]['templates']), 3) template_display_names = [template['display_name'] for template in templates[0]['templates']] - self.assertEqual(template_display_names, ['Annotation', 'Open Response Assessment', 'Peer Grading Interface']) + self.assertEqual(template_display_names, ['Annotation', 'Poll', 'Survey']) @ddt.ddt diff --git a/cms/envs/bok_choy.auth.json b/cms/envs/bok_choy.auth.json index 
e0b60afbd3..79dbf904c1 100644 --- a/cms/envs/bok_choy.auth.json +++ b/cms/envs/bok_choy.auth.json @@ -81,14 +81,6 @@ } } }, - "OPEN_ENDED_GRADING_INTERFACE": { - "grading_controller": "grading_controller", - "password": "password", - "peer_grading": "peer_grading", - "staff_grading": "staff_grading", - "url": "http://localhost:18060/", - "username": "lms" - }, "DJFS": { "type": "s3fs", "bucket": "test", diff --git a/cms/envs/bok_choy.env.json b/cms/envs/bok_choy.env.json index d9856eec73..98dfbd0fd1 100644 --- a/cms/envs/bok_choy.env.json +++ b/cms/envs/bok_choy.env.json @@ -104,5 +104,9 @@ "THEME_NAME": "", "TIME_ZONE": "America/New_York", "WIKI_ENABLED": true, - "OAUTH_OIDC_ISSUER": "https://www.example.com/oauth2" + "OAUTH_OIDC_ISSUER": "https://www.example.com/oauth2", + "DEPRECATED_BLOCK_TYPES": [ + "poll", + "survey" + ] } diff --git a/cms/envs/bok_choy.py b/cms/envs/bok_choy.py index 765920aba1..ff95191919 100644 --- a/cms/envs/bok_choy.py +++ b/cms/envs/bok_choy.py @@ -96,6 +96,9 @@ FEATURES['ENABLE_VIDEO_BUMPER'] = True # Enable video bumper in Studio settings # Enable partner support link in Studio footer FEATURES['PARTNER_SUPPORT_EMAIL'] = 'partner-support@example.com' +# Disable some block types to test block deprecation logic +DEPRECATED_BLOCK_TYPES = ['poll', 'survey'] + ########################### Entrance Exams ################################# FEATURES['ENTRANCE_EXAMS'] = True diff --git a/cms/envs/common.py b/cms/envs/common.py index 2f203dc4d3..bcd96c52b4 100644 --- a/cms/envs/common.py +++ b/cms/envs/common.py @@ -1016,8 +1016,6 @@ ADVANCED_COMPONENT_TYPES = [ 'rate', # Allows up-down voting of course content. See https://github.com/pmitros/RateXBlock 'split_test', - 'combinedopenended', - 'peergrading', 'notes', 'schoolyourself_review', 'schoolyourself_lesson', diff --git a/cms/templates/widgets/open-ended-edit.html b/cms/templates/widgets/open-ended-edit.html deleted file mode 100644 index f7d157d010..0000000000 --- a/cms/templates/widgets/open-ended-edit.html +++ /dev/null @@ -1,97 +0,0 @@ -
-[97 lines of Mako/HTML markup omitted: the ORA1 "open-ended-edit" editor widget. Only fragments survived extraction: a "%if enable_markdown:" block wrapping the markdown editing pane and its toolbar, the matching "%endif", and the metadata-edit.html include that follows.]
-<%include file="metadata-edit.html" /> diff --git a/common/djangoapps/terrain/stubs/ora.py b/common/djangoapps/terrain/stubs/ora.py deleted file mode 100644 index b25c021a25..0000000000 --- a/common/djangoapps/terrain/stubs/ora.py +++ /dev/null @@ -1,535 +0,0 @@ -""" -Stub implementation of ORA service. - -This is an extremely simple version of the service, with most -business logic removed. In particular, the stub: - -1) Provides an infinite number of peer and calibration essays, - with dummy data. - -2) Simulates a set number of pending submissions for each student; - grades submitted by one student are not used for any other student. - -3) Ignores the scores/feedback students submit. - -4) Ignores problem location: an essay graded for *any* problem is graded - for *every* problem. - -Basically, the stub tracks only the *number* of peer/calibration essays -submitted by each student. -""" - -import json -import pkg_resources -from .http import StubHttpRequestHandler, StubHttpService, require_params - - -class StudentState(object): - """ - Store state about the student that the stub - ORA implementation needs to keep track of. - """ - INITIAL_ESSAYS_AVAILABLE = 3 - NUM_ESSAYS_REQUIRED = 1 - NUM_CALIBRATION_REQUIRED = 1 - - def __init__(self): - self.num_graded = 0 - self.num_calibrated = 0 - - def grade_peer_essay(self): - self.num_graded += 1 - - def grade_calibration_essay(self): - self.num_calibrated += 1 - - @property - def num_pending(self): - return max(self.INITIAL_ESSAYS_AVAILABLE - self.num_graded, 0) - - @property - def num_required(self): - return max(self.NUM_ESSAYS_REQUIRED - self.num_graded, 0) - - @property - def is_calibrated(self): - return self.num_calibrated >= self.NUM_CALIBRATION_REQUIRED - - -class StubOraHandler(StubHttpRequestHandler): - """ - Handler for ORA requests. - """ - - GET_URL_HANDLERS = { - '/peer_grading/get_next_submission': '_get_next_submission', - '/peer_grading/is_student_calibrated': '_is_student_calibrated', - '/peer_grading/show_calibration_essay': '_show_calibration_essay', - '/peer_grading/get_notifications': '_get_notifications', - '/peer_grading/get_data_for_location': '_get_data_for_location', - '/peer_grading/get_problem_list': '_get_problem_list', - } - - POST_URL_HANDLERS = { - '/peer_grading/save_grade': '_save_grade', - '/peer_grading/save_calibration_essay': '_save_calibration_essay', - - # Test-specific, used by the XQueue stub to register a new submission, - # which we use to discover valid problem locations in the LMS - '/test/register_submission': '_register_submission' - } - - def do_GET(self): - """ - Handle GET methods to the ORA API stub. - """ - self._send_handler_response('GET') - - def do_POST(self): - """ - Handle POST methods to the ORA API stub. - """ - self._send_handler_response('POST') - - def _send_handler_response(self, method): - """ - Delegate response to handler methods. - If no handler defined, send a 404 response. 
- """ - # Choose the list of handlers based on the HTTP method - if method == 'GET': - handler_list = self.GET_URL_HANDLERS - elif method == 'POST': - handler_list = self.POST_URL_HANDLERS - else: - self.log_error('Unrecognized method "{method}"'.format(method=method)) - return - - # Check the path (without querystring params) against our list of handlers - handler_name = handler_list.get(self.path_only) - - if handler_name is not None: - handler = getattr(self, handler_name, None) - else: - handler = None - - # Delegate to the handler to send a response - if handler is not None: - handler() - - # If we don't have a handler for this URL and/or HTTP method, - # respond with a 404. This is the same behavior as the ORA API. - else: - self.send_response(404) - - @require_params('GET', 'student_id', 'problem_id') - def _is_student_calibrated(self): - """ - Query whether the student has completed enough calibration - essays to begin peer grading. - - Method: GET - - Params: - - student_id - - problem_id - - Result (JSON): - - success (bool) - - total_calibrated_on_so_far (int) - - calibrated (bool) - """ - student = self._student('GET') - if student is None: - self._error_response() - - else: - self._success_response({ - 'total_calibrated_on_so_far': student.num_calibrated, - 'calibrated': student.is_calibrated - }) - - @require_params('GET', 'student_id', 'problem_id') - def _show_calibration_essay(self): - """ - Retrieve a calibration essay for the student to grade. - - Method: GET - - Params: - - student_id - - problem_id - - Result (JSON): - - success (bool) - - submission_id (str) - - submission_key (str) - - student_response (str) - - prompt (str) - - rubric (str) - - max_score (int) - """ - self._success_response({ - 'submission_id': self.server.DUMMY_DATA['submission_id'], - 'submission_key': self.server.DUMMY_DATA['submission_key'], - 'student_response': self.server.DUMMY_DATA['student_response'], - 'prompt': self.server.DUMMY_DATA['prompt'], - 'rubric': self.server.DUMMY_DATA['rubric'], - 'max_score': self.server.DUMMY_DATA['max_score'] - }) - - @require_params('GET', 'student_id', 'course_id') - def _get_notifications(self): - """ - Query counts of submitted, required, graded, and available peer essays - for a particular student. - - Method: GET - - Params: - - student_id - - course_id - - Result (JSON): - - success (bool) - - student_sub_count (int) - - count_required (int) - - count_graded (int) - - count_available (int) - """ - student = self._student('GET') - if student is None: - self._error_response() - - else: - self._success_response({ - 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'], - 'count_required': student.num_required, - 'count_graded': student.num_graded, - 'count_available': student.num_pending - }) - - @require_params('GET', 'student_id', 'location') - def _get_data_for_location(self): - """ - Query counts of submitted, required, graded, and available peer essays - for a problem location. - - This will send an error response if the problem has not - been registered at the given `location`. This allows us - to ignore problems that are self- or ai-graded. - - Method: GET - - Params: - - student_id - - location - - Result (JSON): - - success (bool) - - student_sub_count (int) - - count_required (int) - - count_graded (int) - - count_available (int) - """ - student = self._student('GET') - location = self.get_params.get('location') - - # Do not return data if we're missing the student param - # or the problem has not yet been registered. 
- if student is None or location not in self.server.problems: - self._error_response() - - else: - self._success_response({ - 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'], - 'count_required': student.num_required, - 'count_graded': student.num_graded, - 'count_available': student.num_pending - }) - - @require_params('GET', 'grader_id', 'location') - def _get_next_submission(self): - """ - Retrieve the next submission for the student to peer-grade. - - Method: GET - - Params: - - grader_id - - location - - Result (JSON): - - success (bool) - - submission_id (str) - - submission_key (str) - - student_response (str) - - prompt (str, HTML) - - rubric (str, XML) - - max_score (int) - """ - self._success_response({ - 'submission_id': self.server.DUMMY_DATA['submission_id'], - 'submission_key': self.server.DUMMY_DATA['submission_key'], - 'student_response': self.server.DUMMY_DATA['student_response'], - 'prompt': self.server.DUMMY_DATA['prompt'], - 'rubric': self.server.DUMMY_DATA['rubric'], - 'max_score': self.server.DUMMY_DATA['max_score'] - }) - - @require_params('GET', 'course_id') - def _get_problem_list(self): - """ - Retrieve the list of problems available for peer grading. - - Method: GET - - Params: - - course_id - - Result (JSON): - - success (bool) - - problem_list (list) - - where `problem_list` is a list of dictionaries with keys: - - location (str) - - problem_name (str) - - num_graded (int) - - num_pending (int) - - num_required (int) - """ - self._success_response({'problem_list': self.server.problem_list}) - - @require_params('POST', 'grader_id', 'location', 'submission_id', 'score', 'feedback', 'submission_key') - def _save_grade(self): - """ - Save a score and feedback for an essay the student has graded. - - Method: POST - - Params: - - grader_id - - location - - submission_id - - score - - feedback - - submission_key - - Result (JSON): - - success (bool) - """ - student = self._student('POST', key='grader_id') - if student is None: - self._error_response() - - else: - # Update the number of essays the student has graded - student.grade_peer_essay() - return self._success_response({}) - - @require_params('POST', 'student_id', 'location', 'calibration_essay_id', 'score', 'feedback', 'submission_key') - def _save_calibration_essay(self): - """ - Save a score and feedback for a calibration essay the student has graded. - Returns the scores/feedback that the instructor gave for the essay. - - Method: POST - - Params: - - student_id - - location - - calibration_essay_id - - score - - feedback - - submission_key - - Result (JSON): - - success (bool) - - message (str) - - actual_score (int) - - actual_rubric (str, XML) - - actual_feedback (str) - """ - student = self._student('POST') - if student is None: - self._error_response() - - else: - - # Increment the student calibration count - student.grade_calibration_essay() - - self._success_response({ - 'message': self.server.DUMMY_DATA['message'], - 'actual_score': self.server.DUMMY_DATA['actual_score'], - 'actual_rubric': self.server.DUMMY_DATA['actual_rubric'], - 'actual_feedback': self.server.DUMMY_DATA['actual_feedback'] - }) - - @require_params('POST', 'grader_payload') - def _register_submission(self): - """ - Test-specific method to register a new submission. - This is used by `get_problem_list` to return valid locations in the LMS courseware. 
- In tests, this end-point gets called by the XQueue stub when it receives new submissions, - much like ORA discovers locations when students submit peer-graded problems to the XQueue. - - Since the LMS sends *all* open-ended problems to the XQueue (including self- and ai-graded), - we have to ignore everything except peer-graded problems. We do so by looking - for the text 'peer' in the problem's name. This is a little bit of a hack, - but it makes the implementation much simpler. - - Method: POST - - Params: - - grader_payload (JSON dict) - - Result: Empty - - The only keys we use in `grader_payload` are 'location' and 'problem_id'. - """ - # Since this is a required param, we know it is in the post dict - try: - payload = json.loads(self.post_dict['grader_payload']) - - except ValueError: - self.log_message( - "Could not decode grader payload as JSON: '{0}'".format( - self.post_dict['grader_payload'])) - self.send_response(400) - - else: - - location = payload.get('location') - name = payload.get('problem_id') - - if location is not None and name is not None: - - if "peer" in name.lower(): - self.server.register_problem(location, name) - self.send_response(200) - - else: - self.log_message( - "Problem '{0}' does not have 'peer' in its name. Ignoring...".format(name) - ) - self.send_response(200) - else: - self.log_message( - "Grader payload should contain 'location' and 'problem_id' keys: {0}".format(payload) - ) - self.send_response(400) - - def _student(self, method, key='student_id'): - """ - Return the `StudentState` instance for the student ID given - in the request parameters. - - `method` is the HTTP request method (either "GET" or "POST") - and `key` is the parameter key. - """ - if method == 'GET': - student_id = self.get_params.get(key) - elif method == 'POST': - student_id = self.post_dict.get(key) - else: - self.log_error("Unrecognized method '{method}'".format(method=method)) - return None - - if student_id is None: - self.log_error("Could not get student ID from parameters") - return None - - return self.server.student_state(student_id) - - def _success_response(self, response_dict): - """ - Send a success response. - `response_dict` is a Python dictionary to JSON-encode. - """ - response_dict['success'] = True - response_dict['version'] = 1 - self.send_response( - 200, content=json.dumps(response_dict), - headers={'Content-type': 'application/json'} - ) - - def _error_response(self): - """ - Send an error response. - """ - response_dict = {'success': False, 'version': 1} - self.send_response( - 400, content=json.dumps(response_dict), - headers={'Content-type': 'application/json'} - ) - - -class StubOraService(StubHttpService): - """ - Stub ORA service. - """ - HANDLER_CLASS = StubOraHandler - - DUMMY_DATA = { - 'submission_id': 1, - 'submission_key': 'test key', - 'student_response': 'Test response', - 'prompt': 'Test prompt', - 'rubric': pkg_resources.resource_string(__name__, "data/ora_rubric.xml"), - 'max_score': 2, - 'message': 'Successfully saved calibration record.', - 'actual_score': 2, - 'actual_rubric': pkg_resources.resource_string(__name__, "data/ora_graded_rubric.xml"), - 'actual_feedback': 'Great job!', - 'student_sub_count': 1, - 'problem_name': 'test problem', - 'problem_list_num_graded': 1, - 'problem_list_num_pending': 1, - 'problem_list_num_required': 0, - } - - def __init__(self, *args, **kwargs): - """ - Initialize student submission state. 
- """ - super(StubOraService, self).__init__(*args, **kwargs) - - # Create a dict to map student ID's to their state - self._students = dict() - - # By default, no problems are available for peer grading - # You can add to this list using the `register_location` HTTP end-point - # This is a dict mapping problem locations to problem names - self.problems = dict() - - def student_state(self, student_id): - """ - Return the `StudentState` (named tuple) for the student - with ID `student_id`. The student state can be modified by the caller. - """ - # Create the student state if it does not already exist - if student_id not in self._students: - student = StudentState() - self._students[student_id] = student - - # Retrieve the student state - return self._students[student_id] - - @property - def problem_list(self): - """ - Return a list of problems available for peer grading. - """ - return [{ - 'location': location, 'problem_name': name, - 'num_graded': self.DUMMY_DATA['problem_list_num_graded'], - 'num_pending': self.DUMMY_DATA['problem_list_num_pending'], - 'num_required': self.DUMMY_DATA['problem_list_num_required'] - } for location, name in self.problems.items()] - - def register_problem(self, location, name): - """ - Register a new problem with `location` and `name` for peer grading. - """ - self.problems[location] = name diff --git a/common/djangoapps/terrain/stubs/start.py b/common/djangoapps/terrain/stubs/start.py index b80ace0b58..a5bee75f3e 100644 --- a/common/djangoapps/terrain/stubs/start.py +++ b/common/djangoapps/terrain/stubs/start.py @@ -7,7 +7,6 @@ import logging from .comments import StubCommentsService from .xqueue import StubXQueueService from .youtube import StubYouTubeService -from .ora import StubOraService from .lti import StubLtiService from .video_source import VideoSourceHttpService from .edxnotes import StubEdxNotesService @@ -19,7 +18,6 @@ USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM [CONFIG_KEY=CONFIG_V SERVICES = { 'xqueue': StubXQueueService, 'youtube': StubYouTubeService, - 'ora': StubOraService, 'comments': StubCommentsService, 'lti': StubLtiService, 'video': VideoSourceHttpService, diff --git a/common/djangoapps/terrain/stubs/tests/test_ora.py b/common/djangoapps/terrain/stubs/tests/test_ora.py deleted file mode 100644 index bc0e152cfb..0000000000 --- a/common/djangoapps/terrain/stubs/tests/test_ora.py +++ /dev/null @@ -1,282 +0,0 @@ -""" -Unit tests for stub ORA implementation. -""" - -import unittest -import requests -import json -from ..ora import StubOraService, StudentState - - -class StubOraServiceTest(unittest.TestCase): - - def setUp(self): - """ - Start the stub server. 
- """ - super(StubOraServiceTest, self).setUp() - self.server = StubOraService() - self.addCleanup(self.server.shutdown) - - def test_calibration(self): - - # Ensure that we use the same student ID throughout - student_id = '1234' - - # Initially, student should not be calibrated - response = requests.get( - self._peer_url('is_student_calibrated'), - params={'student_id': student_id, 'problem_id': '5678'} - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'total_calibrated_on_so_far': 0, - 'calibrated': False - }) - - # Retrieve a calibration essay - response = requests.get( - self._peer_url('show_calibration_essay'), - params={'student_id': student_id, 'problem_id': '5678'} - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'submission_id': self.server.DUMMY_DATA['submission_id'], - 'submission_key': self.server.DUMMY_DATA['submission_key'], - 'student_response': self.server.DUMMY_DATA['student_response'], - 'prompt': self.server.DUMMY_DATA['prompt'], - 'rubric': self.server.DUMMY_DATA['rubric'], - 'max_score': self.server.DUMMY_DATA['max_score'] - }) - - # Grade the calibration essay - response = requests.post( - self._peer_url('save_calibration_essay'), - data={ - 'student_id': student_id, - 'location': 'test location', - 'calibration_essay_id': 1, - 'score': 2, - 'submission_key': 'key', - 'feedback': 'Good job!' - } - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'message': self.server.DUMMY_DATA['message'], - 'actual_score': self.server.DUMMY_DATA['actual_score'], - 'actual_rubric': self.server.DUMMY_DATA['actual_rubric'], - 'actual_feedback': self.server.DUMMY_DATA['actual_feedback'] - }) - - # Now the student should be calibrated - response = requests.get( - self._peer_url('is_student_calibrated'), - params={'student_id': student_id, 'problem_id': '5678'} - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'total_calibrated_on_so_far': 1, - 'calibrated': True - }) - - # But a student with a different ID should NOT be calibrated. 
- response = requests.get( - self._peer_url('is_student_calibrated'), - params={'student_id': 'another', 'problem_id': '5678'} - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'total_calibrated_on_so_far': 0, - 'calibrated': False - }) - - def test_grade_peers(self): - - # Ensure a consistent student ID - student_id = '1234' - - # Check initial number of submissions - # Should be none graded and 1 required - self._assert_num_graded(student_id, None, 0, 1) - - # Register a problem that DOES have "peer" in the name - self._register_problem('test_location', 'Peer Assessed Problem') - - # Retrieve the next submission - response = requests.get( - self._peer_url('get_next_submission'), - params={'grader_id': student_id, 'location': 'test_location'} - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'submission_id': self.server.DUMMY_DATA['submission_id'], - 'submission_key': self.server.DUMMY_DATA['submission_key'], - 'student_response': self.server.DUMMY_DATA['student_response'], - 'prompt': self.server.DUMMY_DATA['prompt'], - 'rubric': self.server.DUMMY_DATA['rubric'], - 'max_score': self.server.DUMMY_DATA['max_score'] - }) - - # Grade the submission - response = requests.post( - self._peer_url('save_grade'), - data={ - 'location': 'test_location', - 'grader_id': student_id, - 'submission_id': 1, - 'score': 2, - 'feedback': 'Good job!', - 'submission_key': 'key' - } - ) - self._assert_response(response, {'version': 1, 'success': True}) - - # Check final number of submissions - # Shoud be one graded and none required - self._assert_num_graded(student_id, 'test_location', 1, 0) - - # Grade the next submission the submission - response = requests.post( - self._peer_url('save_grade'), - data={ - 'location': 'test_location', - 'grader_id': student_id, - 'submission_id': 1, - 'score': 2, - 'feedback': 'Good job!', - 'submission_key': 'key' - } - ) - self._assert_response(response, {'version': 1, 'success': True}) - - # Check final number of submissions - # Shoud be two graded and none required - self._assert_num_graded(student_id, 'test_location', 2, 0) - - def test_problem_list(self): - - self._register_problem('test_location', 'Peer Grading Problem') - - # The problem list returns dummy counts which are not updated - # The location we use is ignored by the LMS, and we ignore it in the stub, - # so we use a dummy value there too. 
- response = requests.get( - self._peer_url('get_problem_list'), - params={'course_id': 'test course'} - ) - - self._assert_response(response, { - 'version': 1, 'success': True, - 'problem_list': [{ - 'location': 'test_location', - 'problem_name': 'Peer Grading Problem', - 'num_graded': self.server.DUMMY_DATA['problem_list_num_graded'], - 'num_pending': self.server.DUMMY_DATA['problem_list_num_pending'], - 'num_required': self.server.DUMMY_DATA['problem_list_num_required'] - }] - }) - - def test_ignore_non_peer_problem(self): - - # Register a problem that does NOT have "peer" in the name - self._register_problem('test_location', 'Self Assessed Problem') - - # Expect that the problem list is empty - response = requests.get( - self._peer_url('get_problem_list'), - params={'course_id': 'test course'} - ) - - self._assert_response( - response, - {'version': 1, 'success': True, 'problem_list': []} - ) - - # Expect that no data is available for the problem location - response = requests.get( - self._peer_url('get_data_for_location'), - params={'location': 'test_location', 'student_id': 'test'} - ) - self.assertEqual(response.status_code, 400) - self.assertEqual(response.json(), {'version': 1, 'success': False}) - - def test_empty_problem_list(self): - - # Without configuring any problem location, should return an empty list - response = requests.get( - self._peer_url('get_problem_list'), - params={'course_id': 'test course'} - ) - self._assert_response(response, {'version': 1, 'success': True, 'problem_list': []}) - - def _peer_url(self, path): - """ - Construt a URL to the stub ORA peer-grading service. - """ - return "http://127.0.0.1:{port}/peer_grading/{path}/".format( - port=self.server.port, path=path - ) - - def _register_problem(self, location, name): - """ - Configure the stub to use a particular problem location - The actual implementation discovers problem locations by submission - to the XQueue; we do something similar by having the XQueue stub - register submitted locations with the ORA stub. - """ - grader_payload = json.dumps({'location': location, 'problem_id': name}) - url = "http://127.0.0.1:{port}/test/register_submission".format(port=self.server.port) - response = requests.post(url, data={'grader_payload': grader_payload}) - self.assertTrue(response.ok) - - def _assert_response(self, response, expected_json): - """ - Assert that the `response` was successful and contained - `expected_json` (dict) as its content. - """ - self.assertTrue(response.ok) - self.assertEqual(response.json(), expected_json) - - def _assert_num_graded(self, student_id, location, num_graded, num_required): - """ - ORA provides two distinct ways to get the submitted/graded counts. - Here we check both of them to ensure that the number that we've graded - is consistently `num_graded`. - """ - - # Unlike the actual ORA service, - # we keep track of counts on a per-student basis. - # This means that every user starts with N essays to grade, - # and as they grade essays, that number decreases. - # We do NOT simulate students adding more essays to the queue, - # and essays that the current student submits are NOT graded - # by other students. 
- num_pending = StudentState.INITIAL_ESSAYS_AVAILABLE - num_graded - - # Notifications - response = requests.get( - self._peer_url('get_notifications'), - params={'student_id': student_id, 'course_id': 'test course'} - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'count_required': num_required, - 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'], - 'count_graded': num_graded, - 'count_available': num_pending - }) - - # Location data - if location is not None: - response = requests.get( - self._peer_url('get_data_for_location'), - params={'location': location, 'student_id': student_id} - ) - self._assert_response(response, { - 'version': 1, 'success': True, - 'count_required': num_required, - 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'], - 'count_graded': num_graded, - 'count_available': num_pending - }) diff --git a/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py b/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py index a7d82c083f..4187cbfb80 100644 --- a/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py +++ b/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py @@ -115,19 +115,6 @@ class StubXQueueServiceTest(unittest.TestCase): self.assertFalse(self.post.called) self.assertTrue(logger.error.called) - def test_register_submission_url(self): - # Configure the XQueue stub to notify another service - # when it receives a submission. - register_url = 'http://127.0.0.1:8000/register_submission' - self.server.config['register_submission_url'] = register_url - - callback_url = 'http://127.0.0.1:8000/test_callback' - submission = json.dumps({'grader_payload': 'test payload'}) - self._post_submission(callback_url, 'test_queuekey', 'test_queue', submission) - - # Check that a notification was sent - self.post.assert_any_call(register_url, data={'grader_payload': u'test payload'}) - def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body): """ Post a submission to the stub XQueue implementation. diff --git a/common/djangoapps/terrain/stubs/xqueue.py b/common/djangoapps/terrain/stubs/xqueue.py index ad66ce2ba3..0f128fba65 100644 --- a/common/djangoapps/terrain/stubs/xqueue.py +++ b/common/djangoapps/terrain/stubs/xqueue.py @@ -39,7 +39,8 @@ class StubXQueueHandler(StubHttpRequestHandler): if self._is_grade_request(): # If configured, send the grader payload to other services. 
- self._register_submission(self.post_dict['xqueue_body']) + # TODO TNL-3906 + # self._register_submission(self.post_dict['xqueue_body']) try: xqueue_header = json.loads(self.post_dict['xqueue_header']) diff --git a/common/lib/xmodule/setup.py b/common/lib/xmodule/setup.py index 1bc19588bd..1bf6ce5324 100644 --- a/common/lib/xmodule/setup.py +++ b/common/lib/xmodule/setup.py @@ -3,7 +3,6 @@ from setuptools import setup, find_packages XMODULES = [ "book = xmodule.backcompat_module:TranslateCustomTagDescriptor", "chapter = xmodule.seq_module:SequenceDescriptor", - "combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor", "conditional = xmodule.conditional_module:ConditionalDescriptor", "course = xmodule.course_module:CourseDescriptor", "customtag = xmodule.template_module:CustomTagDescriptor", @@ -12,7 +11,6 @@ XMODULES = [ "image = xmodule.backcompat_module:TranslateCustomTagDescriptor", "library_content = xmodule.library_content_module:LibraryContentDescriptor", "error = xmodule.error_module:ErrorDescriptor", - "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor", "poll_question = xmodule.poll_module:PollDescriptor", "problem = xmodule.capa_module:CapaDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor", diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py deleted file mode 100644 index be646ba2a5..0000000000 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ /dev/null @@ -1,550 +0,0 @@ -""" -ORA1. Deprecated. -""" -import logging - -from lxml import etree -from pkg_resources import resource_string - -from xmodule.raw_module import RawDescriptor -from .x_module import XModule, module_attr -from xblock.fields import Integer, Scope, String, List, Float, Boolean -from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor -from xmodule.validation import StudioValidation, StudioValidationMessage - -from collections import namedtuple -from .fields import Date, Timedelta -import textwrap - -log = logging.getLogger("edx.courseware") - -# Make '_' a no-op so we can scrape strings. Using lambda instead of -# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file -_ = lambda text: text - -V1_SETTINGS_ATTRIBUTES = [ - "display_name", - "max_attempts", - "graded", - "accept_file_upload", - "skip_spelling_checks", - "due", - "graceperiod", - "weight", - "min_to_calibrate", - "max_to_calibrate", - "peer_grader_count", - "required_peer_grading", - "peer_grade_finished_submissions_when_none_pending", -] - -V1_STUDENT_ATTRIBUTES = [ - "current_task_number", - "task_states", - "state", - "student_attempts", - "ready_to_reset", - "old_task_states", -] - -V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES - -VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes']) -VERSION_TUPLES = { - 1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES, - V1_STUDENT_ATTRIBUTES), -} - -DEFAULT_VERSION = 1 -DEFAULT_DATA = textwrap.dedent("""\ - - -

-[Garbled XML omitted: the default ORA1 combinedopenended problem definition. Recoverable content: a "Censorship in the Libraries" prompt carrying the Katherine Paterson quote and the persuasive-essay assignment (both repeated verbatim in the markdown field below), a rubric with Ideas, Content, Organization, Style, and Voice categories whose option text was lost, and two openended tasks whose grader_payload names ml_grading.conf and peer_grading.conf with problem_id "6.002x/Welcome/OETest".]
-""") - - -class VersionInteger(Integer): - """ - A model type that converts from strings to integers when reading from json. - Also does error checking to see if version is correct or not. - """ - - def from_json(self, value): - try: - value = int(value) - if value not in VERSION_TUPLES: - version_error_string = "Could not find version {0}, using version {1} instead" - log.error(version_error_string.format(value, DEFAULT_VERSION)) - value = DEFAULT_VERSION - except: - value = DEFAULT_VERSION - return value - - -class CombinedOpenEndedFields(object): - display_name = String( - display_name=_("Display Name"), - help=_("This name appears in the horizontal navigation at the top of the page."), - default=_("Open Response Assessment"), - scope=Scope.settings - ) - current_task_number = Integer( - help=_("Current task that the student is on."), - default=0, - scope=Scope.user_state - ) - old_task_states = List( - help=_("A list of lists of state dictionaries for student states that are saved. " - "This field is only populated if the instructor changes tasks after " - "the module is created and students have attempted it (for example, if a self assessed problem is " - "changed to self and peer assessed)."), - scope=Scope.user_state, - ) - task_states = List( - help=_("List of state dictionaries of each task within this module."), - scope=Scope.user_state - ) - state = String( - help=_("Which step within the current task that the student is on."), - default="initial", - scope=Scope.user_state - ) - graded = Boolean( - display_name=_("Graded"), - help=_("Defines whether the student gets credit for this problem. Credit is based on peer grades of this problem."), - default=False, - scope=Scope.settings - ) - student_attempts = Integer( - help=_("Number of attempts taken by the student on this problem"), - default=0, - scope=Scope.user_state - ) - ready_to_reset = Boolean( - help=_("If the problem is ready to be reset or not."), - default=False, - scope=Scope.user_state - ) - max_attempts = Integer( - display_name=_("Maximum Attempts"), - help=_("The number of times the student can try to answer this problem."), - default=1, - scope=Scope.settings, - values={"min": 1} - ) - accept_file_upload = Boolean( - display_name=_("Allow File Uploads"), - help=_("Whether or not the student can submit files as a response."), - default=False, - scope=Scope.settings - ) - skip_spelling_checks = Boolean( - display_name=_("Disable Quality Filter"), - help=_("If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed."), - default=False, - scope=Scope.settings - ) - due = Date( - help=_("Date that this problem is due by"), - scope=Scope.settings - ) - graceperiod = Timedelta( - help=_("Amount of time after the due date that submissions will be accepted"), - scope=Scope.settings - ) - version = VersionInteger( - help=_("Current version number"), - default=DEFAULT_VERSION, - scope=Scope.settings) - data = String( - help=_("XML data for the problem"), - scope=Scope.content, - default=DEFAULT_DATA) - weight = Float( - display_name=_("Problem Weight"), - help=_("Defines the number of points each problem is worth. 
If the value is not set, each problem is worth one point."), - scope=Scope.settings, - values={"min": 0, "step": ".1"}, - default=1 - ) - min_to_calibrate = Integer( - display_name=_("Minimum Peer Grading Calibrations"), - help=_("The minimum number of calibration essays each student will need to complete for peer grading."), - default=3, - scope=Scope.settings, - values={"min": 1, "max": 20, "step": "1"} - ) - max_to_calibrate = Integer( - display_name=_("Maximum Peer Grading Calibrations"), - help=_("The maximum number of calibration essays each student will need to complete for peer grading."), - default=6, - scope=Scope.settings, - values={"min": 1, "max": 20, "step": "1"} - ) - peer_grader_count = Integer( - display_name=_("Peer Graders per Response"), - help=_("The number of peers who will grade each submission."), - default=3, - scope=Scope.settings, - values={"min": 1, "step": "1", "max": 5} - ) - required_peer_grading = Integer( - display_name=_("Required Peer Grading"), - help=_("The number of other students each student making a submission will have to grade."), - default=3, - scope=Scope.settings, - values={"min": 1, "step": "1", "max": 5} - ) - peer_grade_finished_submissions_when_none_pending = Boolean( - display_name=_('Allow "overgrading" of peer submissions'), - help=_( - "EXPERIMENTAL FEATURE. Allow students to peer grade submissions that already have the requisite number of graders, " - "but ONLY WHEN all submissions they are eligible to grade already have enough graders. " - "This is intended for use when settings for `Required Peer Grading` > `Peer Graders per Response`" - ), - default=False, - scope=Scope.settings, - ) - markdown = String( - help=_("Markdown source of this module"), - default=textwrap.dedent("""\ - [prompt] -

-        <h3>Censorship in the Libraries</h3>
-
-        <p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author</p>
-
-        <p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>

- [prompt] - [rubric] - + Ideas - - Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus. - - Attempts a main idea. Sometimes loses focus or ineffectively displays focus. - - Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task. - - Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task. - + Content - - Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic. - - Includes little information and few or no details. Explores only one or two facets of the topic. - - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic. - - Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic. - + Organization - - Ideas organized illogically, transitions weak, and response difficult to follow. - - Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions. - - Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions. - + Style - - Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns. - - Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns). - - Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences. - + Voice - - Demonstrates language and tone that may be inappropriate to task and reader. - - Demonstrates an attempt to adjust language and tone to task and reader. - - Demonstrates effective adjustment of language and tone to task and reader. - [rubric] - [tasks] - (Self), ({4-12}AI), ({9-12}Peer) - [tasks] - - """), - scope=Scope.settings - ) - - -class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule): - """ - This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). - It transitions between problems, and support arbitrary ordering. - Each combined open ended module contains one or multiple "child" modules. - Child modules track their own state, and can transition between states. They also implement get_html and - handle_ajax. - The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess - ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem) - ajax actions implemented by all children are: - 'save_answer' -- Saves the student answer - 'save_assessment' -- Saves the student assessment (or external grader assessment) - 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) - ajax actions implemented by combined open ended module are: - 'reset' -- resets the whole combined open ended module and returns to the first child module - 'next_problem' -- moves to the next child module - 'get_results' -- gets results from a given child module - - Types of children. 
Task is synonymous with child module, so each combined open ended module - incorporates multiple children (tasks): - openendedmodule - selfassessmentmodule - - CombinedOpenEndedModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__ - """ - STATE_VERSION = 1 - - # states - INITIAL = 'initial' - ASSESSING = 'assessing' - INTERMEDIATE_DONE = 'intermediate_done' - DONE = 'done' - - icon_class = 'problem' - - js = { - 'coffee': [ - resource_string(__name__, 'js/src/combinedopenended/display.coffee'), - resource_string(__name__, 'js/src/javascript_loader.coffee'), - ], - 'js': [ - resource_string(__name__, 'js/src/collapsible.js'), - ] - } - js_module_name = "CombinedOpenEnded" - - css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} - - def __init__(self, *args, **kwargs): - """ - Definition file should have one or many task blocks, a rubric block, and a prompt block. - - See DEFAULT_DATA for a sample. - - """ - super(CombinedOpenEndedModule, self).__init__(*args, **kwargs) - - self.system.set('location', self.location) - - if self.task_states is None: - self.task_states = [] - - if self.old_task_states is None: - self.old_task_states = [] - - version_tuple = VERSION_TUPLES[self.version] - - self.student_attributes = version_tuple.student_attributes - self.settings_attributes = version_tuple.settings_attributes - - attributes = self.student_attributes + self.settings_attributes - - static_data = {} - instance_state = {k: getattr(self, k) for k in attributes} - self.child_descriptor = version_tuple.descriptor(self.system) - self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system) - self.child_module = version_tuple.module(self.system, self.location, self.child_definition, self.child_descriptor, - instance_state=instance_state, static_data=static_data, - attributes=attributes) - self.save_instance_data() - - def get_html(self): - self.save_instance_data() - return_value = self.child_module.get_html() - return return_value - - def handle_ajax(self, dispatch, data): - self.save_instance_data() - return_value = self.child_module.handle_ajax(dispatch, data) - self.save_instance_data() - return return_value - - def get_instance_state(self): - return self.child_module.get_instance_state() - - def get_score(self): - return self.child_module.get_score() - - def max_score(self): - return self.child_module.max_score() - - def get_progress(self): - return self.child_module.get_progress() - - @property - def due_date(self): - return self.child_module.due_date - - def save_instance_data(self): - for attribute in self.student_attributes: - setattr(self, attribute, getattr(self.child_module, attribute)) - - def validate(self): - """ - Message for either error or warning validation message/s. - - Returns message and type. Priority given to error type message. 
- """ - return self.descriptor.validate() - - -class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor): - """ - Module for adding combined open ended questions - """ - mako_template = "widgets/open-ended-edit.html" - module_class = CombinedOpenEndedModule - - has_score = True - always_recalculate_grades = True - template_dir_name = "combinedopenended" - - #Specify whether or not to pass in S3 interface - needs_s3_interface = True - - #Specify whether or not to pass in open ended interface - needs_open_ended_interface = True - - js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/edit.coffee')]} - js_module_name = "OpenEndedMarkdownEditingDescriptor" - css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/combinedopenended/edit.scss')]} - - metadata_translations = { - 'is_graded': 'graded', - 'attempts': 'max_attempts', - } - - def get_context(self): - _context = RawDescriptor.get_context(self) - _context.update({'markdown': self.markdown, - 'enable_markdown': self.markdown is not None}) - return _context - - @property - def non_editable_metadata_fields(self): - non_editable_fields = super(CombinedOpenEndedDescriptor, self).non_editable_metadata_fields - non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod, - CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version]) - return non_editable_fields - - # Proxy to CombinedOpenEndedModule so that external callers don't have to know if they're working - # with a module or a descriptor - child_module = module_attr('child_module') - - def validate(self): - """ - Validates the state of this instance. This is the override of the general XBlock method, - and it will also ask its superclass to validate. - """ - validation = super(CombinedOpenEndedDescriptor, self).validate() - validation = StudioValidation.copy(validation) - - i18n_service = self.runtime.service(self, "i18n") - - validation.summary = StudioValidationMessage( - StudioValidationMessage.ERROR, - i18n_service.ugettext( - "ORA1 is no longer supported. To use this assessment, " - "replace this ORA1 component with an ORA2 component." 
- ) - ) - return validation diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss deleted file mode 100644 index 71908f9e5e..0000000000 --- a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss +++ /dev/null @@ -1,993 +0,0 @@ -// lms - xmodule - combinedopenended -// ==================== - -h2 { - margin-top: 0; - margin-bottom: ($baseline*0.75); - - &.problem-header { - section.staff { - margin-top: ($baseline*1.5); - font-size: 80%; - } - } - - @media print { - display: block; - width: auto; - border-right: 0; - } -} - - // Problem Header -div.name{ - padding-bottom: ($baseline*0.75); - - h2 { - display: inline; - } - - .progress-container { - display: inline; - float: right; - padding-top: 3px; - } -} - -.inline-error { - color: darken($error-color, 10%); -} - -section.combined-open-ended { - @include clearfix(); - - .written-feedback { - position: relative; - margin: 0; - height: 150px; - border: 1px solid lightgray; - padding: ($baseline/4); - resize: vertical; - width: 99%; - overflow: auto; - - .del { - text-decoration: line-through; - background-color: #ffc3c3; - } - .ins { - background-color: #c3ffc3; - } - } -} - - -div.problemwrapper { - border: 1px solid lightgray; - border-radius: ($baseline/2); - - .status-bar { - background-color: #eee; - border-radius: ($baseline/2) ($baseline/2) 0 0; - border-bottom: 1px solid lightgray; - - .statustable { - width: 100%; - padding: $baseline; - } - - .status-container { - display: table-cell; - text-align: center; - - .status-elements { - border-radius: ($baseline/4); - border: 1px solid lightgray; - } - } - - .problemtype-container { - padding: ($baseline/2); - width: 60%; - } - - .problemtype{ - padding: ($baseline/2); - } - - .assessments-container { - float: right; - padding: ($baseline/2) $baseline ($baseline/2) ($baseline/2); - - .assessment-text { - display: inline-block; - display: table-cell; - padding-right: ($baseline/2); - } - } - } - .item-container { - padding-bottom: ($baseline/2); - margin: 15px; - } - - .result-container { - float: left; - width: 100%; - position: relative; - } -} - -section.legend-container { - margin: 15px; - border-radius: ($baseline/4); - - .legenditem { - display: inline; - padding: ($baseline/2); - width: 20%; - background-color: #eee; - font-size: .9em; - } -} - -section.combined-open-ended-status { - vertical-align: center; - - .statusitem { - display: table-cell; - padding: ($baseline/2); - width: 30px; - border-right: 1px solid lightgray; - background-color: #eee; - color: #2c2c2c; - font-size: .9em; - - &:first-child { - border-radius: ($baseline/4) 0 0 ($baseline/4); - } - - &:last-child { - border-right: 0; - border-radius: 0 ($baseline/4) ($baseline/4) 0; - } - - &:only-child { - border-radius: ($baseline/4); - } - - .show-results { - margin-top: .3em; - text-align:right; - } - - .show-results-button { - font: 1em monospace; - } - } - - .statusitem-current { - background-color: $white; - color: #222; - } - - span { - &.unanswered { - display: inline-block; - position: relative; - float: right; - width: 14px; - height: 14px; - background: url('#{$static-path}/images/unanswered-icon.png') center center no-repeat; - } - - &.correct { - display: inline-block; - position: relative; - float: right; - width: 25px; - height: 20px; - background: url('#{$static-path}/images/correct-icon.png') center center no-repeat; - } - - &.incorrect { - display: inline-block; - position: relative; - float: right; - width: 
20px; - height: 20px; - background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat; - } - } - - .icon-caret-right { - display: inline-block; - margin-right: ($baseline/4); - vertical-align: baseline; - } -} - -// Problem Section Controls - -.visibility-control, .visibility-control-prompt { - display: block; - width: 100%; - height: 40px; - - .inner { - float: left; - margin-top: $baseline; - width: 85%; - height: 5px; - border-top: 1px dotted #ddd; - } -} - -.section-header { - display: block; - float: right; - padding-top: ($baseline/2); - width: 15%; - text-align: center; - font-size: .9em; -} - -// Rubric Styling - -.wrapper-score-selection { - display: table-cell; - padding: 0 ($baseline/2); - width: 20px; - vertical-align: middle; -} - -.wrappable { - display: table-cell; - padding: ($baseline/4); -} - -.rubric-list-item { - margin-bottom: ($baseline/10); - padding: ($baseline/2); - - &:hover, &:focus { - background-color: #eee; - } - .rubric-label-selected{ - border-radius: ($baseline/4); - background-color: #eee; - } -} - -span.rubric-category { - display: block; - margin-bottom: ($baseline/2); - padding-top: ($baseline/2); - width: 100%; - border-bottom: 1px solid lightgray; - font-size: 1.1em; -} - -div.combined-rubric-container { - margin: 15px; - padding-top: ($baseline/2); - padding-bottom: ($baseline/4); - - ul.rubric-list { - margin: 0 $baseline ($baseline/2) $baseline; - padding: 0; - list-style-type: none; - - li { - - &.rubric-list-item { - margin-bottom: ($baseline/10); - padding: ($baseline/2); - } - } - } - - h4 { - padding-top: ($baseline/2); - } - - span.rubric-category { - display: block; - width: 100%; - border-bottom: 1px solid lightgray; - font-weight: bold; - font-size: .9em; - } - - label.choicegroup_correct { - &:before { - margin-right: ($baseline*0.75); - content: url('#{$static-path}/images/correct-icon.png'); - } - } - - label.choicegroup_partialcorrect { - &:before { - margin-right: ($baseline*0.75); - content: url('#{$static-path}/images/partially-correct-icon.png'); - } - } - - label.choicegroup_incorrect { - &:before { - margin-right: ($baseline*0.75); - content: url('#{$static-path}/images/incorrect-icon.png'); - } - } - - div.written-feedback { - background: $gray-l6; - padding: ($baseline/4); - } -} - -div.result-container { - padding-top: ($baseline/2); - padding-bottom: ($baseline/4); - - .evaluation { - p { - margin-bottom: 1px; - } - } - - .feedback-on-feedback { - height: 100px; - margin-right: 0; - } - - .evaluation-response { - margin-bottom: ($baseline/10); - - header { - a { - font-size: .85em; - } - } - } - - .evaluation-scoring { - .scoring-list { - margin-left: 3px; - list-style-type: none; - - li { - display:inline; - margin-left: 0; - - &:first-child { - margin-left: 0; - } - - label { - font-size: .9em; - } - } - } - } - - .submit-message-container { - margin: ($baseline/2) 0; - } - - .external-grader-message { - margin-bottom: ($baseline/4); - - section { - padding-left: $baseline; - background-color: #fafafa; - color: #2c2c2c; - font-family: monospace; - font-size: 1em; - padding-top: ($baseline/2); - padding-bottom: 30px; - - header { - font-size: 1.4em; - } - - .shortform { - font-weight: bold; - } - - .longform { - padding: 0; - margin: 0; - - .result-errors { - margin: ($baseline/4); - padding: ($baseline/2) ($baseline/2) ($baseline/2) ($baseline*2); - background: url('#{$static-path}/images/incorrect-icon.png') center left no-repeat; - - li { - color: #B00; - } - } - - .result-output { - margin: 
($baseline/4); - padding: $baseline 0 ($baseline*0.75) ($baseline*2.5); - border-top: 1px solid #ddd; - border-left: 20px solid #fafafa; - - h4 { - font-size: 1em; - font-family: monospace; - } - - dl { - margin: 0; - } - - dt { - margin-top: $baseline; - } - - dd { - margin-left: 24pt; - } - } - - .markup-text{ - margin: ($baseline/4); - padding: $baseline 0 ($baseline*0.75) ($baseline*2.5); - border-top: 1px solid #ddd; - border-left: 20px solid #fafafa; - - bs { - color: #bb0000; - } - - bg { - color: #bda046; - } - } - } - } - } - - .rubric-result-container { - padding: ($baseline/10); - margin: 0; - display: inline; - - .rubric-result { - font-size: .9em; - padding: ($baseline/10); - display: inline-table; - } - } -} - -div.rubric { - ul.rubric-list{ - margin: 0 $baseline ($baseline/2) $baseline; - padding: 0; - list-style: none; - list-style-type: none; - - li { - &.rubric-list-item { - margin-bottom: ($baseline/10); - padding: ($baseline/2); - border-radius: ($baseline/4); - - &:hover, &:focus { - background-color: #eee; - } - - .wrapper-score-selection { - display: table-cell; - padding: 0 ($baseline/2); - width: 20px; - vertical-align: middle; - } - - .wrappable { - display: table-cell; - padding: ($baseline/4); - } - } - } - } - - span.rubric-category { - display: block; - width: 100%; - border-bottom: 1px solid lightgray; - font-weight: bold; - font-size: .9em; - } -} - - -section.open-ended-child { - @media print { - display: block; - padding: 0; - width: auto; - - canvas, img { - page-break-inside: avoid; - } - } - - .inline { - display: inline; - } - - ol.enumerate { - li { - &:before { - display: block; - visibility: hidden; - height: 0; - content: " "; - } - } - } - - .solution-span { - > span { - position: relative; - display: block; - margin: $baseline 0; - padding: 9px 15px $baseline; - border: 1px solid #ddd; - border-radius: 3px; - background: $white; - box-shadow: inset 0 0 0 1px #eee; - - &:empty { - display: none; - } - } - } - - p { - &.answer { - margin-top: -2px; - } - &.status { - margin: 8px 0 0 ($baseline/2); - text-indent: -9999px; - } - } - - div.unanswered { - p.status { - display: inline-block; - width: 14px; - height: 14px; - background: url('#{$static-path}/images/unanswered-icon.png') center center no-repeat; - } - } - - div.correct, div.ui-icon-check { - p.status { - display: inline-block; - width: 25px; - height: 20px; - background: url('#{$static-path}/images/correct-icon.png') center center no-repeat; - } - - input { - border-color: green; - } - } - - div.processing { - p.status { - display: inline-block; - width: 20px; - height: 20px; - background: url('#{$static-path}/images/spinner.gif') center center no-repeat; - } - - input { - border-color: #aaa; - } - } - - div.incorrect, div.ui-icon-close { - p.status { - display: inline-block; - width: 20px; - height: 20px; - background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat; - text-indent: -9999px; - } - - input { - border-color: red; - } - } - - > span { - display: block; - margin-bottom: lh(0.5); - } - - p.answer { - display: inline-block; - margin-bottom: 0; - margin-left: ($baseline/2); - - &:before { - content: "Answer: "; - font-weight: bold; - display: inline; - - } - &:empty { - &:before { - display: none; - } - } - } - - span { - &.unanswered, &.ui-icon-bullet { - display: inline-block; - position: relative; - top: 4px; - width: 14px; - height: 14px; - background: url('#{$static-path}/images/unanswered-icon.png') center center no-repeat; - } - - &.processing, 
&.ui-icon-processing { - display: inline-block; - position: relative; - top: 6px; - width: 25px; - height: 20px; - background: url('#{$static-path}/images/spinner.gif') center center no-repeat; - } - - &.correct, &.ui-icon-check { - display: inline-block; - position: relative; - top: 6px; - width: 25px; - height: 20px; - background: url('#{$static-path}/images/correct-icon.png') center center no-repeat; - } - - &.incorrect, &.ui-icon-close { - display: inline-block; - position: relative; - top: 6px; - width: 20px; - height: 20px; - background: url('#{$static-path}/images/incorrect-icon.png') center center no-repeat; - } - } - - .reload { - float:right; - margin: ($baseline/2); - } - - div.short-form-response { - @include clearfix(); - overflow-y: auto; - margin-bottom: 0; - padding: ($baseline/2); - min-height: 20px; - height: auto; - border: 1px solid #ddd; - background: $gray-l6; - } - - .grader-status { - @include clearfix(); - margin: ($baseline/2) 0; - padding: ($baseline/2); - border-radius: 5px; - background: $gray-l6; - - span { - display: block; - float: left; - overflow: hidden; - margin: -7px 7px 0 0; - text-indent: -9999px; - } - - .grading { - margin: 0 7px 0 0; - padding-left: 25px; - background: url('#{$static-path}/images/info-icon.png') left center no-repeat; - text-indent: 0; - } - - p { - float: left; - margin-bottom: 0; - line-height: 20px; - } - - &.file { - margin-top: $baseline; - padding: $baseline 0 0 0; - border: 0; - border-top: 1px solid #eee; - background: $white; - - p.debug { - display: none; - } - - input { - float: left; - } - } - } - - form.option-input { - margin: -($baseline/2) 0 $baseline; - padding-bottom: $baseline; - - select { - margin-right: flex-gutter(); - } - } - - ul { - margin-bottom: lh(); - margin-left: 0.75em; - margin-left: 0.75rem; - } - - ul.rubric-list{ - margin: 0; - padding: 0; - list-style-type: none; - list-style: none; - - li { - &.rubric-list-item { - margin-bottom: 0; - padding: 0; - border-radius: ($baseline/4); - } - } - } - - ol { - margin-bottom: lh(); - margin-left: .75em; - margin-left: .75rem; - list-style: decimal outside none; - } - - dl { - line-height: 1.4em; - } - - dl dt { - font-weight: bold; - } - - dl dd { - margin-bottom: 0; - } - - dd { - margin-left: .5em; - margin-left: .5rem; - } - - li { - margin-bottom: 0; - padding: 0; - - &:last-child { - margin-bottom: 0; - } - } - - p { - margin-bottom: lh(); - } - - hr { - float: none; - clear: both; - margin: 0 0 .75rem; - width: 100%; - height: 1px; - border: none; - background: #ddd; - color: #ddd; - } - - .hidden { - display: none; - visibility: hidden; - } - - #{$all-text-inputs} { - display: inline; - width: auto; - } - - div.action { - margin-top: $baseline; - - input.save { - @extend .blue-button !optional; - } - - .submission_feedback { - display: inline-block; - margin: 8px 0 0 ($baseline/2); - color: #666; - font-style: italic; - -webkit-font-smoothing: antialiased; - } - } - - .detailed-solution { - > p:first-child { - color: #aaa; - text-transform: uppercase; - font-weight: bold; - font-style: normal; - font-size: 0.9em; - } - - p:last-child { - margin-bottom: 0; - } - } - - div.open-ended-alert, - .save_message { - margin-top: ($baseline/2); - margin-bottom: ($baseline/4); - padding: 8px 12px; - border: 1px solid #ebe8bf; - border-radius: 3px; - background: #fffcdd; - font-size: 0.9em; - } - - div.capa_reset { - margin-top: ($baseline/2); - margin-bottom: ($baseline/2); - padding: 25px; - border: 1px solid $error-color; - border-radius: 3px; - 
background-color: lighten($error-color, 25%); - font-size: 1em; - } - - .capa_reset > h2 { - color: #aa0000; - } - - .capa_reset li { - font-size: 0.9em; - } - - .assessment-container { - margin: ($baseline*2) 0 ($baseline*1.5) 0; - - .scoring-container { - p { - margin-bottom: 1em; - } - - label { - display: inline-block; - margin: ($baseline/2); - padding: ($baseline/4); - min-width: 50px; - background-color: $gray-l3; - text-size: 1.5em; - } - - input[type=radio]:checked + label { - background: #666; - color: white; - } - - input[class='grade-selection'] { - display: none; - } - } - } - - div.prompt { - background-color: white; - } - - h4 { - padding: $baseline/2 0; - } -} - -//OE Tool Area Styling - -.oe-tools { - display: inline-block; - width: 100%; - border-radius: 5px; - - .oe-tools-label, .oe-tools-scores-label { - display: inline-block; - padding: $baseline/2; - vertical-align: middle; - font-size: 0.8em; - } - - .rubric-button { - padding: 8px $baseline/4; - } - - .rubric-previous-button { - margin-right: $baseline/4; - } - - .rubric-next-button { - margin-left: $baseline/4; - } - - .next-step-button { - margin: $baseline/2; - } - .reset-button { - vertical-align: middle; - } -} - -// Staff Grading -.problem-list-container { - margin: $baseline/2; - - .instructions { - padding-bottom: $baseline/2; - } -} - -.staff-grading { - - .breadcrumbs { - padding: ($baseline/10); - background-color: $gray-l6; - border-radius: 5px; - margin-bottom: ($baseline/2); - } - - .prompt-wrapper { - padding-top: ($baseline/2); - - .meta-info-wrapper { - padding: ($baseline/2); - border-radius: 5px; - } - } -} - -section.peer-grading-container{ - div.peer-grading{ - section.calibration-feedback { - padding: $baseline; - } - } -} - -div.staff-info{ - background-color: #eee; - border-radius: 10px; - border-bottom: 1px solid lightgray; - padding: ($baseline/2); - margin: ($baseline/2) 0 ($baseline/2) 0; -} diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/edit.scss b/common/lib/xmodule/xmodule/css/combinedopenended/edit.scss deleted file mode 100644 index 186de7925e..0000000000 --- a/common/lib/xmodule/xmodule/css/combinedopenended/edit.scss +++ /dev/null @@ -1,105 +0,0 @@ -.editor-bar { - - .editor-tabs { - - .advanced-toggle { - @include white-button; - height: auto; - margin-top: -1px; - padding: 3px 9px; - font-size: 12px; - - &.current { - border: 1px solid $lightGrey !important; - border-radius: 3px !important; - background: $lightGrey !important; - color: $darkGrey !important; - pointer-events: none; - cursor: none; - - &:hover, &:focus { - box-shadow: 0 0 0 0 !important; - } - } - } - - .cheatsheet-toggle { - width: 21px; - height: 21px; - padding: 0; - margin: 0 ($baseline/4) 0 ($baseline*0.75); - border-radius: 22px; - border: 1px solid #a5aaaf; - background: #e5ecf3; - font-size: 13px; - font-weight: 700; - color: #565d64; - text-align: center; - } - } -} - -.simple-editor-open-ended-cheatsheet { - position: absolute; - top: 0; - left: 100%; - width: 0; - border-radius: 0 3px 3px 0; - @include linear-gradient(left, $shadow-l1, $transparent 4px); - background-color: $white; - overflow: hidden; - @include transition(width .3s linear 0s); - - &.shown { - width: 20%; - height: 100%; - overflow-y: scroll; - } - - .cheatsheet-wrapper { - padding: 10%; - } - - h6 { - margin-bottom: 7px; - font-size: 15px; - font-weight: 700; - } - - .row { - @include clearfix(); - padding-bottom: 5px !important; - margin-bottom: 10px !important; - border-bottom: 1px solid #ddd !important; - - 
&:last-child { - border-bottom: none !important; - margin-bottom: 0 !important; - } - } - - .col { - float: left; - - &.sample { - width: 60px; - margin-right: 30px; - } - } - - pre { - font-size: 12px; - line-height: 18px; - } - - code { - padding: 0; - background: none; - } -} - -.combinedopenended-editor-icon { - display: inline-block; - vertical-align: middle; - color: #333; -} diff --git a/common/lib/xmodule/xmodule/js/fixtures/combined-open-ended.html b/common/lib/xmodule/xmodule/js/fixtures/combined-open-ended.html deleted file mode 100644 index ae2c195a85..0000000000 --- a/common/lib/xmodule/xmodule/js/fixtures/combined-open-ended.html +++ /dev/null @@ -1,128 +0,0 @@ -
- [deleted fixture, 128 lines: the rendered combined open-ended problem that the display code binds to. The HTML tags were lost in text extraction; the surviving text shows a "Problem 1" header, a "Status" table ("Step 1 (Problem complete) : 1 / 1", "Step 2 (Being scored) : None / 1"), a collapsible "Problem" prompt ("Some prompt."), an answer area marked "Submitted for grading.", action buttons, "Edit / QA" links, and a "Staff Debug Info" panel.]
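The fixture above exercises the task-progression model described in the CombinedOpenEndedModule docstring earlier in this diff: one child task is active at a time, the parent tracks its own state, and 'reset' and 'next_problem' are the two ajax actions the parent handles itself. A minimal sketch of that flow, in Python with hypothetical names (TaskChain is illustrative, not the deleted implementation):

    # State names copied from the deleted module; everything else is assumed.
    INITIAL, ASSESSING, INTERMEDIATE_DONE, DONE = (
        'initial', 'assessing', 'intermediate_done', 'done')

    class TaskChain(object):
        """One active child task at a time, as the fixture's status bar shows."""

        def __init__(self, task_count):
            self.task_count = task_count
            self.task_number = 1  # 1-based, like the fixture's "Step 1"
            self.state = INITIAL

        def handle_ajax(self, dispatch, data):
            if dispatch == 'reset':
                # Return to the first child task.
                self.task_number, self.state = 1, INITIAL
                return {'success': True}
            if dispatch == 'next_problem':
                # Advance only once the current child reports it is done.
                if self.state == DONE and self.task_number < self.task_count:
                    self.task_number += 1
                    self.state = INITIAL
                return {'success': True}
            # Anything else ('save_answer', 'save_assessment', ...) is
            # delegated to the current child module.
            return self.delegate_to_child(dispatch, data)

        def delegate_to_child(self, dispatch, data):
            raise NotImplementedError  # stand-in for the child module call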
diff --git a/common/lib/xmodule/xmodule/js/fixtures/combinedopenended-with-markdown.html b/common/lib/xmodule/xmodule/js/fixtures/combinedopenended-with-markdown.html deleted file mode 100644 index b5c74e00f9..0000000000 --- a/common/lib/xmodule/xmodule/js/fixtures/combinedopenended-with-markdown.html +++ /dev/null @@ -1,6 +0,0 @@ -
- [deleted fixture, 6 lines; the markup was lost in text extraction. Per the edit_spec tests later in this diff, it provided a .combinedopenended-editor wrapper whose markdown box contained the text "markdown".]
diff --git a/common/lib/xmodule/xmodule/js/fixtures/combinedopenended-without-markdown.html b/common/lib/xmodule/xmodule/js/fixtures/combinedopenended-without-markdown.html deleted file mode 100644 index 66d0fec2bc..0000000000 --- a/common/lib/xmodule/xmodule/js/fixtures/combinedopenended-without-markdown.html +++ /dev/null @@ -1,5 +0,0 @@ -
- [deleted fixture, 5 lines; the markup was lost in text extraction. It is the XML-only counterpart of the fixture above: no markdown box, and editor data "xml only".]
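These two fixtures back the editor save tests in edit_spec.coffee further down this diff: with the markdown editor active, save() returns the markdown as metadata plus the XML generated from it; once the author switches to the XML editor, the stale markdown is nulled out so it cannot shadow the hand-edited XML. A minimal sketch of that contract (hypothetical Python with assumed names; to_xml merely stands in for markdownToXml):

    def save_result(markdown, xml):
        """What the editor persists, depending on which editor is active."""
        if markdown is not None:
            # Markdown editor active: keep the source and regenerate the XML.
            return {'metadata': {'markdown': markdown}, 'data': to_xml(markdown)}
        # XML editor active: null out markdown so the XML wins from now on.
        return {'nullout': ['markdown'], 'data': xml}

    def to_xml(markdown):
        # Stand-in only; matches the spec's expectation that markdown
        # 'markdown' round-trips as the data '\nmarkdown\n'.
        return '\n%s\n' % markdown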
diff --git a/common/lib/xmodule/xmodule/js/fixtures/rubric.html b/common/lib/xmodule/xmodule/js/fixtures/rubric.html deleted file mode 100644 index 20a17260fa..0000000000 --- a/common/lib/xmodule/xmodule/js/fixtures/rubric.html +++ /dev/null @@ -1,321 +0,0 @@ -
- [deleted fixture, 321 lines: the rendered rubric used by the Rubric display tests. The HTML tags were lost in text extraction; the surviving text shows an "Open Response Assessment" header, a status bar ("Open Response", "Assessments: Peer"), a "Show Question" toggle, a "Response" area, and a "Submitted Rubric" ("Scored rubric from grader 1") listing the five categories from the default rubric template -- Ideas, Content, Organization, Style, and Voice -- each with its "0 points" option description.]
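The rubric fixture above, together with the Rubric class in display.coffee below, encodes a simple scoring rule: each category contributes the point value of its selected option, the total is their sum, and the assessment can only be submitted once every category has a selection (check_complete and get_total_score below). A small illustrative Python equivalent (hypothetical names, not the deleted code):

    def total_score(selections):
        """Sum the selected points across categories; None while incomplete."""
        # selections maps category name -> selected points (or None)
        if any(points is None for points in selections.values()):
            return None  # mirrors Rubric.check_complete()
        return sum(selections.values())  # mirrors Rubric.get_total_score()

    # e.g. total_score({'Ideas': 0, 'Content': 2, 'Organization': 1,
    #                   'Style': 1, 'Voice': 0}) == 4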
\ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/spec/combinedopenended/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/combinedopenended/display_spec.coffee deleted file mode 100644 index f90728728a..0000000000 --- a/common/lib/xmodule/xmodule/js/spec/combinedopenended/display_spec.coffee +++ /dev/null @@ -1,170 +0,0 @@ -describe 'Rubric', -> - beforeEach -> - spyOn Logger, 'log' - # load up some fixtures - loadFixtures 'rubric.html' - jasmine.Clock.useMock() - @element = $('.combined-open-ended') - @location = @element.data('location') - - describe 'constructor', -> - beforeEach -> - @rub = new Rubric @element - - it 'rubric should properly grab the element', -> - expect(@rub.el).toEqual @element - - describe 'initialize', -> - beforeEach -> - @rub = new Rubric @element - @rub.initialize @location - - it 'rubric correctly sets location', -> - expect($(@rub.rubric_sel).data('location')).toEqual @location - - it 'rubric correctly read', -> - expect(@rub.categories.length).toEqual 5 - -describe 'CombinedOpenEnded', -> - beforeEach -> - spyOn Logger, 'log' - # load up some fixtures - loadFixtures 'combined-open-ended.html' - jasmine.Clock.useMock() - @element = $('.course-content') - - - describe 'constructor', -> - beforeEach -> - spyOn(Collapsible, 'setCollapsibles') - @combined = new CombinedOpenEnded @element - - it 'set the element', -> - expect(@combined.el).toEqual @element - - it 'get the correct values from data fields', -> - expect(@combined.ajax_url).toEqual '/courses/MITx/6.002x/2012_Fall/modx/i4x://MITx/6.002x/combinedopenended/CombinedOE' - expect(@combined.state).toEqual 'assessing' - expect(@combined.task_count).toEqual 2 - expect(@combined.task_number).toEqual 1 - - it 'subelements are made collapsible', -> - expect(Collapsible.setCollapsibles).toHaveBeenCalled() - - - describe 'poll', -> - # We will store default window.setTimeout() function here. - oldSetTimeout = null - - beforeEach => - # setup the spies - @combined = new CombinedOpenEnded @element - spyOn(@combined, 'reload').andCallFake -> return 0 - - # Store original window.setTimeout() function. If we do not do this, then - # all other tests that rely on code which uses window.setTimeout() - # function might (and probably will) fail. - oldSetTimeout = window.setTimeout - # Redefine window.setTimeout() function as a spy. - window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5 - - afterEach => - # Reset the default window.setTimeout() function. If we do not do this, - # then all other tests that rely on code which uses window.setTimeout() - # function might (and probably will) fail. - window.setTimeout = oldSetTimeout - - it 'polls at the correct intervals', => - fakeResponseContinue = state: 'not done' - spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseContinue) - @combined.poll() - expect(window.setTimeout).toHaveBeenCalledWith(@combined.poll, 10000) - expect(window.queuePollerID).toBe(5) - - xit 'polling stops properly', => - fakeResponseDone = state: "done" - spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseDone) - @combined.poll() - expect(window.queuePollerID).toBeUndefined() - expect(window.setTimeout).not.toHaveBeenCalled() - - describe 'rebind', -> - # We will store default window.setTimeout() function here. 
- oldSetTimeout = null - - beforeEach -> - @combined = new CombinedOpenEnded @element - spyOn(@combined, 'queueing').andCallFake -> return 0 - spyOn(@combined, 'skip_post_assessment').andCallFake -> return 0 - - # Store original window.setTimeout() function. If we do not do this, then - # all other tests that rely on code which uses window.setTimeout() - # function might (and probably will) fail. - oldSetTimeout = window.setTimeout - # Redefine window.setTimeout() function as a spy. - window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5 - - afterEach => - # Reset the default window.setTimeout() function. If we do not do this, - # then all other tests that rely on code which uses window.setTimeout() - # function might (and probably will) fail. - window.setTimeout = oldSetTimeout - - it 'when our child is in an assessing state', -> - @combined.child_state = 'assessing' - @combined.rebind() - expect(@combined.answer_area.attr("disabled")).toBe("disabled") - expect(@combined.submit_button.val()).toBe("Submit assessment") - expect(@combined.queueing).toHaveBeenCalled() - - it 'when our child state is initial', -> - @combined.child_state = 'initial' - @combined.rebind() - expect(@combined.answer_area.attr("disabled")).toBeUndefined() - expect(@combined.submit_button.val()).toBe("Submit") - - it 'when our child state is post_assessment', -> - @combined.child_state = 'post_assessment' - @combined.rebind() - expect(@combined.answer_area.attr("disabled")).toBe("disabled") - expect(@combined.submit_button.val()).toBe("Submit post-assessment") - - it 'when our child state is done', -> - spyOn(@combined, 'next_problem').andCallFake -> - @combined.child_state = 'done' - @combined.rebind() - expect(@combined.answer_area.attr("disabled")).toBe("disabled") - expect(@combined.next_problem_button).toBe(":visible") - - describe 'next_problem', -> - beforeEach -> - @combined = new CombinedOpenEnded @element - @combined.child_state = 'done' - - it 'handling a successful call', -> - fakeResponse = - success: true - html: "dummy html" - allow_reset: false - spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse) - spyOn(@combined, 'reinitialize') - spyOn(@combined, 'rebind') - @combined.next_problem() - expect($.postWithPrefix).toHaveBeenCalled() - expect(@combined.reinitialize).toHaveBeenCalledWith(@combined.element) - expect(@combined.rebind).toHaveBeenCalled() - expect(@combined.answer_area.val()).toBe('') - expect(@combined.child_state).toBe('initial') - - it 'handling an unsuccessful call', -> - fakeResponse = - success: false - error: 'This is an error' - spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse) - @combined.next_problem() - expect(@combined.errors_area.html()).toBe(fakeResponse.error) - - - - - diff --git a/common/lib/xmodule/xmodule/js/spec/combinedopenended/edit_spec.coffee b/common/lib/xmodule/xmodule/js/spec/combinedopenended/edit_spec.coffee deleted file mode 100644 index cb1efd5a94..0000000000 --- a/common/lib/xmodule/xmodule/js/spec/combinedopenended/edit_spec.coffee +++ /dev/null @@ -1,139 +0,0 @@ -describe 'OpenEndedMarkdownEditingDescriptor', -> - describe 'save stores the correct data', -> - it 'saves markdown from markdown editor', -> - loadFixtures 'combinedopenended-with-markdown.html' - @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor')) - saveResult = @descriptor.save() - expect(saveResult.metadata.markdown).toEqual('markdown') - 
expect(saveResult.data).toEqual('\nmarkdown\n') - it 'clears markdown when xml editor is selected', -> - loadFixtures 'combinedopenended-with-markdown.html' - @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor')) - @descriptor.createXMLEditor('replace with markdown') - saveResult = @descriptor.save() - expect(saveResult.nullout).toEqual(['markdown']) - expect(saveResult.data).toEqual('replace with markdown') - it 'saves xml from the xml editor', -> - loadFixtures 'combinedopenended-without-markdown.html' - @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor')) - saveResult = @descriptor.save() - expect(saveResult.nullout).toEqual(['markdown']) - expect(saveResult.data).toEqual('xml only') - - describe 'advanced editor opens correctly', -> - it 'click on advanced editor should work', -> - loadFixtures 'combinedopenended-with-markdown.html' - @descriptor = new OpenEndedMarkdownEditingDescriptor($('.combinedopenended-editor')) - spyOn(@descriptor, 'confirmConversionToXml').andReturn(true) - expect(@descriptor.confirmConversionToXml).not.toHaveBeenCalled() - e = jasmine.createSpyObj('e', [ 'preventDefault' ]) - @descriptor.onShowXMLButton(e) - expect(e.preventDefault).toHaveBeenCalled() - expect(@descriptor.confirmConversionToXml).toHaveBeenCalled() - expect($('.editor-bar').length).toEqual(0) - - describe 'insertPrompt', -> - it 'inserts the template if selection is empty', -> - revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt('') - expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.promptTemplate) - it 'recognizes html in the prompt', -> - # (the HTML tags inside these two string literals were lost in text extraction; only the text "Hello" survives) - revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt('[prompt] ... Hello ... [prompt]') - expect(revisedSelection).toEqual('[prompt] ... Hello ... [prompt]') - - describe 'insertRubric', -> - it 'inserts the template if selection is empty', -> - revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('') - expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.rubricTemplate) - it 'recognizes a proper rubric', -> - revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric('[rubric]\n+1\n-1\n-2\n[rubric]') - expect(revisedSelection).toEqual('[rubric]\n+1\n-1\n-2\n[rubric]') - - describe 'insertTasks', -> - it 'inserts the template if selection is empty', -> - revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('') - expect(revisedSelection).toEqual(OpenEndedMarkdownEditingDescriptor.tasksTemplate) - it 'recognizes a proper task string', -> - revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks('[tasks](Self)[tasks]') - expect(revisedSelection).toEqual('[tasks](Self)[tasks]') - - describe 'markdownToXml', -> - # test default templates - it 'converts prompt to xml', -> - # (the markup inside both triple-quoted strings was lost in text extraction; only the text "Prompt!" and "This is my super awesome prompt." survives) - data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[prompt] - ... Prompt! ... - This is my super awesome prompt. - [prompt] - """) - data = data.replace(/[\t\n\s]/gmi,'') - expect(data).toEqual(""" - ... Prompt! ... - This is my super awesome prompt. - ... - """.replace(/[\t\n\s]/gmi,'')) - - it 'converts rubric to xml', -> - data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[rubric] - + 1 - -1 - -2 - + 2 - -1 - -2 - +3 - -1 - -2 - -3 - [rubric] - """) - data = data.replace(/[\t\n\s]/gmi,'') - # (the expected rubric XML was lost in text extraction; only the category names "1", "2", "3" survive) - expect(data).toEqual(""" - ... 1 ... 2 ... 3 ... - """.replace(/[\t\n\s]/gmi,'')) - - it 'converts tasks to xml', -> - data = OpenEndedMarkdownEditingDescriptor.markdownToXml("""[tasks] - (Self), ({1-2}AI), ({1-4}AI), ({1-2}Peer - [tasks] - """) - data = data.replace(/[\t\n\s]/gmi,'') - # (the expected task XML was lost in text extraction; only two "ml_grading.conf" entries and one "peer_grading.conf" entry survive) - equality_list = """ - ... ml_grading.conf ... ml_grading.conf ... peer_grading.conf ... - """ - expect(data).toEqual(equality_list.replace(/[\t\n\s]/gmi,'')) diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee deleted file mode 100644 index 86fae4d265..0000000000 --- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee +++ /dev/null @@ -1,725 +0,0 @@ -class @Rubric - - rubric_category_sel: '.rubric-category' - rubric_sel: '.rubric' - - constructor: (el) -> - @el = el - - initialize: (location) => - @$(@rubric_sel).data("location", location) - @$('input[class="score-selection"]').change @tracking_callback - # set up the hotkeys - $(window).unbind('keydown', @keypress_callback) - $(window).keydown @keypress_callback - # display the 'current' caret - @categories = @$(@rubric_category_sel) - @category = @$(@categories.first()) - @category_index = 0 - - # locally scoped jquery. - $: (selector) -> - $(selector, @el) - - keypress_callback: (event) => - # don't try to do this when user is typing in a text input - if @$(event.target).is('input, textarea') - return - # for when we select via top row - if event.which >= 48 and event.which <= 57 - selected = event.which - 48 - # for when we select via numpad - else if event.which >= 96 and event.which <= 105 - selected = event.which - 96 - # we don't want to do anything since we haven't pressed a number - else - return - - # if we actually have a current category (not past the end) - if(@category_index <= @categories.length) - # find the valid selections for this category - inputs = @$("input[name='score-selection-#{@category_index}']") - max_score = inputs.length - 1 - - if selected > max_score or selected < 0 - return - inputs.filter("input[value=#{selected}]").click() - - @category_index++ - @category = @$(@categories[@category_index]) - - tracking_callback: (event) => - target_selection = @$(event.target).val() - # chop off the beginning of the name so that we can get the number of the category - category = @$(event.target).data("category") - location = @$(@rubric_sel).data('location') - # probably want the original problem location as well - - data = {location: location, selection: target_selection, category: category} - Logger.log 'rubric_select', data - - # finds the scores for each rubric category - get_score_list: () => - # find the number of categories: - num_categories = @$(@rubric_category_sel).length
- for i in [0..(num_categories-1)] - score = @$("input[name='score-selection-#{i}']:checked").val() - if score == undefined - return false - return true - -class @CombinedOpenEnded - - wrapper_sel: 'section.xmodule_CombinedOpenEndedModule' - coe_sel: 'section.combined-open-ended' - reset_button_sel: '.reset-button' - next_step_sel: '.next-step-button' - question_header_sel: '.question-header' - submit_evaluation_sel: '.submit-evaluation-button' - result_container_sel: 'div.result-container' - combined_rubric_sel: '.combined-rubric-container' - open_ended_child_sel: 'section.open-ended-child' - error_sel: '.error' - answer_area_sel: 'textarea.answer' - answer_area_div_sel : 'div.answer' - prompt_sel: '.prompt' - rubric_wrapper_sel: '.rubric-wrapper' - hint_wrapper_sel: '.hint-wrapper' - message_wrapper_sel: '.message-wrapper' - submit_button_sel: '.submit-button' - skip_button_sel: '.skip-button' - file_upload_sel: '.file-upload' - file_upload_box_sel: '.file-upload-box' - file_upload_preview_sel: '.file-upload-preview' - fof_sel: 'textarea.feedback-on-feedback' - sub_id_sel: 'input.submission_id' - grader_id_sel: 'input.grader_id' - grader_status_sel: '.grader-status' - info_rubric_elements_sel: '.rubric-info-item' - rubric_collapse_sel: '.rubric-collapse' - next_rubric_sel: '.rubric-next-button' - previous_rubric_sel: '.rubric-previous-button' - oe_alert_sel: '.open-ended-alert' - save_button_sel: '.save-button' - - constructor: (el) -> - @el=el - @$el = $(el) - @reinitialize(el) - $(window).keydown @keydown_handler - $(window).keyup @keyup_handler - - # locally scoped jquery. - $: (selector) -> - $(selector, @el) - - reinitialize: () -> - @has_been_reset = false - @wrapper=@$(@wrapper_sel) - @coe = @$(@coe_sel) - - @ajax_url = @coe.data('ajax-url') - @get_html() - @coe = @$(@coe_sel) - - #Get data from combinedopenended - @allow_reset = @coe.data('allow_reset') - @id = @coe.data('id') - @state = @coe.data('state') - @task_count = @coe.data('task-count') - @task_number = @coe.data('task-number') - @accept_file_upload = @coe.data('accept-file-upload') - @location = @coe.data('location') - - # set up handlers for click tracking - @rub = new Rubric(@coe) - @rub.initialize(@location) - @is_ctrl = false - - #Setup reset - @reset_button = @$(@reset_button_sel) - @reset_button.click @confirm_reset - - #Setup next problem - @next_problem_button = @$(@next_step_sel) - @next_problem_button.click @next_problem - - @question_header = @$(@question_header_sel) - @question_header.click @collapse_question - - # valid states: 'initial', 'assessing', 'post_assessment', 'done' - Collapsible.setCollapsibles(@$el) - @submit_evaluation_button = @$(@submit_evaluation_sel) - @submit_evaluation_button.click @message_post - - @results_container = @$(@result_container_sel) - @combined_rubric_container = @$(@combined_rubric_sel) - - # Where to put the rubric once we load it - @oe = @$(@open_ended_child_sel) - - @errors_area = @$(@oe).find(@error_sel) - @answer_area = @$(@oe).find(@answer_area_sel) - @prompt_container = @$(@oe).find(@prompt_sel) - @rubric_wrapper = @$(@oe).find(@rubric_wrapper_sel) - @hint_wrapper = @$(@oe).find(@hint_wrapper_sel) - @message_wrapper = @$(@oe).find(@message_wrapper_sel) - @submit_button = @$(@oe).find(@submit_button_sel) - @save_button = @$(@oe).find(@save_button_sel) - @child_state = @oe.data('state') - @child_type = @oe.data('child-type') - if @child_type=="openended" - @skip_button = @$(@oe).find(@skip_button_sel) - @skip_button.click @skip_post_assessment - - @file_upload_area = 
@$(@oe).find(@file_upload_sel) - @can_upload_files = false - @open_ended_child= @$(@oe).find(@open_ended_child_sel) - - @out_of_sync_message = 'The problem state got out of sync. Try reloading the page.' - - if @task_number>1 - @prompt_hide() - else if @task_number==1 and @child_state!='initial' - @prompt_hide() - - @find_assessment_elements() - @find_hint_elements() - - @rebind() - - get_html_callback: (response) => - @coe.replaceWith(response.html) - - get_html: () => - url = "#{@ajax_url}/get_html" - $.ajaxWithPrefix({ - type: 'POST', - url: url, - data: {}, - success: @get_html_callback, - async:false - }); - - show_combined_rubric_current: () => - data = {} - $.postWithPrefix "#{@ajax_url}/get_combined_rubric", data, (response) => - if response.success - @combined_rubric_container.after(response.html).remove() - @combined_rubric_container= @$(@combined_rubric_sel) - @toggle_rubric("") - @rubric_collapse = @$(@rubric_collapse_sel) - @rubric_collapse.click @toggle_rubric - @hide_rubrics() - @$(@previous_rubric_sel).click @previous_rubric - @$(@next_rubric_sel).click @next_rubric - if response.hide_reset - @reset_button.hide() - - message_post: (event)=> - external_grader_message=$(event.target).parent().parent().parent() - evaluation_scoring = $(event.target).parent() - - fd = new FormData() - feedback = @$(evaluation_scoring).find(@fof_sel)[0].value - submission_id = @$(external_grader_message).find(@sub_id_sel)[0].value - grader_id = @$(external_grader_message).find(@grader_id_sel)[0].value - score = @$(evaluation_scoring).find("input:radio[name='evaluation-score']:checked").val() - - fd.append('feedback', feedback) - fd.append('submission_id', submission_id) - fd.append('grader_id', grader_id) - if(!score) - ### - Translators: A "rating" is a score a student gives to indicate how well - they feel they were graded on this problem - ### - @gentle_alert gettext "You need to pick a rating before you can submit." - return - else - fd.append('score', score) - - settings = - type: "POST" - data: fd - processData: false - contentType: false - success: (response) => - @gentle_alert response.msg - @$('section.evaluation').slideToggle() - @message_wrapper.html(response.message_html) - - - $.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings) - - - rebind: () => - # rebind to the appropriate function for the current state - @submit_button.unbind('click') - @submit_button.show() - @save_button.unbind('click') - @save_button.hide() - @reset_button.hide() - @hide_file_upload() - @next_problem_button.hide() - @hint_area.attr('disabled', false) - - if @task_number==1 and @child_state=='assessing' - @prompt_hide() - if @child_state == 'done' - @rubric_wrapper.hide() - if @child_type=="openended" - @skip_button.hide() - if @allow_reset=="True" - @show_combined_rubric_current() - @reset_button.show() - @submit_button.hide() - @answer_area.attr("disabled", true) - @replace_text_inputs() - @hint_area.attr('disabled', true) - if @task_number<@task_count - ### - Translators: this message appears when transitioning between openended grading - types (i.e. self assessment to peer assessment). Sometimes, if a student - did not perform well at one step, they cannot move on to the next one. - ### - @gentle_alert gettext "Your score did not meet the criteria to move to the next step." 
- else if @child_state == 'initial' - @answer_area.attr("disabled", false) - @submit_button.prop('value', gettext 'Submit') - @submit_button.click @confirm_save_answer - @setup_file_upload() - @save_button.click @store_answer - @save_button.show() - else if @child_state == 'assessing' - @answer_area.attr("disabled", true) - @replace_text_inputs() - @hide_file_upload() - ### - Translators: one clicks this button after one has finished filling out the grading - form for an openended assessment - ### - @submit_button.prop('value', gettext 'Submit assessment') - @submit_button.click @save_assessment - @submit_button.attr("disabled",true) - if @child_type == "openended" - @submit_button.hide() - @queueing() - @grader_status = @$(@grader_status_sel) - # (the wrapping tag markup around this status message was lost in text extraction) - @grader_status.html("" + (gettext "Your response has been submitted. Please check back later for your grade.") + "") - else if @child_type == "selfassessment" - @setup_score_selection() - else if @child_state == 'post_assessment' - if @child_type=="openended" - @skip_button.show() - @skip_post_assessment() - @answer_area.attr("disabled", true) - @replace_text_inputs() - ### - Translators: this button is clicked to submit a student's rating of - an evaluator's assessment - ### - @submit_button.prop('value', gettext 'Submit post-assessment') - if @child_type=="selfassessment" - @submit_button.click @save_hint - else - @submit_button.click @message_post - else if @child_state == 'done' - @show_combined_rubric_current() - @rubric_wrapper.hide() - @answer_area.attr("disabled", true) - @replace_text_inputs() - @hint_area.attr('disabled', true) - @submit_button.hide() - if @child_type=="openended" - @skip_button.hide() - if @task_number<@task_count - @next_problem_button.show() - else - @reset_button.show() - - find_assessment_elements: -> - @assessment = @$('input[name="grade-selection"]') - - find_hint_elements: -> - @hint_area = @$('textarea.post_assessment') - - store_answer: (event) => - event.preventDefault() - if @child_state == 'initial' - data = {'student_answer' : @answer_area.val()} - @save_button.attr("disabled",true) - $.postWithPrefix "#{@ajax_url}/store_answer", data, (response) => - if response.success - @gentle_alert(gettext "Answer saved, but not yet submitted.") - else - @errors_area.html(response.error) - @save_button.attr("disabled",false) - else - @errors_area.html(@out_of_sync_message) - - replace_answer: (response) => - if response.success - @rubric_wrapper.html(response.rubric_html) - @rubric_wrapper.show() - @rub = new Rubric(@coe) - @rub.initialize(@location) - @child_state = 'assessing' - @find_assessment_elements() - @answer_area.val(response.student_response) - @rebind() - answer_area_div = @$(@answer_area_div_sel) - answer_area_div.html(response.student_response) - else - @submit_button.show() - @submit_button.attr('disabled', false) - @gentle_alert response.error - - confirm_save_answer: (event) => - ### - Translators: This string appears in a confirmation box after one tries to submit - an openended problem - ### - confirmation_text = gettext 'Please confirm that you wish to submit your work. You will not be able to make any changes after submitting.' 
- accessible_confirm confirmation_text, => - @save_answer(event) - - save_answer: (event) => - @$el.find(@oe_alert_sel).remove() - @submit_button.attr("disabled",true) - @submit_button.hide() - event.preventDefault() - @answer_area.attr("disabled", true) - max_filesize = 2*1000*1000 #2MB - if @child_state == 'initial' - files = "" - valid_files_attached = false - if @can_upload_files == true - files = @$(@file_upload_box_sel)[0].files[0] - if files != undefined - valid_files_attached = true - if files.size > max_filesize - files = "" - # Don't submit the file in the case of it being too large, deal with the error locally. - @submit_button.show() - @submit_button.attr('disabled', false) - @gentle_alert gettext "You are trying to upload a file that is too large for our system. Please choose a file under 2MB or paste a link to it into the answer box." - return - - fd = new FormData() - fd.append('student_answer', @answer_area.val()) - fd.append('student_file', files) - fd.append('valid_files_attached', valid_files_attached) - - that=this - settings = - type: "POST" - data: fd - processData: false - contentType: false - async: false - success: (response) => - @replace_answer(response) - - $.ajaxWithPrefix("#{@ajax_url}/save_answer",settings) - else - @errors_area.html(@out_of_sync_message) - - keydown_handler: (event) => - # Previously, responses were submitted when hitting enter. Add in a modifier that ensures that ctrl+enter is needed. - if event.which == 17 && @is_ctrl==false - @is_ctrl=true - else if @is_ctrl==true && event.which == 13 && @child_state == 'assessing' && @rub.check_complete() - @save_assessment(event) - - keyup_handler: (event) => - # Handle keyup event when ctrl key is released - if event.which == 17 && @is_ctrl==true - @is_ctrl=false - - save_assessment: (event) => - @submit_button.attr("disabled",true) - @submit_button.hide() - event.preventDefault() - if @child_state == 'assessing' && @rub.check_complete() - checked_assessment = @rub.get_total_score() - score_list = @rub.get_score_list() - data = {'assessment' : checked_assessment, 'score_list' : score_list} - $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => - if response.success - @child_state = response.state - - if @child_state == 'post_assessment' - @hint_wrapper.html(response.hint_html) - @find_hint_elements() - else if @child_state == 'done' - @rubric_wrapper.hide() - - @rebind() - else - @gentle_alert response.error - else - @errors_area.html(@out_of_sync_message) - - save_hint: (event) => - event.preventDefault() - if @child_state == 'post_assessment' - data = {'hint' : @hint_area.val()} - - $.postWithPrefix "#{@ajax_url}/save_post_assessment", data, (response) => - if response.success - @message_wrapper.html(response.message_html) - @child_state = 'done' - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html(@out_of_sync_message) - - skip_post_assessment: => - if @child_state == 'post_assessment' - - $.postWithPrefix "#{@ajax_url}/skip_post_assessment", {}, (response) => - if response.success - @child_state = 'done' - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html(@out_of_sync_message) - - confirm_reset: (event) => - message = gettext 'Are you sure you want to remove your previous response to this question?' 
- accessible_confirm message, => - @reset(event) - - reset: (event) => - event.preventDefault() - if @child_state == 'done' or @allow_reset=="True" - $.postWithPrefix "#{@ajax_url}/reset", {}, (response) => - if response.success - @answer_area.val('') - @rubric_wrapper.html('') - @hint_wrapper.html('') - @message_wrapper.html('') - @child_state = 'initial' - @coe.after(response.html).remove() - @allow_reset="False" - @reinitialize(@element) - @has_been_reset = true - @rebind() - @reset_button.hide() - else - @errors_area.html(response.error) - else - @errors_area.html(@out_of_sync_message) - - next_problem: => - if @child_state == 'done' - $.postWithPrefix "#{@ajax_url}/next_problem", {}, (response) => - if response.success - @answer_area.val('') - @rubric_wrapper.html('') - @hint_wrapper.html('') - @message_wrapper.html('') - @child_state = 'initial' - @coe.after(response.html).remove() - @reinitialize(@element) - @rebind() - @next_problem_button.hide() - if !response.allow_reset - @gentle_alert gettext "Moved to next step." - else - ### - Translators: this message appears when transitioning between openended grading - types (i.e. self assessment to peer assessment). Sometimes, if a student - did not perform well at one step, they cannot move on to the next one. - ### - @gentle_alert gettext "Your score did not meet the criteria to move to the next step." - @show_combined_rubric_current() - else - @errors_area.html(response.error) - else - @errors_area.html(@out_of_sync_message) - - gentle_alert: (msg) => - if @$el.find(@oe_alert_sel).length - @$el.find(@oe_alert_sel).remove() - # (the alert markup in this string was lost in text extraction) - alert_elem = "" - @$el.find('.open-ended-action').after(alert_elem) - @$el.find(@oe_alert_sel).css(opacity: 0).animate(opacity: 1, 700) - - queueing: => - if @child_state=="assessing" and @child_type=="openended" - if window.queuePollerID # Only one poller 'thread' per Problem - window.clearTimeout(window.queuePollerID) - window.queuePollerID = window.setTimeout(@poll, 10000) - - poll: => - $.postWithPrefix "#{@ajax_url}/check_for_score", (response) => - if response.state == "done" or response.state=="post_assessment" - delete window.queuePollerID - @reload() - else - window.queuePollerID = window.setTimeout(@poll, 10000) - - setup_file_upload: => - if @accept_file_upload == "True" - if window.File and window.FileReader and window.FileList and window.Blob - @can_upload_files = true - # (the upload input markup in this string was lost in text extraction; only the alt text "Uploaded image" survives) - @file_upload_area.html('Uploaded image') - @file_upload_area.show() - @$(@file_upload_preview_sel).hide() - @$(@file_upload_box_sel).change @preview_image - else - @gentle_alert gettext 'File uploads are required for this question, but are not supported in your browser. Try the newest version of Google Chrome. Alternatively, if you have uploaded the image to another website, you can paste a link to it into the answer box.' - - hide_file_upload: => - if @accept_file_upload == "True" - @file_upload_area.hide() - - replace_text_inputs: => - answer_class = @answer_area.attr('class') - answer_id = @answer_area.attr('id') - answer_val = @answer_area.val() - new_text = '' - # (the answer markup wrapping answer_val below was lost in text extraction) - new_text = "
#{answer_val}
" - @answer_area.replaceWith(new_text) - - # wrap this so that it can be mocked - reload: -> - @reinitialize() - - collapse_question: (event) => - @prompt_container.slideToggle() - @prompt_container.toggleClass('open') - if @prompt_container.hasClass('open') - ### - Translators: "Show Question" is some text that, when clicked, shows a question's - content that had been hidden - ### - new_text = gettext "Show Question" - Logger.log 'oe_show_question', {location: @location} - else - ### - Translators: "Hide Question" is some text that, when clicked, hides a question's - content - ### - Logger.log 'oe_hide_question', {location: @location} - new_text = gettext "Hide Question" - @question_header.text(new_text) - return false - - hide_rubrics: () => - rubrics = @$(@combined_rubric_sel) - for rub in rubrics - if @$(rub).data('status')=="shown" - @$(rub).show() - else - @$(rub).hide() - - next_rubric: => - @shift_rubric(1) - return false - - previous_rubric: => - @shift_rubric(-1) - return false - - shift_rubric: (i) => - rubrics = @$(@combined_rubric_sel) - number = 0 - for rub in rubrics - if @$(rub).data('status')=="shown" - number = @$(rub).data('number') - @$(rub).data('status','hidden') - if i==1 and number < rubrics.length - 1 - number = number + i - - if i==-1 and number>0 - number = number + i - - @$(rubrics[number]).data('status', 'shown') - @hide_rubrics() - - prompt_show: () => - if @prompt_container.is(":hidden")==true - @prompt_container.slideToggle() - @prompt_container.toggleClass('open') - @question_header.text(gettext "Hide Question") - - prompt_hide: () => - if @prompt_container.is(":visible")==true - @prompt_container.slideToggle() - @prompt_container.toggleClass('open') - @question_header.text(gettext "Show Question") - - log_feedback_click: (event) -> - target = @$(event.target) - if target.hasClass('see-full-feedback') - Logger.log 'oe_show_full_feedback', {} - else if target.hasClass('respond-to-feedback') - Logger.log 'oe_show_respond_to_feedback', {} - else - generated_event_type = link_text.toLowerCase().replace(" ","_") - Logger.log "oe_" + generated_event_type, {} - log_feedback_selection: (event) -> - target_selection = @$(event.target).val() - Logger.log 'oe_feedback_response_selected', {value: target_selection} - - remove_attribute: (name) => - if @$(@file_upload_preview_sel).attr(name) - @$(@file_upload_preview_sel)[0].removeAttribute(name) - - preview_image: () => - if @$(@file_upload_box_sel)[0].files && @$(@file_upload_box_sel)[0].files[0] - reader = new FileReader() - reader.onload = (e) => - max_dim = 150 - @remove_attribute('src') - @remove_attribute('height') - @remove_attribute('width') - @$(@file_upload_preview_sel).attr('src', e.target.result) - height_px = @$(@file_upload_preview_sel)[0].height - width_px = @$(@file_upload_preview_sel)[0].width - scale_factor = 0 - if height_px>width_px - scale_factor = height_px/max_dim - else - scale_factor = width_px/max_dim - @$(@file_upload_preview_sel)[0].width = width_px/scale_factor - @$(@file_upload_preview_sel)[0].height = height_px/scale_factor - @$(@file_upload_preview_sel).show() - reader.readAsDataURL(@$(@file_upload_box_sel)[0].files[0]) - - toggle_rubric: (event) => - info_rubric_elements = @$(@info_rubric_elements_sel) - info_rubric_elements.slideToggle() - return false - - setup_score_selection: () => - @$("input[class='score-selection']").change @graded_callback - - graded_callback: () => - if @rub.check_complete() - @submit_button.attr("disabled",false) - @submit_button.show() diff --git 
a/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee deleted file mode 100644 index 9bd023ca5e..0000000000 --- a/common/lib/xmodule/xmodule/js/src/combinedopenended/edit.coffee +++ /dev/null @@ -1,304 +0,0 @@ -class @OpenEndedMarkdownEditingDescriptor extends XModule.Descriptor - # TODO really, these templates should come from or also feed the cheatsheet - @rubricTemplate : """ - [rubric] - + Ideas - - Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus. - - Attempts a main idea. Sometimes loses focus or ineffectively displays focus. - - Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task. - - Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task. - + Content - - Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic. - - Includes little information and few or no details. Explores only one or two facets of the topic. - - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic. - - Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic. - + Organization - - Ideas organized illogically, transitions weak, and response difficult to follow. - - Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions. - - Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions. - + Style - - Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns. - - Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns). - - Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences. - + Voice - - Demonstrates language and tone that may be inappropriate to task and reader. - - Demonstrates an attempt to adjust language and tone to task and reader. - - Demonstrates effective adjustment of language and tone to task and reader. - [rubric] - """ - - @tasksTemplate: "[tasks]\n(Self), ({4-12}AI), ({9-12}Peer)\n[tasks]\n" - @promptTemplate: """ - [prompt]\n -

-          Censorship in the Libraries
-
-          'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
-
-          Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
- [prompt]\n - """ - - constructor: (element) -> - @element = element - - if $(".markdown-box", @element).length != 0 - @markdown_editor = CodeMirror.fromTextArea($(".markdown-box", element)[0], { - lineWrapping: true - mode: null - }) - @setCurrentEditor(@markdown_editor) - selection = @markdown_editor.getSelection() - #Auto-add in the needed template if it isn't already in there. - if(@markdown_editor.getValue() == "") - @markdown_editor.setValue(OpenEndedMarkdownEditingDescriptor.promptTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.rubricTemplate + "\n" + OpenEndedMarkdownEditingDescriptor.tasksTemplate) - # Add listeners for toolbar buttons (only present for markdown editor) - @element.on('click', '.xml-tab', @onShowXMLButton) - @element.on('click', '.format-buttons a', @onToolbarButton) - @element.on('click', '.cheatsheet-toggle', @toggleCheatsheet) - # Hide the XML text area - $(@element.find('.xml-box')).hide() - else - @createXMLEditor() - - @alertTaskRubricModification() - - ### - Creates the XML Editor and sets it as the current editor. If text is passed in, - it will replace the text present in the HTML template. - - text: optional argument to override the text passed in via the HTML template - ### - createXMLEditor: (text) -> - @xml_editor = CodeMirror.fromTextArea($(".xml-box", @element)[0], { - mode: "xml" - lineNumbers: true - lineWrapping: true - }) - if text - @xml_editor.setValue(text) - @setCurrentEditor(@xml_editor) - $(@xml_editor.getWrapperElement()).toggleClass("CodeMirror-advanced"); - # Need to refresh to get line numbers to display properly. - @xml_editor.refresh() - - ### - User has clicked to show the XML editor. Before XML editor is swapped in, - the user will need to confirm the one-way conversion. - ### - onShowXMLButton: (e) => - e.preventDefault(); - if @cheatsheet && @cheatsheet.hasClass('shown') - @cheatsheet.toggleClass('shown') - @toggleCheatsheetVisibility() - if @confirmConversionToXml() - @createXMLEditor(OpenEndedMarkdownEditingDescriptor.markdownToXml(@markdown_editor.getValue())) - # Put cursor position to 0. - @xml_editor.setCursor(0) - # Hide markdown-specific toolbar buttons - $(@element.find('.editor-bar')).hide() - - alertTaskRubricModification: -> - return alert("Before you edit, please note that if you alter the tasks block or the rubric block of this question after students have submitted responses, it may result in their responses and grades being deleted! Use caution when altering problems that have already been released to students.") - ### - Have the user confirm the one-way conversion to XML. - Returns true if the user clicked OK, else false. - ### - confirmConversionToXml: -> - # TODO: use something besides a JavaScript confirm dialog? - return confirm("If you use the Advanced Editor, this problem will be converted to XML and you will not be able to return to the Simple Editor Interface.\n\nProceed to the Advanced Editor and convert this problem to XML?") - - ### - Event listener for toolbar buttons (only possible when markdown editor is visible). 
- ### - onToolbarButton: (e) => - e.preventDefault(); - selection = @markdown_editor.getSelection() - revisedSelection = null - switch $(e.currentTarget).attr('class') - when "rubric-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertRubric(selection) - when "prompt-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertPrompt(selection) - when "tasks-button" then revisedSelection = OpenEndedMarkdownEditingDescriptor.insertTasks(selection) - else # ignore click - - if revisedSelection != null - @markdown_editor.replaceSelection(revisedSelection) - @markdown_editor.focus() - - ### - Event listener for toggling cheatsheet (only possible when markdown editor is visible). - ### - toggleCheatsheet: (e) => - e.preventDefault(); - if !$(@markdown_editor.getWrapperElement()).find('.simple-editor-open-ended-cheatsheet')[0] - @cheatsheet = $($('#simple-editor-open-ended-cheatsheet').html()) - $(@markdown_editor.getWrapperElement()).append(@cheatsheet) - - @toggleCheatsheetVisibility() - - setTimeout (=> @cheatsheet.toggleClass('shown')), 10 - - - ### - Function to toggle cheatsheet visibility. - ### - toggleCheatsheetVisibility: () => - $('.modal-content').toggleClass('cheatsheet-is-shown') - - - ### - Stores the current editor and hides the one that is not displayed. - ### - setCurrentEditor: (editor) -> - if @current_editor - $(@current_editor.getWrapperElement()).hide() - @current_editor = editor - $(@current_editor.getWrapperElement()).show() - $(@current_editor).focus(); - - ### - Called when save is called. Listeners are unregistered because editing the block again will - result in a new instance of the descriptor. Note that this is NOT the case for cancel-- - when cancel is called the instance of the descriptor is reused if edit is selected again. 
- ### - save: -> - @element.off('click', '.xml-tab', @changeEditor) - @element.off('click', '.format-buttons a', @onToolbarButton) - @element.off('click', '.cheatsheet-toggle', @toggleCheatsheet) - if @current_editor == @markdown_editor - { - data: OpenEndedMarkdownEditingDescriptor.markdownToXml(@markdown_editor.getValue()) - metadata: - markdown: @markdown_editor.getValue() - } - else - { - data: @xml_editor.getValue() - nullout: ['markdown'] - } - - @insertRubric: (selectedText) -> - return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[rubric]', '[rubric]', OpenEndedMarkdownEditingDescriptor.rubricTemplate) - - @insertPrompt: (selectedText) -> - return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[prompt]', '[prompt]', OpenEndedMarkdownEditingDescriptor.promptTemplate) - - @insertTasks: (selectedText) -> - return OpenEndedMarkdownEditingDescriptor.insertGenericInput(selectedText, '[tasks]', '[tasks]', OpenEndedMarkdownEditingDescriptor.tasksTemplate) - - @insertGenericInput: (selectedText, lineStart, lineEnd, template) -> - if selectedText.length > 0 - new_string = selectedText.replace(/^\s+|\s+$/g,'') - if new_string.substring(0,lineStart.length) != lineStart - new_string = lineStart + new_string - if new_string.substring((new_string.length)-lineEnd.length,new_string.length) != lineEnd - new_string = new_string + lineEnd - return new_string - else - return template - - @markdownToXml: (markdown)-> - toXml = `function(markdown) { - - function template(template_html,data){ - return template_html.replace(/%(\w*)%/g,function(m,key){return data.hasOwnProperty(key)?data[key]:"";}); - } - - var xml = markdown; - - // group rubrics - xml = xml.replace(/\[rubric\]\n?([^\]]*)\[\/?rubric\]/gmi, function(match, p) { - var groupString = '\n\n'; - var options = p.split('\n'); - var category_open = false; - for(var i = 0; i < options.length; i++) { - if(options[i].length > 0) { - var value = options[i].replace(/^\s+|\s+$/g,''); - if (value.charAt(0)=="+") { - if(i>0){ - if(category_open==true){ - groupString += "\n"; - category_open = false; - } - } - groupString += "\n\n"; - category_open = true; - text = value.substr(1); - text = text.replace(/^\s+|\s+$/g,''); - groupString += text; - groupString += "\n\n"; - } else if (value.charAt(0) == "-") { - groupString += "\n"; - } - } - if(i==options.length-1 && category_open == true){ - groupString += "\n\n"; - } - } - groupString += '\n\n'; - return groupString; - }); - - // group tasks - xml = xml.replace(/\[tasks\]\n?([^\]]*)\[\/?tasks\]/gmi, function(match, p) { - var open_ended_template = $('#open-ended-template').html(); - if(open_ended_template == null) { - open_ended_template = "%grading_config%"; - } - var groupString = ''; - var options = p.split(","); - for(var i = 0; i < options.length; i++) { - if(options[i].length > 0) { - var value = options[i].replace(/^\s+|\s+$/g,''); - var lower_option = value.toLowerCase(); - type = lower_option.match(/(peer|self|ai)/gmi) - if(type != null) { - type = type[0] - var min_max = value.match(/\{\n?([^\]]*)\}/gmi); - var min_max_string = ""; - if(min_max!=null) { - min_max = min_max[0].replace(/^{|}/gmi,''); - min_max = min_max.split("-"); - min = min_max[0]; - max = min_max[1]; - min_max_string = 'min_score_to_attempt="' + min + '" max_score_to_attempt="' + max + '" '; - } - groupString += "\n" - if(type=="self") { - groupString +="" - } else if (type=="peer") { - config = "peer_grading.conf" - groupString += template(open_ended_template,{min_max_string: 
min_max_string, grading_config: config}); - } else if (type=="ai") { - config = "ml_grading.conf" - groupString += template(open_ended_template,{min_max_string: min_max_string, grading_config: config}); - } - groupString += "\n" - } - } - } - return groupString; - }); - - // replace prompts - xml = xml.replace(/\[prompt\]\n?([^\]]*)\[\/?prompt\]/gmi, function(match, p1) { - var selectString = '\n' + p1 + '\n'; - return selectString; - }); - - // rid white space - xml = xml.replace(/\n\n\n/g, '\n'); - - // surround w/ combinedopenended tag - xml = '\n' + xml + '\n'; - - return xml; - } - ` - return toXml markdown diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee deleted file mode 100644 index 7196a5d7a6..0000000000 --- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee +++ /dev/null @@ -1,66 +0,0 @@ -# This is a simple class that just hides the error container -# and message container when they are empty -# Can (and should be) expanded upon when our problem list -# becomes more sophisticated -class @PeerGrading - - peer_grading_sel: '.peer-grading' - peer_grading_container_sel: '.peer-grading-container' - error_container_sel: '.error-container' - message_container_sel: '.message-container' - problem_button_sel: '.problem-button' - problem_list_sel: '.problem-list' - progress_bar_sel: '.progress-bar' - - constructor: (element) -> - @el = element - @peer_grading_container = @$(@peer_grading_sel) - @use_single_location = @peer_grading_container.data('use-single-location') - @peer_grading_outer_container = @$(@peer_grading_container_sel) - @ajax_url = @peer_grading_container.data('ajax-url') - - if @use_single_location.toLowerCase() == "true" - #If the peer grading element is linked to a single location, then activate the backend for that location - @activate_problem() - else - #Otherwise, activate the panel view. - @error_container = @$(@error_container_sel) - @error_container.toggle(not @error_container.is(':empty')) - - @message_container = @$(@message_container_sel) - @message_container.toggle(not @message_container.is(':empty')) - - @problem_button = @$(@problem_button_sel) - @problem_button.click @show_results - - @problem_list = @$(@problem_list_sel) - @construct_progress_bar() - - # locally scoped jquery. 
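The removed markdownToXml above compiles a small tasks mini-language: a [tasks] block such as "(Self), ({4-12}AI), ({9-12}Peer)" names the grader for each step, and an optional {min-max} prefix becomes min_score_to_attempt/max_score_to_attempt attributes on the generated task XML. For readers tracing what this deletion retires, here is a rough Python sketch of that parse; the function name and dict layout are illustrative, not from the codebase:

    import re

    def parse_tasks(spec):
        # Mirrors the "group tasks" pass of the deleted markdownToXml:
        # split on commas, detect the grader type, and pull the optional
        # {min-max} score bounds for each task.
        tasks = []
        for option in spec.split(','):
            value = option.strip()
            grader = re.search(r'(peer|self|ai)', value, re.IGNORECASE)
            if not grader:
                continue
            task = {'type': grader.group(1).lower()}
            bounds = re.search(r'\{(\d+)-(\d+)\}', value)
            if bounds:
                task['min_score_to_attempt'] = int(bounds.group(1))
                task['max_score_to_attempt'] = int(bounds.group(2))
            tasks.append(task)
        return tasks

    # parse_tasks("(Self), ({4-12}AI), ({9-12}Peer)") ->
    # [{'type': 'self'},
    #  {'type': 'ai', 'min_score_to_attempt': 4, 'max_score_to_attempt': 12},
    #  {'type': 'peer', 'min_score_to_attempt': 9, 'max_score_to_attempt': 12}]

In the deleted CoffeeScript, "self" produced a selfassessment task, while "peer" and "ai" produced openended tasks wired to peer_grading.conf and ml_grading.conf respectively.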
- $: (selector) -> - $(selector, @el) - - construct_progress_bar: () => - problems = @problem_list.find('tr').next() - problems.each( (index, element) => - problem = $(element) - progress_bar = problem.find(@progress_bar_sel) - bar_value = parseInt(problem.data('graded')) - bar_max = parseInt(problem.data('required')) + bar_value - progress_bar.progressbar({value: bar_value, max: bar_max}) - ) - - show_results: (event) => - location_to_fetch = $(event.target).data('location') - data = {'location' : location_to_fetch} - $.postWithPrefix "#{@ajax_url}problem", data, (response) => - if response.success - @peer_grading_outer_container.after(response.html).remove() - backend = new PeerGradingProblemBackend(@ajax_url, false) - new PeerGradingProblem(backend, @el) - else - @gentle_alert response.error - - activate_problem: () => - backend = new PeerGradingProblemBackend(@ajax_url, false) - new PeerGradingProblem(backend, @el) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee deleted file mode 100644 index 4056dac047..0000000000 --- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee +++ /dev/null @@ -1,615 +0,0 @@ -################################## -# -# This is the JS that renders the peer grading problem page. -# Fetches the correct problem and/or calibration essay -# and sends back the grades -# -# Should not be run when we don't have a location to send back -# to the server -# -# PeerGradingProblemBackend - -# makes all the ajax requests and provides a mock interface -# for testing purposes -# -# PeerGradingProblem - -# handles the rendering and user interactions with the interface -# -################################## -class @PeerGradingProblemBackend - constructor: (ajax_url, mock_backend) -> - @mock_backend = mock_backend - @ajax_url = ajax_url - @mock_cnt = 0 - - post: (cmd, data, callback) -> - if @mock_backend - callback(@mock(cmd, data)) - else - # if this post request fails, the error callback will catch it - $.post(@ajax_url + cmd, data, callback) - .error => callback({success: false, error: "Error occurred while performing this operation"}) - - mock: (cmd, data) -> - if cmd == 'is_student_calibrated' - # change to test each version - response = - success: true - calibrated: @mock_cnt >= 2 - else if cmd == 'show_calibration_essay' - #response = - # success: false - # error: "There was an error" - @mock_cnt++ - response = - success: true - submission_id: 1 - submission_key: 'abcd' - student_response: ''' - Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32. - - The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. 
-          Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
-          '''
-        prompt: '''

-          S11E3: Metal Bands
-
-          Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
-
-          * Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
-
-          This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
-          '''
-        rubric: '''
-          [rubric HTML table lost in extraction: scoring categories "Purpose" and "Organization", each with radio-button score options]
-          '''
-        max_score: 4
-      else if cmd == 'get_next_submission'
-        response =
-          success: true
-          submission_id: 1
-          submission_key: 'abcd'
-          student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa.
-
-          Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum.
-
-          Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. '''
-        prompt: '''

-          S11E3: Metal Bands
-
-          Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.
-
-          * Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?
-
-          This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.
-          '''
-        rubric: '''
-          [rubric HTML table lost in extraction: scoring categories "Purpose" and "Organization", each with radio-button score options]
- ''' - max_score: 4 - else if cmd == 'save_calibration_essay' - response = - success: true - actual_score: 2 - else if cmd == 'save_grade' - response = - success: true - - return response - -class @PeerGradingProblem - - prompt_wrapper_sel: '.prompt-wrapper' - peer_grading_container_sel: '.peer-grading-container' - submission_container_sel: '.submission-container' - prompt_container_sel: '.prompt-container' - rubric_container_sel: '.rubric-container' - flag_student_container_sel: '.flag-student-container' - calibration_panel_sel: '.calibration-panel' - grading_panel_sel: '.grading-panel' - content_panel_sel: '.content-panel' - grading_message_sel: '.grading-message' - question_header_sel: '.question-header' - flag_submission_confirmation_sel: '.flag-submission-confirmation' - flag_submission_confirmation_button_sel: '.flag-submission-confirmation-button' - flag_submission_removal_button_sel: '.flag-submission-removal-button' - grading_wrapper_sel: '.grading-wrapper' - calibration_feedback_sel: '.calibration-feedback' - interstitial_page_sel: '.interstitial-page' - calibration_interstitial_page_sel: '.calibration-interstitial-page' - error_container_sel: '.error-container' - peer_grading_instructions_sel: '.peer-grading-instructions' - feedback_area_sel: '.feedback-area' - ice_legend_sel: '.ice-legend' - score_selection_container_sel: '.score-selection-container' - rubric_selection_container_sel: '.rubric-selection-container' - submit_button_sel: '.submit-button' - action_button_sel: '.action-button' - calibration_feedback_button_sel: '.calibration-feedback-button' - interstitial_page_button_sel: '.interstitial-page-button' - calibration_interstitial_page_button_sel: '.calibration-interstitial-page-button' - flag_checkbox_sel: '.flag-checkbox' - calibration_text_sel: '.calibration-text' - grading_text_sel: '.grading-text' - calibration_feedback_wrapper_sel: '.calibration-feedback-wrapper' - - constructor: (backend, el) -> - @el = el - @prompt_wrapper = $(@prompt_wrapper_sel) - @backend = backend - @is_ctrl = false - @el = $(@peer_grading_container_sel) - - # get the location of the problem - @location = $('.peer-grading').data('location') - # prevent this code from trying to run - # when we don't have a location - if(!@location) - return - - # get the other elements we want to fill in - @submission_container = @$(@submission_container_sel) - @prompt_container = @$(@prompt_container_sel) - @rubric_container = @$(@rubric_container_sel) - @flag_student_container = @$(@flag_student_container_sel) - @calibration_panel = @$(@calibration_panel_sel) - @grading_panel = @$(@grading_panel_sel) - @content_panel = @$(@content_panel_sel) - @grading_message = @$(@grading_message_sel) - @grading_message.hide() - @question_header = @$(@question_header_sel) - @question_header.click @collapse_question - @flag_submission_confirmation = @$(@flag_submission_confirmation_sel) - @flag_submission_confirmation_button = @$(@flag_submission_confirmation_button_sel) - @flag_submission_removal_button = @$(@flag_submission_removal_button_sel) - - @flag_submission_confirmation_button.click @close_dialog_box - @flag_submission_removal_button.click @remove_flag - - @grading_wrapper = @$(@grading_wrapper_sel) - @calibration_feedback_panel = @$(@calibration_feedback_sel) - @interstitial_page = @$(@interstitial_page_sel) - @interstitial_page.hide() - - @calibration_interstitial_page = @$(@calibration_interstitial_page_sel) - @calibration_interstitial_page.hide() - - @error_container = @$(@error_container_sel) - - 
@submission_key_input = $("input[name='submission-key']") - @essay_id_input = @$("input[name='essay-id']") - @peer_grading_instructions = @$(@peer_grading_instructions_sel) - @feedback_area = @$(@feedback_area_sel) - @ice_legend = @$(@ice_legend_sel) - - @score_selection_container = @$(@score_selection_container_sel) - @rubric_selection_container = @$(@rubric_selection_container_sel) - @grade = null - @calibration = null - - @submit_button = @$(@submit_button_sel) - @action_button = @$(@action_button_sel) - @calibration_feedback_button = @$(@calibration_feedback_button_sel) - @interstitial_page_button = @$(@interstitial_page_button_sel) - @calibration_interstitial_page_button = @$(@calibration_interstitial_page_button_sel) - @flag_student_checkbox = @$(@flag_checkbox_sel) - - $(window).keydown @keydown_handler - $(window).keyup @keyup_handler - - Collapsible.setCollapsibles(@content_panel) - - # Set up the click event handlers - @action_button.click -> history.back() - @calibration_feedback_button.click => - @calibration_feedback_panel.hide() - @grading_wrapper.show() - @gentle_alert "Calibration essay saved. Fetching the next essay." - @is_calibrated_check() - - @interstitial_page_button.click => - @interstitial_page.hide() - @is_calibrated_check() - - @calibration_interstitial_page_button.click => - @calibration_interstitial_page.hide() - @is_calibrated_check() - - @flag_student_checkbox.click => - @flag_box_checked() - - @calibration_feedback_button.hide() - @calibration_feedback_panel.hide() - @error_container.hide() - @flag_submission_confirmation.hide() - - if @tracking_changes() - @change_tracker = new TrackChanges(@el) - - @is_calibrated_check() - - # locally scoped jquery. - $: (selector) -> - $(selector, @el) - - - ########## - # - # Ajax calls to the backend - # - ########## - is_calibrated_check: () => - @backend.post('is_student_calibrated', {location: @location}, @calibration_check_callback) - - fetch_calibration_essay: () => - @backend.post('show_calibration_essay', {location: @location}, @render_calibration) - - fetch_submission_essay: () => - @backend.post('get_next_submission', {location: @location}, @render_submission) - - - construct_data: () -> - if @tracking_changes() - feedback_content = @feedback_area.html() - else - feedback_content = @feedback_area.val() - - data = - rubric_scores: @rub.get_score_list() - score: @rub.get_total_score() - location: @location - submission_id: @essay_id_input.val() - submission_key: @submission_key_input.val() - feedback: feedback_content - submission_flagged: @flag_student_checkbox.is(':checked') - # hardcoding answer_unknown to false - answer_unknown: false - return data - - - submit_calibration_essay: ()=> - data = @construct_data() - @submit_button.hide() - @backend.post('save_calibration_essay', data, @calibration_callback) - - submit_grade: () => - data = @construct_data() - @submit_button.hide() - @backend.post('save_grade', data, @submission_callback) - - - ########## - # - # Callbacks for various events - # - ########## - - remove_flag: () => - @flag_student_checkbox.removeAttr("checked") - @close_dialog_box() - @submit_button.attr('disabled', true) - - close_dialog_box: () => - $(@flag_submission_confirmation_sel).dialog('close') - - flag_box_checked: () => - if @flag_student_checkbox.is(':checked') - @$(@flag_submission_confirmation_sel).dialog({ height: 400, width: 400 }) - @submit_button.attr('disabled', false) - - # called after we perform an is_student_calibrated check - calibration_check_callback: (response) => - if 
response.success - # if we haven't been calibrating before - if response.calibrated and (@calibration == null or @calibration == false) - @calibration = false - @fetch_submission_essay() - # If we were calibrating before and no longer need to, - # show the interstitial page - else if response.calibrated and @calibration == true - @calibration = false - @render_interstitial_page() - else if not response.calibrated and @calibration==null - @calibration=true - @render_calibration_interstitial_page() - else - @calibration = true - @fetch_calibration_essay() - else if response.error - @render_error(response.error) - else - @render_error("Error contacting the grading service") - - - # called after we submit a calibration score - calibration_callback: (response) => - if response.success - @render_calibration_feedback(response) - else if response.error - @render_error(response.error) - else - @render_error("Error saving calibration score") - - # called after we submit a submission score - submission_callback: (response) => - if response.success - @is_calibrated_check() - @grading_message.fadeIn() - message = "

<p>Successfully saved your feedback. Fetching the next essay." - if response.required_done - message = message + " You have done the required number of peer assessments but may continue grading if you like." - message = message + "</p>
" - @grading_message.html(message) - else - if response.error - @render_error(response.error) - else - @render_error("Error occurred while submitting grade") - - # called after a grade is selected on the interface - graded_callback: (event) => - ev = @$(event.target).parent().parent() - ul = ev.parent().parent() - ul.find(".rubric-label-selected").removeClass('rubric-label-selected') - ev.addClass('rubric-label-selected') - # check to see whether or not any categories have not been scored - if @rub.check_complete() - # show button if we have scores for all categories - @grading_message.hide() - @show_submit_button() - @grade = @rub.get_total_score() - - keydown_handler: (event) => - #Previously, responses were submitted when hitting enter. Add in a modifier that ensures that ctrl+enter is needed. - if event.which == 17 && @is_ctrl==false - @is_ctrl=true - else if event.which == 13 && @submit_button.is(':visible') && @is_ctrl==true - if @calibration - @submit_calibration_essay() - else - @submit_grade() - - keyup_handler: (event) => - #Handle keyup event when ctrl key is released - if event.which == 17 && @is_ctrl==true - @is_ctrl=false - - - ########## - # - # Rendering methods and helpers - # - ########## - # renders a calibration essay - render_calibration: (response) => - if response.success - - # load in all the data - @submission_container.html("") - @render_submission_data(response) - # TODO: indicate that we're in calibration mode - @calibration_panel.addClass('current-state') - @grading_panel.removeClass('current-state') - - # Display the right text - # both versions of the text are written into the template itself - # we only need to show/hide the correct ones at the correct time - @calibration_panel.find(@calibration_text_sel).show() - @grading_panel.find(@calibration_text_sel).show() - @calibration_panel.find(@grading_text_sel).hide() - @grading_panel.find(@grading_text_sel).hide() - @flag_student_container.hide() - @peer_grading_instructions.hide() - @feedback_area.attr('disabled', true) - feedback_text = "Once you are done learning to grade, and are grading your peers' work, you will be asked to share written feedback with them in addition to scoring them." 
- if @tracking_changes() - @ice_legend.hide() - @feedback_area.attr('contenteditable', false) - @feedback_area.text(feedback_text) - else - @feedback_area.val(feedback_text) - @submit_button.show() - @submit_button.unbind('click') - @submit_button.click @submit_calibration_essay - @submit_button.attr('disabled', true) - @scroll_to_top() - else if response.error - @render_error(response.error) - else - @render_error("An error occurred while retrieving the next calibration essay") - - tracking_changes: () => - return @grading_wrapper.data('track-changes') == true - - # Renders a student submission to be graded - render_submission: (response) => - if response.success - @submit_button.hide() - @submission_container.html("") - @render_submission_data(response) - - @calibration_panel.removeClass('current-state') - @grading_panel.addClass('current-state') - - # Display the correct text - # both versions of the text are written into the template itself - # we only need to show/hide the correct ones at the correct time - @calibration_panel.find(@calibration_text_sel).hide() - @grading_panel.find(@calibration_text_sel).hide() - @calibration_panel.find(@grading_text_sel).show() - @grading_panel.find(@grading_text_sel).show() - @flag_student_container.show() - @peer_grading_instructions.show() - if @tracking_changes() - @ice_legend.show() - @feedback_area.html(@make_paragraphs(response.student_response)) - @change_tracker.rebindTracker() - else - @feedback_area.val("") - @feedback_area.attr('disabled', false) - @flag_student_checkbox.removeAttr("checked") - @submit_button.show() - @submit_button.unbind('click') - @submit_button.click @submit_grade - @submit_button.attr('disabled', true) - @scroll_to_top() - else if response.error - @render_error(response.error) - else - @render_error("An error occurred when retrieving the next submission.") - - make_paragraphs: (text) -> - paragraph_split = text.split(/\n\s*\n/) - new_text = '' - for paragraph in paragraph_split - new_text += "

<p>#{paragraph}</p>

" - return new_text - - # render common information between calibration and grading - render_submission_data: (response) => - @content_panel.show() - @error_container.hide() - - @submission_container.append(@make_paragraphs(response.student_response)) - @prompt_container.html(response.prompt) - @rubric_selection_container.html(response.rubric) - @submission_key_input.val(response.submission_key) - @essay_id_input.val(response.submission_id) - @setup_score_selection(response.max_score) - - @submit_button.hide() - @action_button.hide() - @calibration_feedback_panel.hide() - @rub = new Rubric(@el) - @rub.initialize(@location) - - - render_calibration_feedback: (response) => - # display correct grade - @calibration_feedback_panel.slideDown() - calibration_wrapper = @$(@calibration_feedback_wrapper_sel) - calibration_wrapper.html("

<p>The score you gave was: #{@grade}. The instructor score is: #{response.actual_score}</p>

") - - score = parseInt(@grade) - actual_score = parseInt(response.actual_score) - - if score == actual_score - calibration_wrapper.append("

<p>Your score matches the instructor score!</p>

") - else - calibration_wrapper.append("

<p>You may want to review the rubric again.</p>

") - - if response.actual_rubric != undefined - calibration_wrapper.append("
<p>Instructor Scored Rubric: #{response.actual_rubric}</p>
") - if response.actual_feedback.feedback!=undefined - calibration_wrapper.append("
<p>Instructor Feedback: #{response.actual_feedback}</p>
") - - # disable score selection and submission from the grading interface - @$("input[name='score-selection']").attr('disabled', true) - @submit_button.hide() - @calibration_feedback_button.show() - - render_interstitial_page: () => - @content_panel.hide() - @grading_message.hide() - @interstitial_page.show() - - render_calibration_interstitial_page: () => - @content_panel.hide() - @action_button.hide() - @calibration_interstitial_page.show() - - render_error: (error_message) => - @error_container.show() - @calibration_feedback_panel.hide() - @error_container.html(error_message) - @content_panel.hide() - @action_button.show() - - show_submit_button: () => - @submit_button.attr('disabled', false) - @submit_button.show() - - setup_score_selection: (max_score) => - # And now hook up an event handler again - @$("input[class='score-selection']").change @graded_callback - - gentle_alert: (msg) => - @grading_message.fadeIn() - @grading_message.html("

" + msg + "

") - - collapse_question: (event) => - @prompt_container.slideToggle() - @prompt_container.toggleClass('open') - if @question_header.text() == "Hide Question" - new_text = "Show Question" - Logger.log 'oe_hide_question', {location: @location} - else - Logger.log 'oe_show_question', {location: @location} - new_text = "Hide Question" - @question_header.text(new_text) - return false - - scroll_to_top: () => - $('html, body').animate({ - scrollTop: $(".peer-grading").offset().top - }, 200) diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py index b635a67110..2965b2e5ca 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py @@ -210,13 +210,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): '''Make sure the course objects loaded properly''' courses = self.draft_store.get_courses() - # note, the number of courses expected is really - # 6, but due to a lack of cache flushing between - # test case runs, we will get back 7. - # When we fix the caching issue, we should reduce this - # to 6 and remove the 'treexport_peer_component' course_id - # from the list below - assert_equals(len(courses), 7) + assert_equals(len(courses), 6) course_ids = [course.id for course in courses] for course_key in [ @@ -229,9 +223,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): ['edX', 'test_unicode', '2012_Fall'], ['edX', 'toy', '2012_Fall'], ['guestx', 'foo', 'bar'], - # This course below is due to a caching issue in the modulestore - # which is not cleared between test runs. This means - ['edX', 'treeexport_peer_component', 'export_peer_component'], ] ]: assert_in(course_key, course_ids) @@ -263,13 +254,7 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): assert_in(course_key, course_ids) courses = self.draft_store.get_courses(org='edX') - # note, the number of courses expected is really - # 5, but due to a lack of cache flushing between - # test case runs, we will get back 6. - # When we fix the caching issue, we should reduce this - # to 6 and remove the 'treexport_peer_component' course_id - # from the list below - assert_equals(len(courses), 6) + assert_equals(len(courses), 5) course_ids = [course.id for course in courses] for course_key in [ @@ -280,9 +265,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): ['edX', 'test_import_course', '2012_Fall'], ['edX', 'test_unicode', '2012_Fall'], ['edX', 'toy', '2012_Fall'], - # This course below is due to a caching issue in the modulestore - # which is not cleared between test runs. This means - ['edX', 'treeexport_peer_component', 'export_peer_component'], ] ]: assert_in(course_key, course_ids) @@ -678,57 +660,6 @@ class TestMongoModuleStore(TestMongoModuleStoreBase): self.assertEqual(component.published_on, published_date) self.assertEqual(component.published_by, published_by) - def test_export_course_with_peer_component(self): - """ - Test export course when link_to_location is given in peer grading interface settings. 
- """ - - name = "export_peer_component" - - locations = self._create_test_tree(name) - - # Insert the test block directly into the module store - problem_location = Location('edX', 'tree{}'.format(name), name, 'combinedopenended', 'test_peer_problem') - - self.draft_store.create_child( - self.dummy_user, - locations["child"], - problem_location.block_type, - block_id=problem_location.block_id - ) - - interface_location = Location('edX', 'tree{}'.format(name), name, 'peergrading', 'test_peer_interface') - - self.draft_store.create_child( - self.dummy_user, - locations["child"], - interface_location.block_type, - block_id=interface_location.block_id - ) - - self.draft_store._update_single_item( - as_draft(interface_location), - { - 'definition.data': {}, - 'metadata': { - 'link_to_location': unicode(problem_location), - 'use_for_single_location': True, - }, - }, - ) - - component = self.draft_store.get_item(interface_location) - self.assertEqual(unicode(component.link_to_location), unicode(problem_location)) - - root_dir = path(mkdtemp()) - self.addCleanup(shutil.rmtree, root_dir) - - # export_course_to_xml should work. - export_course_to_xml( - self.draft_store, self.content_store, interface_location.course_key, - root_dir, 'test_export' - ) - def test_draft_modulestore_create_child_with_position(self): """ This test is designed to hit a specific set of use cases having to do with diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo_call_count.py b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo_call_count.py index 4e9b336bc8..b7384575ff 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo_call_count.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo_call_count.py @@ -95,27 +95,27 @@ class CountMongoCallsCourseTraversal(TestCase): # These two lines show the way this traversal *should* be done # (if you'll eventually access all the fields and load all the definitions anyway). # 'lazy' does not matter in old Mongo. - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, True, 189), - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, True, 189), - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, True, 387), - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, True, 387), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, True, 175), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, True, 175), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, True, 359), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, True, 359), # As shown in these two lines: whether or not the XBlock fields are accessed, # the same number of mongo calls are made in old Mongo for depth=None. - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, False, 189), - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, False, 189), - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, False, 387), - (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, False, 387), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, False, 175), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, False, 175), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, False, 359), + (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, False, 359), # The line below shows the way this traversal *should* be done # (if you'll eventually access all the fields and load all the definitions anyway). 
(MIXED_SPLIT_MODULESTORE_BUILDER, None, False, True, 4), - (MIXED_SPLIT_MODULESTORE_BUILDER, None, True, True, 41), - (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, True, 143), - (MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, True, 41), + (MIXED_SPLIT_MODULESTORE_BUILDER, None, True, True, 38), + (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, True, 131), + (MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, True, 38), (MIXED_SPLIT_MODULESTORE_BUILDER, None, False, False, 4), (MIXED_SPLIT_MODULESTORE_BUILDER, None, True, False, 4), # TODO: The call count below seems like a bug - should be 4? # Seems to be related to using self.lazy in CachingDescriptorSystem.get_module_data(). - (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, False, 143), + (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, False, 131), (MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, False, 4) ) @ddt.unpack diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py deleted file mode 100644 index 9aa77fde52..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'vik' diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py deleted file mode 100644 index b4f9b46d5d..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py +++ /dev/null @@ -1,1246 +0,0 @@ -import json -import logging -import traceback - -from lxml import etree - -from xmodule.timeinfo import TimeInfo -from xmodule.capa_module import ComplexEncoder -from xmodule.progress import Progress -from xmodule.stringify import stringify_children -from xmodule.open_ended_grading_classes import self_assessment_module -from xmodule.open_ended_grading_classes import open_ended_module -from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST -from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, MockPeerGradingService -from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild -from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError - -log = logging.getLogger("edx.courseware") - -# Set the default number of max attempts. Should be 1 for production -# Set higher for debugging/testing -# attempts specified in xml definition overrides this. -MAX_ATTEMPTS = 1 - -# The highest score allowed for the overall xmodule and for each rubric point -MAX_SCORE_ALLOWED = 50 - -# If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress -# Metadata overrides this. -IS_SCORED = False - -# If true, then default behavior is to require a file upload or pasted link from a student for this problem. -# Metadata overrides this. -ACCEPT_FILE_UPLOAD = False - -# Contains all reasonable bool and case combinations of True -TRUE_DICT = ["True", True, "TRUE", "true"] - -# Make '_' a no-op so we can scrape strings. 
Using lambda instead of -# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file -_ = lambda text: text - -HUMAN_TASK_TYPE = { - # Translators: "Self" is used to denote an openended response that is self-graded - 'selfassessment': _("Self"), - 'openended': "edX", - # Translators: "AI" is used to denote an openended response that is machine-graded - 'ml_grading.conf': _("AI"), - # Translators: "Peer" is used to denote an openended response that is peer-graded - 'peer_grading.conf': _("Peer"), -} - -HUMAN_STATES = { - # Translators: "Not started" is used to communicate to a student that their response - # has not yet been graded - 'intitial': _("Not started."), - # Translators: "Being scored." is used to communicate to a student that their response - # are in the process of being scored - 'assessing': _("Being scored."), - # Translators: "Scoring finished" is used to communicate to a student that their response - # have been scored, but the full scoring process is not yet complete - 'intermediate_done': _("Scoring finished."), - # Translators: "Complete" is used to communicate to a student that their - # openended response has been fully scored - 'done': _("Complete."), -} - -# Default value that controls whether or not to skip basic spelling checks in the controller -# Metadata overrides this -SKIP_BASIC_CHECKS = False - - -class CombinedOpenEndedV1Module(object): - """ - This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). - It transitions between problems, and support arbitrary ordering. - Each combined open ended module contains one or multiple "child" modules. - Child modules track their own state, and can transition between states. They also implement get_html and - handle_ajax. - The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess - ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem) - ajax actions implemented by all children are: - 'save_answer' -- Saves the student answer - 'save_assessment' -- Saves the student assessment (or external grader assessment) - 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) - ajax actions implemented by combined open ended module are: - 'reset' -- resets the whole combined open ended module and returns to the first child moduleresource_string - 'next_problem' -- moves to the next child module - - Types of children. Task is synonymous with child module, so each combined open ended module - incorporates multiple children (tasks): - openendedmodule - selfassessmentmodule - """ - STATE_VERSION = 1 - - # states - INITIAL = 'initial' - ASSESSING = 'assessing' - INTERMEDIATE_DONE = 'intermediate_done' - DONE = 'done' - - # Where the templates live for this problem - TEMPLATE_DIR = "combinedopenended" - - # hack: included to make this class act enough like an xblock to get i18n - _services_requested = {"i18n": "need"} - _combined_services = _services_requested - - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs): - - """ - Definition file should have one or many task blocks, a rubric block, and a prompt block. See DEFAULT_DATA in combined_open_ended_module for a sample. 
- - """ - self.instance_state = instance_state - self.display_name = instance_state.get('display_name', "Open Ended") - - # We need to set the location here so the child modules can use it - system.set('location', location) - self.system = system - - # Tells the system which xml definition to load - self.current_task_number = instance_state.get('current_task_number', 0) - # This loads the states of the individual children - self.task_states = instance_state.get('task_states', []) - #This gets any old task states that have been persisted after the instructor changed the tasks. - self.old_task_states = instance_state.get('old_task_states', []) - # Overall state of the combined open ended module - self.state = instance_state.get('state', self.INITIAL) - - self.student_attempts = instance_state.get('student_attempts', 0) - self.weight = instance_state.get('weight', 1) - - # Allow reset is true if student has failed the criteria to move to the next child task - self.ready_to_reset = instance_state.get('ready_to_reset', False) - self.max_attempts = instance_state.get('max_attempts', MAX_ATTEMPTS) - self.is_scored = instance_state.get('graded', IS_SCORED) in TRUE_DICT - self.accept_file_upload = instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT - self.skip_basic_checks = instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT - - if system.open_ended_grading_interface: - self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system.render_template) - else: - self.peer_gs = MockPeerGradingService() - - self.required_peer_grading = instance_state.get('required_peer_grading', 3) - self.peer_grader_count = instance_state.get('peer_grader_count', 3) - self.min_to_calibrate = instance_state.get('min_to_calibrate', 3) - self.max_to_calibrate = instance_state.get('max_to_calibrate', 6) - self.peer_grade_finished_submissions_when_none_pending = instance_state.get( - 'peer_grade_finished_submissions_when_none_pending', False - ) - - due_date = instance_state.get('due', None) - grace_period_string = instance_state.get('graceperiod', None) - try: - self.timeinfo = TimeInfo(due_date, grace_period_string) - except Exception: - log.error("Error parsing due date information in location {0}".format(location)) - raise - self.display_due_date = self.timeinfo.display_due_date - - self.rubric_renderer = CombinedOpenEndedRubric(system.render_template, True) - rubric_string = stringify_children(definition['rubric']) - self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED) - - # Static data is passed to the child modules to render - self.static_data = { - 'max_score': self._max_score, - 'max_attempts': self.max_attempts, - 'prompt': definition['prompt'], - 'rubric': definition['rubric'], - 'display_name': self.display_name, - 'accept_file_upload': self.accept_file_upload, - 'close_date': self.timeinfo.close_date, - 's3_interface': self.system.s3_interface, - 'skip_basic_checks': self.skip_basic_checks, - 'control': { - 'required_peer_grading': self.required_peer_grading, - 'peer_grader_count': self.peer_grader_count, - 'min_to_calibrate': self.min_to_calibrate, - 'max_to_calibrate': self.max_to_calibrate, - 'peer_grade_finished_submissions_when_none_pending': ( - self.peer_grade_finished_submissions_when_none_pending - ), - } - } - - self.task_xml = definition['task_xml'] - self.location = location - self.fix_invalid_state() - self.setup_next_task() - - def validate_task_states(self, tasks_xml, 
task_states): - """ - Check whether the provided task_states are valid for the supplied task_xml. - - Returns a list of messages indicating what is invalid about the state. - If the list is empty, then the state is valid - """ - msgs = [] - #Loop through each task state and make sure it matches the xml definition - for task_xml, task_state in zip(tasks_xml, task_states): - tag_name = self.get_tag_name(task_xml) - children = self.child_modules() - task_descriptor = children['descriptors'][tag_name](self.system) - task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system) - try: - task = children['modules'][tag_name]( - self.system, - self.location, - task_parsed_xml, - task_descriptor, - self.static_data, - instance_state=task_state, - ) - #Loop through each attempt of the task and see if it is valid. - for attempt in task.child_history: - if "post_assessment" not in attempt: - continue - post_assessment = attempt['post_assessment'] - try: - post_assessment = json.loads(post_assessment) - except ValueError: - #This is okay, the value may or may not be json encoded. - pass - if tag_name == "openended" and isinstance(post_assessment, list): - msgs.append("Type is open ended and post assessment is a list.") - break - elif tag_name == "selfassessment" and not isinstance(post_assessment, list): - msgs.append("Type is self assessment and post assessment is not a list.") - break - #See if we can properly render the task. Will go into the exception clause below if not. - task.get_html(self.system) - except Exception: - #If one task doesn't match, the state is invalid. - msgs.append("Could not parse task with xml {xml!r} and states {state!r}: {err}".format( - xml=task_xml, - state=task_state, - err=traceback.format_exc() - )) - break - return msgs - - def is_initial_child_state(self, task_child): - """ - Returns true if this is a child task in an initial configuration - """ - task_child = json.loads(task_child) - return ( - task_child['child_state'] == self.INITIAL and - task_child['child_history'] == [] - ) - - def is_reset_task_states(self, task_state): - """ - Returns True if this task_state is from something that was just reset - """ - return all(self.is_initial_child_state(child) for child in task_state) - - def states_sort_key(self, idx_task_states): - """ - Return a key for sorting a list of indexed task_states, by how far the student got - through the tasks, what their highest score was, and then the index of the submission. 
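The ordering that states_sort_key describes here is what fix_invalid_state (below) relies on to decide which candidate task-state list to keep. A condensed Python illustration of the key, simplified in one respect: the deleted method instantiates the final child module to read its scores, whereas this sketch assumes the decoded state dict already carries them.

    # Rank values mirror the OpenEndedChild progression states.
    STATE_RANK = {'initial': 0, 'assessing': 1, 'post_assessment': 2, 'done': 3}

    def states_sort_key(indexed_states):
        # indexed_states is an (index, task_states) pair from enumerate().
        idx, task_states = indexed_states
        if not task_states:
            return (0, 0, STATE_RANK['initial'], idx)
        final = task_states[-1]  # assumed already-decoded dict for the last task
        best_score = max(final.get('scores') or [0])
        return (
            len(task_states),                                   # how far the student got
            best_score,                                         # best score on the final task
            STATE_RANK.get(final.get('child_state', 'initial'), 0),
            idx,                                                # most recent submission wins ties
        )

    # sorted(enumerate(candidates), key=states_sort_key, reverse=True)[0]
    # picks the state list that fix_invalid_state keeps.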
- """ - idx, task_states = idx_task_states - - state_values = { - OpenEndedChild.INITIAL: 0, - OpenEndedChild.ASSESSING: 1, - OpenEndedChild.POST_ASSESSMENT: 2, - OpenEndedChild.DONE: 3 - } - - if not task_states: - return (0, 0, state_values[OpenEndedChild.INITIAL], idx) - - final_task_xml = self.task_xml[-1] - final_child_state_json = task_states[-1] - final_child_state = json.loads(final_child_state_json) - - tag_name = self.get_tag_name(final_task_xml) - children = self.child_modules() - task_descriptor = children['descriptors'][tag_name](self.system) - task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(final_task_xml), self.system) - task = children['modules'][tag_name]( - self.system, - self.location, - task_parsed_xml, - task_descriptor, - self.static_data, - instance_state=final_child_state_json, - ) - scores = task.all_scores() - if scores: - best_score = max(scores) - else: - best_score = 0 - return ( - len(task_states), - best_score, - state_values.get(final_child_state.get('child_state', OpenEndedChild.INITIAL), 0), - idx - ) - - def fix_invalid_state(self): - """ - Sometimes a teacher will change the xml definition of a problem in Studio. - This means that the state passed to the module is invalid. - If that is the case, moved it to old_task_states and delete task_states. - """ - - # If we are on a task that is greater than the number of available tasks, - # it is an invalid state. If the current task number is greater than the number of tasks - # we have in the definition, our state is invalid. - if self.current_task_number > len(self.task_states) or self.current_task_number > len(self.task_xml): - self.current_task_number = max(min(len(self.task_states), len(self.task_xml)) - 1, 0) - #If the length of the task xml is less than the length of the task states, state is invalid - if len(self.task_xml) < len(self.task_states): - self.current_task_number = len(self.task_xml) - 1 - self.task_states = self.task_states[:len(self.task_xml)] - - if not self.old_task_states and not self.task_states: - # No validation needed when a student first looks at the problem - return - - # Pick out of self.task_states and self.old_task_states the state that is - # a) valid for the current task definition - # b) not the result of a reset due to not having a valid task state - # c) has the highest total score - # d) is the most recent (if the other two conditions are met) - - valid_states = [ - task_states - for task_states - in self.old_task_states + [self.task_states] - if ( - len(self.validate_task_states(self.task_xml, task_states)) == 0 and - not self.is_reset_task_states(task_states) - ) - ] - - # If there are no valid states, don't try and use an old state - if len(valid_states) == 0: - # If this isn't an initial task state, then reset to an initial state - if not self.is_reset_task_states(self.task_states): - self.reset_task_state('\n'.join(self.validate_task_states(self.task_xml, self.task_states))) - - return - - sorted_states = sorted(enumerate(valid_states), key=self.states_sort_key, reverse=True) - idx, best_task_states = sorted_states[0] - - if best_task_states == self.task_states: - return - - log.warning( - "Updating current task state for %s to %r for student with anonymous id %r", - self.system.location, - best_task_states, - self.system.anonymous_student_id - ) - - self.old_task_states.remove(best_task_states) - self.old_task_states.append(self.task_states) - self.task_states = best_task_states - - # The state is ASSESSING unless all of the children are done, 
or all - # of the children haven't been started yet - children = [json.loads(child) for child in best_task_states] - if all(child['child_state'] == self.DONE for child in children): - self.state = self.DONE - elif all(child['child_state'] == self.INITIAL for child in children): - self.state = self.INITIAL - else: - self.state = self.ASSESSING - - # The current task number is the index of the last completed child + 1, - # limited by the number of tasks - last_completed_child = next((i for i, child in reversed(list(enumerate(children))) if child['child_state'] == self.DONE), 0) - self.current_task_number = min(last_completed_child + 1, len(best_task_states) - 1) - - def create_task(self, task_state, task_xml): - """Create task object for given task state and task xml.""" - - tag_name = self.get_tag_name(task_xml) - children = self.child_modules() - task_descriptor = children['descriptors'][tag_name](self.system) - task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system) - task = children['modules'][tag_name]( - self.system, - self.location, - task_parsed_xml, - task_descriptor, - self.static_data, - instance_state=task_state, - ) - return task - - def get_task_number(self, task_number): - """Return task object at task_index.""" - - task_states_count = len(self.task_states) - if task_states_count > 0 and task_number < task_states_count: - task_state = self.task_states[task_number] - task_xml = self.task_xml[task_number] - return self.create_task(task_state, task_xml) - return None - - def reset_task_state(self, message=""): - """ - Resets the task states. Moves current task state to an old_state variable, and then makes the task number 0. - :param message: A message to put in the log. - :return: None - """ - info_message = "Combined open ended user state for user {0} in location {1} was invalid. It has been reset, and you now have a new attempt. {2}".format(self.system.anonymous_student_id, self.location.to_deprecated_string(), message) - self.current_task_number = 0 - self.student_attempts = 0 - self.old_task_states.append(self.task_states) - self.task_states = [] - log.info(info_message) - - def get_tag_name(self, xml): - """ - Gets the tag name of a given xml block. - Input: XML string - Output: The name of the root tag - """ - tag = etree.fromstring(xml).tag - return tag - - def overwrite_state(self, current_task_state): - """ - Overwrites an instance state and sets the latest response to the current response. This is used - to ensure that the student response is carried over from the first child to the rest. - Input: Task state json string - Output: Task state json string - """ - last_response_data = self.get_last_response(self.current_task_number - 1) - last_response = last_response_data['response'] - - loaded_task_state = json.loads(current_task_state) - if loaded_task_state['child_state'] == self.INITIAL: - loaded_task_state['child_state'] = self.ASSESSING - loaded_task_state['child_created'] = True - loaded_task_state['child_history'].append({'answer': last_response}) - current_task_state = json.dumps(loaded_task_state) - return current_task_state - - def child_modules(self): - """ - Returns the constructors associated with the child modules in a dictionary. 
This makes writing functions - simpler (saves code duplication) - Input: None - Output: A dictionary of dictionaries containing the descriptor functions and module functions - """ - child_modules = { - 'openended': open_ended_module.OpenEndedModule, - 'selfassessment': self_assessment_module.SelfAssessmentModule, - } - child_descriptors = { - 'openended': open_ended_module.OpenEndedDescriptor, - 'selfassessment': self_assessment_module.SelfAssessmentDescriptor, - } - children = { - 'modules': child_modules, - 'descriptors': child_descriptors, - } - return children - - def setup_next_task(self, reset=False): - """ - Sets up the next task for the module. Creates an instance state if none exists, carries over the answer - from the last instance state to the next if needed. - Input: A boolean indicating whether or not the reset function is calling. - Output: Boolean True (not useful right now) - """ - current_task_state = None - if len(self.task_states) > self.current_task_number: - current_task_state = self.task_states[self.current_task_number] - - self.current_task_xml = self.task_xml[self.current_task_number] - - if self.current_task_number > 0: - self.ready_to_reset = self.check_allow_reset() - if self.ready_to_reset: - self.current_task_number = self.current_task_number - 1 - - current_task_type = self.get_tag_name(self.current_task_xml) - - children = self.child_modules() - child_task_module = children['modules'][current_task_type] - - self.current_task_descriptor = children['descriptors'][current_task_type](self.system) - - # This is the xml object created from the xml definition of the current task - etree_xml = etree.fromstring(self.current_task_xml) - - # This sends the etree_xml object through the descriptor module of the current task, and - # returns the xml parsed by the descriptor - self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system) - if current_task_state is None and self.current_task_number == 0: - self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, - self.static_data) - self.task_states.append(self.current_task.get_instance_state()) - self.state = self.ASSESSING - elif current_task_state is None and self.current_task_number > 0: - last_response_data = self.get_last_response(self.current_task_number - 1) - last_response = last_response_data['response'] - current_task_state = json.dumps({ - 'child_state': self.ASSESSING, - 'version': self.STATE_VERSION, - 'max_score': self._max_score, - 'child_attempts': 0, - 'child_created': True, - 'child_history': [{'answer': last_response}], - }) - self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, - self.static_data, - instance_state=current_task_state) - self.task_states.append(self.current_task.get_instance_state()) - self.state = self.ASSESSING - else: - if self.current_task_number > 0 and not reset: - current_task_state = self.overwrite_state(current_task_state) - self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, - self.static_data, - instance_state=current_task_state) - - return True - - def check_allow_reset(self): - """ - Checks to see if the student has passed the criteria to move to the next module. If not, sets - allow_reset to true and halts the student progress through the tasks. 
- Input: None - Output: the allow_reset attribute of the current module. - """ - if not self.ready_to_reset: - if self.current_task_number > 0: - last_response_data = self.get_last_response(self.current_task_number - 1) - current_response_data = self.get_current_attributes(self.current_task_number) - - if current_response_data['min_score_to_attempt'] > last_response_data['score'] or\ - current_response_data['max_score_to_attempt'] < last_response_data['score']: - self.state = self.DONE - self.ready_to_reset = True - - return self.ready_to_reset - - def get_context(self): - """ - Generates a context dictionary that is used to render html. - Input: None - Output: A dictionary that can be rendered into the combined open ended template. - """ - task_html = self.get_html_base() - # set context variables and render template - ugettext = self.system.service(self, "i18n").ugettext - - context = { - 'items': [{'content': task_html}], - 'ajax_url': self.system.ajax_url, - 'allow_reset': self.ready_to_reset, - 'state': self.state, - 'task_count': len(self.task_xml), - 'task_number': self.current_task_number + 1, - 'status': ugettext(self.get_status(False)), # pylint: disable=translation-of-non-string - 'display_name': self.display_name, - 'accept_file_upload': self.accept_file_upload, - 'location': self.location, - 'legend_list': LEGEND_LIST, - 'human_state': ugettext(HUMAN_STATES.get(self.state, HUMAN_STATES["intitial"])), # pylint: disable=translation-of-non-string - 'is_staff': self.system.user_is_staff, - } - - return context - - def get_html(self): - """ - Gets HTML for rendering. - Input: None - Output: rendered html - """ - context = self.get_context() - html = self.system.render_template( - '{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context - ) - return html - - def get_html_nonsystem(self): - """ - Gets HTML for rendering via AJAX. Does not use system, because system contains some additional - html, which is not appropriate for returning via ajax calls. - Input: None - Output: HTML rendered directly via Mako - """ - context = self.get_context() - html = self.system.render_template( - '{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context - ) - return html - - def get_html_base(self): - """ - Gets the HTML associated with the current child task - Input: None - Output: Child task HTML - """ - self.update_task_states() - return self.current_task.get_html(self.system) - - def get_html_ajax(self, data): - """ - Get HTML in AJAX callback - data - Needed to preserve AJAX structure - Output: Dictionary with html attribute - """ - return {'html': self.get_html()} - - def get_current_attributes(self, task_number): - """ - Gets the min and max score to attempt attributes of the specified task. - Input: The number of the task. - Output: The minimum and maximum scores needed to move on to the specified task. - """ - task_xml = self.task_xml[task_number] - etree_xml = etree.fromstring(task_xml) - min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) - max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) - return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt} - - def get_last_response(self, task_number): - """ - Returns data associated with the specified task number, such as the last response, score, etc. - Input: The number of the task. - Output: A dictionary that contains information about the specified task. 
- """ - last_response = "" - task_state = self.task_states[task_number] - task_xml = self.task_xml[task_number] - task_type = self.get_tag_name(task_xml) - - children = self.child_modules() - - task_descriptor = children['descriptors'][task_type](self.system) - etree_xml = etree.fromstring(task_xml) - - min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) - max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) - - task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system) - task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, - self.static_data, instance_state=task_state) - last_response = task.latest_answer() - last_score = task.latest_score() - all_scores = task.all_scores() - last_post_assessment = task.latest_post_assessment(self.system) - last_post_feedback = "" - feedback_dicts = [{}] - grader_ids = [0] - submission_ids = [0] - if task_type == "openended": - last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False) - if isinstance(last_post_assessment, list): - eval_list = [] - for assess in last_post_assessment: - eval_list.append(task.format_feedback_with_evaluation(self.system, assess)) - last_post_evaluation = "".join(eval_list) - else: - last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment) - last_post_assessment = last_post_evaluation - try: - rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', "{}"), self.system) - except Exception: - log.debug("Could not parse rubric data from child history. " - "Likely we have not yet initialized a previous step, so this is perfectly fine.") - rubric_data = {} - rubric_scores = rubric_data.get('rubric_scores') - grader_types = rubric_data.get('grader_types') - feedback_items = rubric_data.get('feedback_items') - feedback_dicts = rubric_data.get('feedback_dicts') - grader_ids = rubric_data.get('grader_ids') - submission_ids = rubric_data.get('submission_ids') - elif task_type == "selfassessment": - rubric_scores = last_post_assessment - grader_types = ['SA'] - feedback_items = [''] - last_post_assessment = "" - last_correctness = task.is_last_response_correct() - max_score = task.max_score() - state = task.child_state - if task_type in HUMAN_TASK_TYPE: - human_task_name = HUMAN_TASK_TYPE[task_type] - else: - human_task_name = task_type - - if state in task.HUMAN_NAMES: - human_state = task.HUMAN_NAMES[state] - else: - human_state = state - if grader_types is not None and len(grader_types) > 0: - grader_type = grader_types[0] - else: - grader_type = "IN" - grader_types = ["IN"] - - if grader_type in HUMAN_GRADER_TYPE: - human_grader_name = HUMAN_GRADER_TYPE[grader_type] - else: - human_grader_name = grader_type - - last_response_dict = { - 'response': last_response, - 'score': last_score, - 'all_scores': all_scores, - 'post_assessment': last_post_assessment, - 'type': task_type, - 'max_score': max_score, - 'state': state, - 'human_state': human_state, - 'human_task': human_task_name, - 'correct': last_correctness, - 'min_score_to_attempt': min_score_to_attempt, - 'max_score_to_attempt': max_score_to_attempt, - 'rubric_scores': rubric_scores, - 'grader_types': grader_types, - 'feedback_items': feedback_items, - 'grader_type': grader_type, - 'human_grader_type': human_grader_name, - 'feedback_dicts': feedback_dicts, - 'grader_ids': grader_ids, - 'submission_ids': submission_ids, - 'success': True - } - 
return last_response_dict - - def extract_human_name_from_task(self, task_xml): - """ - Given the xml for a task, pull out the human name for it. - Input: xml string - Output: a human readable task name (ie Self Assessment) - """ - tree = etree.fromstring(task_xml) - payload = tree.xpath("/openended/openendedparam/grader_payload") - if len(payload) == 0: - task_name = "selfassessment" - else: - inner_payload = json.loads(payload[0].text) - task_name = inner_payload['grader_settings'] - - human_task = HUMAN_TASK_TYPE[task_name] - return human_task - - def update_task_states(self): - """ - Updates the task state of the combined open ended module with the task state of the current child module. - Input: None - Output: boolean indicating whether or not the task state changed. - """ - changed = False - if not self.ready_to_reset: - self.task_states[self.current_task_number] = self.current_task.get_instance_state() - current_task_state = json.loads(self.task_states[self.current_task_number]) - if current_task_state['child_state'] == self.DONE: - self.current_task_number += 1 - if self.current_task_number >= (len(self.task_xml)): - self.state = self.DONE - self.current_task_number = len(self.task_xml) - 1 - else: - self.state = self.INITIAL - changed = True - self.setup_next_task() - return changed - - def update_task_states_ajax(self, return_html): - """ - Runs the update task states function for ajax calls. Currently the same as update_task_states - Input: The html returned by the handle_ajax function of the child - Output: New html that should be rendered - """ - changed = self.update_task_states() - if changed: - pass - return return_html - - def check_if_student_has_done_needed_grading(self): - """ - Checks with the ORA server to see if the student has completed the needed peer grading to be shown their grade. - For example, if a student submits one response, and three peers grade their response, the student - cannot see their grades and feedback unless they reciprocate. - Output: - success - boolean indicator of success - allowed_to_submit - boolean indicator of whether student has done their needed grading or not - error_message - If not success, explains why - """ - student_id = self.system.anonymous_student_id - success = False - allowed_to_submit = True - try: - response = self.peer_gs.get_data_for_location(self.location, student_id) - count_graded = response['count_graded'] - count_required = response['count_required'] - student_sub_count = response['student_sub_count'] - count_available = response['count_available'] - success = True - except GradingServiceError: - # This is a dev_facing_error - log.error("Could not contact external open ended graders for location {0} and student {1}".format( - self.location, student_id)) - # This is a student_facing_error - error_message = "Could not contact the graders. Please notify course staff." - return success, allowed_to_submit, error_message - except KeyError: - log.error("Invalid response from grading server for location {0} and student {1}".format(self.location, student_id)) - error_message = "Received invalid response from the graders. Please notify course staff." - return success, allowed_to_submit, error_message - if count_graded >= count_required or count_available == 0: - error_message = "" - return success, allowed_to_submit, error_message - else: - allowed_to_submit = False - # This is a student_facing_error - error_string = ("
<h4>Feedback not available yet</h4>" - "<p>You need to peer grade {0} more submissions in order to see your feedback.</p>" - "<p>You have graded responses from {1} students, and {2} students have graded your submissions.</p>" - "<p>You have made {3} submissions.</p>
") - error_message = error_string.format(count_required - count_graded, count_graded, count_required, - student_sub_count) - return success, allowed_to_submit, error_message - - def get_rubric(self, _data): - """ - Gets the results of a given grader via ajax. - Input: AJAX data dictionary - Output: Dictionary to be rendered via ajax that contains the result html. - """ - ugettext = self.system.service(self, "i18n").ugettext - all_responses = [] - success, can_see_rubric, error = self.check_if_student_has_done_needed_grading() - if not can_see_rubric: - return { - 'html': self.system.render_template( - '{0}/combined_open_ended_hidden_results.html'.format(self.TEMPLATE_DIR), - {'error': error}), - 'success': True, - 'hide_reset': True - } - - contexts = [] - rubric_number = self.current_task_number - if self.ready_to_reset: - rubric_number += 1 - response = self.get_last_response(rubric_number) - score_length = len(response['grader_types']) - for z in xrange(score_length): - if response['grader_types'][z] in HUMAN_GRADER_TYPE: - try: - feedback = response['feedback_dicts'][z].get('feedback', '') - except TypeError: - return {'success': False} - rubric_scores = [[response['rubric_scores'][z]]] - grader_types = [[response['grader_types'][z]]] - feedback_items = [[response['feedback_items'][z]]] - rubric_html = self.rubric_renderer.render_combined_rubric( - stringify_children(self.static_data['rubric']), - rubric_scores, - grader_types, - feedback_items - ) - contexts.append({ - 'result': rubric_html, - # Translators: "Scored rubric" appears to a user as part of a longer - # string that looks something like: "Scored rubric from grader 1". - # "Scored" is an adjective that modifies the noun "rubric". - # That longer string appears when a user is viewing a graded rubric - # returned from one of the graders of their openended response problem. - 'task_name': ugettext('Scored rubric'), - 'feedback': feedback - }) - - context = { - 'results': contexts, - } - html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context) - return {'html': html, 'success': True, 'hide_reset': False} - - def get_legend(self, _data): - """ - Gets the results of a given grader via ajax. - Input: AJAX data dictionary - Output: Dictionary to be rendered via ajax that contains the result html. - """ - context = { - 'legend_list': LEGEND_LIST, - } - html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context) - return {'html': html, 'success': True} - - def handle_ajax(self, dispatch, data): - """ - This is called by courseware.module_render, to handle an AJAX call. - "data" is request.POST. - - Returns a json dictionary: - { 'progress_changed' : True/False, - 'progress': 'none'/'in_progress'/'done', - } - """ - - handlers = { - 'next_problem': self.next_problem, - 'reset': self.reset, - 'get_combined_rubric': self.get_rubric, - 'get_legend': self.get_legend, - 'get_last_response': self.get_last_response_ajax, - 'get_current_state': self.get_current_state, - 'get_html': self.get_html_ajax, - } - - if dispatch not in handlers: - return_html = self.current_task.handle_ajax(dispatch, data, self.system) - return self.update_task_states_ajax(return_html) - - d = handlers[dispatch](data) - return json.dumps(d, cls=ComplexEncoder) - - def get_current_state(self, data): - """ - Gets the current state of the module. 
- """ - return self.get_context() - - def get_last_response_ajax(self, data): - """ - Get the last response via ajax callback - data - Needed to preserve ajax callback structure - Output: Last response dictionary - """ - return self.get_last_response(self.current_task_number) - - def next_problem(self, _data): - """ - Called via ajax to advance to the next problem. - Input: AJAX data request. - Output: Dictionary to be rendered - """ - self.update_task_states() - return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.ready_to_reset} - - def reset(self, data): - """ - If resetting is allowed, reset the state of the combined open ended module. - Input: AJAX data dictionary - Output: AJAX dictionary to tbe rendered - """ - ugettext = self.system.service(self, "i18n").ugettext - if self.state != self.DONE: - if not self.ready_to_reset: - return self.out_of_sync_error(data) - success, can_reset, error = self.check_if_student_has_done_needed_grading() - if not can_reset: - return {'error': error, 'success': False} - if self.student_attempts >= self.max_attempts - 1: - if self.student_attempts == self.max_attempts - 1: - self.student_attempts += 1 - return { - 'success': False, - # This is a student_facing_error - 'error': ugettext( - 'You have attempted this question {number_of_student_attempts} times. ' - 'You are only allowed to attempt it {max_number_of_attempts} times.' - ).format( - number_of_student_attempts=self.student_attempts, - max_number_of_attempts=self.max_attempts - ) - } - self.student_attempts += 1 - self.state = self.INITIAL - self.ready_to_reset = False - for i in xrange(len(self.task_xml)): - self.current_task_number = i - self.setup_next_task(reset=True) - self.current_task.reset(self.system) - self.task_states[self.current_task_number] = self.current_task.get_instance_state() - self.current_task_number = 0 - self.ready_to_reset = False - - self.setup_next_task() - return {'success': True, 'html': self.get_html_nonsystem()} - - def get_instance_state(self): - """ - Returns the current instance state. The module can be recreated from the instance state. - Input: None - Output: A dictionary containing the instance state. - """ - - state = { - 'version': self.STATE_VERSION, - 'current_task_number': self.current_task_number, - 'state': self.state, - 'task_states': self.task_states, - 'student_attempts': self.student_attempts, - 'ready_to_reset': self.ready_to_reset, - } - - return json.dumps(state) - - def get_status(self, render_via_ajax): - """ - Gets the status panel to be displayed at the top right. - Input: None - Output: The status html to be rendered - """ - ugettext = self.system.service(self, "i18n").ugettext - status_list = [] - current_task_human_name = "" - for i in xrange(len(self.task_xml)): - human_task_name = self.extract_human_name_from_task(self.task_xml[i]) - human_task_name = ugettext(human_task_name) # pylint: disable=translation-of-non-string - # Extract the name of the current task for screen readers. 
- if self.current_task_number == i: - current_task_human_name = human_task_name - task_data = { - 'task_number': i + 1, - 'human_task': human_task_name, - 'current': self.current_task_number == i - } - status_list.append(task_data) - - context = { - 'status_list': status_list, - 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, - 'legend_list': LEGEND_LIST, - 'render_via_ajax': render_via_ajax, - 'current_task_human_name': current_task_human_name, - } - status_html = self.system.render_template( - "{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context - ) - - return status_html - - def check_if_done_and_scored(self): - """ - Checks if the object is currently in a finished state (either student didn't meet criteria to move - to next step, in which case they are in the allow_reset state, or they are done with the question - entirely, in which case they will be in the self.DONE state), and if it is scored or not. - @return: Boolean corresponding to the above. - """ - return (self.state == self.DONE or self.ready_to_reset) and self.is_scored - - def get_weight(self): - """ - Return the weight of the problem. The old default weight was None, so set to 1 in that case. - Output - int weight - """ - weight = self.weight - if weight is None: - weight = 1 - return weight - - def get_score(self): - """ - Score the student received on the problem, or None if there is no - score. - - Returns: - dictionary - {'score': integer, from 0 to get_max_score(), - 'total': get_max_score()} - """ - max_score = None - score = None - - #The old default was None, so set to 1 if it is the old default weight - weight = self.get_weight() - if self.is_scored: - # Finds the maximum score of all student attempts and keeps it. - score_mat = [] - for i in xrange(len(self.task_states)): - # For each task, extract all student scores on that task (each attempt for each task) - last_response = self.get_last_response(i) - score = last_response.get('all_scores', None) - if score is not None: - # Convert none scores and weight scores properly - for j in xrange(len(score)): - if score[j] is None: - score[j] = 0 - score[j] *= float(weight) - score_mat.append(score) - - if len(score_mat) > 0: - # Currently, assume that the final step is the correct one, and that those are the final scores. - # This will change in the future, which is why the machinery above exists to extract all scores on all steps - scores = score_mat[-1] - score = max(scores) - else: - score = 0 - - if self._max_score is not None: - # Weight the max score if it is not None - max_score = self._max_score * float(weight) - else: - # Without a max_score, we cannot have a score! - score = None - - score_dict = { - 'score': score, - 'total': max_score, - } - - return score_dict - - def max_score(self): - """ - Maximum score possible in this module. Returns the max score if finished, None if not. - """ - max_score = None - if self.check_if_done_and_scored(): - max_score = self._max_score - return max_score - - def get_progress(self): - """ - Generate a progress object. Progress objects represent how far the - student has gone in this module. Must be implemented to get correct - progress tracking behavior in nested modules like sequence and - vertical. This behavior is consistent with capa. - - If the module is unscored, return None (consistent with capa). 
- """ - - d = self.get_score() - - if d['total'] > 0 and self.is_scored: - - try: - return Progress(d['score'], d['total']) - except (TypeError, ValueError): - log.exception("Got bad progress") - return None - - return None - - def out_of_sync_error(self, data, msg=''): - """ - return dict out-of-sync error message, and also log. - """ - ugettext = self.system.service(self, "i18n").ugettext - #This is a dev_facing_error - log.warning( - "Combined module state out sync. state: %r, data: %r. %s", - self.state, - data, - msg - ) - #This is a student_facing_error - return { - 'success': False, - 'error': ugettext('The problem state got out-of-sync. Please try reloading the page.') - } - - @classmethod - def service_declaration(cls, service_name): - """ - This classmethod is copied from XBlock's service_declaration. - It is included to make this class act enough like an XBlock - to get i18n working on it. - - This is currently only used for i18n, and will return "need" - in that case. - - Arguments: - service_name (string): the name of the service requested. - - Returns: - One of "need", "want", or None. - - """ - declaration = cls._combined_services.get(service_name) - return declaration - - -class CombinedOpenEndedV1Descriptor(object): - """ - Module for adding combined open ended questions - """ - mako_template = "widgets/html-edit.html" - module_class = CombinedOpenEndedV1Module - filename_extension = "xml" - - has_score = True - - def __init__(self, system): - self.system = system - - @classmethod - def definition_from_xml(cls, xml_object, system): - """ - Pull out the individual tasks, the rubric, and the prompt, and parse - - Returns: - { - 'rubric': 'some-html', - 'prompt': 'some-html', - 'task_xml': dictionary of xml strings, - } - """ - expected_children = ['task', 'rubric', 'prompt'] - for child in expected_children: - if len(xml_object.xpath(child)) == 0: - # This is a staff_facing_error - raise ValueError( - u"Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. 
{1}".format( - child, xml_object)) - - def parse_task(k): - """Assumes that xml_object has child k""" - return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(len(xml_object.xpath(k)))] - - def parse(k): - """Assumes that xml_object has child k""" - return xml_object.xpath(k)[0] - - return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')} - - def definition_to_xml(self, resource_fs): - '''Return an xml element representing this definition.''' - elt = etree.Element('combinedopenended') - - def add_child(k): - child_str = u'<{tag}>{body}'.format(tag=k, body=self.definition[k]) - child_node = etree.fromstring(child_str) - elt.append(child_node) - - for child in ['task']: - add_child(child) - - return elt diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py deleted file mode 100644 index a6015f8923..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py +++ /dev/null @@ -1,366 +0,0 @@ -import logging - -from lxml import etree - -log = logging.getLogger(__name__) - -GRADER_TYPE_IMAGE_DICT = { - 'SA': '/static/images/self_assessment_icon.png', - 'PE': '/static/images/peer_grading_icon.png', - 'ML': '/static/images/ml_grading_icon.png', - 'IN': '/static/images/peer_grading_icon.png', - 'BC': '/static/images/ml_grading_icon.png', -} - -# Make '_' a no-op so we can scrape strings. Using lambda instead of -# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file -_ = lambda text: text - -HUMAN_GRADER_TYPE = { - # Translators: "Self-Assessment" refers to the self-assessed mode of openended evaluation - 'SA': _('Self-Assessment'), - # Translators: "Peer-Assessment" refers to the peer-assessed mode of openended evaluation - 'PE': _('Peer-Assessment'), - # Translators: "Instructor-Assessment" refers to the instructor-assessed mode of openended evaluation - 'IN': _('Instructor-Assessment'), - # Translators: "AI-Assessment" refers to the machine-graded mode of openended evaluation - 'ML': _('AI-Assessment'), - # Translators: "AI-Assessment" refers to the machine-graded mode of openended evaluation - 'BC': _('AI-Assessment'), -} - -DO_NOT_DISPLAY = ['BC', 'IN'] - -LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() - if k not in DO_NOT_DISPLAY] - - -class RubricParsingError(Exception): - def __init__(self, msg): - self.msg = msg - - -class CombinedOpenEndedRubric(object): - TEMPLATE_DIR = "combinedopenended/openended" - - def __init__(self, render_template, view_only=False): - self.has_score = False - self.view_only = view_only - self.render_template = render_template - - def render_rubric(self, rubric_xml, score_list=None): - ''' - render_rubric: takes in an xml string and outputs the corresponding - html for that xml, given the type of rubric we're generating - Input: - rubric_xml: an string that has not been parsed into xml that - represents this particular rubric - Output: - html: the html that corresponds to the xml given - ''' - success = False - try: - rubric_categories = self.extract_categories(rubric_xml) - if score_list and len(score_list) == len(rubric_categories): - for i in xrange(len(rubric_categories)): - category = rubric_categories[i] - for j in xrange(len(category['options'])): - if score_list[i] == j: - rubric_categories[i]['options'][j]['selected'] = True - 
rubric_scores = [cat['score'] for cat in rubric_categories] - max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) - max_score = max(max_scores) - rubric_template = '{0}/open_ended_rubric.html'.format(self.TEMPLATE_DIR) - if self.view_only: - rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR) - html = self.render_template( - rubric_template, - { - 'categories': rubric_categories, - 'has_score': self.has_score, - 'view_only': self.view_only, - 'max_score': max_score, - 'combined_rubric': False, - } - ) - success = True - except: - #This is a staff_facing_error - error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format( - rubric_xml) - log.exception(error_message) - raise RubricParsingError(error_message) - return {'success': success, 'html': html, 'rubric_scores': rubric_scores} - - def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed): - rubric_dict = self.render_rubric(rubric_string) - success = rubric_dict['success'] - rubric_feedback = rubric_dict['html'] - if not success: - #This is a staff_facing_error - error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format( - rubric_string, location.to_deprecated_string()) - log.error(error_message) - raise RubricParsingError(error_message) - - rubric_categories = self.extract_categories(rubric_string) - total = 0 - for category in rubric_categories: - total = total + len(category['options']) - 1 - if len(category['options']) > (max_score_allowed + 1): - #This is a staff_facing_error - error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}. Contact the learning sciences group for assistance.".format( - len(category['options']), max_score_allowed) - log.error(error_message) - raise RubricParsingError(error_message) - - return int(total) - - def extract_categories(self, element): - ''' - Contstruct a list of categories such that the structure looks like: - [ { category: "Category 1 Name", - options: [{text: "Option 1 Name", points: 0}, {text:"Option 2 Name", points: 5}] - }, - { category: "Category 2 Name", - options: [{text: "Option 1 Name", points: 0}, - {text: "Option 2 Name", points: 1}, - {text: "Option 3 Name", points: 2]}] - - ''' - if isinstance(element, basestring): - element = etree.fromstring(element) - categories = [] - for category in element: - if category.tag != 'category': - #This is a staff_facing_error - raise RubricParsingError( - "[extract_categories] Expected a tag: got {0} instead. Contact the learning sciences group for assistance.".format( - category.tag)) - else: - categories.append(self.extract_category(category)) - return categories - - def extract_category(self, category): - ''' - construct an individual category - {category: "Category 1 Name", - options: [{text: "Option 1 text", points: 1}, - {text: "Option 2 text", points: 2}]} - - all sorting and auto-point generation occurs in this function - ''' - descriptionxml = category[0] - optionsxml = category[1:] - scorexml = category[1] - score = None - if scorexml.tag == 'score': - score_text = scorexml.text - optionsxml = category[2:] - score = int(score_text) - self.has_score = True - # if we are missing the score tag and we are expecting one - elif self.has_score: - #This is a staff_facing_error - raise RubricParsingError( - "[extract_category] Category {0} is missing a score. 
Contact the learning sciences group for assistance.".format( - descriptionxml.text)) - - # parse description - if descriptionxml.tag != 'description': - #This is a staff_facing_error - raise RubricParsingError( - "[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format( - descriptionxml.tag)) - - description = descriptionxml.text - - cur_points = 0 - options = [] - autonumbering = True - # parse options - for option in optionsxml: - if option.tag != 'option': - #This is a staff_facing_error - raise RubricParsingError( - "[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format( - option.tag)) - else: - pointstr = option.get("points") - if pointstr: - autonumbering = False - # try to parse this into an int - try: - points = int(pointstr) - except ValueError: - #This is a staff_facing_error - raise RubricParsingError( - "[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format( - pointstr)) - elif autonumbering: - # use the generated one if we're in the right mode - points = cur_points - cur_points = cur_points + 1 - else: - raise Exception( - "[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.") - - selected = score == points - optiontext = option.text - options.append({'text': option.text, 'points': points, 'selected': selected}) - - # sort and check for duplicates - options = sorted(options, key=lambda option: option['points']) - CombinedOpenEndedRubric.validate_options(options) - - return {'description': description, 'options': options, 'score': score} - - def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types): - success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types, - feedback_types) - #Get all the categories in the rubric - rubric_categories = self.extract_categories(rubric_xml) - #Get a list of max scores, each entry belonging to a rubric category - max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) - actual_scores = [] - #Get the highest possible score across all categories - max_score = max(max_scores) - #Loop through each category - for i, category in enumerate(rubric_categories): - #Loop through each option in the category - for j in xrange(len(category['options'])): - #Intialize empty grader types list - rubric_categories[i]['options'][j]['grader_types'] = [] - #Score tuples are a flat data structure with (category, option, grader_type_list) for selected graders - for tup in score_tuples: - if tup[1] == i and tup[2] == j: - for grader_type in tup[3]: - #Set the rubric grader type to the tuple grader types - rubric_categories[i]['options'][j]['grader_types'].append(grader_type) - #Grab the score and add it to the actual scores. 
J will be the score for the selected - #grader type - if len(actual_scores) <= i: - #Initialize a new list in the list of lists - actual_scores.append([j]) - else: - #If a list in the list of lists for this position exists, append to it - actual_scores[i] += [j] - - actual_scores = [sum(i) / len(i) for i in actual_scores] - correct = [] - #Define if the student is "correct" (1) "incorrect" (0) or "partially correct" (.5) - for (i, a) in enumerate(actual_scores): - if int(a) == max_scores[i]: - correct.append(1) - elif int(a) == 0: - correct.append(0) - else: - correct.append(.5) - - html = self.render_template( - '{0}/open_ended_combined_rubric.html'.format(self.TEMPLATE_DIR), - { - 'categories': rubric_categories, - 'max_scores': max_scores, - 'correct': correct, - 'has_score': True, - 'view_only': True, - 'max_score': max_score, - 'combined_rubric': True, - 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, - 'human_grader_types': HUMAN_GRADER_TYPE, - } - ) - return html - - @staticmethod - def validate_options(options): - ''' - Validates a set of options. This can and should be extended to filter out other bad edge cases - ''' - if len(options) == 0: - #This is a staff_facing_error - raise RubricParsingError( - "[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.") - if len(options) == 1: - return - prev = options[0]['points'] - for option in options[1:]: - if prev == option['points']: - #This is a staff_facing_error - raise RubricParsingError( - "[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.") - else: - prev = option['points'] - - @staticmethod - def reformat_scores_for_rendering(scores, score_types, feedback_types): - """ - Takes in a list of rubric scores, the types of those scores, and feedback associated with them - Outputs a reformatted list of score tuples (count, rubric category, rubric score, [graders that gave this score], [feedback types]) - @param scores: - @param score_types: - @param feedback_types: - @return: - """ - success = False - if len(scores) == 0: - #This is a dev_facing_error - log.error("Score length is 0 when trying to reformat rubric scores for rendering.") - return success, "" - - if len(scores) != len(score_types) or len(feedback_types) != len(scores): - #This is a dev_facing_error - log.error("Length mismatches when trying to reformat rubric scores for rendering. 
" - "Scores: {0}, Score Types: {1} Feedback Types: {2}".format(scores, score_types, feedback_types)) - return success, "" - - score_lists = [] - score_type_list = [] - feedback_type_list = [] - for i in xrange(len(scores)): - score_cont_list = scores[i] - for j in xrange(len(score_cont_list)): - score_list = score_cont_list[j] - score_lists.append(score_list) - score_type_list.append(score_types[i][j]) - feedback_type_list.append(feedback_types[i][j]) - - score_list_len = len(score_lists[0]) - for score_list in score_lists: - if len(score_list) != score_list_len: - return success, "" - - score_tuples = [] - for i in xrange(len(score_lists)): - for j in xrange(len(score_lists[i])): - tuple = [1, j, score_lists[i][j], [], []] - score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, tuple) - score_tuples[tup_ind][0] += 1 - score_tuples[tup_ind][3].append(score_type_list[i]) - score_tuples[tup_ind][4].append(feedback_type_list[i]) - - success = True - return success, score_tuples - - @staticmethod - def check_for_tuple_matches(tuples, tuple): - """ - Checks to see if a tuple in a list of tuples is a match for tuple. - If not match, creates a new tuple matching tuple. - @param tuples: list of tuples - @param tuple: tuples to match - @return: a new list of tuples, and the index of the tuple that matches tuple - """ - category = tuple[1] - score = tuple[2] - tup_ind = -1 - for ind in xrange(len(tuples)): - if tuples[ind][1] == category and tuples[ind][2] == score: - tup_ind = ind - break - - if tup_ind == -1: - tuples.append([0, category, score, [], []]) - tup_ind = len(tuples) - 1 - return tuples, tup_ind diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py deleted file mode 100644 index 9f84b8cbe9..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py +++ /dev/null @@ -1,182 +0,0 @@ -import dogstats_wrapper as dog_stats_api - -import logging -from .grading_service_module import GradingService - -log = logging.getLogger(__name__) - - -class ControllerQueryService(GradingService): - """ - Interface to controller query backend. 
- """ - - METRIC_NAME = 'edxapp.open_ended_grading.controller_query_service' - - def __init__(self, config, render_template): - config['render_template'] = render_template - super(ControllerQueryService, self).__init__(config) - self.url = config['url'] + config['grading_controller'] - self.login_url = self.url + '/login/' - self.check_eta_url = self.url + '/get_submission_eta/' - self.combined_notifications_url = self.url + '/combined_notifications/' - self.grading_status_list_url = self.url + '/get_grading_status_list/' - self.flagged_problem_list_url = self.url + '/get_flagged_problem_list/' - self.take_action_on_flags_url = self.url + '/take_action_on_flags/' - - def check_for_eta(self, location): - params = { - 'location': location, - } - data = self.get(self.check_eta_url, params) - self._record_result('check_for_eta', data) - dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), data.get('eta', 0)) - - return data - - def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed): - params = { - 'student_id': student_id, - 'course_id': course_id.to_deprecated_string(), - 'user_is_staff': user_is_staff, - 'last_time_viewed': last_time_viewed, - } - log.debug(self.combined_notifications_url) - data = self.get(self.combined_notifications_url, params) - - tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'user_is_staff:{}'.format(user_is_staff)] - tags.extend( - u'{}:{}'.format(key, value) - for key, value in data.items() - if key not in ('success', 'version', 'error') - ) - self._record_result('check_combined_notifications', data, tags) - return data - - def get_grading_status_list(self, course_id, student_id): - params = { - 'student_id': student_id, - 'course_id': course_id.to_deprecated_string(), - } - - data = self.get(self.grading_status_list_url, params) - - tags = [u'course_id:{}'.format(course_id.to_deprecated_string())] - self._record_result('get_grading_status_list', data, tags) - dog_stats_api.histogram( - self._metric_name('get_grading_status_list.length'), - len(data.get('problem_list', [])), - tags=tags - ) - return data - - def get_flagged_problem_list(self, course_id): - params = { - 'course_id': course_id.to_deprecated_string(), - } - - data = self.get(self.flagged_problem_list_url, params) - - tags = [u'course_id:{}'.format(course_id.to_deprecated_string())] - self._record_result('get_flagged_problem_list', data, tags) - dog_stats_api.histogram( - self._metric_name('get_flagged_problem_list.length'), - len(data.get('flagged_submissions', [])) - ) - return data - - def take_action_on_flags(self, course_id, student_id, submission_id, action_type): - params = { - 'course_id': course_id.to_deprecated_string(), - 'student_id': student_id, - 'submission_id': submission_id, - 'action_type': action_type - } - - data = self.post(self.take_action_on_flags_url, params) - - tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'action_type:{}'.format(action_type)] - self._record_result('take_action_on_flags', data, tags) - return data - - -class MockControllerQueryService(object): - """ - Mock controller query service for testing - """ - - def __init__(self, config, render_template): - pass - - def check_for_eta(self, *args, **kwargs): - """ - Mock later if needed. Stub function for now. 
- @param params: - @return: - """ - pass - - def check_combined_notifications(self, *args, **kwargs): - combined_notifications = { - "flagged_submissions_exist": False, - "version": 1, - "new_student_grading_to_view": False, - "success": True, - "staff_needs_to_grade": False, - "student_needs_to_peer_grade": True, - "overall_need_to_check": True - } - return combined_notifications - - def get_grading_status_list(self, *args, **kwargs): - grading_status_list = { - "version": 1, - "problem_list": [ - { - "problem_name": "Science Question -- Machine Assessed", - "grader_type": "NA", - "eta_available": True, - "state": "Waiting to be Graded", - "eta": 259200, - "location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML" - }, { - "problem_name": "Humanities Question -- Peer Assessed", - "grader_type": "NA", - "eta_available": True, - "state": "Waiting to be Graded", - "eta": 259200, - "location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer" - } - ], - "success": True - } - return grading_status_list - - def get_flagged_problem_list(self, *args, **kwargs): - flagged_problem_list = { - "version": 1, - "success": False, - "error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall" - } - return flagged_problem_list - - def take_action_on_flags(self, *args, **kwargs): - """ - Mock later if needed. Stub function for now. - @param params: - @return: - """ - pass - - -def convert_seconds_to_human_readable(seconds): - if seconds < 60: - human_string = "{0} seconds".format(seconds) - elif seconds < 60 * 60: - human_string = "{0} minutes".format(round(seconds / 60, 1)) - elif seconds < (24 * 60 * 60): - human_string = "{0} hours".format(round(seconds / (60 * 60), 1)) - else: - human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1)) - - return human_string diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py deleted file mode 100644 index 633392dff8..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py +++ /dev/null @@ -1,162 +0,0 @@ -# This class gives a common interface for logging into the grading controller - -import logging - -import requests -import dogstats_wrapper as dog_stats_api -from lxml import etree -from requests.exceptions import RequestException, ConnectionError, HTTPError - -from .combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError - -log = logging.getLogger(__name__) - - -class GradingServiceError(Exception): - """ - Exception for grading service. Shown when Open Response Assessment servers cannot be reached. - """ - pass - - -class GradingService(object): - """ - Interface to staff grading backend. - """ - - def __init__(self, config): - self.username = config['username'] - self.password = config['password'] - self.session = requests.Session() - self.render_template = config['render_template'] - - def _login(self): - """ - Log into the staff grading service. - - Raises requests.exceptions.HTTPError if something goes wrong. - - Returns the decoded json dict of the response. - """ - response = self.session.post(self.login_url, - {'username': self.username, - 'password': self.password, }) - - response.raise_for_status() - - return response.json() - - def _metric_name(self, suffix): - """ - Return a metric name for datadog, using `self.METRIC_NAME` as - a prefix, and `suffix` as the suffix. - - Arguments: - suffix (str): The metric suffix to use. 
- """ - return '{}.{}'.format(self.METRIC_NAME, suffix) - - def _record_result(self, action, data, tags=None): - """ - Log results from an API call to an ORA service to datadog. - - Arguments: - action (str): The ORA action being recorded. - data (dict): The data returned from the ORA service. Should contain the key 'success'. - tags (list): A list of tags to attach to the logged metric. - """ - if tags is None: - tags = [] - - tags.append(u'result:{}'.format(data.get('success', False))) - tags.append(u'action:{}'.format(action)) - dog_stats_api.increment(self._metric_name('request.count'), tags=tags) - - def post(self, url, data, allow_redirects=False): - """ - Make a post request to the grading controller. Returns the parsed json results of that request. - """ - try: - op = lambda: self.session.post(url, data=data, - allow_redirects=allow_redirects) - response_json = self._try_with_login(op) - except (RequestException, ConnectionError, HTTPError, ValueError) as err: - # reraise as promised GradingServiceError, but preserve stacktrace. - #This is a dev_facing_error - error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data) - log.error(error_string) - raise GradingServiceError(error_string) - - return response_json - - def get(self, url, params, allow_redirects=False): - """ - Make a get request to the grading controller. Returns the parsed json results of that request. - """ - op = lambda: self.session.get(url, - allow_redirects=allow_redirects, - params=params) - try: - response_json = self._try_with_login(op) - except (RequestException, ConnectionError, HTTPError, ValueError) as err: - # reraise as promised GradingServiceError, but preserve stacktrace. - #This is a dev_facing_error - error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params) - log.error(error_string) - raise GradingServiceError(error_string) - - return response_json - - def _try_with_login(self, operation): - """ - Call operation(), which should return a requests response object. If - the request fails with a 'login_required' error, call _login() and try - the operation again. - - Returns the result of operation(). Does not catch exceptions. - """ - response = operation() - resp_json = response.json() - if (resp_json - and resp_json.get('success') is False - and resp_json.get('error') == 'login_required'): - # apparently we aren't logged in. Try to fix that. - r = self._login() - if r and not r.get('success'): - log.warning("Couldn't log into ORA backend. Response: %s", - r) - # try again - response = operation() - response.raise_for_status() - resp_json = response.json() - - return resp_json - - def _render_rubric(self, response, view_only=False): - """ - Given an HTTP Response json with the key 'rubric', render out the html - required to display the rubric and put it back into the response - - returns the updated response as a dictionary that can be serialized later - - """ - try: - if 'rubric' in response: - rubric = response['rubric'] - rubric_renderer = CombinedOpenEndedRubric(self.render_template, view_only) - rubric_dict = rubric_renderer.render_rubric(rubric) - success = rubric_dict['success'] - rubric_html = rubric_dict['html'] - response['rubric'] = rubric_html - return response - # if we can't parse the rubric into HTML, - except (etree.XMLSyntaxError, RubricParsingError): - #This is a dev_facing_error - log.exception("Cannot parse rubric string. 
Raw string: {0}".format(response['rubric'])) - return {'success': False, - 'error': 'Error displaying submission'} - except ValueError: - #This is a dev_facing_error - log.exception("Error parsing response: {0}".format(response)) - return {'success': False, - 'error': "Error displaying submission"} diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py deleted file mode 100644 index d6e6ffa57f..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py +++ /dev/null @@ -1,899 +0,0 @@ -""" -A Self Assessment module that allows students to write open-ended responses, -submit, then see a rubric and rate themselves. Persists student supplied -hints, answers, and assessment judgment (currently only correct/incorrect). -Parses xml definition file--see below for exact format. -""" - -import json -import logging -from lxml import etree -import capa.xqueue_interface as xqueue_interface - -from xmodule.capa_module import ComplexEncoder -from xmodule.progress import Progress -from xmodule.stringify import stringify_children -from capa.util import * -import openendedchild - -from numpy import median - -from datetime import datetime -from pytz import UTC - -from .combined_open_ended_rubric import CombinedOpenEndedRubric - -log = logging.getLogger("edx.courseware") - - -class OpenEndedModule(openendedchild.OpenEndedChild): - """ - The open ended module supports all external open ended grader problems. - Sample XML file: - - - Enter essay here. - This is the answer. - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - - """ - - TEMPLATE_DIR = "combinedopenended/openended" - - def setup_response(self, system, location, definition, descriptor): - """ - Sets up the response type. - @param system: Modulesystem object - @param location: The location of the problem - @param definition: The xml definition of the problem - @param descriptor: The OpenEndedDescriptor associated with this - @return: None - """ - oeparam = definition['oeparam'] - - self.url = definition.get('url', None) - self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE) - self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE) - - # This is needed to attach feedback to specific responses later - self.submission_id = None - self.grader_id = None - - error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance." - if oeparam is None: - # This is a staff_facing_error - raise ValueError(error_message.format('oeparam')) - if self.child_prompt is None: - raise ValueError(error_message.format('prompt')) - if self.child_rubric is None: - raise ValueError(error_message.format('rubric')) - - self._parse(oeparam, self.child_prompt, self.child_rubric, system) - - # If there are multiple tasks (like self-assessment followed by ai), once - # the the status of the first task is set to DONE, setup_next_task() will - # create the OpenEndedChild with parameter child_created=True so that the - # submission can be sent to the grader. Keep trying each time this module - # is loaded until it succeeds. 
- if self.child_created is True and self.child_state == self.ASSESSING: - success, message = self.send_to_grader(self.latest_answer(), system) - if success: - self.child_created = False - - def _parse(self, oeparam, prompt, rubric, system): - ''' - Parse OpenEndedResponse XML: - self.initial_display - self.payload - dict containing keys -- - 'grader' : path to grader settings file, 'problem_id' : id of the problem - - self.answer - What to display when show answer is clicked - ''' - # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload - prompt_string = stringify_children(prompt) - rubric_string = stringify_children(rubric) - self.child_prompt = prompt_string - self.child_rubric = rubric_string - - grader_payload = oeparam.find('grader_payload') - grader_payload = grader_payload.text if grader_payload is not None else '' - - # Update grader payload with student id. If grader payload not json, error. - try: - parsed_grader_payload = json.loads(grader_payload) - # NOTE: self.system.location is valid because the capa_module - # __init__ adds it (easiest way to get problem location into - # response types) - except (TypeError, ValueError): - # This is a dev_facing_error - log.exception( - "Grader payload from external open ended grading server is not a json object! Object: {0}".format( - grader_payload)) - - self.initial_display = find_with_default(oeparam, 'initial_display', '') - self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.') - - parsed_grader_payload.update({ - 'location': self.location_string, - 'course_id': system.course_id.to_deprecated_string(), - 'prompt': prompt_string, - 'rubric': rubric_string, - 'initial_display': self.initial_display, - 'answer': self.answer, - 'problem_id': self.display_name, - 'skip_basic_checks': self.skip_basic_checks, - 'control': json.dumps(self.control), - }) - updated_grader_payload = json.dumps(parsed_grader_payload) - - self.payload = {'grader_payload': updated_grader_payload} - - def skip_post_assessment(self, _data, system): - """ - Ajax function that allows one to skip the post assessment phase - @param data: AJAX dictionary - @param system: ModuleSystem - @return: Success indicator - """ - self.child_state = self.DONE - return {'success': True} - - def message_post(self, data, system): - """ - Handles a student message post (a reaction to the grade they received from an open ended grader type) - Returns a boolean success/fail and an error message - """ - - event_info = dict() - event_info['problem_id'] = self.location_string - event_info['student_id'] = system.anonymous_student_id - event_info['survey_responses'] = data - _ = self.system.service(self, "i18n").ugettext - - survey_responses = event_info['survey_responses'] - for tag in ['feedback', 'submission_id', 'grader_id', 'score']: - if tag not in survey_responses: - # This is a student_facing_error - return { - 'success': False, - # Translators: 'tag' is one of 'feedback', 'submission_id', - # 'grader_id', or 'score'. They are categories that a student - # responds to when filling out a post-assessment survey - # of his or her grade from an openended problem. - 'msg': _("Could not find needed tag {tag_name} in the " - "survey responses. 
Please try submitting " - "again.").format(tag_name=tag) - } - try: - submission_id = int(survey_responses['submission_id']) - grader_id = int(survey_responses['grader_id']) - feedback = str(survey_responses['feedback'].encode('ascii', 'ignore')) - score = int(survey_responses['score']) - except: - # This is a dev_facing_error - error_message = ( - "Could not parse submission id, grader id, " - "or feedback from message_post ajax call. " - "Here is the message data: {0}".format(survey_responses) - ) - log.exception(error_message) - # This is a student_facing_error - return { - 'success': False, - 'msg': _( - "There was an error saving your feedback. Please " - "contact course staff." - ) - } - - xqueue = system.get('xqueue') - if xqueue is None: - return {'success': False, 'msg': _("Couldn't submit feedback.")} - qinterface = xqueue['interface'] - qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat) - anonymous_student_id = system.anonymous_student_id - queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + - anonymous_student_id + - str(len(self.child_history))) - - xheader = xqueue_interface.make_xheader( - lms_callback_url=xqueue['construct_callback'](), - lms_key=queuekey, - queue_name=self.message_queue_name - ) - - student_info = { - 'anonymous_student_id': anonymous_student_id, - 'submission_time': qtime, - } - contents = { - 'feedback': feedback, - 'submission_id': submission_id, - 'grader_id': grader_id, - 'score': score, - 'student_info': json.dumps(student_info), - } - - error, error_message = qinterface.send_to_queue( - header=xheader, - body=json.dumps(contents) - ) - - # Convert error to a success value - success = True - message = _("Successfully saved your feedback.") - if error: - success = False - message = _("Unable to save your feedback. Please try again later.") - log.error("Unable to send feedback to grader. location: {0}, error_message: {1}".format( - self.location_string, error_message - )) - else: - self.child_state = self.DONE - - # This is a student_facing_message - return {'success': success, 'msg': message} - - def send_to_grader(self, submission, system): - """ - Send a given submission to the grader, via the xqueue - @param submission: The student submission to send to the grader - @param system: Modulesystem - @return: Boolean true (not useful right now) - """ - - # Prepare xqueue request - #------------------------------------------------------------ - - xqueue = system.get('xqueue') - if xqueue is None: - return False - qinterface = xqueue['interface'] - qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat) - - anonymous_student_id = system.anonymous_student_id - - # Generate header - queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + - anonymous_student_id + - str(len(self.child_history))) - - xheader = xqueue_interface.make_xheader( - lms_callback_url=xqueue['construct_callback'](), - lms_key=queuekey, - queue_name=self.queue_name - ) - - contents = self.payload.copy() - - # Metadata related to the student submission revealed to the external grader - student_info = { - 'anonymous_student_id': anonymous_student_id, - 'submission_time': qtime, - } - - # Update contents with student response and student info - contents.update({ - 'student_info': json.dumps(student_info), - 'student_response': submission, - 'max_score': self.max_score(), - }) - - # Submit request. 
When successful, 'msg' is the prior length of the queue - error, error_message = qinterface.send_to_queue( - header=xheader, - body=json.dumps(contents) - ) - - # State associated with the queueing request - queuestate = { - 'key': queuekey, - 'time': qtime, - } - _ = self.system.service(self, "i18n").ugettext - success = True - message = _("Successfully saved your submission.") - if error: - success = False - # Translators: the `grader` refers to the grading service open response problems - # are sent to, either to be machine-graded, peer-graded, or instructor-graded. - message = _('Unable to submit your submission to the grader. Please try again later.') - log.error("Unable to submit to grader. location: {0}, error_message: {1}".format( - self.location_string, error_message - )) - - return (success, message) - - def _update_score(self, score_msg, queuekey, system): - """ - Called by xqueue to update the score - @param score_msg: The message from xqueue - @param queuekey: The key sent by xqueue - @param system: Modulesystem - @return: Boolean True (not useful currently) - """ - _ = self.system.service(self, "i18n").ugettext - new_score_msg = self._parse_score_msg(score_msg, system) - if not new_score_msg['valid']: - # Translators: the `grader` refers to the grading service open response problems - # are sent to, either to be machine-graded, peer-graded, or instructor-graded. - new_score_msg['feedback'] = _('Invalid grader reply. Please contact the course staff.') - - # self.child_history is initialized as []. record_latest_score() and record_latest_post_assessment() - # operate on self.child_history[-1]. Thus we have to make sure child_history is not []. - # Handle at this level instead of in record_*() because this is a good place to reduce the number of conditions - # and also keep the persistent state from changing. - if self.child_history: - self.record_latest_score(new_score_msg['score']) - self.record_latest_post_assessment(score_msg) - self.child_state = self.POST_ASSESSMENT - else: - log.error( - "Trying to update score without existing studentmodule child_history:\n" - " location: {location}\n" - " score: {score}\n" - " grader_ids: {grader_ids}\n" - " submission_ids: {submission_ids}".format( - location=self.location_string, - score=new_score_msg['score'], - grader_ids=new_score_msg['grader_ids'], - submission_ids=new_score_msg['submission_ids'], - ) - ) - - return True - - def get_answers(self): - """ - Gets and shows the answer for this problem. - @return: Answer html - """ - anshtml = '
{0}
'.format(self.answer) - return {self.answer_id: anshtml} - - def get_initial_display(self): - """ - Gets and shows the initial display for the input box. - @return: Initial display html - """ - return {self.answer_id: self.initial_display} - - def _convert_longform_feedback_to_html(self, response_items): - """ - Take in a dictionary, and return html strings for display to student. - Input: - response_items: Dictionary with keys success, feedback. - if success is True, feedback should be a dictionary, with keys for - types of feedback, and the corresponding feedback values. - if success is False, feedback is actually an error string. - - NOTE: this will need to change when we integrate peer grading, because - that will have more complex feedback. - - Output: - String -- html that can be displayed to the student. - """ - - # We want to display available feedback in a particular order. - # This dictionary specifies which goes first--lower first. - priorities = { - # These go at the start of the feedback - 'spelling': 0, - 'grammar': 1, - # needs to be after all the other feedback - 'markup_text': 3 - } - do_not_render = ['topicality', 'prompt-overlap'] - - default_priority = 2 - - def get_priority(elt): - """ - Args: - elt: a tuple of feedback-type, feedback - Returns: - the priority for this feedback type - """ - return priorities.get(elt[0], default_priority) - - def encode_values(feedback_type, value): - feedback_type = str(feedback_type).encode('ascii', 'ignore') - if not isinstance(value, basestring): - value = str(value) - value = value.encode('ascii', 'ignore') - return feedback_type, value - - def format_feedback(feedback_type, value): - feedback_type, value = encode_values(feedback_type, value) - feedback = u""" -
<div class="{feedback_type}"> - {value} - </div>
- """.format(feedback_type=feedback_type, value=value) - return feedback - - def format_feedback_hidden(feedback_type, value): - feedback_type, value = encode_values(feedback_type, value) - feedback = """ - - """.format(feedback_type=feedback_type, value=value) - return feedback - - # TODO (vshnayder): design and document the details of this format so - # that we can do proper escaping here (e.g. are the graders allowed to - # include HTML?) - - _ = self.system.service(self, "i18n").ugettext - for tag in ['success', 'feedback', 'submission_id', 'grader_id']: - if tag not in response_items: - # This is a student_facing_error - return format_feedback( - # Translators: the `grader` refers to the grading service open response problems - # are sent to, either to be machine-graded, peer-graded, or instructor-graded. - 'errors', _('Error getting feedback from grader.') - ) - - feedback_items = response_items['feedback'] - try: - feedback = json.loads(feedback_items) - except (TypeError, ValueError): - # This is a dev_facing_error - log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items)) - # This is a student_facing_error - return format_feedback( - # Translators: the `grader` refers to the grading service open response problems - # are sent to, either to be machine-graded, peer-graded, or instructor-graded. - 'errors', _('Error getting feedback from grader.') - ) - - if response_items['success']: - if len(feedback) == 0: - # This is a student_facing_error - return format_feedback( - # Translators: the `grader` refers to the grading service open response problems - # are sent to, either to be machine-graded, peer-graded, or instructor-graded. - 'errors', _('No feedback available from grader.') - ) - - for tag in do_not_render: - if tag in feedback: - feedback.pop(tag) - - feedback_lst = sorted(feedback.items(), key=get_priority) - feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst) - else: - # This is a student_facing_error - feedback_list_part1 = format_feedback('errors', response_items['feedback']) - - feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value) - for feedback_type, value in response_items.items() - if feedback_type in ['submission_id', 'grader_id']])) - - return u"\n".join([feedback_list_part1, feedback_list_part2]) - - def _format_feedback(self, response_items, system): - """ - Input: - Dictionary called feedback. Must contain keys seen below. 
- Output: - Return error message or feedback template - """ - - rubric_feedback = "" - feedback = self._convert_longform_feedback_to_html(response_items) - rubric_scores = [] - if response_items['rubric_scores_complete'] is True: - rubric_renderer = CombinedOpenEndedRubric(system.render_template, True) - rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml']) - success = rubric_dict['success'] - rubric_feedback = rubric_dict['html'] - rubric_scores = rubric_dict['rubric_scores'] - - if not response_items['success']: - return system.render_template( - "{0}/open_ended_error.html".format(self.TEMPLATE_DIR), - {'errors': feedback} - ) - - feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), { - 'grader_type': response_items['grader_type'], - 'score': "{0} / {1}".format(response_items['score'], self.max_score()), - 'feedback': feedback, - 'rubric_feedback': rubric_feedback - }) - - return feedback_template, rubric_scores - - def _parse_score_msg(self, score_msg, system, join_feedback=True): - """ - Grader reply is a JSON-dump of the following dict - { 'correct': True/False, - 'score': Numeric value (floating point is okay) to assign to answer - 'msg': grader_msg - 'feedback' : feedback from grader - 'grader_type': what type of grader resulted in this score - 'grader_id': id of the grader - 'submission_id' : id of the submission - 'success': whether or not this submission was successful - 'rubric_scores': a list of rubric scores - 'rubric_scores_complete': boolean if rubric scores are complete - 'rubric_xml': the xml of the rubric in string format - } - - Returns (valid_score_msg, correct, score, msg): - valid_score_msg: Flag indicating valid score_msg format (Boolean) - correct: Correctness of submission (Boolean) - score: Points to be assigned (numeric, can be float) - """ - fail = { - 'valid': False, - 'score': 0, - 'feedback': '', - 'rubric_scores': [[0]], - 'grader_types': [''], - 'feedback_items': [''], - 'feedback_dicts': [{}], - 'grader_ids': [0], - 'submission_ids': [0], - } - try: - score_result = json.loads(score_msg) - except (TypeError, ValueError): - # This is a dev_facing_error - error_message = ("External open ended grader message should be a JSON-serialized dict." - " Received score_msg = {0}".format(score_msg)) - log.error(error_message) - fail['feedback'] = error_message - return fail - - if not isinstance(score_result, dict): - # This is a dev_facing_error - error_message = ("External open ended grader message should be a JSON-serialized dict." 
- " Received score_result = {0}".format(score_result)) - log.error(error_message) - fail['feedback'] = error_message - return fail - - if not score_result: - return fail - - for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']: - if tag not in score_result: - # This is a dev_facing_error - error_message = ("External open ended grader message is missing required tag: {0}" - .format(tag)) - log.error(error_message) - fail['feedback'] = error_message - return fail - # This is to support peer grading - if isinstance(score_result['score'], list): - feedback_items = [] - rubric_scores = [] - grader_types = [] - feedback_dicts = [] - grader_ids = [] - submission_ids = [] - for i in xrange(len(score_result['score'])): - new_score_result = { - 'score': score_result['score'][i], - 'feedback': score_result['feedback'][i], - 'grader_type': score_result['grader_type'], - 'success': score_result['success'], - 'grader_id': score_result['grader_id'][i], - 'submission_id': score_result['submission_id'], - 'rubric_scores_complete': score_result['rubric_scores_complete'][i], - 'rubric_xml': score_result['rubric_xml'][i], - } - feedback_template, rubric_score = self._format_feedback(new_score_result, system) - feedback_items.append(feedback_template) - rubric_scores.append(rubric_score) - grader_types.append(score_result['grader_type']) - try: - feedback_dict = json.loads(score_result['feedback'][i]) - except Exception: - feedback_dict = score_result['feedback'][i] - feedback_dicts.append(feedback_dict) - grader_ids.append(score_result['grader_id'][i]) - submission_ids.append(score_result['submission_id']) - if join_feedback: - feedback = "".join(feedback_items) - else: - feedback = feedback_items - score = int(round(median(score_result['score']))) - else: - # This is for instructor and ML grading - feedback, rubric_score = self._format_feedback(score_result, system) - score = score_result['score'] - rubric_scores = [rubric_score] - grader_types = [score_result['grader_type']] - feedback_items = [feedback] - try: - feedback_dict = json.loads(score_result['feedback']) - except Exception: - feedback_dict = score_result.get('feedback', '') - feedback_dicts = [feedback_dict] - grader_ids = [score_result['grader_id']] - submission_ids = [score_result['submission_id']] - - self.submission_id = score_result['submission_id'] - self.grader_id = score_result['grader_id'] - - return { - 'valid': True, - 'score': score, - 'feedback': feedback, - 'rubric_scores': rubric_scores, - 'grader_types': grader_types, - 'feedback_items': feedback_items, - 'feedback_dicts': feedback_dicts, - 'grader_ids': grader_ids, - 'submission_ids': submission_ids, - } - - def latest_post_assessment(self, system, short_feedback=False, join_feedback=True): - """ - Gets the latest feedback, parses, and returns - @param short_feedback: If the long feedback is wanted or not - @return: Returns formatted feedback - """ - if not self.child_history: - return "" - - feedback_dict = self._parse_score_msg( - self.child_history[-1].get('post_assessment', "{}"), - system, - join_feedback=join_feedback - ) - if not short_feedback: - return feedback_dict['feedback'] if feedback_dict['valid'] else '' - if feedback_dict['valid']: - short_feedback = self._convert_longform_feedback_to_html( - json.loads(self.child_history[-1].get('post_assessment', ""))) - return short_feedback if feedback_dict['valid'] else '' - - def format_feedback_with_evaluation(self, system, feedback): - """ - Renders a given html feedback into an 
evaluation template - @param feedback: HTML feedback - @return: Rendered html - """ - context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50} - html = system.render_template('{0}/open_ended_evaluation.html'.format(self.TEMPLATE_DIR), context) - return html - - def handle_ajax(self, dispatch, data, system): - ''' - This is called by courseware.module_render, to handle an AJAX call. - "data" is request.POST. - - Returns a json dictionary: - { 'progress_changed' : True/False, - 'progress' : 'none'/'in_progress'/'done', - } - ''' - handlers = { - 'save_answer': self.save_answer, - 'score_update': self.update_score, - 'save_post_assessment': self.message_post, - 'skip_post_assessment': self.skip_post_assessment, - 'check_for_score': self.check_for_score, - 'store_answer': self.store_answer, - } - _ = self.system.service(self, "i18n").ugettext - if dispatch not in handlers: - # This is a dev_facing_error - log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) - # This is a dev_facing_error - return json.dumps( - {'error': _('Error handling action. Please try again.'), 'success': False} - ) - - before = self.get_progress() - d = handlers[dispatch](data, system) - after = self.get_progress() - d.update({ - 'progress_changed': after != before, - 'progress_status': Progress.to_js_status_str(after), - }) - return json.dumps(d, cls=ComplexEncoder) - - def check_for_score(self, _data, system): - """ - Checks to see if a score has been received yet. - @param data: AJAX dictionary - @param system: Modulesystem (needed to align with other ajax functions) - @return: Returns the current state - """ - state = self.child_state - return {'state': state} - - def save_answer(self, data, system): - """ - Saves a student answer - @param data: AJAX dictionary - @param system: modulesystem - @return: Success indicator - """ - # Once we close the problem, we should not allow students - # to save answers - error_message = "" - closed, msg = self.check_if_closed() - if closed: - return msg - - if self.child_state != self.INITIAL: - return self.out_of_sync_error(data) - - message = "Successfully saved your submission." - - # add new history element with answer and empty score and hint. - success, error_message, data = self.append_file_link_to_student_answer(data) - if not success: - message = error_message - else: - data['student_answer'] = OpenEndedModule.sanitize_html(data['student_answer']) - success, error_message = self.send_to_grader(data['student_answer'], system) - if not success: - message = error_message - # Store the answer instead - self.store_answer(data, system) - else: - self.new_history_entry(data['student_answer']) - self.change_state(self.ASSESSING) - - return { - 'success': success, - 'error': message, - 'student_response': data['student_answer'].replace("\n", "
") - } - - def update_score(self, data, system): - """ - Updates the current score via ajax. Called by xqueue. - Input: AJAX data dictionary, modulesystem - Output: None - """ - queuekey = data['queuekey'] - score_msg = data['xqueue_body'] - # TODO: Remove need for cmap - self._update_score(score_msg, queuekey, system) - - return dict() # No AJAX return is needed - - def get_html(self, system): - """ - Gets the HTML for this problem and renders it - Input: Modulesystem object - Output: Rendered HTML - """ - _ = self.system.service(self, "i18n").ugettext - # set context variables and render template - eta_string = None - if self.child_state != self.INITIAL: - post_assessment = self.latest_post_assessment(system) - score = self.latest_score() - correct = 'correct' if self.is_submission_correct(score) else 'incorrect' - if self.child_state == self.ASSESSING: - # Translators: this string appears once an openended response - # is submitted but before it has been graded - eta_string = _("Your response has been submitted. Please check back later for your grade.") - else: - post_assessment = "" - correct = "" - previous_answer = self.get_display_answer() - - # Use the module name as a unique id to pass to the template. - try: - module_id = self.system.location.name - except AttributeError: - # In cases where we don't have a system or a location, use a fallback. - module_id = "open_ended" - - context = { - 'prompt': self.child_prompt, - 'previous_answer': previous_answer, - 'state': self.child_state, - 'allow_reset': self._allow_reset(), - 'rows': 30, - 'cols': 80, - 'module_id': module_id, - 'msg': post_assessment, - 'child_type': 'openended', - 'correct': correct, - 'accept_file_upload': self.accept_file_upload, - 'eta_message': eta_string, - } - html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context) - return html - - def latest_score(self): - """None if not available""" - if not self.child_history: - return None - return self.score_for_attempt(-1) - - def all_scores(self): - """None if not available""" - if not self.child_history: - return None - return [self.score_for_attempt(index) for index in xrange(len(self.child_history))] - - def score_for_attempt(self, index): - """ - Return sum of rubric scores for ML grading otherwise return attempt["score"]. - """ - attempt = self.child_history[index] - score = attempt.get('score') - post_assessment_data = self._parse_score_msg(attempt.get('post_assessment', "{}"), self.system) - grader_types = post_assessment_data.get('grader_types') - - # According to _parse_score_msg in ML grading there should be only one grader type. - if len(grader_types) == 1 and grader_types[0] == 'ML': - rubric_scores = post_assessment_data.get("rubric_scores") - - # Similarly there should be only one list of rubric scores. 
- if len(rubric_scores) == 1: - rubric_scores_sum = sum(rubric_scores[0]) - log.debug("""Score normalized for location={loc}, old_score={old_score}, - new_score={new_score}, rubric_score={rubric_score}""".format( - loc=self.location_string, - old_score=score, - new_score=rubric_scores_sum, - rubric_score=rubric_scores - )) - return rubric_scores_sum - return score - - -class OpenEndedDescriptor(object): - """ - Module for adding open ended response questions to courses - """ - mako_template = "widgets/html-edit.html" - module_class = OpenEndedModule - filename_extension = "xml" - - has_score = True - - def __init__(self, system): - self.system = system - - @classmethod - def definition_from_xml(cls, xml_object, system): - """ - Pull out the open ended parameters into a dictionary. - - Returns: - { - 'oeparam': 'some-html' - } - """ - for child in ['openendedparam']: - if len(xml_object.xpath(child)) != 1: - # This is a staff_facing_error - raise ValueError( - u"Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format( - child)) - - def parse(k): - """Assumes that xml_object has child k""" - return xml_object.xpath(k)[0] - - return { - 'oeparam': parse('openendedparam') - } - - def definition_to_xml(self, resource_fs): - '''Return an xml element representing this definition.''' - elt = etree.Element('openended') - - def add_child(k): - child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k]) - child_node = etree.fromstring(child_str) - elt.append(child_node) - - for child in ['openendedparam']: - add_child(child) - - return elt diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py deleted file mode 100644 index 2981dc8fc8..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py +++ /dev/null @@ -1,577 +0,0 @@ -""" -ORA1. Deprecated. -""" -from datetime import datetime -import json -import logging -import re - -import bleach -from boto.s3.connection import S3Connection -from boto.s3.key import Key -from html5lib.tokenizer import HTMLTokenizer -from pytz import UTC - -from xmodule.progress import Progress -import capa.xqueue_interface as xqueue_interface -from capa.util import * -from .peer_grading_service import PeerGradingService, MockPeerGradingService -import controller_query_service - - -log = logging.getLogger("edx.courseware") - -# Make '_' a no-op so we can scrape strings. Using lambda instead of -# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file -_ = lambda text: text - -# Set the default number of max attempts. Should be 1 for production -# Set higher for debugging/testing -# attempts specified in xml definition overrides this. -MAX_ATTEMPTS = 1 - -# Set maximum available number of points. -# Overridden by max_score specified in xml. -MAX_SCORE = 1 - - -def upload_to_s3(file_to_upload, keyname, s3_interface): - ''' - Upload file to S3 using provided keyname.
- - Returns: - public_url: URL to access uploaded file - ''' - - conn = S3Connection(s3_interface['access_key'], s3_interface['secret_access_key']) - bucketname = str(s3_interface['storage_bucket_name']) - bucket = conn.lookup(bucketname.lower()) - if not bucket: - bucket = conn.create_bucket(bucketname.lower()) - - k = Key(bucket) - k.key = keyname - k.set_metadata('filename', file_to_upload.name) - k.set_contents_from_file(file_to_upload) - - k.set_acl("public-read") - public_url = k.generate_url(60 * 60 * 24 * 365) # URL timeout in seconds. - - return public_url - -# Used by sanitize_html -ALLOWED_HTML_ATTRS = { - '*': ['id', 'class', 'height', 'width', 'alt'], - 'a': ['href', 'title', 'rel', 'target'], - 'embed': ['src'], - 'iframe': ['src'], - 'img': ['src'], -} - - -class OpenEndedChild(object): - """ - States: - - initial (prompt, textbox shown) - | - assessing (read-only textbox, rubric + assessment input shown for self assessment, response queued for open ended) - | - post_assessment (read-only textbox, read-only rubric and assessment, hint input box shown) - | - done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows - a reset button that goes back to initial state. Saves previous - submissions too.) - """ - - DEFAULT_QUEUE = 'open-ended' - DEFAULT_MESSAGE_QUEUE = 'open-ended-message' - max_inputfields = 1 - - STATE_VERSION = 1 - - # states - INITIAL = 'initial' - ASSESSING = 'assessing' - POST_ASSESSMENT = 'post_assessment' - DONE = 'done' - - # This is used to tell students where they are at in the module - HUMAN_NAMES = { - # Translators: "Not started" communicates to a student that their response - # has not yet been graded - 'initial': _('Not started'), - # Translators: "In progress" communicates to a student that their response - # is currently in the grading process - 'assessing': _('In progress'), - # Translators: "Done" communicates to a student that their response - # has been fully graded - 'post_assessment': _('Done'), - 'done': _('Done'), - } - - # included to make this act enough like an xblock to get i18n - _services_requested = {"i18n": "need"} - _combined_services = _services_requested - - def __init__(self, system, location, definition, descriptor, static_data, - instance_state=None, shared_state=None, **kwargs): - # Load instance state - - if instance_state is not None: - try: - instance_state = json.loads(instance_state) - except: - log.error( - "Could not load instance state for open ended. Setting it to nothing.: {0}".format(instance_state)) - instance_state = {} - else: - instance_state = {} - - # History is a list of tuples of (answer, score, hint), where hint may be - # None for any element, and score and hint can be None for the last (current) - # element. 
- # Scores are on scale from 0 to max_score - - self.child_history = instance_state.get('child_history', []) - self.child_state = instance_state.get('child_state', self.INITIAL) - self.child_created = instance_state.get('child_created', False) - self.child_attempts = instance_state.get('child_attempts', 0) - self.stored_answer = instance_state.get('stored_answer', None) - - self.max_attempts = static_data['max_attempts'] - self.child_prompt = static_data['prompt'] - self.child_rubric = static_data['rubric'] - self.display_name = static_data['display_name'] - self.accept_file_upload = static_data['accept_file_upload'] - self.close_date = static_data['close_date'] - self.s3_interface = static_data['s3_interface'] - self.skip_basic_checks = static_data['skip_basic_checks'] - self._max_score = static_data['max_score'] - self.control = static_data['control'] - - # Used for progress / grading. Currently get credit just for - # completion (doesn't matter if you self-assessed correct/incorrect). - if system.open_ended_grading_interface: - self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system.render_template) - self.controller_qs = controller_query_service.ControllerQueryService( - system.open_ended_grading_interface, system.render_template - ) - else: - self.peer_gs = MockPeerGradingService() - self.controller_qs = None - - self.system = system - - self.location_string = location - try: - self.location_string = self.location_string.to_deprecated_string() - except: - pass - - self.setup_response(system, location, definition, descriptor) - - def setup_response(self, system, location, definition, descriptor): - """ - Needs to be implemented by the inheritors of this module. Sets up additional fields used by the child modules. - @param system: Modulesystem - @param location: Module location - @param definition: XML definition - @param descriptor: Descriptor of the module - @return: None - """ - pass - - def closed(self): - if self.close_date is not None and datetime.now(UTC) > self.close_date: - return True - return False - - def check_if_closed(self): - if self.closed(): - return True, { - 'success': False, - # This is a student_facing_error - 'error': 'The problem close date has passed, and this problem is now closed.' - } - elif self.child_attempts > self.max_attempts: - return True, { - 'success': False, - # This is a student_facing_error - 'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format( - self.child_attempts, self.max_attempts - ) - } - else: - return False, {} - - def latest_answer(self): - """Empty string if not available""" - if not self.child_history: - return "" - return self.child_history[-1].get('answer', "") - - def latest_score(self): - """None if not available""" - if not self.child_history: - return None - return self.child_history[-1].get('score') - - def all_scores(self): - """None if not available""" - if not self.child_history: - return None - return [child_hist.get('score') for child_hist in self.child_history] - - def latest_post_assessment(self, system): - """Empty string if not available""" - if not self.child_history: - return "" - return self.child_history[-1].get('post_assessment', "") - - @staticmethod - def sanitize_html(answer): - """ - Take a student response and sanitize the HTML to prevent malicious script injection - or other unwanted content. 
- answer - any string - return - a cleaned version of the string - """ - clean_html = bleach.clean(answer, - tags=['embed', 'iframe', 'a', 'img', 'br'], - attributes=ALLOWED_HTML_ATTRS, - strip=True) - autolinked = bleach.linkify(clean_html, - callbacks=[bleach.callbacks.target_blank], - skip_pre=True, - tokenizer=HTMLTokenizer) - return OpenEndedChild.replace_newlines(autolinked) - - @staticmethod - def replace_newlines(html): - """ - Replaces "\n" newlines with
- """ - retv = re.sub(r'

$', '', re.sub(r'^

', '', html)) - return re.sub("\n", "
", retv) - - def new_history_entry(self, answer): - """ - Adds a new entry to the history dictionary - @param answer: The student supplied answer - @return: None - """ - answer = OpenEndedChild.sanitize_html(answer) - self.child_history.append({'answer': answer}) - self.stored_answer = None - - def record_latest_score(self, score): - """Assumes that state is right, so we're adding a score to the latest - history element""" - self.child_history[-1]['score'] = score - - def record_latest_post_assessment(self, post_assessment): - """Assumes that state is right, so we're adding a score to the latest - history element""" - self.child_history[-1]['post_assessment'] = post_assessment - - def change_state(self, new_state): - """ - A centralized place for state changes--allows for hooks. If the - current state matches the old state, don't run any hooks. - """ - if self.child_state == new_state: - return - - self.child_state = new_state - - if self.child_state == self.DONE: - self.child_attempts += 1 - - def get_instance_state(self): - """ - Get the current score and state - """ - - state = { - 'version': self.STATE_VERSION, - 'child_history': self.child_history, - 'child_state': self.child_state, - 'max_score': self._max_score, - 'child_attempts': self.child_attempts, - 'child_created': self.child_created, - 'stored_answer': self.stored_answer, - } - return json.dumps(state) - - def _allow_reset(self): - """Can the module be reset?""" - return self.child_state == self.DONE and self.child_attempts < self.max_attempts - - def max_score(self): - """ - Return max_score - """ - return self._max_score - - def get_score(self): - """ - Returns the last score in the list - """ - score = self.latest_score() - return {'score': score if score is not None else 0, - 'total': self._max_score} - - def reset(self, system): - """ - If resetting is allowed, reset the state. - - Returns {'success': bool, 'error': msg} - (error only present if not success) - """ - self.change_state(self.INITIAL) - return {'success': True} - - def get_display_answer(self): - latest = self.latest_answer() - if self.child_state == self.INITIAL: - if self.stored_answer is not None: - previous_answer = self.stored_answer - elif latest is not None and len(latest) > 0: - previous_answer = latest - else: - previous_answer = "" - previous_answer = previous_answer.replace("
", "\n").replace("
", "\n") - else: - if latest is not None and len(latest) > 0: - previous_answer = latest - else: - previous_answer = "" - previous_answer = previous_answer.replace("\n", "
") - - return previous_answer - - def store_answer(self, data, system): - if self.child_state != self.INITIAL: - # We can only store an answer if the problem has not moved into the assessment phase. - return self.out_of_sync_error(data) - - self.stored_answer = data['student_answer'] - return {'success': True} - - def get_progress(self): - ''' - For now, just return last score / max_score - ''' - if self._max_score > 0: - try: - return Progress(int(self.get_score()['score']), int(self._max_score)) - except Exception as err: - # This is a dev_facing_error - log.exception("Got bad progress from open ended child module. Max Score: {0}".format(self._max_score)) - return None - return None - - def out_of_sync_error(self, data, msg=''): - """ - return dict out-of-sync error message, and also log. - """ - # This is a dev_facing_error - log.warning("Open ended child state out sync. state: %r, data: %r. %s", - self.child_state, data, msg) - # This is a student_facing_error - return {'success': False, - 'error': 'The problem state got out-of-sync. Please try reloading the page.'} - - def get_html(self): - """ - Needs to be implemented by inheritors. Renders the HTML that students see. - @return: - """ - pass - - def handle_ajax(self): - """ - Needs to be implemented by child modules. Handles AJAX events. - @return: - """ - pass - - def is_submission_correct(self, score): - """ - Checks to see if a given score makes the answer correct. Very naive right now (>66% is correct) - @param score: Numeric score. - @return: Boolean correct. - """ - correct = False - if isinstance(score, (int, long, float, complex)): - score_ratio = int(score) / float(self.max_score()) - correct = (score_ratio >= 0.66) - return correct - - def is_last_response_correct(self): - """ - Checks to see if the last response in the module is correct. - @return: 'correct' if correct, otherwise 'incorrect' - """ - score = self.get_score()['score'] - correctness = 'correct' if self.is_submission_correct(score) else 'incorrect' - return correctness - - def upload_file_to_s3(self, file_data): - """ - Uploads a file to S3. - file_data: InMemoryUploadedFileObject that responds to read() and seek(). - @return: A URL corresponding to the uploaded object. - """ - - file_key = file_data.name + datetime.now(UTC).strftime( - xqueue_interface.dateformat - ) - - file_data.seek(0) - s3_public_url = upload_to_s3( - file_data, file_key, self.s3_interface - ) - - return s3_public_url - - def check_for_file_and_upload(self, data): - """ - Checks to see if a file was passed back by the student. If so, it will be uploaded to S3. - @param data: AJAX post dictionary containing keys student_file and valid_files_attached. - @return: has_file_to_upload, whether or not a file was in the data dictionary, - and image_tag, the html needed to create a link to the uploaded file. - """ - has_file_to_upload = False - image_tag = "" - - # Ensure that a valid file was uploaded. - if 'valid_files_attached' in data and \ - data['valid_files_attached'] in ['true', '1', True] and \ - data['student_file'] is not None and \ - len(data['student_file']) > 0: - has_file_to_upload = True - student_file = data['student_file'][0] - - # Upload the file to S3 and generate html to embed a link. 
- s3_public_url = self.upload_file_to_s3(student_file) - image_tag = self.generate_file_link_html_from_url(s3_public_url, student_file.name) - - return has_file_to_upload, image_tag - - def generate_file_link_html_from_url(self, s3_public_url, file_name): - """ - Create an html link to a given URL. - @param s3_public_url: URL of the file. - @param file_name: Name of the file. - @return: Html anchor tag linking to the file. - """ - image_link = """ - <a href="{0}">{1}</a> - """.format(s3_public_url, file_name) - return image_link - - def append_file_link_to_student_answer(self, data): - """ - Adds a file to a student answer after uploading it to S3. - @param data: AJAX data containing keys student_answer, valid_files_attached, and student_file. - @return: Boolean success, and updated AJAX data dictionary. - """ - - _ = self.system.service(self, "i18n").ugettext - - error_message = "" - - if not self.accept_file_upload: - # If the question does not accept file uploads, do not do anything - return True, error_message, data - - try: - # Try to upload the file to S3. - has_file_to_upload, image_tag = self.check_for_file_and_upload(data) - data['student_answer'] += image_tag - success = True - if not has_file_to_upload: - # If there is no file to upload, probably the student has embedded the link in the answer text - success, data['student_answer'] = self.check_for_url_in_text(data['student_answer']) - - # If success is False, we have not found a link, and no file was attached. - # Show error to student. - if success is False: - error_message = _( - "We could not find a file in your submission. " - "Please try choosing a file or pasting a URL to your " - "file into the answer box." - ) - - except Exception: - # In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely - # a config issue (development vs deployment). - log.exception("Student AJAX post to combined open ended xmodule indicated that it contained a file, " - "but the image was not able to be uploaded to S3. This could indicate a configuration " - "issue with this deployment and the S3_INTERFACE setting.") - success = False - error_message = _( - "We are having trouble saving your file. Please try another " - "file or paste a URL to your file into the answer box." - ) - - return success, error_message, data - - def check_for_url_in_text(self, string): - """ - Checks for urls in a string. - @param string: Arbitrary string. - @return: Boolean success, and the edited string. - """ - has_link = False - - # Find all links in the string. - links = re.findall(r'(https?://\S+)', string) - if len(links) > 0: - has_link = True - - # Autolink by wrapping links in anchor tags. - for link in links: - string = re.sub(link, self.generate_file_link_html_from_url(link, link), string) - - return has_link, string - - def get_eta(self): - if self.controller_qs: - response = self.controller_qs.check_for_eta(self.location_string) - else: - return "" - - success = response['success'] - if isinstance(success, basestring): - success = (success.lower() == "true") - - if success: - eta = controller_query_service.convert_seconds_to_human_readable(response['eta']) - eta_string = "Please check back for your response in at most {0}.".format(eta) - else: - eta_string = "" - - return eta_string - - @classmethod - def service_declaration(cls, service_name): - """ - This classmethod is copied from XBlock's service_declaration. - It is included to make this class act enough like an XBlock - to get i18n working on it.
- - This is currently only used for i18n, and will return "need" - in that case. - - Arguments: - service_name (string): the name of the service requested. - - Returns: - One of "need", "want", or None. - - """ - declaration = cls._combined_services.get(service_name) - return declaration diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py deleted file mode 100644 index 1ad247be74..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py +++ /dev/null @@ -1,168 +0,0 @@ -import logging -import dogstats_wrapper as dog_stats_api - -from .grading_service_module import GradingService -from opaque_keys.edx.keys import UsageKey - -log = logging.getLogger(__name__) - - -class PeerGradingService(GradingService): - """ - Interface with the grading controller for peer grading - """ - - METRIC_NAME = 'edxapp.open_ended_grading.peer_grading_service' - - def __init__(self, config, render_template): - config['render_template'] = render_template - super(PeerGradingService, self).__init__(config) - self.url = config['url'] + config['peer_grading'] - self.login_url = self.url + '/login/' - self.get_next_submission_url = self.url + '/get_next_submission/' - self.save_grade_url = self.url + '/save_grade/' - self.is_student_calibrated_url = self.url + '/is_student_calibrated/' - self.show_calibration_essay_url = self.url + '/show_calibration_essay/' - self.save_calibration_essay_url = self.url + '/save_calibration_essay/' - self.get_problem_list_url = self.url + '/get_problem_list/' - self.get_notifications_url = self.url + '/get_notifications/' - self.get_data_for_location_url = self.url + '/get_data_for_location/' - - def get_data_for_location(self, problem_location, student_id): - if isinstance(problem_location, UsageKey): - problem_location = problem_location.to_deprecated_string() - params = {'location': problem_location, 'student_id': student_id} - result = self.get(self.get_data_for_location_url, params) - self._record_result('get_data_for_location', result) - for key in result.keys(): - if key in ('success', 'error', 'version'): - continue - - dog_stats_api.histogram( - self._metric_name('get_data_for_location.{}'.format(key)), - result[key], - ) - return result - - def get_next_submission(self, problem_location, grader_id): - if isinstance(problem_location, UsageKey): - problem_location = problem_location.to_deprecated_string() - result = self._render_rubric(self.get( - self.get_next_submission_url, - { - 'location': problem_location, - 'grader_id': grader_id - } - )) - self._record_result('get_next_submission', result) - return result - - def save_grade(self, **kwargs): - data = kwargs - data.update({'rubric_scores_complete': True}) - result = self.post(self.save_grade_url, data) - self._record_result('save_grade', result) - return result - - def is_student_calibrated(self, problem_location, grader_id): - if isinstance(problem_location, UsageKey): - problem_location = problem_location.to_deprecated_string() - params = {'problem_id': problem_location, 'student_id': grader_id} - result = self.get(self.is_student_calibrated_url, params) - self._record_result( - 'is_student_calibrated', - result, - tags=['calibrated:{}'.format(result.get('calibrated'))] - ) - return result - - def show_calibration_essay(self, problem_location, grader_id): - if isinstance(problem_location, UsageKey): - problem_location = problem_location.to_deprecated_string() - params 
= {'problem_id': problem_location, 'student_id': grader_id} - result = self._render_rubric(self.get(self.show_calibration_essay_url, params)) - self._record_result('show_calibration_essay', result) - return result - - def save_calibration_essay(self, **kwargs): - data = kwargs - data.update({'rubric_scores_complete': True}) - result = self.post(self.save_calibration_essay_url, data) - self._record_result('show_calibration_essay', result) - return result - - def get_problem_list(self, course_id, grader_id): - params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id} - result = self.get(self.get_problem_list_url, params) - - if 'problem_list' in result: - for problem in result['problem_list']: - problem['location'] = course_id.make_usage_key_from_deprecated_string(problem['location']) - - self._record_result('get_problem_list', result) - dog_stats_api.histogram( - self._metric_name('get_problem_list.result.length'), - len(result.get('problem_list', [])), - ) - return result - - def get_notifications(self, course_id, grader_id): - params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id} - result = self.get(self.get_notifications_url, params) - self._record_result( - 'get_notifications', - result, - tags=['needs_to_peer_grade:{}'.format(result.get('student_needs_to_peer_grade'))] - ) - return result - - -class MockPeerGradingService(object): - """ - This is a mock peer grading service that can be used for unit tests - without making actual service calls to the grading controller - """ - - def get_next_submission(self, problem_location, grader_id): - return { - 'success': True, - 'submission_id': 1, - 'submission_key': "", - 'student_response': 'Sample student response.', - 'prompt': 'Sample submission prompt.', - 'rubric': 'Placeholder text for the full rubric.', - 'max_score': 4 - } - - def save_grade(self, **kwargs): - return {'success': True} - - def is_student_calibrated(self, problem_location, grader_id): - return {'success': True, 'calibrated': True} - - def show_calibration_essay(self, problem_location, grader_id): - return {'success': True, - 'submission_id': 1, - 'submission_key': '', - 'student_response': 'Sample student response.', - 'prompt': 'Sample submission prompt.', - 'rubric': 'Placeholder text for the full rubric.', - 'max_score': 4} - - def save_calibration_essay(self, **kwargs): - return {'success': True, 'actual_score': 2} - - def get_problem_list(self, course_id, grader_id): - return {'success': True, - 'problem_list': [ - ]} - - def get_data_for_location(self, problem_location, student_id): - return { - "version": 1, - "count_graded": 3, - "count_required": 3, - "success": True, - "student_sub_count": 1, - 'submissions_available': 0, - } diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py deleted file mode 100644 index fb82c54271..0000000000 --- a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py +++ /dev/null @@ -1,339 +0,0 @@ -import json -import logging -from lxml import etree - -from xmodule.capa_module import ComplexEncoder -from xmodule.progress import Progress -from xmodule.stringify import stringify_children -import openendedchild - -from .combined_open_ended_rubric import CombinedOpenEndedRubric - -log = logging.getLogger("edx.courseware") - - -class SelfAssessmentModule(openendedchild.OpenEndedChild): - """ - A Self Assessment module that allows students to 
write open-ended responses, - submit, then see a rubric and rate themselves. Persists student supplied - hints, answers, and assessment judgment (currently only correct/incorrect). - Parses xml definition file--see below for exact format. - - Sample XML format: - <selfassessment> - <hintprompt> - What hint about this problem would you give to someone? - </hintprompt> - <submitmessage> - Save Successful. Thanks for participating! - </submitmessage> - </selfassessment> - """ - TEMPLATE_DIR = "combinedopenended/selfassessment" - # states - INITIAL = 'initial' - ASSESSING = 'assessing' - REQUEST_HINT = 'request_hint' - DONE = 'done' - - def setup_response(self, system, location, definition, descriptor): - """ - Sets up the module - @param system: Modulesystem - @param location: location, to let the module know where it is. - @param definition: XML definition of the module. - @param descriptor: SelfAssessmentDescriptor - @return: None - """ - self.child_prompt = stringify_children(self.child_prompt) - self.child_rubric = stringify_children(self.child_rubric) - - def get_html(self, system): - """ - Gets context and renders HTML that represents the module - @param system: Modulesystem - @return: Rendered HTML - """ - # set context variables and render template - previous_answer = self.get_display_answer() - - # Use the module name as a unique id to pass to the template. - try: - module_id = self.system.location.name - except AttributeError: - # In cases where we don't have a system or a location, use a fallback. - module_id = "self_assessment" - - context = { - 'prompt': self.child_prompt, - 'previous_answer': previous_answer, - 'ajax_url': system.ajax_url, - 'initial_rubric': self.get_rubric_html(system), - 'state': self.child_state, - 'allow_reset': self._allow_reset(), - 'child_type': 'selfassessment', - 'accept_file_upload': self.accept_file_upload, - 'module_id': module_id, - } - - html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context) - return html - - def handle_ajax(self, dispatch, data, system): - """ - This is called by courseware.module_render, to handle an AJAX call. - "data" is request.POST. - - Returns a json dictionary: - { 'progress_changed' : True/False, - 'progress': 'none'/'in_progress'/'done', - } - """ - - handlers = { - 'save_answer': self.save_answer, - 'save_assessment': self.save_assessment, - 'save_post_assessment': self.save_hint, - 'store_answer': self.store_answer, - } - - if dispatch not in handlers: - # This is a dev_facing_error - log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) - # This is a dev_facing_error - return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) - - before = self.get_progress() - d = handlers[dispatch](data, system) - after = self.get_progress() - d.update({ - 'progress_changed': after != before, - 'progress_status': Progress.to_js_status_str(after), - }) - return json.dumps(d, cls=ComplexEncoder) - - def get_rubric_html(self, system): - """ - Return the appropriate version of the rubric, based on the state.
- """ - if self.child_state == self.INITIAL: - return '' - - rubric_renderer = CombinedOpenEndedRubric(system.render_template, False) - rubric_dict = rubric_renderer.render_rubric(self.child_rubric) - success = rubric_dict['success'] - rubric_html = rubric_dict['html'] - - # we'll render it - context = { - 'rubric': rubric_html, - 'max_score': self._max_score, - } - - if self.child_state == self.ASSESSING: - context['read_only'] = False - elif self.child_state in (self.POST_ASSESSMENT, self.DONE): - context['read_only'] = True - else: - # This is a dev_facing_error - raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state)) - - return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context) - - def get_hint_html(self, system): - """ - Return the appropriate version of the hint view, based on state. - """ - if self.child_state in (self.INITIAL, self.ASSESSING): - return '' - - if self.child_state == self.DONE: - # display the previous hint - latest = self.latest_post_assessment(system) - hint = latest if latest is not None else '' - else: - hint = '' - - context = {'hint': hint} - - if self.child_state == self.POST_ASSESSMENT: - context['read_only'] = False - elif self.child_state == self.DONE: - context['read_only'] = True - else: - # This is a dev_facing_error - raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state)) - - return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context) - - def save_answer(self, data, system): - """ - After the answer is submitted, show the rubric. - - Args: - data: the request dictionary passed to the ajax request. Should contain - a key 'student_answer' - - Returns: - Dictionary with keys 'success' and either 'error' (if not success), - or 'rubric_html' (if success). - """ - # Check to see if this problem is closed - closed, msg = self.check_if_closed() - if closed: - return msg - - if self.child_state != self.INITIAL: - return self.out_of_sync_error(data) - - error_message = "" - # add new history element with answer and empty score and hint. - success, error_message, data = self.append_file_link_to_student_answer(data) - if success: - data['student_answer'] = SelfAssessmentModule.sanitize_html(data['student_answer']) - self.new_history_entry(data['student_answer']) - self.change_state(self.ASSESSING) - - return { - 'success': success, - 'rubric_html': self.get_rubric_html(system), - 'error': error_message, - 'student_response': data['student_answer'].replace("\n", "
"), - } - - def save_assessment(self, data, _system): - """ - Save the assessment. If the student said they're right, don't ask for a - hint, and go straight to the done state. Otherwise, do ask for a hint. - - Returns a dict { 'success': bool, 'state': state, - - 'hint_html': hint_html OR 'message_html': html and 'allow_reset', - - 'error': error-msg}, - - with 'error' only present if 'success' is False, and 'hint_html' or - 'message_html' only if success is true - - :param data: A `webob.multidict.MultiDict` containing the keys - asasssment: The sum of assessment scores - score_list[]: A multivalue key containing all the individual scores - """ - - closed, msg = self.check_if_closed() - if closed: - return msg - - if self.child_state != self.ASSESSING: - return self.out_of_sync_error(data) - - try: - score = int(data.get('assessment')) - score_list = [int(x) for x in data.getall('score_list[]')] - except (ValueError, TypeError): - # This is a dev_facing_error - log.error("Non-integer score value passed to save_assessment, or no score list present.") - # This is a student_facing_error - _ = self.system.service(self, "i18n").ugettext - return { - 'success': False, - 'error': _("Error saving your score. Please notify course staff.") - } - - # Record score as assessment and rubric scores as post assessment - self.record_latest_score(score) - self.record_latest_post_assessment(json.dumps(score_list)) - - d = {'success': True, } - - self.change_state(self.DONE) - d['allow_reset'] = self._allow_reset() - - d['state'] = self.child_state - return d - - def save_hint(self, data, _system): - ''' - Not used currently, as hints have been removed from the system. - Save the hint. - Returns a dict { 'success': bool, - 'message_html': message_html, - 'error': error-msg, - 'allow_reset': bool}, - with the error key only present if success is False and message_html - only if True. - ''' - if self.child_state != self.POST_ASSESSMENT: - # Note: because we only ask for hints on wrong answers, may not have - # the same number of hints and answers. - return self.out_of_sync_error(data) - - self.record_latest_post_assessment(data['hint']) - self.change_state(self.DONE) - - return { - 'success': True, - 'message_html': '', - 'allow_reset': self._allow_reset(), - } - - def latest_post_assessment(self, system): - latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system) - try: - rubric_scores = json.loads(latest_post_assessment) - except: - rubric_scores = [] - return [rubric_scores] - - -class SelfAssessmentDescriptor(object): - """ - Module for adding self assessment questions to courses - """ - mako_template = "widgets/html-edit.html" - module_class = SelfAssessmentModule - filename_extension = "xml" - - has_score = True - - def __init__(self, system): - self.system = system - - @classmethod - def definition_from_xml(cls, xml_object, system): - """ - Pull out the rubric, prompt, and submitmessage into a dictionary. - - Returns: - { - 'submitmessage': 'some-html' - 'hintprompt': 'some-html' - } - """ - expected_children = [] - for child in expected_children: - if len(xml_object.xpath(child)) != 1: - # This is a staff_facing_error - raise ValueError( - u"Self assessment definition must include exactly one '{0}' tag. 
Contact the learning sciences group for assistance.".format( - child)) - - def parse(k): - """Assumes that xml_object has child k""" - return stringify_children(xml_object.xpath(k)[0]) - - return {} - - def definition_to_xml(self, resource_fs): - '''Return an xml element representing this definition.''' - elt = etree.Element('selfassessment') - - def add_child(k): - child_str = u'<{tag}>{body}'.format(tag=k, body=getattr(self, k)) - child_node = etree.fromstring(child_str) - elt.append(child_node) - - for child in []: - add_child(child) - - return elt diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py deleted file mode 100644 index 1c246aac71..0000000000 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ /dev/null @@ -1,744 +0,0 @@ -""" -ORA1. Deprecated. -""" -import json -import logging - -from datetime import datetime - -from django.utils.timezone import UTC -from lxml import etree -from pkg_resources import resource_string - -from xblock.fields import Dict, String, Scope, Boolean, Float, Reference - -from xmodule.capa_module import ComplexEncoder -from xmodule.fields import Date, Timedelta -from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem -from xmodule.raw_module import RawDescriptor -from xmodule.timeinfo import TimeInfo -from xmodule.x_module import XModule, module_attr -from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, MockPeerGradingService -from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError -from xmodule.validation import StudioValidation, StudioValidationMessage - -from open_ended_grading_classes import combined_open_ended_rubric - -log = logging.getLogger(__name__) - -# Make '_' a no-op so we can scrape strings. Using lambda instead of -# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file -_ = lambda text: text - -EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff." -MAX_ALLOWED_FEEDBACK_LENGTH = 5000 - - -class PeerGradingFields(object): - use_for_single_location = Boolean( - display_name=_("Show Single Problem"), - help=_('When True, only the single problem specified by "Link to Problem Location" is shown. ' - 'When False, a panel is displayed with all problems available for peer grading.'), - default=False, - scope=Scope.settings - ) - link_to_location = Reference( - display_name=_("Link to Problem Location"), - help=_('The location of the problem being graded. Only used when "Show Single Problem" is True.'), - default="", - scope=Scope.settings - ) - graded = Boolean( - display_name=_("Graded"), - help=_('Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.'), - default=False, - scope=Scope.settings - ) - due = Date( - help=_("Due date that should be displayed."), - scope=Scope.settings) - graceperiod = Timedelta( - help=_("Amount of grace to give on the due date."), - scope=Scope.settings - ) - student_data_for_location = Dict( - help=_("Student data for a given peer grading problem."), - scope=Scope.user_state - ) - weight = Float( - display_name=_("Problem Weight"), - help=_("Defines the number of points each problem is worth. 
If the value is not set, each problem is worth one point."), - scope=Scope.settings, values={"min": 0, "step": ".1"}, - default=1 - ) - display_name = String( - display_name=_("Display Name"), - help=_("Display name for this module"), - scope=Scope.settings, - default=_("Peer Grading Interface") - ) - data = String( - help=_("Html contents to display for this module"), - default='', - scope=Scope.content - ) - - -class InvalidLinkLocation(Exception): - """ - Exception for the case in which a peer grading module tries to link to an invalid location. - """ - pass - - -class PeerGradingModule(PeerGradingFields, XModule): - """ - PeerGradingModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__ - """ - _VERSION = 1 - - js = { - 'coffee': [ - resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'), - resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'), - resource_string(__name__, 'js/src/javascript_loader.coffee'), - ], - 'js': [ - resource_string(__name__, 'js/src/collapsible.js'), - ] - } - js_module_name = "PeerGrading" - - css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} - - def __init__(self, *args, **kwargs): - super(PeerGradingModule, self).__init__(*args, **kwargs) - - # Copy this to a new variable so that we can edit it if needed. - # We need to edit it if the linked module cannot be found, so - # we can revert to panel model. - self.use_for_single_location_local = self.use_for_single_location - - # We need to set the location here so the child modules can use it. - self.runtime.set('location', self.location) - if self.runtime.open_ended_grading_interface: - self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system.render_template) - else: - self.peer_gs = MockPeerGradingService() - - if self.use_for_single_location_local: - linked_descriptors = self.descriptor.get_required_module_descriptors() - if len(linked_descriptors) == 0: - error_msg = "Peer grading module {0} is trying to use single problem mode without " - "a location specified.".format(self.location) - log.error(error_msg) - # Change module over to panel mode from single problem mode. - self.use_for_single_location_local = False - else: - self.linked_problem = self.system.get_module(linked_descriptors[0]) - - try: - self.timeinfo = TimeInfo(self.due, self.graceperiod) - except Exception: - log.error("Error parsing due date information in location {0}".format(self.location)) - raise - - self.display_due_date = self.timeinfo.display_due_date - - try: - self.student_data_for_location = json.loads(self.student_data_for_location) - except Exception: # pylint: disable=broad-except - # OK with this broad exception because we just want to continue on any error - pass - - @property - def ajax_url(self): - """ - Returns the `ajax_url` from the system, with any trailing '/' stripped off. - """ - ajax_url = self.system.ajax_url - if not ajax_url.endswith("/"): - ajax_url += "/" - return ajax_url - - def closed(self): - return self._closed(self.timeinfo) - - def _closed(self, timeinfo): - if timeinfo.close_date is not None and datetime.now(UTC()) > timeinfo.close_date: - return True - return False - - def _err_response(self, msg): - """ - Return a HttpResponse with a json dump with success=False, and the given error message. 
- """ - return {'success': False, 'error': msg} - - def _check_required(self, data, required): - actual = set(data.keys()) - missing = required - actual - if len(missing) > 0: - return False, "Missing required keys: {0}".format(', '.join(missing)) - else: - return True, "" - - def get_html(self): - """ - Needs to be implemented by inheritors. Renders the HTML that students see. - @return: - """ - if self.closed(): - return self.peer_grading_closed() - if not self.use_for_single_location_local: - return self.peer_grading() - else: - # b/c handle_ajax expects serialized data payload and directly calls peer_grading - return self.peer_grading_problem({'location': self.link_to_location.to_deprecated_string()})['html'] - - def handle_ajax(self, dispatch, data): - """ - Needs to be implemented by child modules. Handles AJAX events. - @return: - """ - handlers = { - 'get_next_submission': self.get_next_submission, - 'show_calibration_essay': self.show_calibration_essay, - 'is_student_calibrated': self.is_student_calibrated, - 'save_grade': self.save_grade, - 'save_calibration_essay': self.save_calibration_essay, - 'problem': self.peer_grading_problem, - } - - if dispatch not in handlers: - # This is a dev_facing_error - log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) - # This is a dev_facing_error - return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) - - data_dict = handlers[dispatch](data) - - return json.dumps(data_dict, cls=ComplexEncoder) - - def query_data_for_location(self, location): - student_id = self.system.anonymous_student_id - success = False - response = {} - - try: - response = self.peer_gs.get_data_for_location(location, student_id) - _count_graded = response['count_graded'] - _count_required = response['count_required'] - success = True - except GradingServiceError: - # This is a dev_facing_error - log.exception("Error getting location data from controller for location %s, student %s", location, student_id) - - return success, response - - def get_progress(self): - pass - - def get_score(self): - max_score = None - score = None - weight = self.weight - - #The old default was None, so set to 1 if it is the old default weight - if weight is None: - weight = 1 - score_dict = { - 'score': score, - 'total': max_score, - } - if not self.use_for_single_location_local or not self.graded: - return score_dict - - try: - count_graded = self.student_data_for_location['count_graded'] - count_required = self.student_data_for_location['count_required'] - except: - success, response = self.query_data_for_location(self.link_to_location) - if not success: - log.exception( - "No instance data found and could not get data from controller for loc {0} student {1}".format( - self.system.location.to_deprecated_string(), self.system.anonymous_student_id - )) - return None - count_graded = response['count_graded'] - count_required = response['count_required'] - if count_required > 0 and count_graded >= count_required: - # Ensures that once a student receives a final score for peer grading, that it does not change. - self.student_data_for_location = response - - score = int(count_graded >= count_required and count_graded > 0) * float(weight) - total = float(weight) - score_dict['score'] = score - score_dict['total'] = total - - return score_dict - - def max_score(self): - ''' Maximum score. 
Two notes: - - * This is generic; in abstract, a problem could be 3/5 points on one - randomization, and 5/7 on another - ''' - max_grade = None - if self.use_for_single_location_local and self.graded: - max_grade = self.weight - return max_grade - - def get_next_submission(self, data): - """ - Makes a call to the grading controller for the next essay that should be graded - Returns a json dict with the following keys: - - 'success': bool - - 'submission_id': a unique identifier for the submission, to be passed back - with the grade. - - 'submission': the submission, rendered as read-only html for grading - - 'rubric': the rubric, also rendered as html. - - 'submission_key': a key associated with the submission for validation reasons - - 'error': if success is False, will have an error message with more info. - """ - required = set(['location']) - success, message = self._check_required(data, required) - if not success: - return self._err_response(message) - grader_id = self.system.anonymous_student_id - location = data['location'] - - try: - response = self.peer_gs.get_next_submission(location, grader_id) - return response - except GradingServiceError: - # This is a dev_facing_error - log.exception("Error getting next submission. server url: %s location: %s, grader_id: %s", self.peer_gs.url, location, grader_id) - # This is a student_facing_error - return {'success': False, - 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR} - - def save_grade(self, data): - """ - Saves the grade of a given submission. - Input: - The request should have the following keys: - location - problem location - submission_id - id associated with this submission - submission_key - submission key given for validation purposes - score - the grade that was given to the submission - feedback - the feedback from the student - Returns - A json object with the following keys: - success: bool indicating whether the save was a success - error: if there was an error in the submission, this is the error message - """ - - required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown'] - if data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]: - required.append("rubric_scores[]") - success, message = self._check_required(data, set(required)) - if not success: - return self._err_response(message) - - success, message = self._check_feedback_length(data) - if not success: - return self._err_response(message) - - data_dict = {k: data.get(k) for k in required} - if 'rubric_scores[]' in required: - data_dict['rubric_scores'] = data.getall('rubric_scores[]') - data_dict['grader_id'] = self.system.anonymous_student_id - - try: - response = self.peer_gs.save_grade(**data_dict) - success, location_data = self.query_data_for_location(data_dict['location']) - #Don't check for success above because the response = statement will raise the same Exception as the one - #that will cause success to be false. - response.update({'required_done': False}) - if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded']) >= int(location_data['count_required']): - response['required_done'] = True - return response - except GradingServiceError: - # This is a dev_facing_error - log.exception("Error saving grade to open ended grading service. 
server url: %s", self.peer_gs.url) - - # This is a student_facing_error - return { - 'success': False, - 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR - } - - def is_student_calibrated(self, data): - """ - Calls the grading controller to see if the given student is calibrated - on the given problem - - Input: - In the request, we need the following arguments: - location - problem location - - Returns: - Json object with the following keys - success - bool indicating whether or not the call was successful - calibrated - true if the grader has fully calibrated and can now move on to grading - - false if the grader is still working on calibration problems - total_calibrated_on_so_far - the number of calibration essays for this problem - that this grader has graded - """ - - required = set(['location']) - success, message = self._check_required(data, required) - if not success: - return self._err_response(message) - grader_id = self.system.anonymous_student_id - - location = data['location'] - - try: - response = self.peer_gs.is_student_calibrated(location, grader_id) - return response - except GradingServiceError: - # This is a dev_facing_error - log.exception("Error from open ended grading service. server url: %s, grader_id: %s, location: %s", self.peer_gs.url, grader_id, location) - # This is a student_facing_error - return { - 'success': False, - 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR - } - - def show_calibration_essay(self, data): - """ - Fetch the next calibration essay from the grading controller and return it - Inputs: - In the request - location - problem location - - Returns: - A json dict with the following keys - 'success': bool - - 'submission_id': a unique identifier for the submission, to be passed back - with the grade. - - 'submission': the submission, rendered as read-only html for grading - - 'rubric': the rubric, also rendered as html. - - 'submission_key': a key associated with the submission for validation reasons - - 'error': if success is False, will have an error message with more info. - - """ - - required = set(['location']) - success, message = self._check_required(data, required) - if not success: - return self._err_response(message) - - grader_id = self.system.anonymous_student_id - - location = data['location'] - try: - response = self.peer_gs.show_calibration_essay(location, grader_id) - return response - except GradingServiceError: - # This is a dev_facing_error - log.exception("Error from open ended grading service. server url: %s, location: %s", self.peer_gs.url, location) - # This is a student_facing_error - return {'success': False, - 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR} - # if we can't parse the rubric into HTML, - except etree.XMLSyntaxError: - # This is a dev_facing_error - log.exception("Cannot parse rubric string.") - # This is a student_facing_error - return {'success': False, - 'error': 'Error displaying submission. Please notify course staff.'} - - def save_calibration_essay(self, data): - """ - Saves the grader's grade of a given calibration. 
- Input: - The request should have the following keys: - location - problem location - submission_id - id associated with this submission - submission_key - submission key given for validation purposes - score - the grade that was given to the submission - feedback - the feedback from the student - Returns - A json object with the following keys: - success: bool indicating whether the save was a success - error: if there was an error in the submission, this is the error message - actual_score: the score that the instructor gave to this calibration essay - - """ - - required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]']) - success, message = self._check_required(data, required) - if not success: - return self._err_response(message) - - data_dict = {k: data.get(k) for k in required} - data_dict['rubric_scores'] = data.getall('rubric_scores[]') - data_dict['student_id'] = self.system.anonymous_student_id - data_dict['calibration_essay_id'] = data_dict['submission_id'] - - try: - response = self.peer_gs.save_calibration_essay(**data_dict) - if 'actual_rubric' in response: - rubric_renderer = combined_open_ended_rubric.CombinedOpenEndedRubric(self.system.render_template, True) - response['actual_rubric'] = rubric_renderer.render_rubric(response['actual_rubric'])['html'] - return response - except GradingServiceError: - # This is a dev_facing_error - log.exception("Error saving calibration grade") - # This is a student_facing_error - return self._err_response('There was an error saving your score. Please notify course staff.') - - def peer_grading_closed(self): - ''' - Show the Peer grading closed template - ''' - html = self.system.render_template('peer_grading/peer_grading_closed.html', { - 'use_for_single_location': self.use_for_single_location_local - }) - return html - - def _find_corresponding_module_for_location(self, location): - """ - Find the peer grading module that exists at the given location. - """ - try: - return self.descriptor.system.load_item(location) - except ItemNotFoundError: - # The linked problem doesn't exist. - log.error("Problem {0} does not exist in this course.".format(location)) - raise - except NoPathToItem: - # The linked problem does not have a path to it (ie is in a draft or other strange state). - log.error("Cannot find a path to problem {0} in this course.".format(location)) - raise - - def peer_grading(self, _data=None): - ''' - Show a peer grading interface - ''' - - # call problem list service - success = False - error_text = "" - problem_list = [] - try: - problem_list_dict = self.peer_gs.get_problem_list(self.course_id, self.system.anonymous_student_id) - success = problem_list_dict['success'] - if 'error' in problem_list_dict: - error_text = problem_list_dict['error'] - - problem_list = problem_list_dict['problem_list'] - - except GradingServiceError: - # This is a student_facing_error - error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR - log.error(error_text) - success = False - # catch error if if the json loads fails - except ValueError: - # This is a student_facing_error - error_text = "Could not get list of problems to peer grade. Please notify course staff." 
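# Editor's sketch (not part of the removed source): the call-and-fallback
# pattern that peer_grading() implements above, isolated as a helper. The
# service object and the local GradingServiceError stand-in are assumptions;
# only the error strings mirror the surrounding code.
class GradingServiceError(Exception):
    """Stand-in for the real GradingServiceError exception class."""

STAFF_MSG = "Could not get list of problems to peer grade. Please notify course staff."

def fetch_problem_list(service, course_id, student_id):
    """Return (success, problem_list, error_text); never raises to the caller."""
    try:
        response = service.get_problem_list(course_id, student_id)
        return response.get('success', False), response.get('problem_list', []), response.get('error', '')
    except GradingServiceError:
        return False, [], "Failed to contact external graders. Please notify course staff."
    except ValueError:
        # The controller answered with un-parseable JSON.
        return False, [], STAFF_MSG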
- log.error(error_text) - success = False - except Exception: - log.exception("Could not contact peer grading service.") - success = False - - good_problem_list = [] - for problem in problem_list: - problem_location = problem['location'] - try: - descriptor = self._find_corresponding_module_for_location(problem_location) - except (NoPathToItem, ItemNotFoundError): - continue - if descriptor: - problem['due'] = descriptor.due - grace_period = descriptor.graceperiod - try: - problem_timeinfo = TimeInfo(problem['due'], grace_period) - except Exception: - log.error("Malformed due date or grace period string for location {0}".format(problem_location)) - raise - if self._closed(problem_timeinfo): - problem['closed'] = True - else: - problem['closed'] = False - else: - # if we can't find the due date, assume that it doesn't have one - problem['due'] = None - problem['closed'] = False - good_problem_list.append(problem) - - ajax_url = self.ajax_url - html = self.system.render_template('peer_grading/peer_grading.html', { - 'ajax_url': ajax_url, - 'success': success, - 'problem_list': good_problem_list, - 'error_text': error_text, - # Checked above - 'staff_access': False, - 'use_single_location': self.use_for_single_location_local, - }) - - return html - - def peer_grading_problem(self, data=None): - ''' - Show individual problem interface - ''' - if data is None or data.get('location') is None: - if not self.use_for_single_location_local: - # This is an error case, because it must be set to use a single location to be called without get parameters - # This is a dev_facing_error - log.error( - "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.") - return {'html': "", 'success': False} - problem_location = self.link_to_location - - elif data.get('location') is not None: - problem_location = self.course_id.make_usage_key_from_deprecated_string(data.get('location')) - - self._find_corresponding_module_for_location(problem_location) - - ajax_url = self.ajax_url - html = self.system.render_template('peer_grading/peer_grading_problem.html', { - 'view_html': '', - 'problem_location': problem_location, - 'course_id': self.course_id, - 'ajax_url': ajax_url, - # Checked above - 'staff_access': False, - 'use_single_location': self.use_for_single_location_local, - }) - - return {'html': html, 'success': True} - - def get_instance_state(self): - """ - Returns the current instance state. The module can be recreated from the instance state. - Input: None - Output: A dictionary containing the instance state. - """ - - state = { - 'student_data_for_location': self.student_data_for_location, - } - - return json.dumps(state) - - def _check_feedback_length(self, data): - feedback = data.get("feedback") - if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH: - return False, "Feedback is too long, Max length is {0} characters.".format( - MAX_ALLOWED_FEEDBACK_LENGTH - ) - else: - return True, "" - - def validate(self): - """ - Message for either error or warning validation message/s. - - Returns message and type. Priority given to error type message. 
- """ - return self.descriptor.validate() - - -class PeerGradingDescriptor(PeerGradingFields, RawDescriptor): - """ - Module for adding peer grading questions - """ - mako_template = "widgets/raw-edit.html" - module_class = PeerGradingModule - filename_extension = "xml" - - has_score = True - always_recalculate_grades = True - - #Specify whether or not to pass in open ended interface - needs_open_ended_interface = True - - metadata_translations = { - 'is_graded': 'graded', - 'attempts': 'max_attempts', - 'due_data': 'due' - } - - @property - def non_editable_metadata_fields(self): - non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields - non_editable_fields.extend([PeerGradingFields.due, PeerGradingFields.graceperiod]) - return non_editable_fields - - def get_required_module_descriptors(self): - """ - Returns a list of XModuleDescriptor instances upon which this module depends, but are - not children of this module. - """ - - # If use_for_single_location is True, this is linked to an open ended problem. - if self.use_for_single_location: - # Try to load the linked module. - # If we can't load it, return empty list to avoid exceptions on progress page. - try: - linked_module = self.system.load_item(self.link_to_location) - return [linked_module] - except (NoPathToItem, ItemNotFoundError): - error_message = ("Cannot find the combined open ended module " - "at location {0} being linked to from peer " - "grading module {1}").format(self.link_to_location, self.location) - log.error(error_message) - return [] - else: - return [] - - # Proxy to PeerGradingModule so that external callers don't have to know if they're working - # with a module or a descriptor - closed = module_attr('closed') - get_instance_state = module_attr('get_instance_state') - get_next_submission = module_attr('get_next_submission') - graded = module_attr('graded') - is_student_calibrated = module_attr('is_student_calibrated') - peer_grading = module_attr('peer_grading') - peer_grading_closed = module_attr('peer_grading_closed') - peer_grading_problem = module_attr('peer_grading_problem') - peer_gs = module_attr('peer_gs') - query_data_for_location = module_attr('query_data_for_location') - save_calibration_essay = module_attr('save_calibration_essay') - save_grade = module_attr('save_grade') - show_calibration_essay = module_attr('show_calibration_essay') - use_for_single_location_local = module_attr('use_for_single_location_local') - _find_corresponding_module_for_location = module_attr('_find_corresponding_module_for_location') - - def validate(self): - """ - Validates the state of this instance. This is the override of the general XBlock method, - and it will also ask its superclass to validate. - """ - validation = super(PeerGradingDescriptor, self).validate() - validation = StudioValidation.copy(validation) - - i18n_service = self.runtime.service(self, "i18n") - - validation.summary = StudioValidationMessage( - StudioValidationMessage.ERROR, - i18n_service.ugettext( - "ORA1 is no longer supported. To use this assessment, " - "replace this ORA1 component with an ORA2 component." 
- ) - ) - return validation diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index d66e4b8e22..498e145479 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -41,16 +41,6 @@ MODULE_DIR = path(__file__).dirname() DATA_DIR = MODULE_DIR.parent.parent.parent.parent / "test" / "data" -open_ended_grading_interface = { - 'url': 'blah/', - 'username': 'incorrect_user', - 'password': 'incorrect_pass', - 'staff_grading': 'staff_grading', - 'peer_grading': 'peer_grading', - 'grading_controller': 'grading_controller', -} - - class TestModuleSystem(ModuleSystem): # pylint: disable=abstract-method """ ModuleSystem for testing @@ -150,7 +140,6 @@ def get_test_system(course_id=SlashSeparatedCourseKey('org', 'course', 'run')): }, node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), anonymous_student_id='student', - open_ended_grading_interface=open_ended_grading_interface, course_id=course_id, error_descriptor_class=ErrorDescriptor, get_user_role=Mock(name='get_test_system.get_user_role', is_staff=False), diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py deleted file mode 100644 index 5b7bfc815d..0000000000 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ /dev/null @@ -1,1607 +0,0 @@ -""" -Tests for the various pieces of the CombinedOpenEndedGrading system - -OpenEndedChild -OpenEndedModule - -""" - -import json -import logging -import unittest - -from datetime import datetime -from lxml import etree -from lxml.html import fragment_fromstring -from mock import Mock, MagicMock, patch -from pytz import UTC -from webob.multidict import MultiDict - -from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild -from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule -from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule -from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module -from xmodule.combined_open_ended_module import CombinedOpenEndedModule -from opaque_keys.edx.locations import Location -from xmodule.tests import get_test_system, test_util_open_ended -from xmodule.progress import Progress -from xmodule.validation import StudioValidationMessage -from xmodule.x_module import STUDENT_VIEW - -from xmodule.tests.test_util_open_ended import ( - DummyModulestore, TEST_STATE_SA_IN, - MOCK_INSTANCE_STATE, TEST_STATE_SA, TEST_STATE_AI, TEST_STATE_AI2, TEST_STATE_AI2_INVALID, - TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile, INSTANCE_INCONSISTENT_STATE, - INSTANCE_INCONSISTENT_STATE2, INSTANCE_INCONSISTENT_STATE3, INSTANCE_INCONSISTENT_STATE4, - INSTANCE_INCONSISTENT_STATE5 -) - -from xblock.field_data import DictFieldData -from xblock.fields import ScopeIds -import capa.xqueue_interface as xqueue_interface - - -log = logging.getLogger(__name__) - -ORG = 'edX' -COURSE = 'open_ended' # name of directory with course data - - -class OpenEndedChildTest(unittest.TestCase): - """ - Test the open ended child class - """ - location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion") - - metadata = json.dumps({'attempts': '10'}) - prompt = etree.XML("This is a question prompt") - rubric = ''' - - Response Quality - - - - ''' - max_score = 1 - - static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 
'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'close_date': None, - 's3_interface': "", - 'open_ended_grading_interface': {}, - 'skip_basic_checks': False, - 'control': { - 'required_peer_grading': 1, - 'peer_grader_count': 1, - 'min_to_calibrate': 3, - 'max_to_calibrate': 6, - 'peer_grade_finished_submissions_when_none_pending': False, - } - } - definition = Mock() - descriptor = Mock() - - def setUp(self): - super(OpenEndedChildTest, self).setUp() - self.test_system = get_test_system() - self.test_system.open_ended_grading_interface = None - self.openendedchild = OpenEndedChild(self.test_system, self.location, - self.definition, self.descriptor, self.static_data, self.metadata) - - def test_latest_answer_empty(self): - answer = self.openendedchild.latest_answer() - self.assertEqual(answer, "") - - def test_latest_score_empty(self): - answer = self.openendedchild.latest_score() - self.assertEqual(answer, None) - - def test_latest_post_assessment_empty(self): - answer = self.openendedchild.latest_post_assessment(self.test_system) - self.assertEqual(answer, "") - - def test_new_history_entry(self): - new_answer = "New Answer" - self.openendedchild.new_history_entry(new_answer) - answer = self.openendedchild.latest_answer() - self.assertEqual(answer, new_answer) - - new_answer = "Newer Answer" - self.openendedchild.new_history_entry(new_answer) - answer = self.openendedchild.latest_answer() - self.assertEqual(new_answer, answer) - - def test_record_latest_score(self): - new_answer = "New Answer" - self.openendedchild.new_history_entry(new_answer) - new_score = 3 - self.openendedchild.record_latest_score(new_score) - score = self.openendedchild.latest_score() - self.assertEqual(score, 3) - - new_score = 4 - self.openendedchild.new_history_entry(new_answer) - self.openendedchild.record_latest_score(new_score) - score = self.openendedchild.latest_score() - self.assertEqual(score, 4) - - def test_record_latest_post_assessment(self): - new_answer = "New Answer" - self.openendedchild.new_history_entry(new_answer) - - post_assessment = "Post assessment" - self.openendedchild.record_latest_post_assessment(post_assessment) - self.assertEqual(post_assessment, - self.openendedchild.latest_post_assessment(self.test_system)) - - def test_get_score(self): - new_answer = "New Answer" - self.openendedchild.new_history_entry(new_answer) - - score = self.openendedchild.get_score() - self.assertEqual(score['score'], 0) - self.assertEqual(score['total'], self.static_data['max_score']) - - new_score = 4 - self.openendedchild.new_history_entry(new_answer) - self.openendedchild.record_latest_score(new_score) - score = self.openendedchild.get_score() - self.assertEqual(score['score'], new_score) - self.assertEqual(score['total'], self.static_data['max_score']) - - def test_reset(self): - self.openendedchild.reset(self.test_system) - state = json.loads(self.openendedchild.get_instance_state()) - self.assertEqual(state['child_state'], OpenEndedChild.INITIAL) - - def test_is_last_response_correct(self): - new_answer = "New Answer" - self.openendedchild.new_history_entry(new_answer) - self.openendedchild.record_latest_score(self.static_data['max_score']) - self.assertEqual(self.openendedchild.is_last_response_correct(), - 'correct') - - self.openendedchild.new_history_entry(new_answer) - self.openendedchild.record_latest_score(0) - self.assertEqual(self.openendedchild.is_last_response_correct(), - 'incorrect') - - -class OpenEndedModuleTest(unittest.TestCase): - """ - Test the open 
ended module class - """ - location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion") - - metadata = json.dumps({'attempts': '10'}) - prompt = etree.XML("This is a question prompt") - rubric = etree.XML(''' - - Response Quality - - - ''') - max_score = 4 - - static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'close_date': None, - 's3_interface': test_util_open_ended.S3_INTERFACE, - 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, - 'skip_basic_checks': False, - 'control': { - 'required_peer_grading': 1, - 'peer_grader_count': 1, - 'min_to_calibrate': 3, - 'max_to_calibrate': 6, - 'peer_grade_finished_submissions_when_none_pending': False, - } - } - - oeparam = etree.XML(''' - - Enter essay here. - This is the answer. - - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - - ''') - definition = {'oeparam': oeparam} - descriptor = Mock() - - feedback = { - "success": True, - "feedback": "Grader Feedback" - } - - single_score_msg = { - 'correct': True, - 'score': 4, - 'msg': 'Grader Message', - 'feedback': json.dumps(feedback), - 'grader_type': 'IN', - 'grader_id': '1', - 'submission_id': '1', - 'success': True, - 'rubric_scores': [0], - 'rubric_scores_complete': True, - 'rubric_xml': etree.tostring(rubric) - } - - multiple_score_msg = { - 'correct': True, - 'score': [0, 1], - 'msg': 'Grader Message', - 'feedback': [json.dumps(feedback), json.dumps(feedback)], - 'grader_type': 'PE', - 'grader_id': ['1', '2'], - 'submission_id': '1', - 'success': True, - 'rubric_scores': [[0], [0]], - 'rubric_scores_complete': [True, True], - 'rubric_xml': [etree.tostring(rubric), etree.tostring(rubric)] - } - - def setUp(self): - super(OpenEndedModuleTest, self).setUp() - self.test_system = get_test_system() - self.test_system.open_ended_grading_interface = None - self.test_system.location = self.location - self.mock_xqueue = MagicMock() - self.mock_xqueue.send_to_queue.return_value = (0, "Queued") - - def constructed_callback(dispatch="score_update"): - return dispatch - - self.test_system.xqueue = {'interface': self.mock_xqueue, 'construct_callback': constructed_callback, - 'default_queuename': 'testqueue', - 'waittime': 1} - self.openendedmodule = OpenEndedModule(self.test_system, self.location, - self.definition, self.descriptor, self.static_data, self.metadata) - - def test_message_post(self): - """Test message_post() sends feedback to xqueue.""" - - submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat) - - feedback_post = { - 'feedback': 'feedback text', - 'submission_id': '1', - 'grader_id': '1', - 'score': 3 - } - result = self.openendedmodule.message_post(feedback_post, self.test_system) - self.assertTrue(result['success']) - - # make sure it's actually sending something we want to the queue - mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body']) - self.assertEqual(mock_send_to_queue_body_arg['feedback'], feedback_post['feedback']) - self.assertEqual(mock_send_to_queue_body_arg['submission_id'], int(feedback_post['submission_id'])) - self.assertEqual(mock_send_to_queue_body_arg['grader_id'], int(feedback_post['grader_id'])) - self.assertEqual(mock_send_to_queue_body_arg['score'], feedback_post['score']) - body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info']) - 
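# Editor's sketch of the call_args pattern these assertions rely on: the test
# grabs the keyword `body` passed to the mocked send_to_queue and decodes it.
# Standalone illustration; `mock` is the same library the removed tests import,
# and the payload fields here are illustrative.
import json
from mock import MagicMock

mock_xqueue = MagicMock()
mock_xqueue.send_to_queue.return_value = (0, "Queued")

# Code under test would eventually call something like:
mock_xqueue.send_to_queue(header='{}', body=json.dumps({'grader_id': 1, 'score': 3}))

# The assertion side then unpacks the kwargs of the recorded call:
sent_body = json.loads(mock_xqueue.send_to_queue.call_args[1]['body'])
assert sent_body['score'] == 3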
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id) - self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time) - - state = json.loads(self.openendedmodule.get_instance_state()) - self.assertEqual(state['child_state'], OpenEndedModule.DONE) - - def test_message_post_fail(self): - """Test message_post() if unable to send feedback to xqueue.""" - - self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued") - - feedback_post = { - 'feedback': 'feedback text', - 'submission_id': '1', - 'grader_id': '1', - 'score': 3 - } - result = self.openendedmodule.message_post(feedback_post, self.test_system) - self.assertFalse(result['success']) - - state = json.loads(self.openendedmodule.get_instance_state()) - self.assertNotEqual(state['child_state'], OpenEndedModule.DONE) - - def test_send_to_grader(self): - student_response = "This is a student submission" - submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat) - - result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system) - self.assertTrue(result) - - mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body']) - self.assertEqual(mock_send_to_queue_body_arg['student_response'], student_response) - self.assertEqual(mock_send_to_queue_body_arg['max_score'], self.max_score) - body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info']) - self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id) - self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time) - - def test_send_to_grader_fail(self): - """Test send_to_grader() if unable to send submission to xqueue.""" - - student_response = "This is a student submission" - self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued") - result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system) - self.assertFalse(result) - - def test_save_answer_fail(self): - """Test save_answer() if unable to send submission to grader.""" - - submission = "This is a student submission" - self.openendedmodule.send_to_grader = Mock(return_value=(False, "Failed")) - response = self.openendedmodule.save_answer( - {"student_answer": submission}, - get_test_system() - ) - self.assertFalse(response['success']) - self.assertNotEqual(self.openendedmodule.latest_answer(), submission) - self.assertEqual(self.openendedmodule.stored_answer, submission) - state = json.loads(self.openendedmodule.get_instance_state()) - self.assertEqual(state['child_state'], OpenEndedModule.INITIAL) - self.assertEqual(state['stored_answer'], submission) - - def update_score_single(self): - self.openendedmodule.new_history_entry("New Entry") - get = {'queuekey': "abcd", - 'xqueue_body': json.dumps(self.single_score_msg)} - self.openendedmodule.update_score(get, self.test_system) - - def update_score_multiple(self): - self.openendedmodule.new_history_entry("New Entry") - get = {'queuekey': "abcd", - 'xqueue_body': json.dumps(self.multiple_score_msg)} - self.openendedmodule.update_score(get, self.test_system) - - def test_latest_post_assessment(self): - self.update_score_single() - assessment = self.openendedmodule.latest_post_assessment(self.test_system) - self.assertNotEqual(assessment, '') - # check for errors - self.assertNotIn('errors', assessment) - - def test_update_score_single(self): - self.update_score_single() - score = self.openendedmodule.latest_score() - 
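# Editor's sketch of the fake xqueue reply that update_score_single() feeds to
# update_score(): a queuekey plus a JSON-encoded grader message. The fields
# below are abbreviated from the single_score_msg fixture above, not complete.
import json

grader_msg = {
    'score': 4,
    'grader_type': 'IN',
    'success': True,
    'feedback': json.dumps({'success': True, 'feedback': 'Grader Feedback'}),
}
fake_reply = {'queuekey': 'abcd', 'xqueue_body': json.dumps(grader_msg)}
# module.update_score(fake_reply, system) would then record 4 as the latest score.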
self.assertEqual(score, 4) - - def test_update_score_multiple(self): - """ - Tests that a score of [0, 1] gets aggregated to 1. A change in behavior added by @jbau - """ - self.update_score_multiple() - score = self.openendedmodule.latest_score() - self.assertEquals(score, 1) - - @patch('xmodule.open_ended_grading_classes.open_ended_module.log.error') - def test_update_score_nohistory(self, error_logger): - """ - Tests error handling when there is no child_history - """ - # NOTE that we are not creating any history items - get = {'queuekey': "abcd", - 'xqueue_body': json.dumps(self.multiple_score_msg)} - error_msg = ("Trying to update score without existing studentmodule child_history:\n" - " location: i4x://edX/sa_test/selfassessment/SampleQuestion\n" - " score: 1\n" - " grader_ids: [u'1', u'2']\n" - " submission_ids: [u'1', u'1']") - self.openendedmodule.update_score(get, self.test_system) - (msg,), _ = error_logger.call_args - self.assertTrue(error_logger.called) - self.assertEqual(msg, error_msg) - - def test_open_ended_display(self): - """ - Test storing answer with the open ended module. - """ - - # Create a module with no state yet. Important that this start off as a blank slate. - test_module = OpenEndedModule(self.test_system, self.location, - self.definition, self.descriptor, self.static_data, self.metadata) - - saved_response = "Saved response." - submitted_response = "Submitted response." - - # Initially, there will be no stored answer. - self.assertEqual(test_module.stored_answer, None) - # And the initial answer to display will be an empty string. - self.assertEqual(test_module.get_display_answer(), "") - - # Now, store an answer in the module. - test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system()) - # The stored answer should now equal our response. - self.assertEqual(test_module.stored_answer, saved_response) - self.assertEqual(test_module.get_display_answer(), saved_response) - - # Mock out the send_to_grader function so it doesn't try to connect to the xqueue. - test_module.send_to_grader = Mock(return_value=(True, "Success")) - # Submit a student response to the question. - test_module.handle_ajax( - "save_answer", - {"student_answer": submitted_response}, - get_test_system() - ) - # Submitting an answer should clear the stored answer. - self.assertEqual(test_module.stored_answer, None) - # Confirm that the answer is stored properly. - self.assertEqual(test_module.latest_answer(), submitted_response) - - def test_parse_score_msg(self): - """ - Test _parse_score_msg with empty dict. 
- """ - - assessment = self.openendedmodule._parse_score_msg("{}", self.test_system) - self.assertEqual(assessment.get("valid"), False) - - -class CombinedOpenEndedModuleTest(unittest.TestCase): - """ - Unit tests for the combined open ended xmodule - """ - location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion") - definition_template = """ - - {rubric} - {prompt} - - {task1} - - - {task2} - - - """ - prompt = "This is a question prompt" - rubric = ''' - - Response Quality - - - - ''' - max_score = 1 - - metadata = {'attempts': '10', 'max_score': max_score} - - static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'close_date': "", - 's3_interface': test_util_open_ended.S3_INTERFACE, - 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, - 'skip_basic_checks': False, - 'graded': True, - } - - oeparam = etree.XML(''' - - Enter essay here. - This is the answer. - - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - - ''') - - task_xml1 = ''' - - - What hint about this problem would you give to someone? - - - Save Succcesful. Thanks for participating! - - - ''' - task_xml2 = ''' - - - Enter essay here. - This is the answer. - - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - - ''' - definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]} - full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2) - descriptor = Mock(data=full_definition) - test_system = get_test_system() - test_system.open_ended_grading_interface = None - usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc') - # ScopeIds has 4 fields: user_id, block_type, def_id, usage_id - scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key) - combinedoe_container = CombinedOpenEndedModule( - descriptor=descriptor, - runtime=test_system, - field_data=DictFieldData({ - 'data': full_definition, - 'weight': '1', - }), - scope_ids=scope_ids, - ) - - def setUp(self): - super(CombinedOpenEndedModuleTest, self).setUp() - self.combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - self.definition, - self.descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=self.static_data) - - def test_get_tag_name(self): - """ - Test to see if the xml tag name is correct - """ - name = self.combinedoe.get_tag_name("Tag") - self.assertEqual(name, "t") - - def test_get_last_response(self): - """ - See if we can parse the last response - """ - response_dict = self.combinedoe.get_last_response(0) - self.assertEqual(response_dict['type'], "selfassessment") - self.assertEqual(response_dict['max_score'], self.max_score) - self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL) - - def test_create_task(self): - combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2]) - - first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0]) - self.assertIsInstance(first_task, SelfAssessmentModule) - - second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1]) - self.assertIsInstance(second_task, OpenEndedModule) - - def test_get_task_number(self): - combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2]) - - 
first_task = combinedoe.get_task_number(0) - self.assertIsInstance(first_task, SelfAssessmentModule) - - second_task = combinedoe.get_task_number(1) - self.assertIsInstance(second_task, OpenEndedModule) - - third_task = combinedoe.get_task_number(2) - self.assertIsNone(third_task) - - def test_update_task_states(self): - """ - See if we can update the task states properly - """ - changed = self.combinedoe.update_task_states() - self.assertFalse(changed) - - current_task = self.combinedoe.current_task - current_task.change_state(CombinedOpenEndedV1Module.DONE) - changed = self.combinedoe.update_task_states() - - self.assertTrue(changed) - - def test_get_max_score(self): - """ - Try to get the max score of the problem - """ - self.combinedoe.update_task_states() - self.combinedoe.state = "done" - self.combinedoe.is_scored = True - max_score = self.combinedoe.max_score() - self.assertEqual(max_score, 1) - - def test_container_get_max_score(self): - """ - See if we can get the max score from the actual xmodule - """ - # The progress view requires that this function be exposed - max_score = self.combinedoe_container.max_score() - self.assertEqual(max_score, None) - - def test_container_get_progress(self): - """ - See if we can get the progress from the actual xmodule - """ - progress = self.combinedoe_container.max_score() - self.assertEqual(progress, None) - - def test_get_progress(self): - """ - Test if we can get the correct progress from the combined open ended class - """ - self.combinedoe.update_task_states() - self.combinedoe.state = "done" - self.combinedoe.is_scored = True - progress = self.combinedoe.get_progress() - self.assertIsInstance(progress, Progress) - - # progress._a is the score of the xmodule, which is 0 right now. - self.assertEqual(progress._a, 0) - - # progress._b is the max_score (which is 1), divided by the weight (which is 1). - self.assertEqual(progress._b, 1) - - def test_container_weight(self): - """ - Check the problem weight in the container - """ - weight = self.combinedoe_container.weight - self.assertEqual(weight, 1) - - def test_container_child_weight(self): - """ - Test the class to see if it picks up the right weight - """ - weight = self.combinedoe_container.child_module.weight - self.assertEqual(weight, 1) - - def test_get_score(self): - """ - See if scoring works - """ - score_dict = self.combinedoe.get_score() - self.assertEqual(score_dict['score'], 0) - self.assertEqual(score_dict['total'], 1) - - def test_alternate_orderings(self): - """ - Try multiple ordering of definitions to see if the problem renders different steps correctly. 
- """ - t1 = self.task_xml1 - t2 = self.task_xml2 - xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]] - for xml in xml_to_test: - definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml} - descriptor = Mock(data=definition) - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - definition, - descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=self.static_data) - - changed = combinedoe.update_task_states() - self.assertFalse(changed) - - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - definition, - descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state={'task_states': TEST_STATE_SA}) - - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - definition, - descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state={'task_states': TEST_STATE_SA_IN}) - - def test_get_score_realistic(self): - """ - Try to parse the correct score from a json instance state - """ - instance_state = json.loads(MOCK_INSTANCE_STATE) - rubric = """ - - - - Response Quality - - - - - - - - """ - definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(rubric), - 'task_xml': [self.task_xml1, self.task_xml2]} - descriptor = Mock(data=definition) - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - definition, - descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=instance_state) - score_dict = combinedoe.get_score() - self.assertEqual(score_dict['score'], 15.0) - self.assertEqual(score_dict['total'], 15.0) - - def generate_oe_module(self, task_state, task_number, task_xml): - """ - Return a combined open ended module with the specified parameters - """ - definition = { - 'prompt': etree.XML(self.prompt), - 'rubric': etree.XML(self.rubric), - 'task_xml': task_xml - } - descriptor = Mock(data=definition) - module = Mock(scope_ids=Mock(usage_id='dummy-usage-id')) - instance_state = {'task_states': task_state, 'graded': True} - if task_number is not None: - instance_state.update({'current_task_number': task_number}) - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - definition, - descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=instance_state) - return combinedoe - - def ai_state_reset(self, task_state, task_number=None): - """ - See if state is properly reset - """ - combinedoe = self.generate_oe_module(task_state, task_number, [self.task_xml2]) - html = combinedoe.get_html() - self.assertIsInstance(html, basestring) - - score = combinedoe.get_score() - if combinedoe.is_scored: - self.assertEqual(score['score'], 0) - else: - self.assertEqual(score['score'], None) - - def ai_state_success(self, task_state, task_number=None, iscore=2, tasks=None): - """ - See if state stays the same - """ - if tasks is None: - tasks = [self.task_xml1, self.task_xml2] - combinedoe = self.generate_oe_module(task_state, task_number, tasks) - html = combinedoe.get_html() - self.assertIsInstance(html, basestring) - score = combinedoe.get_score() - self.assertEqual(int(score['score']), iscore) - - def test_ai_state_reset(self): - self.ai_state_reset(TEST_STATE_AI) - - def test_ai_state2_reset(self): - self.ai_state_reset(TEST_STATE_AI2) - - def test_ai_invalid_state(self): - self.ai_state_reset(TEST_STATE_AI2_INVALID) - - def test_ai_state_rest_task_number(self): - 
self.ai_state_reset(TEST_STATE_AI, task_number=2) - self.ai_state_reset(TEST_STATE_AI, task_number=5) - self.ai_state_reset(TEST_STATE_AI, task_number=1) - self.ai_state_reset(TEST_STATE_AI, task_number=0) - - def test_ai_state_success(self): - self.ai_state_success(TEST_STATE_AI) - - def test_state_single(self): - self.ai_state_success(TEST_STATE_SINGLE, iscore=12) - - def test_state_pe_single(self): - self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2]) - - def test_deprecation_message(self): - """ - Test the validation message produced for deprecation. - """ - # pylint: disable=no-member - validation = self.combinedoe_container.validate() - deprecation_msg = "ORA1 is no longer supported. To use this assessment, " \ - "replace this ORA1 component with an ORA2 component." - validation.summary.text = deprecation_msg - validation.summary.type = 'error' - - self.assertEqual( - validation.summary.text, - deprecation_msg - ) - self.assertEqual(validation.summary.type, StudioValidationMessage.ERROR) - - -class CombinedOpenEndedModuleConsistencyTest(unittest.TestCase): - """ - Unit tests for the combined open ended xmodule rubric scores consistency. - """ - - # location, definition_template, prompt, rubric, max_score, metadata, oeparam, task_xml1, task_xml2 - # All these variables are used to construct the xmodule descriptor. - location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion") - definition_template = """ - - {rubric} - {prompt} - - {task1} - - - {task2} - - - """ - prompt = "This is a question prompt" - rubric = ''' - - Response Quality - - - - ''' - max_score = 10 - - metadata = {'attempts': '10', 'max_score': max_score} - - oeparam = etree.XML(''' - - Enter essay here. - This is the answer. - - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - - ''') - - task_xml1 = ''' - - - What hint about this problem would you give to someone? - - - Save Succcesful. Thanks for participating! - - - ''' - task_xml2 = ''' - - - Enter essay here. - This is the answer. 
- - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - - ''' - - static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'close_date': "", - 's3_interface': test_util_open_ended.S3_INTERFACE, - 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, - 'skip_basic_checks': False, - 'graded': True, - } - - definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]} - full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2) - descriptor = Mock(data=full_definition) - test_system = get_test_system() - test_system.open_ended_grading_interface = None - usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc') - # ScopeIds has 4 fields: user_id, block_type, def_id, usage_id - scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key) - combinedoe_container = CombinedOpenEndedModule( - descriptor=descriptor, - runtime=test_system, - field_data=DictFieldData({ - 'data': full_definition, - 'weight': '1', - }), - scope_ids=scope_ids, - ) - - def setUp(self): - super(CombinedOpenEndedModuleConsistencyTest, self).setUp() - self.combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - self.definition, - self.descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=json.loads(INSTANCE_INCONSISTENT_STATE)) - - def test_get_score(self): - """ - If grader type is ML score should be updated from rubric scores. Aggregate rubric scores = sum([3])*5. - """ - score_dict = self.combinedoe.get_score() - self.assertEqual(score_dict['score'], 15.0) - self.assertEqual(score_dict['total'], 5.0) - - def test_get_score_with_pe_grader(self): - """ - If grader type is PE score should not be updated from rubric scores. Aggregate rubric scores = sum([3])*5. - """ - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - self.definition, - self.descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=json.loads(INSTANCE_INCONSISTENT_STATE2)) - score_dict = combinedoe.get_score() - self.assertNotEqual(score_dict['score'], 15.0) - - def test_get_score_with_different_score_value_in_rubric(self): - """ - If grader type is ML score should be updated from rubric scores. Aggregate rubric scores = sum([5])*5. - """ - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - self.definition, - self.descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=json.loads(INSTANCE_INCONSISTENT_STATE3)) - score_dict = combinedoe.get_score() - self.assertEqual(score_dict['score'], 25.0) - self.assertEqual(score_dict['total'], 5.0) - - def test_get_score_with_old_task_states(self): - """ - If grader type is ML and old_task_states are present in instance inconsistent state score should be updated - from rubric scores. Aggregate rubric scores = sum([3])*5. 
- """ - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - self.definition, - self.descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=json.loads(INSTANCE_INCONSISTENT_STATE4)) - score_dict = combinedoe.get_score() - self.assertEqual(score_dict['score'], 15.0) - self.assertEqual(score_dict['total'], 5.0) - - def test_get_score_with_score_missing(self): - """ - If grader type is ML and score field is missing in instance inconsistent state score should be updated from - rubric scores. Aggregate rubric scores = sum([3])*5. - """ - combinedoe = CombinedOpenEndedV1Module(self.test_system, - self.location, - self.definition, - self.descriptor, - static_data=self.static_data, - metadata=self.metadata, - instance_state=json.loads(INSTANCE_INCONSISTENT_STATE5)) - score_dict = combinedoe.get_score() - self.assertEqual(score_dict['score'], 15.0) - self.assertEqual(score_dict['total'], 5.0) - - -class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore): - """ - Test the student flow in the combined open ended xmodule - """ - problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion") - answer = "blah blah" - assessment = [0, 1] - hint = "blah" - - def get_module_system(self, descriptor): - - def construct_callback(dispatch="score_update"): - return dispatch - - test_system = get_test_system() - test_system.open_ended_grading_interface = None - test_system.xqueue['interface'] = Mock( - send_to_queue=Mock(return_value=(0, "Queued")) - ) - test_system.xqueue['construct_callback'] = construct_callback - - return test_system - - def setUp(self): - super(OpenEndedModuleXmlTest, self).setUp() - self.setup_modulestore(COURSE) - - def _handle_ajax(self, dispatch, content): - # Load the module from persistence - module = self._module() - - # Call handle_ajax on the module - result = module.handle_ajax(dispatch, content) - - # Persist the state - module.save() - - return result - - def _module(self): - return self.get_module_from_location(self.problem_location) - - def test_open_ended_load_and_save(self): - """ - See if we can load the module and save an answer - @return: - """ - # Try saving an answer - self._handle_ajax("save_answer", {"student_answer": self.answer}) - - task_one_json = json.loads(self._module().task_states[0]) - self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer) - - def test_open_ended_flow_reset(self): - """ - Test the flow of the module if we complete the self assessment step and then reset - @return: - """ - assessment = [0, 1] - - # Simulate a student saving an answer - self._handle_ajax("get_html", {}) - self._handle_ajax("save_answer", {"student_answer": self.answer}) - self._handle_ajax("get_html", {}) - - # Mock a student submitting an assessment - assessment_dict = MultiDict({'assessment': sum(assessment)}) - assessment_dict.extend(('score_list[]', val) for val in assessment) - - self._handle_ajax("save_assessment", assessment_dict) - - task_one_json = json.loads(self._module().task_states[0]) - self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment) - - self._handle_ajax("get_combined_rubric", {}) - - # Move to the next step in the problem - self._handle_ajax("next_problem", {}) - self.assertEqual(self._module().current_task_number, 0) - - html = self._module().render(STUDENT_VIEW).content - self.assertIsInstance(html, basestring) - - rubric = self._handle_ajax("get_combined_rubric", {}) - 
self.assertIsInstance(rubric, basestring) - - self.assertEqual(self._module().state, "assessing") - - self._handle_ajax("reset", {}) - self.assertEqual(self._module().current_task_number, 0) - - def test_open_ended_flow_with_xqueue_failure(self): - """ - Test a two step problem where the student first goes through the self assessment step, and then the - open ended step with the xqueue failing in the first step. - """ - assessment = [1, 1] - - # Simulate a student saving an answer - self._handle_ajax("save_answer", {"student_answer": self.answer}) - status = self._handle_ajax("get_status", {}) - self.assertIsInstance(status, basestring) - - # Mock a student submitting an assessment - assessment_dict = MultiDict({'assessment': sum(assessment)}) - assessment_dict.extend(('score_list[]', val) for val in assessment) - - mock_xqueue_interface = Mock( - send_to_queue=Mock(return_value=(1, "Not Queued")) - ) - - # Call handle_ajax on the module with xqueue down - module = self._module() - with patch.dict(module.xmodule_runtime.xqueue, {'interface': mock_xqueue_interface}): - module.handle_ajax("save_assessment", assessment_dict) - self.assertEqual(module.current_task_number, 1) - self.assertTrue((module.child_module.get_task_number(1).child_created)) - module.save() - - # Check that next time the OpenEndedModule is loaded it calls send_to_grader - with patch.object(OpenEndedModule, 'send_to_grader') as mock_send_to_grader: - mock_send_to_grader.return_value = (False, "Not Queued") - module = self._module().child_module.get_score() - self.assertTrue(mock_send_to_grader.called) - self.assertTrue((self._module().child_module.get_task_number(1).child_created)) - - # Loading it this time should send submission to xqueue correctly - self.assertFalse((self._module().child_module.get_task_number(1).child_created)) - self.assertEqual(self._module().current_task_number, 1) - self.assertEqual(self._module().state, OpenEndedChild.ASSESSING) - - task_one_json = json.loads(self._module().task_states[0]) - self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment) - - # Move to the next step in the problem - self._handle_ajax("next_problem", {}) - self.assertEqual(self._module().current_task_number, 1) - self._module().render(STUDENT_VIEW) - - # Try to get the rubric from the module - self._handle_ajax("get_combined_rubric", {}) - - self.assertEqual(self._module().state, OpenEndedChild.ASSESSING) - - # Make a fake reply from the queue - queue_reply = { - 'queuekey': "", - 'xqueue_body': json.dumps({ - 'score': 0, - 'feedback': json.dumps({ - "spelling": "Spelling: Ok.", - "grammar": "Grammar: Ok.", - "markup-text": " all of us can think of a book that we hope none of our children or any other " - "children have taken off the shelf . but if i have the right to remove that book " - "from the shelf that work i abhor then you also have exactly the same right and " - "so does everyone else . and then we have no books left " - "on the shelf for any of us . katherine paterson , author " - "write a persuasive essay to a newspaper reflecting your vies on censorship " - "in libraries . do you believe that certain materials , such as books , " - "music , movies , magazines , etc . , should be removed from the shelves " - "if they are found offensive ? support your position with convincing " - "arguments from your own experience , observations , and or reading . 
" - }), - 'grader_type': "ML", - 'success': True, - 'grader_id': 1, - 'submission_id': 1, - 'rubric_xml': ''' - - - Writing Applications - 0 - - - - - Language Conventions - 0 - - - - - ''', - 'rubric_scores_complete': True, - }) - } - - self._handle_ajax("check_for_score", {}) - - # Update the module with the fake queue reply - self._handle_ajax("score_update", queue_reply) - - module = self._module() - self.assertFalse(module.ready_to_reset) - self.assertEqual(module.current_task_number, 1) - - # Get html and other data client will request - module.render(STUDENT_VIEW) - - self._handle_ajax("skip_post_assessment", {}) - - # Get all results - self._handle_ajax("get_combined_rubric", {}) - - # reset the problem - self._handle_ajax("reset", {}) - self.assertEqual(self._module().state, "initial") - - def test_open_ended_flow_correct(self): - """ - Test a two step problem where the student first goes through the self assessment step, and then the - open ended step. - @return: - """ - assessment = [1, 1] - - # Simulate a student saving an answer - self._handle_ajax("save_answer", {"student_answer": self.answer}) - status = self._handle_ajax("get_status", {}) - self.assertIsInstance(status, basestring) - - # Mock a student submitting an assessment - assessment_dict = MultiDict({'assessment': sum(assessment)}) - assessment_dict.extend(('score_list[]', val) for val in assessment) - - self._handle_ajax("save_assessment", assessment_dict) - - task_one_json = json.loads(self._module().task_states[0]) - self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment) - - # Move to the next step in the problem - self._handle_ajax("next_problem", {}) - self.assertEqual(self._module().current_task_number, 1) - self._module().render(STUDENT_VIEW) - - # Try to get the rubric from the module - self._handle_ajax("get_combined_rubric", {}) - - # Make a fake reply from the queue - queue_reply = { - 'queuekey': "", - 'xqueue_body': json.dumps({ - 'score': 0, - 'feedback': json.dumps({ - "spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.", - "markup-text": " all of us can think of a book that we hope none of our children or any other " - "children have taken off the shelf . but if i have the right to remove that book " - "from the shelf that work i abhor then you also have exactly the same right and " - "so does everyone else . and then we have no books left on the shelf for " - "any of us . katherine paterson , author write a persuasive essay " - "to a newspaper reflecting your vies on censorship in libraries . do " - "you believe that certain materials , such as books , music , movies , magazines , " - "etc . , should be removed from the shelves if they are found " - "offensive ? support your position with convincing arguments from your " - "own experience , observations , and or reading . 
" - }), - 'grader_type': "ML", - 'success': True, - 'grader_id': 1, - 'submission_id': 1, - 'rubric_xml': ''' - - - Writing Applications - 0 - - - - - Language Conventions - 0 - - - - - ''', - 'rubric_scores_complete': True, - }) - } - - self._handle_ajax("check_for_score", {}) - - # Update the module with the fake queue reply - self._handle_ajax("score_update", queue_reply) - - module = self._module() - self.assertFalse(module.ready_to_reset) - self.assertEqual(module.current_task_number, 1) - - # Get html and other data client will request - module.render(STUDENT_VIEW) - - self._handle_ajax("skip_post_assessment", {}) - - # Get all results - self._handle_ajax("get_combined_rubric", {}) - - # reset the problem - self._handle_ajax("reset", {}) - self.assertEqual(self._module().state, "initial") - - -class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore): - """ - Test if student is able to reset the problem - """ - problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion1Attempt") - answer = "blah blah" - assessment = [0, 1] - hint = "blah" - - def get_module_system(self, descriptor): - test_system = get_test_system() - test_system.open_ended_grading_interface = None - test_system.xqueue['interface'] = Mock( - send_to_queue=Mock(return_value=(0, "Queued")) - ) - return test_system - - def setUp(self): - super(OpenEndedModuleXmlAttemptTest, self).setUp() - self.setup_modulestore(COURSE) - - def _handle_ajax(self, dispatch, content): - # Load the module from persistence - module = self._module() - - # Call handle_ajax on the module - result = module.handle_ajax(dispatch, content) - - # Persist the state - module.save() - - return result - - def _module(self): - return self.get_module_from_location(self.problem_location) - - def test_reset_fail(self): - """ - Test the flow of the module if we complete the self assessment step and then reset - Since the problem only allows one attempt, should fail. - @return: - """ - assessment = [0, 1] - - # Simulate a student saving an answer - self._handle_ajax("save_answer", {"student_answer": self.answer}) - - # Mock a student submitting an assessment - assessment_dict = MultiDict({'assessment': sum(assessment)}) - assessment_dict.extend(('score_list[]', val) for val in assessment) - - self._handle_ajax("save_assessment", assessment_dict) - task_one_json = json.loads(self._module().task_states[0]) - self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment) - - # Move to the next step in the problem - self._handle_ajax("next_problem", {}) - self.assertEqual(self._module().current_task_number, 0) - - html = self._module().render(STUDENT_VIEW).content - self.assertIsInstance(html, basestring) - - # Module should now be done - rubric = self._handle_ajax("get_combined_rubric", {}) - self.assertIsInstance(rubric, basestring) - self.assertEqual(self._module().state, "done") - - # Try to reset, should fail because only 1 attempt is allowed - reset_data = json.loads(self._handle_ajax("reset", {})) - self.assertEqual(reset_data['success'], False) - - -class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore): - """ - Test if student is able to upload images properly. - """ - problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestionImageUpload") - answer_text = "Hello, this is my amazing answer." - file_text = "Hello, this is my amazing file." 
- file_name = "Student file 1" - answer_link = "http://www.edx.org" - autolink_tag = ' inserter - """ - script_dirty = u'' - script_clean = u'alert("xss!")' - img_dirty = u'cats' - img_clean = u'cats' - embed_dirty = u'' - embed_clean = u'' - iframe_dirty = u'' - - text = u'I am a \u201c\xfcber student\u201d' - text_lessthan_noencd = u'This used to be broken < by the other parser. 3>5' - text_lessthan_encode = u'This used to be broken < by the other parser. 3>5' - text_linebreaks = u"St\xfcdent submission:\nI like lamp." - text_brs = u"St\xfcdent submission:
I like lamp." - - link_text = u'I love going to www.lolcatz.com' - link_atag = u'I love going to
www.lolcatz.com' - - def assertHtmlEqual(self, actual, expected): - """ - Assert that two strings represent the same html. - """ - return self._assertHtmlEqual( - fragment_fromstring(actual, create_parent='div'), - fragment_fromstring(expected, create_parent='div') - ) - - def _assertHtmlEqual(self, actual, expected): - """ - Assert that two HTML ElementTree elements are equal. - """ - self.assertEqual(actual.tag, expected.tag) - self.assertEqual(actual.attrib, expected.attrib) - self.assertEqual(actual.text, expected.text) - self.assertEqual(actual.tail, expected.tail) - self.assertEqual(len(actual), len(expected)) - for actual_child, expected_child in zip(actual, expected): - self._assertHtmlEqual(actual_child, expected_child) - - def test_script(self): - """ - Basic test for stripping - - - - -

- [stripped HTML from a deleted ORA1 grading template; only the section labels "Staff grading", "Submission", and "Rubric" survive extraction]
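The sanitization tests above assert HTML equality structurally rather than textually. As a minimal standalone sketch of that idiom (assuming only lxml, which these tests already import from; names here are illustrative, not the test suite's own), parsing both fragments and comparing trees makes the assertions insensitive to attribute order:

    from lxml.html import fragment_fromstring


    def elements_equal(a, b):
        """Recursively compare two parsed HTML elements."""
        return (
            a.tag == b.tag
            and dict(a.attrib) == dict(b.attrib)  # plain dicts: order-insensitive
            and a.text == b.text
            and a.tail == b.tail
            and len(a) == len(b)
            and all(elements_equal(x, y) for x, y in zip(a, b))
        )


    def html_equal(actual, expected):
        # create_parent='div' wraps each fragment so inputs with several
        # top-level nodes still parse to a single comparable tree.
        return elements_equal(
            fragment_fromstring(actual, create_parent='div'),
            fragment_fromstring(expected, create_parent='div'),
        )


    # Attribute order differs, but the trees compare equal:
    assert html_equal('<b id="x" class="y">hi</b>', '<b class="y" id="x">hi</b>')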
- - diff --git a/lms/static/images/grading_notification.png b/lms/static/images/grading_notification.png deleted file mode 100644 index cd93857da9..0000000000 Binary files a/lms/static/images/grading_notification.png and /dev/null differ diff --git a/lms/static/sass/_build-course.scss b/lms/static/sass/_build-course.scss index c4a13a67fa..2883ab38e6 100644 --- a/lms/static/sass/_build-course.scss +++ b/lms/static/sass/_build-course.scss @@ -45,9 +45,6 @@ @import "course/profile"; @import "course/gradebook"; @import "course/tabs"; -@import "course/staff_grading"; -@import "course/rubric"; -@import "course/open_ended_grading"; @import "course/student-notes"; @import "views/teams"; diff --git a/lms/static/sass/course/_open_ended_grading.scss b/lms/static/sass/course/_open_ended_grading.scss deleted file mode 100644 index e9bf04d938..0000000000 --- a/lms/static/sass/course/_open_ended_grading.scss +++ /dev/null @@ -1,64 +0,0 @@ -.open-ended-problems, -.combined-notifications { - padding: ($baseline*2); - - .problem-list { - table-layout: auto; - margin-top: ($baseline/2); - width: 70%; - - td, th { - padding: 7px; - } - } - - .notification-container { - margin: ($baseline*1.5) 0; - } - - .notification { - @include clearfix(); - margin: ($baseline/2); - width: 30%; - display: inline-block; - vertical-align: top; - - .notification-link { - display:block; - height: 9em; - padding: ($baseline/2); - border: 1px solid black; - text-align: center; - - p { - font-size: 0.9em; - text-align: center; - } - } - - .notification-title { - text-transform: uppercase; - background: $blue; - color: white; - padding: ($baseline/4) 0; - font-size: 1.1em; - } - - .notification-link:hover, - .notification-link:focus { - background-color: #eee; - } - - .notification-description { - padding-top:5%; - } - - .alert-message { - - img { - vertical-align: baseline; - } - } - } - -} diff --git a/lms/static/sass/course/_rubric.scss b/lms/static/sass/course/_rubric.scss deleted file mode 100644 index b0830e7881..0000000000 --- a/lms/static/sass/course/_rubric.scss +++ /dev/null @@ -1,85 +0,0 @@ -.rubric-header { - background-color: #fafafa; - border-radius: 5px; - - .rubric-collapse { - margin-right: $baseline/2; - } -} - -.button { - display: inline-block; -} - -.rubric { - margin: 0; - color: #3C3C3C; - - tr { - margin: 0; - height: 100%; - } - - td { - height: 100%; - border: 1px black solid; - text-align: center; - } - - th { - margin: $baseline/4; - padding: $baseline/4; - text-align: center; - } - - .points-header th { - padding: 0px; - } - - .rubric-label { - position: relative; - display: block; - font-size: .9em; - - .choicegroup-correct { - //nothing - } - - .choicegroup-incorrect { - display:none; - } - } - - .grade { - position: absolute; - bottom: 0; - right: 0; - } - .selected-grade, - .selected-grade .rubric-label { - background: #666; - color: white; - } - - input[type=radio]:checked + .rubric-label { - background: white; - color: $base-font-color; - white-space:nowrap; - } - - .wrappable { - white-space:normal; - } - - input[class='score-selection'] { - position: relative; - font-size: 16px; - } - - ul.rubric-list { - margin: 0; - padding: 0; - list-style-type: none; - } -} - diff --git a/lms/static/sass/course/_staff_grading.scss b/lms/static/sass/course/_staff_grading.scss deleted file mode 100644 index f9a712e877..0000000000 --- a/lms/static/sass/course/_staff_grading.scss +++ /dev/null @@ -1,248 +0,0 @@ -div.staff-grading, -div.peer-grading { - border: 1px solid lightgray; - - textarea.feedback-area { - 
margin: 0; - height: 75px; - } - - div.feedback-area.track-changes { - position: relative; - margin: 0; - height: 400px; - border: 1px solid lightgray; - padding: ($baseline/4); - resize: vertical; - width: 99%; - overflow: auto; - } - - div.feedback-area.track-changes, p.ice-legend { - - .ice-controls { - float: right; - } - .del { - position: relative; - text-decoration: line-through; - background-color: #ffc3c3; - } - .ins { - position: relative; - background-color: #c3ffc3; - } - } - - ul.rubric-list{ - margin: 0; - padding: 0; - list-style-type: none; - - li { - &.rubric-list-item{ - margin-bottom: 0; - padding: 0; - } - } - } - - h1 { - margin: 0 0 0 ($baseline/2); - } - - h2 { - a { - text-size: 0.5em; - } - } - - div { - margin: 0; - - &.submission-container{ - @include clearfix(); - overflow-y: auto; - max-height: 300px; - height: auto; - border: 1px solid #ddd; - background: $gray-l6; - } - } - - label { - margin: 0; - padding: ($baseline/10); - min-width: 50px; - text-size: 1.5em; - } - - /* Toggled State */ - input[type=radio]:checked + label { - background: #666; - color: white; - } - - input[name='score-selection'], - input[name='grade-selection'] { - display: none; - } - - .problem-list { - width: 100%; - table-layout: auto; - text-align: center; - - th { - padding: ($baseline/10); - } - - td { - padding: ($baseline/10); - } - - td.problem-name { - text-align: left; - } - - .ui-progressbar { - margin: 0; - padding: 0; - height: 1em; - } - } - - .prompt-information-container, - .rubric-wrapper, - .calibration-feedback-wrapper, - .grading-container { - padding: ($baseline/2) 0; - } - - .error-container { - margin-left: 0; - padding: ($baseline/10); - background-color: #ffcccc; - } - - .submission-wrapper { - padding: ($baseline/10); - padding-bottom: ($baseline*0.75); - - h3 { - margin-bottom: ($baseline/10); - } - - p { - margin-left: ($baseline/10); - } - } - .meta-info-wrapper { - padding: ($baseline/10); - background-color: #eee; - - div { - display: inline; - } - } - .message-container, - .grading-message { - margin-left: 0; - padding: ($baseline/10); - background-color: $yellow; - } - - .breadcrumbs { - margin: ($baseline/2) ($baseline/4); - font-size: .8em; - } - - .instructions-panel { - @include clearfix(); - padding: ($baseline/2); - background-color: #eee; - font-size: .8em; - - > div { - margin-bottom: ($baseline/4); - padding: ($baseline/2); - width: 49%; - background: #eee; - - h3 { - color: #777; - text-align: center; - text-transform: uppercase; - } - - p{ - color: #777; - } - } - .calibration-panel { - display: inline-block; - width: 20%; - border-radius: 3px; - } - - .grading-panel { - display: inline-block; - width: 20%; - border-radius: 3px; - } - .current-state { - background: $white; - - } - } - - .collapsible { - margin-left: 0; - - header { - margin-top: ($baseline/10); - margin-bottom: ($baseline/10); - font-size: 1.2em; - } - } - - .interstitial-page { - text-align: center; - - input[type=button] { - margin-top: $baseline; - } - } -} - -div.peer-grading { - border-radius: ($baseline/2); - padding: 0; - - .peer-grading-tools { - padding: $baseline; - } - - .error-container { - margin: $baseline; - border-radius: ($baseline/4); - padding: ($baseline/2); - } - - .interstitial-page, .calibration -feedback, .calibration-interstitial-page { - padding: $baseline; - } - - .prompt-wrapper { - padding: $baseline; - } - - .grading-wrapper { - padding: $baseline; - } -} - -div.staff-grading { - padding: $baseline; -} - diff --git 
a/lms/templates/combinedopenended/combined_open_ended.html b/lms/templates/combinedopenended/combined_open_ended.html deleted file mode 100644 index 5b0f94f6bb..0000000000 --- a/lms/templates/combinedopenended/combined_open_ended.html +++ /dev/null @@ -1,65 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- [deleted combined_open_ended.html body; surrounding markup lost in extraction, recoverable Mako content below]
- ${display_name}
- ${_("Open Response")}
- ${_("Assessments:")}
- ${status|n}
- % for item in items:
-   ${item['content'] | n}
- % endfor
- % if is_staff:
-   ${_("Staff Warning: Please note that if you submit a duplicate of text that has already been submitted for grading, it will not show up in the staff grading view. It will be given the same grade that the original received automatically, and will be returned within 30 minutes if the original is already graded, or when the original is graded if not.")}
- % endif
- diff --git a/lms/templates/combinedopenended/combined_open_ended_hidden_results.html b/lms/templates/combinedopenended/combined_open_ended_hidden_results.html deleted file mode 100644 index 396a657273..0000000000 --- a/lms/templates/combinedopenended/combined_open_ended_hidden_results.html +++ /dev/null @@ -1,10 +0,0 @@ -
- [markup lost in extraction; recoverable content: the "Submitted Rubric" heading and ${error}]
diff --git a/lms/templates/combinedopenended/combined_open_ended_legend.html b/lms/templates/combinedopenended/combined_open_ended_legend.html deleted file mode 100644 index d5d482e190..0000000000 --- a/lms/templates/combinedopenended/combined_open_ended_legend.html +++ /dev/null @@ -1,14 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- ${_("Legend")}
- % for i in xrange(0, len(legend_list)):
-   <% legend_title = legend_list[i]['name'] %>
-   <% legend_image = legend_list[i]['image'] %>
-   ${legend_title}=
- % endfor
- [image tag and surrounding markup lost in extraction]
diff --git a/lms/templates/combinedopenended/combined_open_ended_results.html b/lms/templates/combinedopenended/combined_open_ended_results.html deleted file mode 100644 index 933f744539..0000000000 --- a/lms/templates/combinedopenended/combined_open_ended_results.html +++ /dev/null @@ -1,55 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -<% num_results = len(results) %> -% for (i,result) in enumerate(results): - % if 'task_name' in result and 'result' in result: -
- [container markup lost in extraction: the result wrapper is rendered with data-status="hidden" and data-number="${i}" when i > 0, otherwise data-status="shown"]
- ${_("Submitted Rubric")}
- ## Translators: an example of what this string will look
- ## like is: "Scored rubric from grader 1", where
- ## "Scored rubric" replaces {result_of_task} and
- ## "1" replaces {number}.
- ## This string appears when a user is viewing one of
- ## their graded rubrics for an openended response problem.
- ## The number distinguishes between the different
- ## graded rubrics the user might have received.
- ${_("{result_of_task} from grader {number}").format(result_of_task = result['task_name'], number = i + 1)}
- ${result['result'] | n}
- % if result.get('feedback'):
-   ${result['feedback'] | n}
- %endif
- %endif - -% endfor diff --git a/lms/templates/combinedopenended/combined_open_ended_status.html b/lms/templates/combinedopenended/combined_open_ended_status.html deleted file mode 100644 index a74d3f03c0..0000000000 --- a/lms/templates/combinedopenended/combined_open_ended_status.html +++ /dev/null @@ -1,16 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- %for i in xrange(0, len(status_list)):
-   <%status=status_list[i]%>
-   ## the element carries a "current" marker when status['current'] is set; markup lost in extraction
-   ${status['human_task']}
- %endfor
diff --git a/lms/templates/combinedopenended/open_ended_result_table.html b/lms/templates/combinedopenended/open_ended_result_table.html deleted file mode 100644 index 6d1411b502..0000000000 --- a/lms/templates/combinedopenended/open_ended_result_table.html +++ /dev/null @@ -1,66 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -% for co in context_list: - % if co['grader_type'] in grader_type_image_dict: - <%grader_type=co['grader_type']%> - <% grader_image = grader_type_image_dict[grader_type] %> - % if grader_type in human_grader_types: - <% human_title = human_grader_types[grader_type] %> - % else: - <% human_title = grader_type %> - % endif -
- [deleted open_ended_result_table.html body; markup lost in extraction, recoverable Mako content below]
- ${co['rubric_html']}
- %if len(co['feedback'])>2:
-   ## Translators: "See full feedback" is the text of
-   ## a link that allows a user to see more detailed
-   ## feedback from a self, peer, or instructor
-   ## graded openended problem
- %endif
- %if grader_type!="SA":
-   ## Translators: this text forms a link that, when
-   ## clicked, allows a user to respond to the feedback
-   ## the user received on his or her openended problem
-   ${_("Respond to Feedback")}
-   ${_("How accurate do you find this feedback?")}
-   ${_("Additional comments:")}
- %endif
- %endif -%endfor diff --git a/lms/templates/combinedopenended/openended/open_ended.html b/lms/templates/combinedopenended/openended/open_ended.html deleted file mode 100644 index f1962b8734..0000000000 --- a/lms/templates/combinedopenended/openended/open_ended.html +++ /dev/null @@ -1,43 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- [deleted open_ended.html body; markup lost in extraction, recoverable Mako content below]
- ${prompt|n}
- % if state == 'initial':
-   ${_("Unanswered")}
- % elif state == 'assessing':
-   % if eta_message is not None:
-     ${eta_message}
-   % endif
- % endif
- % if hidden:
-   [hidden-state markup lost]
- % endif
diff --git a/lms/templates/combinedopenended/openended/open_ended_combined_rubric.html b/lms/templates/combinedopenended/openended/open_ended_combined_rubric.html deleted file mode 100644 index 6920be806d..0000000000 --- a/lms/templates/combinedopenended/openended/open_ended_combined_rubric.html +++ /dev/null @@ -1,48 +0,0 @@ -<%! from django.utils.translation import ungettext %> -
- % for i in range(len(categories)):
-   <% category = categories[i] %>
-   ${category['description']}
-   % for j in range(len(category['options'])):
-     <% option = category['options'][j] %>
-     <%
-       points_earned_msg = ungettext(
-         "{num} point: {explanatory_text}",
-         "{num} points: {explanatory_text}",
-         option['points']
-       ).format(
-         num=option['points'],
-         explanatory_text=option['text'],
-       )
-     %>
-     %if len(category['options'][j]['grader_types'])>0:
-       ## highlight markup (keyed on correct[i] being 1, .5, or 0) lost in extraction
-       ${points_earned_msg}
-     %endif
-   % endfor
- % endfor
- diff --git a/lms/templates/combinedopenended/openended/open_ended_error.html b/lms/templates/combinedopenended/openended/open_ended_error.html deleted file mode 100644 index 65b7381d60..0000000000 --- a/lms/templates/combinedopenended/openended/open_ended_error.html +++ /dev/null @@ -1,13 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- [markup lost in extraction; recoverable content below]
- ${_("There was an error with your submission. Please contact course staff.")}
- ${errors}
diff --git a/lms/templates/combinedopenended/openended/open_ended_evaluation.html b/lms/templates/combinedopenended/openended/open_ended_evaluation.html deleted file mode 100644 index 0986971f2a..0000000000 --- a/lms/templates/combinedopenended/openended/open_ended_evaluation.html +++ /dev/null @@ -1,27 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- ${msg|n}
- ## Translators: when "Respond to Feedback" is clicked, a survey
- ## appears on which a user can respond to the feedback the user
- ## received on an openended problem
- ${_("Respond to Feedback")}
- ${_("How accurate do you find this feedback?")}
- [radio-button score list markup lost in extraction]
- ${_("Additional comments:")}
diff --git a/lms/templates/combinedopenended/openended/open_ended_feedback.html b/lms/templates/combinedopenended/openended/open_ended_feedback.html deleted file mode 100644 index e16aea0b53..0000000000 --- a/lms/templates/combinedopenended/openended/open_ended_feedback.html +++ /dev/null @@ -1,10 +0,0 @@ -
- ${rubric_feedback | n}
- % if grader_type=="PE":
-   ${ feedback | n}
- % endif
- [surrounding markup lost in extraction]
diff --git a/lms/templates/combinedopenended/openended/open_ended_rubric.html b/lms/templates/combinedopenended/openended/open_ended_rubric.html deleted file mode 100644 index b2a8ac56bb..0000000000 --- a/lms/templates/combinedopenended/openended/open_ended_rubric.html +++ /dev/null @@ -1,42 +0,0 @@ -<%! -from django.utils.translation import ugettext as _ -from django.utils.translation import ungettext -from random import randint -%> -
- [deleted open_ended_rubric.html body; markup lost in extraction, recoverable Mako content below]
- ${_("Rubric")}
- ${_("Select the criteria you feel best represents this submission in each category.")}
- % for i in range(len(categories)):
-   <% category = categories[i] %>
-   <% m = randint(0,1000) %>
-   ${category['description']}
-   % for j in range(len(category['options'])):
-     <% option = category['options'][j] %>
-     ## selected/unselected option markup (keyed on option['selected']) lost in extraction
-   % endfor
- % endfor
diff --git a/lms/templates/combinedopenended/openended/open_ended_view_only_rubric.html b/lms/templates/combinedopenended/openended/open_ended_view_only_rubric.html deleted file mode 100644 index 7cd9370c47..0000000000 --- a/lms/templates/combinedopenended/openended/open_ended_view_only_rubric.html +++ /dev/null @@ -1,12 +0,0 @@ -
- % for i in range(len(categories)):
-   <% category = categories[i] %>
-   % for j in range(len(category['options'])):
-     <% option = category['options'][j] %>
-     % if option['selected']:
-       ${category['description']} : ${option['points']} |
-     % endif
-   % endfor
- % endfor
- diff --git a/lms/templates/combinedopenended/selfassessment/self_assessment_hint.html b/lms/templates/combinedopenended/selfassessment/self_assessment_hint.html deleted file mode 100644 index abdc25b77b..0000000000 --- a/lms/templates/combinedopenended/selfassessment/self_assessment_hint.html +++ /dev/null @@ -1,8 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- ${_("Please enter a hint below:")}
- [textarea markup lost in extraction]
diff --git a/lms/templates/combinedopenended/selfassessment/self_assessment_prompt.html b/lms/templates/combinedopenended/selfassessment/self_assessment_prompt.html deleted file mode 100644 index 77386caad3..0000000000 --- a/lms/templates/combinedopenended/selfassessment/self_assessment_prompt.html +++ /dev/null @@ -1,30 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -
- [deleted self_assessment_prompt.html body; markup lost in extraction, recoverable Mako content below]
- ${prompt}
- ${initial_rubric}
diff --git a/lms/templates/combinedopenended/selfassessment/self_assessment_rubric.html b/lms/templates/combinedopenended/selfassessment/self_assessment_rubric.html deleted file mode 100644 index 2986c5041a..0000000000 --- a/lms/templates/combinedopenended/selfassessment/self_assessment_rubric.html +++ /dev/null @@ -1,5 +0,0 @@ -
- ${rubric | n }
- [surrounding markup lost in extraction]
diff --git a/lms/templates/courseware/course_navigation.html b/lms/templates/courseware/course_navigation.html index 0ed756d523..d4a46efc8d 100644 --- a/lms/templates/courseware/course_navigation.html +++ b/lms/templates/courseware/course_navigation.html @@ -3,7 +3,6 @@ <%! from django.utils.translation import ugettext as _ from courseware.tabs import get_course_tab_list -from courseware.views import notification_image_for_tab from django.core.urlresolvers import reverse from django.conf import settings from openedx.core.djangoapps.course_groups.partition_scheme import get_cohorted_user_partition @@ -88,7 +87,6 @@ include_special_exams = settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and % for tab in get_course_tab_list(request, course): <% tab_is_active = (tab.tab_id == active_page) or (tab.tab_id == default_tab) - tab_image = notification_image_for_tab(tab, user, course) %>
  • diff --git a/lms/templates/instructor/staff_grading.html b/lms/templates/instructor/staff_grading.html deleted file mode 100644 index dbf8536c18..0000000000 --- a/lms/templates/instructor/staff_grading.html +++ /dev/null @@ -1,93 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -<%inherit file="/main.html" /> -<%block name="bodyclass">${course.css_class} -<%namespace name='static' file='/static_content.html'/> - -<%block name="headextra"> -<%static:css group='style-course-vendor'/> -<%static:css group='style-course'/> - - -<%block name="pagetitle">${_("{course_number} Staff Grading").format(course_number=course.display_number_with_default) | h} - -<%include file="/courseware/course_navigation.html" args="active_page='staff_grading'" /> - -<%block name="js_extra"> - <%static:js group='staff_grading'/> - - -
    diff --git a/lms/templates/open_ended_problems/combined_notifications.html b/lms/templates/open_ended_problems/combined_notifications.html deleted file mode 100644 index 7d0a6c08d0..0000000000 --- a/lms/templates/open_ended_problems/combined_notifications.html +++ /dev/null @@ -1,51 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -<%inherit file="/main.html" /> -<%block name="bodyclass">${course.css_class} -<%namespace name='static' file='/static_content.html'/> - -<%block name="headextra"> -<%static:css group='style-course-vendor'/> -<%static:css group='style-course'/> - - -<%block name="pagetitle">${_("{course_number} Combined Notifications").format(course_number=course.display_number_with_default)} - -<%include file="/courseware/course_navigation.html" args="active_page='open_ended'" /> - - -
- [deleted combined_notifications.html body; markup lost in extraction, recoverable Mako content below]
- ${error_text}
- ${_("Open Ended Console")}
- ${_("Instructions")}
- ${_("Here are items that could potentially need your attention.")}
- % if success:
-   % if len(notification_list) == 0:
-     ${_("No items require attention at the moment.")}
-   %else:
-     %for notification in notification_list:
-       ## notification link/alert markup (keyed on notification['alert']) lost in extraction
-     %endfor
-   %endif
- %endif
    diff --git a/lms/templates/open_ended_problems/open_ended_flagged_problems.html b/lms/templates/open_ended_problems/open_ended_flagged_problems.html deleted file mode 100644 index 93c35ba307..0000000000 --- a/lms/templates/open_ended_problems/open_ended_flagged_problems.html +++ /dev/null @@ -1,62 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -<%inherit file="/main.html" /> -<%block name="bodyclass">${course.css_class} -<%namespace name='static' file='/static_content.html'/> - -<%block name="headextra"> -<%static:css group='style-course-vendor'/> -<%static:css group='style-course'/> - - -<%block name="pagetitle">${_("{course_number} Flagged Open Ended Problems").format(course_number=course.display_number_with_default)} - -<%include file="/courseware/course_navigation.html" args="active_page='open_ended_flagged_problems'" /> - -<%block name="js_extra"> - <%static:js group='open_ended'/> - - -
- [deleted open_ended_flagged_problems.html body; markup lost in extraction, recoverable Mako content below]
- ${error_text}
- ${_("Flagged Open Ended Problems")}
- ${_("Instructions")}
- ${_("Here is a list of open ended problems for this course that have been flagged by students as potentially inappropriate.")}
- % if success:
-   % if len(problem_list) == 0:
-     ${_("No flagged problems exist.")}
-   %else:
-     ## table with columns ${_("Name")} and ${_("Response")}
-     %for problem in problem_list:
-       ${problem['problem_name']}
-       ${problem['student_response']}
-       ${_("Unflag")}
-       ${_("Ban")}
-     %endfor
-   %endif
- %endif
    diff --git a/lms/templates/open_ended_problems/open_ended_problems.html b/lms/templates/open_ended_problems/open_ended_problems.html deleted file mode 100644 index 33a05d088d..0000000000 --- a/lms/templates/open_ended_problems/open_ended_problems.html +++ /dev/null @@ -1,51 +0,0 @@ -<%! from django.utils.translation import ugettext as _ %> -<%inherit file="/main.html" /> -<%block name="bodyclass">${course.css_class} -<%namespace name='static' file='/static_content.html'/> - -<%block name="headextra"> -<%static:css group='style-course-vendor'/> -<%static:css group='style-course'/> - - -<%block name="pagetitle">${_("{course_number} Open Ended Problems").format(course_number=course.display_number_with_default)} - -<%include file="/courseware/course_navigation.html" args="active_page='open_ended_problems'" /> - - -
- [deleted open_ended_problems.html body; markup lost in extraction, recoverable Mako content below]
- ${error_text}
- ${_("Open Ended Problems")}
- ${_("Instructions")}
- ${_("Here is a list of open ended problems for this course.")}
- % if success:
-   % if len(problem_list) == 0:
-     ${_("You have not attempted any open ended problems yet.")}
-   %else:
-     ## table with columns ${_("Problem Name")}, ${_("Status")}, ${_("Grader Type")}
-     %for problem in problem_list:
-       ${problem['problem_name']}
-       ${problem['state']}
-       ${problem['grader_type_display_name']}
-     %endfor
-   %endif
- %endif
    \ No newline at end of file diff --git a/lms/urls.py b/lms/urls.py index 33d8237d48..1e50b236a7 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -549,61 +549,6 @@ urlpatterns += ( ), # see ENABLE_INSTRUCTOR_LEGACY_DASHBOARD section for legacy dash urls - # Open Ended grading views - url( - r'^courses/{}/staff_grading$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.views.staff_grading', - name='staff_grading', - ), - url( - r'^courses/{}/staff_grading/get_next$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.staff_grading_service.get_next', - name='staff_grading_get_next', - ), - url( - r'^courses/{}/staff_grading/save_grade$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.staff_grading_service.save_grade', - name='staff_grading_save_grade', - ), - url( - r'^courses/{}/staff_grading/get_problem_list$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.staff_grading_service.get_problem_list', - name='staff_grading_get_problem_list', - ), - - # Open Ended problem list - url( - r'^courses/{}/open_ended_problems$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.views.student_problem_list', - name='open_ended_problems', - ), - - # Open Ended flagged problem list - url( - r'^courses/{}/open_ended_flagged_problems$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.views.flagged_problem_list', - name='open_ended_flagged_problems', - ), - url( - r'^courses/{}/open_ended_flagged_problems/take_action_on_flags$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.views.take_action_on_flags', - name='open_ended_flagged_problems_take_action', - ), - # Cohorts management url( r'^courses/{}/cohorts/settings$'.format( @@ -655,23 +600,6 @@ urlpatterns += ( name='cohort_discussion_topics', ), - # Open Ended Notifications - url( - r'^courses/{}/open_ended_notifications$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.views.combined_notifications', - name='open_ended_notifications', - ), - - url( - r'^courses/{}/peer_grading$'.format( - settings.COURSE_ID_PATTERN, - ), - 'open_ended_grading.views.peer_grading', - name='peer_grading', - ), - url( r'^courses/{}/notes$'.format( settings.COURSE_ID_PATTERN, diff --git a/setup.py b/setup.py index 83478fe3b4..f384cd3684 100644 --- a/setup.py +++ b/setup.py @@ -37,11 +37,6 @@ setup( "teams = lms.djangoapps.teams.plugins:TeamsTab", "textbooks = lms.djangoapps.courseware.tabs:TextbookTabs", "wiki = lms.djangoapps.course_wiki.tab:WikiTab", - - # ORA 1 tabs (deprecated) - "peer_grading = lms.djangoapps.open_ended_grading.views:PeerGradingTab", - "staff_grading = lms.djangoapps.open_ended_grading.views:StaffGradingTab", - "open_ended = lms.djangoapps.open_ended_grading.views:OpenEndedGradingTab", ], "openedx.user_partition_scheme": [ "random = openedx.core.djangoapps.user_api.partition_schemes:RandomUserPartitionScheme",
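The setup.py hunk above removes the three ORA1 tabs from the course-tab plugin registry, where each entry-point string maps a tab type to a CourseTab subclass (e.g. "peer_grading = lms.djangoapps.open_ended_grading.views:PeerGradingTab"). As context only, here is a minimal sketch of that plugin pattern; the class name, attribute set, and gating logic are illustrative assumptions based on the CourseTab API of this era, not the removed classes themselves:

    # Hypothetical example of the course-tab plugin pattern; not the
    # actual PeerGradingTab/StaffGradingTab/OpenEndedGradingTab code.
    from xmodule.tabs import CourseTab


    class ExampleGradingTab(CourseTab):
        """Sketch of a tab analogous to the removed ORA1 tabs."""
        type = "example_grading"      # matches the entry-point name
        title = "Example Grading"     # label shown in course navigation

        @classmethod
        def is_enabled(cls, course, user=None):
            # The removed ORA1 tabs gated themselves on whether the
            # course enabled the corresponding advanced module.
            return "example_module" in (course.advanced_modules or [])

Such a class would be registered in setup.py alongside the surviving entries (teams, textbooks, wiki, and so on) with a line like "example_grading = mypackage.tabs:ExampleGradingTab" in the same entry-point group the deleted lines occupied; removing the entry point is what makes the tab type unrecognized, which is why the deprecated registrations are deleted here together with their views and URLs.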