diff --git a/common/test/acceptance/pages/lms/open_response.py b/common/test/acceptance/pages/lms/open_response.py deleted file mode 100644 index 1edffcc0f6..0000000000 --- a/common/test/acceptance/pages/lms/open_response.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -Open-ended response in the courseware. -""" - -from bok_choy.page_object import PageObject -from bok_choy.promise import EmptyPromise -from .rubric import RubricPage - - -class OpenResponsePage(PageObject): - """ - Open-ended response in the courseware. - """ - - url = None - - def is_browser_on_page(self): - return self.q(css='div.xmodule_CombinedOpenEndedModule').present - - @property - def assessment_type(self): - """ - Return the type of assessment currently active. - Options are "self", "ai", or "peer". - """ - labels = self.q(css='section#combined-open-ended-status>div.statusitem-current').text - - if len(labels) < 1: - self.warning("Could not find assessment type label") - - # Provide some tolerance to UI changes - label_compare = labels[0].lower().strip() - - if 'self' in label_compare: - return 'self' - elif 'ai' in label_compare: - return 'ai' - elif 'peer' in label_compare: - return 'peer' - else: - raise ValueError("Unexpected assessment type: '{0}'".format(label_compare)) - - @property - def prompt(self): - """ - Return an HTML string representing the essay prompt. - """ - prompt_css = "section.open-ended-child>div.prompt" - prompts = self.q(css=prompt_css).map(lambda el: el.get_attribute('innerHTML').strip()).results - - if len(prompts) == 0: - self.warning("Could not find essay prompt on page.") - return "" - - elif len(prompts) > 1: - self.warning("Multiple essay prompts found on page; using the first one.") - - return prompts[0] - - @property - def rubric(self): - """ - Return a `RubricPage` for a self-assessment problem. - If no rubric is available, raises a `BrokenPromise` exception. - """ - rubric = RubricPage(self.browser) - rubric.wait_for_page() - return rubric - - @property - def written_feedback(self): - """ - Return the written feedback from the grader (if any). - If no feedback is available, returns None. - """ - feedback = self.q(css='div.written-feedback').text - - if len(feedback) > 0: - return feedback[0] - else: - return None - - @property - def alert_message(self): - """ - Alert message displayed to the user. - """ - alerts = self.q(css="div.open-ended-alert").text - - if len(alerts) < 1: - return "" - else: - return alerts[0] - - @property - def grader_status(self): - """ - Status message from the grader. - If not present, return an empty string. - """ - status_list = self.q(css='div.grader-status').text - - if len(status_list) < 1: - self.warning("No grader status found") - return "" - - elif len(status_list) > 1: - self.warning("Multiple grader statuses found; returning the first one") - - return status_list[0] - - def set_response(self, response_str): - """ - Input a response to the prompt. - """ - input_css = "textarea.short-form-response" - self.q(css=input_css).fill(response_str) - - def save_response(self): - """ - Save the response for later submission. - """ - self.q(css='input.save-button').first.click() - EmptyPromise( - lambda: 'save' in self.alert_message.lower(), - "Status message saved" - ).fulfill() - - def submit_response(self): - """ - Submit a response for grading.
- """ - self.q(css='input.submit-button').first.click() - - # modal dialog confirmation - self.q(css='button.ok-button').first.click() - - # Ensure that the submission completes - self._wait_for_submitted(self.assessment_type) - - def _wait_for_submitted(self, assessment_type): - """ - Wait for the submission to complete. - `assessment_type` is either 'self', 'ai', or 'peer' - """ - if assessment_type == 'self': - RubricPage(self.browser).wait_for_page() - - elif assessment_type == 'ai' or assessment_type == "peer": - EmptyPromise( - lambda: self.grader_status != 'Unanswered', - "Problem status is no longer 'unanswered'" - ).fulfill() - - else: - self.warning("Unrecognized assessment type '{0}'".format(assessment_type)) - EmptyPromise(lambda: True, "Unrecognized assessment type").fulfill() diff --git a/common/test/acceptance/pages/lms/rubric.py b/common/test/acceptance/pages/lms/rubric.py deleted file mode 100644 index 1b6737cb51..0000000000 --- a/common/test/acceptance/pages/lms/rubric.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -Rubric for open-ended response problems, including calibration and peer-grading. -""" - -from bok_choy.page_object import PageObject -from bok_choy.promise import EmptyPromise - - -class ScoreMismatchError(Exception): - """ - The provided scores do not match the rubric on the page. - """ - pass - - -class RubricPage(PageObject): - """ - Rubric for open-ended response problems, including calibration and peer-grading. - """ - - url = None - - def is_browser_on_page(self): - """ - Return a boolean indicating whether the rubric is available. - """ - return self.q(css='div.rubric').present - - @property - def categories(self): - """ - Return a list of categories available in the essay rubric. - - Example: - ["Writing Applications", "Language Conventions"] - - The rubric is not always visible; if it's not available, - this will return an empty list. - """ - return self.q(css='span.rubric-category').text - - def set_scores(self, scores): - """ - Set the rubric scores. `scores` is a list of integers - indicating the number of points in each category. - - For example, `scores` might be [0, 2, 1] if the student scored - 0 points in the first category, 2 points in the second category, - and 1 point in the third category. - - If the number of scores does not match the number of categories, - a `ScoreMismatchError` is raised. 
- """ - # Warn if we have the wrong number of scores - num_categories = self.categories - if len(scores) != len(num_categories): - raise ScoreMismatchError( - "Received {0} scores but there are {1} rubric categories".format( - len(scores), num_categories)) - - # Set the score for each category - for score_index in range(len(scores)): - # Check that we have the enough radio buttons - category_css = "div.rubric>ul.rubric-list:nth-of-type({0})".format(score_index + 1) - if scores[score_index] > len(self.q(css=category_css + ' input.score-selection').results): - raise ScoreMismatchError( - "Tried to select score {0} but there are only {1} options".format( - score_index, len(scores))) - - # Check the radio button at the correct index - else: - input_css = ( - category_css + - ">li.rubric-list-item:nth-of-type({0}) input.score-selection".format(scores[score_index] + 1) - ) - - EmptyPromise(lambda: self._select_score_radio_button(input_css), "Score selection failed.").fulfill() - - def _select_score_radio_button(self, radio_button_css): - self.q(css=radio_button_css).first.click() - return self.q(css=radio_button_css).selected - - @property - def feedback(self): - """ - Return a list of correct/incorrect feedback for each rubric category (e.g. from self-assessment). - Example: ['correct', 'incorrect'] - - If no feedback is available, returns an empty list. - If feedback could not be interpreted (unexpected CSS class), - the list will contain a `None` item. - """ - # Get the green checkmark / red x labels - # We need to filter out the similar-looking CSS classes - # for the rubric items that are NOT marked correct/incorrect - feedback_css = 'div.rubric-label>label' - labels = [ - el_class for el_class in - self.q(css=feedback_css).attrs('class') - if el_class != 'rubric-elements-info' - ] - - def map_feedback(css_class): - """ - Map CSS classes on the labels to correct/incorrect - """ - if 'choicegroup_incorrect' in css_class: - return 'incorrect' - elif 'choicegroup_correct' in css_class: - return 'correct' - else: - return None - - return map(map_feedback, labels) - - def submit(self, promise_check_type=None): - """ - Submit the rubric. - `promise_check_type` is either 'self', or 'peer'. If promise check is not required then don't pass any value. - """ - # Wait for the button to become enabled - button_css = 'input.submit-button' - - EmptyPromise( - lambda: all(self.q(css=button_css).map(lambda el: not el.get_attribute('disabled')).results), - "Submit button not enabled" - ).fulfill() - - # Submit the assessment - self.q(css=button_css).first.click() - - if promise_check_type == 'self': - # Check if submitted rubric is available - EmptyPromise( - lambda: self.q(css='div.rubric-label>label').present, 'Submitted Rubric not available!' - ).fulfill() - elif promise_check_type == 'peer': - # Check if we are ready for peer grading - EmptyPromise( - lambda: self.q(css='input.calibration-feedback-button').present, 'Not ready for peer grading!' - ).fulfill() diff --git a/common/test/acceptance/tests/data/ora_ai_problem.xml b/common/test/acceptance/tests/data/ora_ai_problem.xml deleted file mode 100644 index 2b11831561..0000000000 --- a/common/test/acceptance/tests/data/ora_ai_problem.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - Writing Applications - - - - - Language Conventions - - - - - - -
Censorship in the Libraries

-

"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author

-

Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.

-
- - - - Enter essay here. - This is the answer. - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - - -
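For context on how the two deleted page objects above were driven from a test, here is a minimal sketch of the self-assessment flow. It assumes an authenticated bok-choy browser session (`browser`, an existing Selenium WebDriver) already on a unit containing a self-assessed ORA problem, and that the repository root is on `sys.path`; all method names come from the deleted classes above.

# Sketch only, not part of the diff.
from common.test.acceptance.pages.lms.open_response import OpenResponsePage

page = OpenResponsePage(browser)
page.wait_for_page()
assert page.assessment_type == 'self'
page.set_response("My essay text.")
page.save_response()        # waits for the "save" alert message
page.submit_response()      # confirms the modal; waits for the rubric on 'self'
rubric = page.rubric        # RubricPage; raises BrokenPromise if no rubric appears
rubric.set_scores([0, 1])   # one integer score per rubric category
rubric.submit('self')
print(rubric.feedback)      # e.g. ['incorrect', 'correct']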
diff --git a/common/test/acceptance/tests/data/ora_peer_problem.xml b/common/test/acceptance/tests/data/ora_peer_problem.xml deleted file mode 100644 index ff8ef01988..0000000000 --- a/common/test/acceptance/tests/data/ora_peer_problem.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - Writing Applications - - - - - Language Conventions - - - - - - -
Censorship in the Libraries

-

"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author

-

Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.

-
- - - - Enter essay here. - This is the answer. - {"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"} - - - -
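The `grader_settings` blob embedded in these problem files is plain JSON handed to the external grading service. A small illustrative sanity check (the `parse_grader_payload` helper is hypothetical, not part of this codebase):

import json

def parse_grader_payload(payload_str):
    """Parse the grader payload embedded in an ORA problem definition."""
    payload = json.loads(payload_str)
    missing = {'grader_settings', 'problem_id'} - set(payload)
    if missing:
        raise ValueError("Grader payload is missing keys: {0}".format(sorted(missing)))
    return payload

payload = parse_grader_payload('{"grader_settings": "peer_grading.conf", "problem_id": "700x/Demo"}')
assert payload['grader_settings'] == 'peer_grading.conf'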
diff --git a/common/test/acceptance/tests/data/ora_rubric.xml b/common/test/acceptance/tests/data/ora_rubric.xml deleted file mode 100644 index 5db0138ebe..0000000000 --- a/common/test/acceptance/tests/data/ora_rubric.xml +++ /dev/null @@ -1 +0,0 @@ -Writing Applications0 Language Conventions 1 diff --git a/common/test/acceptance/tests/data/ora_self_problem.xml b/common/test/acceptance/tests/data/ora_self_problem.xml deleted file mode 100644 index b76f90ce63..0000000000 --- a/common/test/acceptance/tests/data/ora_self_problem.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - Writing Applications - - - - - Language Conventions - - - - - - -
Censorship in the Libraries

-

"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author

-

Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.

-
- - - -
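Each of these data files is loaded into a test course via the `load_data_str` helper and attached to an `XBlockFixtureDesc`, the same pattern the tests below use. A condensed sketch mirroring the imports in test_ora.py (it must run inside an acceptance test module for the relative imports to resolve):

from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from .helpers import load_data_str

course_fix = CourseFixture('TestOrg', 'TestNum', 'TestRun', 'Test Course')
course_fix.add_children(
    XBlockFixtureDesc('chapter', 'Test Section').add_children(
        XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
            # Any XML file in tests/data can be attached as block data this way.
            XBlockFixtureDesc('poll', 'Poll', data=load_data_str('poll_markdown.xml')),
        )
    )
).install()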
diff --git a/common/test/acceptance/tests/studio/test_studio_outline.py b/common/test/acceptance/tests/studio/test_studio_outline.py index 4d8cbc3288..a72e0a0a4c 100644 --- a/common/test/acceptance/tests/studio/test_studio_outline.py +++ b/common/test/acceptance/tests/studio/test_studio_outline.py @@ -1628,9 +1628,9 @@ class DeprecationWarningMessageTest(CourseOutlineTest): self.course_fixture.create_xblock( parent_vertical.locator, - XBlockFixtureDesc('combinedopenended', "Open", data=load_data_str('ora_peer_problem.xml')) + XBlockFixtureDesc('poll', "Poll", data=load_data_str('poll_markdown.xml')) ) - self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('peergrading', 'Peer')) + self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('survey', 'Survey')) def _verify_deprecation_warning_info( self, @@ -1663,56 +1663,56 @@ class DeprecationWarningMessageTest(CourseOutlineTest): def test_no_deprecation_warning_message_present(self): """ - Scenario: Verify that deprecation warning message is not shown if ORA1 - advance modules are not present and also no ORA1 component exist in + Scenario: Verify that deprecation warning message is not shown if no deprecated + advance modules are present and no deprecated components exist in course outline. When I go to course outline - Then I don't see ORA1 deprecated warning + Then I don't see any deprecation warning """ self.course_outline_page.visit() self.assertFalse(self.course_outline_page.deprecated_warning_visible) def test_deprecation_warning_message_present(self): """ - Scenario: Verify deprecation warning message if ORA1 advance modules - and ORA1 components are present. + Scenario: Verify deprecation warning message if deprecated modules + and components are present. - Given I have ORA1 advance modules present in `Advanced Module List` - And I have created 2 ORA1 components + Given I have "poll" advance modules present in `Advanced Module List` + And I have created 2 poll components When I go to course outline - Then I see ORA1 deprecated warning - And I see correct ORA1 deprecated warning heading text - And I see correct ORA1 deprecated warning advance modules remove text - And I see list of ORA1 components with correct display names + Then I see poll deprecated warning + And I see correct poll deprecated warning heading text + And I see correct poll deprecated warning advance modules remove text + And I see list of poll components with correct display names """ - self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended']) + self._add_deprecated_advance_modules(block_types=['poll', 'survey']) self._create_deprecated_components() self.course_outline_page.visit() self._verify_deprecation_warning_info( deprecated_blocks_present=True, components_present=True, - components_display_name_list=['Open', 'Peer'], - deprecated_modules_list=['peergrading', 'combinedopenended'] + components_display_name_list=['Poll', 'Survey'], + deprecated_modules_list=['poll', 'survey'] ) def test_deprecation_warning_with_no_displayname(self): """ - Scenario: Verify deprecation warning message if ORA1 components are present. + Scenario: Verify deprecation warning message if poll components are present.
- Given I have created 1 ORA1 deprecated component + Given I have created 1 poll deprecated component When I go to course outline - Then I see ORA1 deprecated warning + Then I see poll deprecated warning - And I see correct ORA1 deprecated warning heading text + And I see correct poll deprecated warning heading text - And I see list of ORA1 components with correct message + And I see list of poll components with correct message """ parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0] - # Create a deprecated ORA1 component with display_name to be empty and make sure + # Create a deprecated component with an empty display_name and make sure # the deprecation warning is displayed with self.course_fixture.create_xblock( parent_vertical.locator, - XBlockFixtureDesc(category='combinedopenended', display_name="", data=load_data_str('ora_peer_problem.xml')) + XBlockFixtureDesc(category='poll', display_name="", data=load_data_str('poll_markdown.xml')) ) self.course_outline_page.visit() @@ -1722,44 +1722,44 @@ class DeprecationWarningMessageTest(CourseOutlineTest): components_display_name_list=[self.DEFAULT_DISPLAYNAME], ) - def test_warning_with_ora1_advance_modules_only(self): + def test_warning_with_poll_advance_modules_only(self): """ Scenario: Verify that deprecation warning message is shown if only - ORA1 advance modules are present and no ORA1 component exist. + poll advance modules are present and no poll components exist. - Given I have ORA1 advance modules present in `Advanced Module List` + Given I have poll advance modules present in `Advanced Module List` When I go to course outline - Then I see ORA1 deprecated warning - And I see correct ORA1 deprecated warning heading text - And I see correct ORA1 deprecated warning advance modules remove text - And I don't see list of ORA1 components + Then I see poll deprecated warning + And I see correct poll deprecated warning heading text + And I see correct poll deprecated warning advance modules remove text + And I don't see list of poll components """ - self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended']) + self._add_deprecated_advance_modules(block_types=['poll', 'survey']) self.course_outline_page.visit() self._verify_deprecation_warning_info( deprecated_blocks_present=True, components_present=False, - deprecated_modules_list=['peergrading', 'combinedopenended'] + deprecated_modules_list=['poll', 'survey'] ) - def test_warning_with_ora1_components_only(self): + def test_warning_with_poll_components_only(self): """ Scenario: Verify that deprecation warning message is shown if only - ORA1 component exist and no ORA1 advance modules are present. + poll components exist and no poll advance modules are present.
- Given I have created two ORA1 components + Given I have created two poll components When I go to course outline - Then I see ORA1 deprecated warning + Then I see poll deprecated warning - And I see correct ORA1 deprecated warning heading text + And I see correct poll deprecated warning heading text - And I don't see ORA1 deprecated warning advance modules remove text + And I don't see poll deprecated warning advance modules remove text - And I see list of ORA1 components with correct display names + And I see list of poll components with correct display names """ self._create_deprecated_components() self.course_outline_page.visit() self._verify_deprecation_warning_info( deprecated_blocks_present=False, components_present=True, - components_display_name_list=['Open', 'Peer'] + components_display_name_list=['Poll', 'Survey'] ) diff --git a/common/test/acceptance/tests/studio/test_studio_with_ora_component.py b/common/test/acceptance/tests/studio/test_studio_with_ora_component.py deleted file mode 100644 index 48902d2c71..0000000000 --- a/common/test/acceptance/tests/studio/test_studio_with_ora_component.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Acceptance tests for Studio related to edit/save peer grading interface. -""" - -from ...fixtures.course import XBlockFixtureDesc -from ...pages.studio.import_export import ExportCoursePage -from ...pages.studio.component_editor import ComponentEditorView -from ...pages.studio.overview import CourseOutlinePage -from base_studio_test import StudioCourseTest -from ..helpers import load_data_str - - -class ORAComponentTest(StudioCourseTest): - """ - Tests that edit/save is working correctly when link_to_location - is given in peer grading interface settings. - """ - - def setUp(self): - super(ORAComponentTest, self).setUp() - - self.course_outline_page = CourseOutlinePage( - self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] - ) - self.export_page = ExportCoursePage( - self.browser, - self.course_info['org'], self.course_info['number'], self.course_info['run'] - ) - - def populate_course_fixture(self, course_fixture): - """ - Return a test course fixture containing peer grading components. - """ - - course_fixture.add_children( - XBlockFixtureDesc('chapter', 'Test Section').add_children( - XBlockFixtureDesc('sequential', 'Test Subsection').add_children( - XBlockFixtureDesc('vertical', 'Test Unit').add_children( - XBlockFixtureDesc( - 'combinedopenended', - "Peer Problem", - data=load_data_str('ora_peer_problem.xml'), - metadata={ - 'graded': True, - }, - ), - XBlockFixtureDesc('peergrading', 'Peer Module'), - ) - ) - ) - ) - - def _go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'): - self.course_outline_page.visit() - subsection = self.course_outline_page.section(section_name).subsection(subsection_name) - return subsection.expand_subsection().unit(unit_name).go_to() - - def test_edit_save_and_export(self): - """ - Ensure that edit/save is working correctly with link_to_location - in peer interface settings. - """ - self.course_outline_page.visit() - unit = self._go_to_unit_page() - peer_problem_location = unit.xblocks[1].locator - - # Problem location should contain "combinedopenended". - self.assertIn("combinedopenended", peer_problem_location) - component = unit.xblocks[2] - - # Interface component name should be "Peer Module".
- self.assertEqual(component.name, "Peer Module") - component.edit() - component_editor = ComponentEditorView(self.browser, component.locator) - component_editor.set_field_value_and_save('Link to Problem Location', peer_problem_location) - - # Verify that we can edit component again after saving and link_to_location is present. - component.edit() - location_input_element = component_editor.get_setting_element("Link to Problem Location") - self.assertEqual( - location_input_element.get_attribute('value'), - peer_problem_location - ) - - def test_verify_ora1_deprecation_message(self): - """ - Scenario: Verifies the ora1 deprecation message on ora components. - - Given I have a course with ora 1 components - When I go to the unit page - Then I see a deprecation error message in ora 1 components. - """ - self.course_outline_page.visit() - unit = self._go_to_unit_page() - - for xblock in unit.xblocks: - self.assertTrue(xblock.has_validation_error) - self.assertEqual( - xblock.validation_error_text, - "ORA1 is no longer supported. To use this assessment, " - "replace this ORA1 component with an ORA2 component." - ) diff --git a/common/test/acceptance/tests/test_ora.py b/common/test/acceptance/tests/test_ora.py deleted file mode 100644 index 149a23f511..0000000000 --- a/common/test/acceptance/tests/test_ora.py +++ /dev/null @@ -1,392 +0,0 @@ -""" -Tests for ORA (Open Response Assessment) through the LMS UI. -""" - -import json -from unittest import skip - -from bok_choy.promise import Promise, BrokenPromise -from ..pages.lms.peer_confirm import PeerConfirmPage -from ..pages.lms.auto_auth import AutoAuthPage -from ..pages.lms.course_info import CourseInfoPage -from ..pages.lms.tab_nav import TabNavPage -from ..pages.lms.course_nav import CourseNavPage -from ..pages.lms.open_response import OpenResponsePage -from ..pages.lms.peer_grade import PeerGradePage -from ..pages.lms.peer_calibrate import PeerCalibratePage - -from ..pages.lms.progress import ProgressPage -from ..fixtures.course import XBlockFixtureDesc, CourseFixture -from ..fixtures.xqueue import XQueueResponseFixture - -from .helpers import load_data_str, UniqueCourseTest - - -class OpenResponseTest(UniqueCourseTest): - """ - Tests that interact with ORA (Open Response Assessment) through the LMS UI. - This base class sets up a course with open response problems and defines - some helper functions used in the ORA tests. - """ - - # Grade response (dict) to return from the XQueue stub - # in response to our unique submission text. - XQUEUE_GRADE_RESPONSE = None - - def setUp(self): - """ - Install a test course with ORA problems. - Always start in the subsection with open response problems. - """ - super(OpenResponseTest, self).setUp() - - # Create page objects - self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id) - self.course_info_page = CourseInfoPage(self.browser, self.course_id) - self.tab_nav = TabNavPage(self.browser) - self.course_nav = CourseNavPage(self.browser) - self.open_response = OpenResponsePage(self.browser) - self.peer_grade = PeerGradePage(self.browser) - self.peer_calibrate = PeerCalibratePage(self.browser) - self.peer_confirm = PeerConfirmPage(self.browser) - self.progress_page = ProgressPage(self.browser, self.course_id) - - # Configure the test course - course_fix = CourseFixture( - self.course_info['org'], self.course_info['number'], - self.course_info['run'], self.course_info['display_name'] - ) - - # Create a unique name for the peer assessed problem. 
This will show up - # in the list of peer problems, which is shared among tests running - # in parallel; it needs to be unique so we can find it. - # It's also important that the problem has "Peer" in the name; otherwise, - # the ORA stub will ignore it. - self.peer_problem_name = "Peer-Assessed {}".format(self.unique_id[0:6]) - - course_fix.add_children( - XBlockFixtureDesc('chapter', 'Test Section').add_children( - XBlockFixtureDesc('sequential', 'Test Subsection').add_children( - - XBlockFixtureDesc( - 'combinedopenended', - 'Self-Assessed', - data=load_data_str('ora_self_problem.xml'), - metadata={ - 'graded': True, - }, - ), - - XBlockFixtureDesc( - 'combinedopenended', - 'AI-Assessed', - data=load_data_str('ora_ai_problem.xml'), - metadata={ - 'graded': True, - }, - ), - - XBlockFixtureDesc( - 'combinedopenended', - self.peer_problem_name, - data=load_data_str('ora_peer_problem.xml'), - metadata={ - 'graded': True, - }, - ), - - # This is the interface a student can use to grade his/her peers - XBlockFixtureDesc('peergrading', 'Peer Module'), - - ) - ) - ).install() - - # Configure the XQueue stub's response for the text we will submit - # The submission text is unique so we can associate each response with a particular test case. - self.submission = "Test submission " + self.unique_id[0:4] - if self.XQUEUE_GRADE_RESPONSE is not None: - XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE).install() - - # Log in and navigate to the essay problems - self.auth_page.visit() - self.course_info_page.visit() - self.tab_nav.go_to_tab('Courseware') - - def submit_essay(self, expected_assessment_type, expected_prompt): - """ - Submit an essay and verify that the problem uses - the `expected_assessment_type` ("self", "ai", or "peer") and - shows the `expected_prompt` (a string). - """ - - # Check the assessment type and prompt - self.assertEqual(self.open_response.assessment_type, expected_assessment_type) - self.assertIn(expected_prompt, self.open_response.prompt) - - # Enter a submission, which will trigger a pre-defined response from the XQueue stub. - self.open_response.set_response(self.submission) - - # Save the response and expect some UI feedback - self.open_response.save_response() - self.assertEqual( - self.open_response.alert_message, - "Answer saved, but not yet submitted." - ) - - # Submit the response - self.open_response.submit_response() - - def get_asynch_feedback(self, assessment_type): - """ - Wait for and retrieve asynchronous feedback - (e.g. from AI, instructor, or peer grading). - `assessment_type` is either "ai" or "peer". - """ - # Because the check function involves fairly complicated actions - # (navigating through several screens), we give it more time to complete - # than the default. - return Promise( - self._check_feedback_func(assessment_type), - 'Got feedback for {0} problem'.format(assessment_type), - timeout=600, try_interval=5 - ).fulfill() - - def _check_feedback_func(self, assessment_type): - """ - Navigate away from, then return to, the peer problem to - receive updated feedback. - - The returned function will return a tuple `(is_success, rubric_feedback)`, - where `is_success` is True iff we have received feedback for the problem; - `rubric_feedback` is a list of "correct" or "incorrect" strings. - """ - if assessment_type == 'ai': - section_name = 'AI-Assessed' - elif assessment_type == 'peer': - section_name = self.peer_problem_name - else: - raise ValueError('Assessment type not recognized.
Must be either "ai" or "peer"') - - def _inner_check(): - self.course_nav.go_to_sequential('Self-Assessed') - self.course_nav.go_to_sequential(section_name) - - try: - feedback = self.open_response.rubric.feedback - - # Unsuccessful if the rubric hasn't loaded - except BrokenPromise: - return False, None - - # Successful if `feedback` is a non-empty list - else: - return bool(feedback), feedback - - return _inner_check - - -class SelfAssessmentTest(OpenResponseTest): - """ - Test ORA self-assessment. - """ - - def test_self_assessment(self): - """ - Given I am viewing a self-assessment problem - When I submit an essay and complete a self-assessment rubric - Then I see a scored rubric - And I see my score in the progress page. - """ - - # Navigate to the self-assessment problem and submit an essay - self.course_nav.go_to_sequential('Self-Assessed') - self.submit_essay('self', 'Censorship in the Libraries') - - # Fill in the rubric and expect that we get feedback - rubric = self.open_response.rubric - - self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"]) - rubric.set_scores([0, 1]) - rubric.submit('self') - - self.assertEqual(rubric.feedback, ['incorrect', 'correct']) - - # Verify the progress page - self.progress_page.visit() - scores = self.progress_page.scores('Test Section', 'Test Subsection') - - # The first score is self-assessment, which we've answered, so it's 1/2 - # The other scores are AI- and peer-assessment, which we haven't answered so those are 0/2 - self.assertEqual(scores, [(1, 2), (0, 2), (0, 2)]) - - -class AIAssessmentTest(OpenResponseTest): - """ - Test ORA AI-assessment. - """ - - XQUEUE_GRADE_RESPONSE = { - 'score': 1, - 'feedback': json.dumps({"spelling": "Ok.", "grammar": "Ok.", "markup_text": "NA"}), - 'grader_type': 'BC', - 'success': True, - 'grader_id': 1, - 'submission_id': 1, - 'rubric_scores_complete': True, - 'rubric_xml': load_data_str('ora_rubric.xml') - } - - @skip('Intermittently failing, see ORA-342') - def test_ai_assessment(self): - """ - Given I am viewing an AI-assessment problem that has a trained ML model - When I submit an essay and wait for a response - Then I see a scored rubric - And I see my score in the progress page. - """ - - # Navigate to the AI-assessment problem and submit an essay - self.course_nav.go_to_sequential('AI-Assessed') - self.submit_essay('ai', 'Censorship in the Libraries') - - # Refresh the page to get the updated feedback - # then verify that we get the feedback sent by our stub XQueue implementation - self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct']) - - # Verify the progress page - self.progress_page.visit() - scores = self.progress_page.scores('Test Section', 'Test Subsection') - - # First score is the self-assessment score, which we haven't answered, so it's 0/2 - # Second score is the AI-assessment score, which we have answered, so it's 1/2 - # Third score is peer-assessment, which we haven't answered, so it's 0/2 - self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)]) - - -class InstructorAssessmentTest(OpenResponseTest): - """ - Test an AI-assessment that has been graded by an instructor. - This runs the same test as the AI-assessment test, except - that the feedback comes from an instructor instead of the machine grader. - From the student's perspective, it should look the same. 
- """ - - XQUEUE_GRADE_RESPONSE = { - 'score': 1, - 'feedback': json.dumps({"feedback": "Good job!"}), - 'grader_type': 'IN', - 'success': True, - 'grader_id': 1, - 'submission_id': 1, - 'rubric_scores_complete': True, - 'rubric_xml': load_data_str('ora_rubric.xml') - } - - @skip('Intermittently failing, see ORA-342') - def test_instructor_assessment(self): - """ - Given an instructor has graded my submission - When I view my submission - Then I see a scored rubric - And my progress page shows the problem score. - """ - - # Navigate to the AI-assessment problem and submit an essay - # We have configured the stub to simulate that this essay will be staff-graded - self.course_nav.go_to_sequential('AI-Assessed') - self.submit_essay('ai', 'Censorship in the Libraries') - - # Refresh the page to get the updated feedback - # then verify that we get the feedback sent by our stub XQueue implementation - self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct']) - - # Verify the progress page - self.progress_page.visit() - scores = self.progress_page.scores('Test Section', 'Test Subsection') - - # First score is the self-assessment score, which we haven't answered, so it's 0/2 - # Second score is the AI-assessment score, which we have answered, so it's 1/2 - # Third score is peer-assessment, which we haven't answered, so it's 0/2 - self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)]) - - -class PeerAssessmentTest(OpenResponseTest): - """ - Test ORA peer-assessment, including calibration and giving/receiving scores. - """ - - # Unlike other assessment types, peer assessment has multiple scores - XQUEUE_GRADE_RESPONSE = { - 'score': [2, 2, 2], - 'feedback': [json.dumps({"feedback": ""})] * 3, - 'grader_type': 'PE', - 'success': True, - 'grader_id': [1, 2, 3], - 'submission_id': 1, - 'rubric_scores_complete': [True, True, True], - 'rubric_xml': [load_data_str('ora_rubric.xml')] * 3 - } - - def test_peer_calibrate_and_grade(self): - """ - Given I am viewing a peer-assessment problem - And the instructor has submitted enough example essays - When I submit acceptable scores for enough calibration essays - Then I am able to peer-grade other students' essays. - - Given I have submitted an essay for peer-assessment - And I have peer-graded enough students essays - And enough other students have scored my essay - Then I can view the scores and written feedback - And I see my score in the progress page. - """ - # Initially, the student should NOT be able to grade peers, - # because he/she hasn't submitted any essays. 
- self.course_nav.go_to_sequential('Peer Module') - self.assertIn("You currently do not have any peer grading to do", self.peer_calibrate.message) - - # Submit an essay - self.course_nav.go_to_sequential(self.peer_problem_name) - self.submit_essay('peer', 'Censorship in the Libraries') - - # Need to reload the page to update the peer grading module - self.course_info_page.visit() - self.tab_nav.go_to_tab('Courseware') - self.course_nav.go_to_section('Test Section', 'Test Subsection') - - # Select the problem to calibrate - self.course_nav.go_to_sequential('Peer Module') - self.assertIn(self.peer_problem_name, self.peer_grade.problem_list) - self.peer_grade.select_problem(self.peer_problem_name) - - # Calibrate - self.peer_confirm.start(is_calibrating=True) - rubric = self.peer_calibrate.rubric - self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"]) - rubric.set_scores([0, 1]) - rubric.submit('peer') - self.peer_calibrate.continue_to_grading() - - # Grade a peer - self.peer_confirm.start() - rubric = self.peer_grade.rubric - self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"]) - rubric.set_scores([0, 1]) - rubric.submit() - - # Expect to receive essay feedback - # We receive feedback from all three peers, each of whom - # provides 2 scores (one for each rubric item) - # Written feedback is a dummy value sent by the XQueue stub. - self.course_nav.go_to_sequential(self.peer_problem_name) - self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3) - - # Verify the progress page - self.progress_page.visit() - scores = self.progress_page.scores('Test Section', 'Test Subsection') - - # First score is the self-assessment score, which we haven't answered, so it's 0/2 - # Second score is the AI-assessment score, which we haven't answered, so it's 0/2 - # Third score is peer-assessment, which we have answered, so it's 2/2 - self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)])
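All of the asynchronous-feedback helpers above reduce to bok-choy's Promise polling. Stripped to its core, the pattern the deleted tests relied on looks like this (a sketch with a stand-in check function; the timeout and interval values are the ones used in get_asynch_feedback above):

from bok_choy.promise import Promise

def wait_for_feedback(check_func, assessment_type):
    """Poll `check_func` until it reports success, allowing for slow grader turnaround."""
    # `check_func` returns (is_success, result); fulfill() returns `result` once
    # `is_success` is truthy, or raises BrokenPromise after the timeout expires.
    return Promise(
        check_func,
        'Got feedback for {0} problem'.format(assessment_type),
        timeout=600, try_interval=5
    ).fulfill()

# Trivial usage with a check that succeeds immediately:
print(wait_for_feedback(lambda: (True, ['correct']), 'peer'))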