From b73c6f63fce0ea4a7f193af9ab7e5f94cbbcaa42 Mon Sep 17 00:00:00 2001 From: Will Daly Date: Thu, 16 Jan 2014 11:02:36 -0500 Subject: [PATCH] Added stub XQueue server to bok_choy test suite. Added feedback check for AI-assessment test Added peer assessment feedback test --- common/djangoapps/terrain/start_stubs.py | 2 - common/djangoapps/terrain/stubs/http.py | 78 ++--- common/djangoapps/terrain/stubs/start.py | 72 +++++ .../terrain/stubs/tests/test_http.py | 34 ++- .../terrain/stubs/tests/test_xqueue_stub.py | 188 ++++++++++--- .../terrain/stubs/tests/test_youtube_stub.py | 2 +- common/djangoapps/terrain/stubs/xqueue.py | 68 ++++- common/djangoapps/terrain/stubs/youtube.py | 2 +- .../edxapp_pages/lms/open_response.py | 5 +- .../acceptance/edxapp_pages/lms/progress.py | 2 +- common/test/acceptance/fixtures/__init__.py | 3 + common/test/acceptance/fixtures/xqueue.py | 42 +++ .../tests/data/ora_peer_problem.xml | 30 ++ .../test/acceptance/tests/data/ora_rubric.xml | 1 + common/test/acceptance/tests/helpers.py | 32 ++- common/test/acceptance/tests/test_ora.py | 266 +++++++++++++++--- common/test/acceptance/tests/test_studio.py | 26 +- .../courseware/features/problems.py | 10 +- lms/djangoapps/courseware/features/video.py | 2 +- lms/envs/bok_choy.auth.json | 2 +- lms/envs/bok_choy.py | 3 + rakelib/bok_choy.rake | 22 +- 22 files changed, 706 insertions(+), 186 deletions(-) create mode 100644 common/djangoapps/terrain/stubs/start.py create mode 100644 common/test/acceptance/fixtures/xqueue.py create mode 100644 common/test/acceptance/tests/data/ora_peer_problem.xml create mode 100644 common/test/acceptance/tests/data/ora_rubric.xml diff --git a/common/djangoapps/terrain/start_stubs.py b/common/djangoapps/terrain/start_stubs.py index aec88a1cc1..7d115d04a7 100644 --- a/common/djangoapps/terrain/start_stubs.py +++ b/common/djangoapps/terrain/start_stubs.py @@ -8,8 +8,6 @@ from terrain.stubs.youtube import StubYouTubeService from terrain.stubs.xqueue import StubXQueueService -USAGE = "USAGE: python -m fakes.start SERVICE_NAME PORT_NUM" - SERVICES = { "youtube": {"port": settings.YOUTUBE_PORT, "class": StubYouTubeService}, "xqueue": {"port": settings.XQUEUE_PORT, "class": StubXQueueService}, diff --git a/common/djangoapps/terrain/stubs/http.py b/common/djangoapps/terrain/stubs/http.py index 14dcf8ac2f..0a202c413a 100644 --- a/common/djangoapps/terrain/stubs/http.py +++ b/common/djangoapps/terrain/stubs/http.py @@ -23,14 +23,13 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object): """ Redirect messages to keep the test console clean. """ + LOGGER.debug(self._format_msg(format_str, *args)) - msg = "{0} - - [{1}] {2}\n".format( - self.client_address[0], - self.log_date_time_string(), - format_str % args - ) - - LOGGER.debug(msg) + def log_error(self, format_str, *args): + """ + Helper to log a server error. + """ + LOGGER.error(self._format_msg(format_str, *args)) @lazy def request_content(self): @@ -76,22 +75,39 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object): def do_PUT(self): """ Allow callers to configure the stub server using the /set_config URL. + The request should have POST data, such that: + + Each POST parameter is the configuration key. + Each POST value is a JSON-encoded string value for the configuration. 
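+
+        For example, sending the parameter 'response_delay' with the value '2'
+        (a JSON-encoded integer) stores the integer 2 under the config key
+        'response_delay'.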
""" if self.path == "/set_config" or self.path == "/set_config/": - for key, value in self.post_dict.iteritems(): - self.log_message("Set config '{0}' to '{1}'".format(key, value)) + if len(self.post_dict) > 0: + for key, value in self.post_dict.iteritems(): - try: - value = json.loads(value) + # Decode the params as UTF-8 + try: + key = unicode(key, 'utf-8') + value = unicode(value, 'utf-8') + except UnicodeDecodeError: + self.log_message("Could not decode request params as UTF-8") - except ValueError: - self.log_message(u"Could not parse JSON: {0}".format(value)) - self.send_response(400) + self.log_message(u"Set config '{0}' to '{1}'".format(key, value)) - else: - self.server.set_config(unicode(key, 'utf-8'), value) - self.send_response(200) + try: + value = json.loads(value) + + except ValueError: + self.log_message(u"Could not parse JSON: {0}".format(value)) + self.send_response(400) + + else: + self.server.config[key] = value + self.send_response(200) + + # No parameters sent to configure, so return success by default + else: + self.send_response(200) else: self.send_response(404) @@ -119,6 +135,18 @@ class StubHttpRequestHandler(BaseHTTPRequestHandler, object): if content is not None: self.wfile.write(content) + def _format_msg(self, format_str, *args): + """ + Format message for logging. + `format_str` is a string with old-style Python format escaping; + `args` is an array of values to fill into the string. + """ + return u"{0} - - [{1}] {2}\n".format( + self.client_address[0], + self.log_date_time_string(), + format_str % args + ) + class StubHttpService(HTTPServer, object): """ @@ -138,7 +166,7 @@ class StubHttpService(HTTPServer, object): HTTPServer.__init__(self, address, self.HANDLER_CLASS) # Create a dict to store configuration values set by the client - self._config = dict() + self.config = dict() # Start the server in a separate thread server_thread = threading.Thread(target=self.serve_forever) @@ -165,17 +193,3 @@ class StubHttpService(HTTPServer, object): """ _, port = self.server_address return port - - def config(self, key, default=None): - """ - Return the configuration value for `key`. If this - value has not been set, return `default` instead. - """ - return self._config.get(key, default) - - def set_config(self, key, value): - """ - Set the configuration `value` for `key`. - """ - self._config[key] = value - diff --git a/common/djangoapps/terrain/stubs/start.py b/common/djangoapps/terrain/stubs/start.py new file mode 100644 index 0000000000..1533607583 --- /dev/null +++ b/common/djangoapps/terrain/stubs/start.py @@ -0,0 +1,72 @@ +""" +Command-line utility to start a stub service. +""" +import sys +import time +import logging +from .xqueue import StubXQueueService +from .youtube import StubYouTubeService + + +USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM" + +SERVICES = { + 'xqueue': StubXQueueService, + 'youtube': StubYouTubeService +} + +# Log to stdout, including debug messages +logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s") + + +def get_args(): + """ + Parse arguments, returning tuple of `(service_name, port_num)`. + Exits with a message if arguments are invalid. + """ + if len(sys.argv) < 3: + print USAGE + sys.exit(1) + + service_name = sys.argv[1] + port_num = sys.argv[2] + + if service_name not in SERVICES: + print "Unrecognized service '{0}'. 
Valid choices are: {1}".format( + service_name, ", ".join(SERVICES.keys())) + sys.exit(1) + + try: + port_num = int(port_num) + if port_num < 0: + raise ValueError + + except ValueError: + print "Port '{0}' must be a positive integer".format(port_num) + sys.exit(1) + + return service_name, port_num + + +def main(): + """ + Start a server; shut down on keyboard interrupt signal. + """ + service_name, port_num = get_args() + print "Starting stub service '{0}' on port {1}...".format(service_name, port_num) + + server = SERVICES[service_name](port_num=port_num) + + try: + while True: + time.sleep(1) + + except KeyboardInterrupt: + print "Stopping stub service..." + + finally: + server.shutdown() + + +if __name__ == "__main__": + main() diff --git a/common/djangoapps/terrain/stubs/tests/test_http.py b/common/djangoapps/terrain/stubs/tests/test_http.py index fb09bad173..ffad4cd88f 100644 --- a/common/djangoapps/terrain/stubs/tests/test_http.py +++ b/common/djangoapps/terrain/stubs/tests/test_http.py @@ -13,6 +13,7 @@ class StubHttpServiceTest(unittest.TestCase): def setUp(self): self.server = StubHttpService() self.addCleanup(self.server.shutdown) + self.url = "http://127.0.0.1:{0}/set_config".format(self.server.port) def test_configure(self): """ @@ -21,33 +22,38 @@ class StubHttpServiceTest(unittest.TestCase): """ params = { 'test_str': 'This is only a test', + 'test_empty': '', 'test_int': 12345, 'test_float': 123.45, + 'test_dict': { 'test_key': 'test_val' }, + 'test_empty_dict': {}, 'test_unicode': u'\u2603 the snowman', - 'test_dict': { 'test_key': 'test_val' } + 'test_none': None, + 'test_boolean': False } for key, val in params.iteritems(): - post_params = {key: json.dumps(val)} - response = requests.put( - "http://127.0.0.1:{0}/set_config".format(self.server.port), - data=post_params - ) + # JSON-encode each parameter + post_params = {key: json.dumps(val)} + response = requests.put(self.url, data=post_params) self.assertEqual(response.status_code, 200) # Check that the expected values were set in the configuration for key, val in params.iteritems(): - self.assertEqual(self.server.config(key), val) - - def test_default_config(self): - self.assertEqual(self.server.config('not_set', default=42), 42) + self.assertEqual(self.server.config.get(key), val) def test_bad_json(self): - response = requests.put( - "http://127.0.0.1:{0}/set_config".format(self.server.port), - data="{,}" - ) + response = requests.put(self.url, data="{,}") + self.assertEqual(response.status_code, 400) + + def test_no_post_data(self): + response = requests.put(self.url, data={}) + self.assertEqual(response.status_code, 200) + + def test_unicode_non_json(self): + # Send unicode without json-encoding it + response = requests.put(self.url, data={'test_unicode': u'\u2603 the snowman'}) self.assertEqual(response.status_code, 400) def test_unknown_path(self): diff --git a/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py b/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py index 222792ebb3..5ac170b187 100644 --- a/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py +++ b/common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py @@ -5,70 +5,172 @@ Unit tests for stub XQueue implementation. 
 """

 import mock
 import unittest
 import json
-import urllib
+import requests
 import time
+import copy
 from terrain.stubs.xqueue import StubXQueueService, StubXQueueHandler


 class StubXQueueServiceTest(unittest.TestCase):

     def setUp(self):
         self.server = StubXQueueService()
-        self.url = "http://127.0.0.1:{0}".format(self.server.port)
+        self.url = "http://127.0.0.1:{0}/xqueue/submit".format(self.server.port)
         self.addCleanup(self.server.shutdown)

         # For testing purposes, do not delay the grading response
-        self.server.set_config('response_delay', 0)
+        self.server.config['response_delay'] = 0

-    @mock.patch('requests.post')
+    @mock.patch('terrain.stubs.xqueue.post')
     def test_grade_request(self, post):
-        # Send a grade request
+        # Post a submission to the stub XQueue
         callback_url = 'http://127.0.0.1:8000/test_callback'
+        expected_header = self._post_submission(
+            callback_url, 'test_queuekey', 'test_queue',
+            json.dumps({
+                'student_info': 'test',
+                'grader_payload': 'test',
+                'student_response': 'test'
+            })
+        )

-        grade_header = json.dumps({
-            'lms_callback_url': callback_url,
-            'lms_key': 'test_queuekey',
-            'queue_name': 'test_queue'
-        })
+        # Check the response we receive
+        # (Should be the default grading response)
+        expected_body = json.dumps({'correct': True, 'score': 1, 'msg': '<div></div>'})
+        self._check_grade_response(post, callback_url, expected_header, expected_body)

-        grade_body = json.dumps({
-            'student_info': 'test',
-            'grader_payload': 'test',
-            'student_response': 'test'
-        })
+    @mock.patch('terrain.stubs.xqueue.post')
+    def test_configure_default_response(self, post):
+        # Configure the default response for submissions to any queue
+        response_content = {'test_response': 'test_content'}
+        self.server.config['default'] = response_content
+
+        # Post a submission to the stub XQueue
+        callback_url = 'http://127.0.0.1:8000/test_callback'
+        expected_header = self._post_submission(
+            callback_url, 'test_queuekey', 'test_queue',
+            json.dumps({
+                'student_info': 'test',
+                'grader_payload': 'test',
+                'student_response': 'test'
+            })
+        )
+
+        # Check the response we receive
+        # (Should be the default grading response)
+        self._check_grade_response(
+            post, callback_url, expected_header, json.dumps(response_content)
+        )
+
+    @mock.patch('terrain.stubs.xqueue.post')
+    def test_configure_specific_response(self, post):
+
+        # Configure the XQueue stub response to any submission to the test queue
+        response_content = {'test_response': 'test_content'}
+        self.server.config['This is only a test.'] = response_content
+
+        # Post a submission to the XQueue stub
+        callback_url = 'http://127.0.0.1:8000/test_callback'
+        expected_header = self._post_submission(
+            callback_url, 'test_queuekey', 'test_queue',
+            json.dumps({'submission': 'This is only a test.'})
+        )
+
+        # Check that we receive the response we configured
+        self._check_grade_response(
+            post, callback_url, expected_header, json.dumps(response_content)
+        )
+
+    @mock.patch('terrain.stubs.xqueue.post')
+    def test_multiple_response_matches(self, post):
+
+        # Configure the XQueue stub with two responses that
+        # match the same submission
+        self.server.config['test_1'] = {'response': True}
+        self.server.config['test_2'] = {'response': False}
+
+        with mock.patch('terrain.stubs.http.LOGGER') as logger:
+
+            # Post a submission to the XQueue stub
+            callback_url = 'http://127.0.0.1:8000/test_callback'
+            expected_header = self._post_submission(
+                callback_url, 'test_queuekey', 'test_queue',
+                json.dumps({'submission': 'test_1 and test_2'})
+            )
+
+            # Wait for the delayed grade response
+            self._wait_for_mock_called(logger.error, max_time=10)
+
+            # Expect that we do NOT receive a response
+            # and that an error message is logged
+            self.assertFalse(post.called)
+            self.assertTrue(logger.error.called)
+
+    def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
+        """
+        Post a submission to the stub XQueue implementation.
+        `callback_url` is the URL at which we expect to receive a grade response
+        `lms_key` is the authentication key sent in the header
+        `queue_name` is the name of the queue in which to put the submission
+        `xqueue_body` is the content of the submission
+
+        Returns the header (a string) we send with the submission, which can
+        be used to validate the response we receive from the stub.
+        """
+
+        # Post a submission to the XQueue stub
         grade_request = {
-            'xqueue_header': grade_header,
-            'xqueue_body': grade_body
-        }
-
-        response_handle = urllib.urlopen(
-            self.url + '/xqueue/submit',
-            urllib.urlencode(grade_request)
-        )
-
-        response_dict = json.loads(response_handle.read())
-
-        # Expect that the response is success
-        self.assertEqual(response_dict['return_code'], 0)
-
-        # Expect that the server tries to post back the grading info
-        xqueue_body = json.dumps(
-            {'correct': True, 'score': 1, 'msg': '<div></div>'}
-        )
-
-        expected_callback_dict = {
-            'xqueue_header': grade_header,
+            'xqueue_header': json.dumps({
+                'lms_callback_url': callback_url,
+                'lms_key': lms_key,
+                'queue_name': queue_name
+            }),
             'xqueue_body': xqueue_body
         }
+        resp = requests.post(self.url, data=grade_request)
+
+        # Expect that the response is success
+        self.assertEqual(resp.status_code, 200)
+
+        # Return the header, so we can authenticate the response we receive
+        return grade_request['xqueue_header']
+
+    def _check_grade_response(self, post_mock, callback_url, expected_header, expected_body):
+        """
+        Verify that the stub sent a POST request back to us
+        with the expected data.
+
+        `post_mock` is our mock for `requests.post`
+        `callback_url` is the URL we expect the stub to POST to
+        `expected_header` is the header (a string) we expect to receive with the grade.
+        `expected_body` is the content (a string) we expect to receive with the grade.
+
+        Raises an `AssertionError` if the check fails.
+        """
         # Wait for the server to POST back to the callback URL
-        # Time out if it takes too long
-        start_time = time.time()
-        while time.time() - start_time < 5:
-            if post.called:
-                break
+        # If it takes too long, continue anyway
+        self._wait_for_mock_called(post_mock, max_time=10)
+
+        # Check the response posted back to us
+        expected_callback_dict = {
+            'xqueue_header': expected_header,
+            'xqueue_body': expected_body,
+        }

         # Check that the POST request was made with the correct params
-        post.assert_called_with(callback_url, data=expected_callback_dict)
+        post_mock.assert_called_with(callback_url, data=expected_callback_dict)
+
+    def _wait_for_mock_called(self, mock_obj, max_time=10):
+        """
+        Wait for `mock_obj` (a `Mock` object) to be called.
+        If seconds elapsed exceeds `max_time`, continue without error.
+        """
+        start_time = time.time()
+        while time.time() - start_time < max_time:
+            if mock_obj.called:
+                break
+            time.sleep(1)
diff --git a/common/djangoapps/terrain/stubs/tests/test_youtube_stub.py b/common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
index 3c8ef46908..0e5f27aca8 100644
--- a/common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
+++ b/common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
@@ -12,7 +12,7 @@ class StubYouTubeServiceTest(unittest.TestCase):
     def setUp(self):
         self.server = StubYouTubeService()
         self.url = "http://127.0.0.1:{0}/".format(self.server.port)
-        self.server.set_config('time_to_response', 0.0)
+        self.server.config['time_to_response'] = 0.0
         self.addCleanup(self.server.shutdown)

     def test_unused_url(self):
diff --git a/common/djangoapps/terrain/stubs/xqueue.py b/common/djangoapps/terrain/stubs/xqueue.py
index 96e8e78f7b..52ca14ae34 100644
--- a/common/djangoapps/terrain/stubs/xqueue.py
+++ b/common/djangoapps/terrain/stubs/xqueue.py
@@ -1,10 +1,17 @@
 """
 Stub implementation of XQueue for acceptance tests.
+
+Configuration values:
+    "default" (dict): Default response to be sent to the LMS as a grade for a submission
+    "<submission text>" (dict): Grade response to return for submissions containing "<submission text>"
+
+If no grade response is configured, a default response will be returned.
 """

 from .http import StubHttpRequestHandler, StubHttpService
 import json
-import requests
+import copy
+from requests import post
 import threading


@@ -34,15 +41,13 @@ class StubXQueueHandler(StubHttpRequestHandler):

         except KeyError:
             # If the message doesn't have a header or body,
-            # then it's malformed.
-            # Respond with failure
+            # then it's malformed.  Respond with failure
             error_msg = "XQueue received invalid grade request"
             self._send_immediate_response(False, message=error_msg)

         except ValueError:
             # If we could not decode the body or header,
             # respond with failure
-
             error_msg = "XQueue could not decode grade request"
             self._send_immediate_response(False, message=error_msg)

@@ -56,19 +61,18 @@ class StubXQueueHandler(StubHttpRequestHandler):
             # Otherwise, the problem will not realize it's
             # queued and it will keep waiting for a response indefinitely
             delayed_grade_func = lambda: self._send_grade_response(
-                callback_url, xqueue_header
+                callback_url, xqueue_header, self.post_dict['xqueue_body']
             )

             threading.Timer(
-                self.server.config('response_delay', default=self.DEFAULT_RESPONSE_DELAY),
+                self.server.config.get('response_delay', self.DEFAULT_RESPONSE_DELAY),
                 delayed_grade_func
             ).start()

         # If we get a request that's not to the grading submission
         # URL, return an error
         else:
-            error_message = "Invalid request URL"
-            self._send_immediate_response(False, message=error_message)
+            self._send_immediate_response(False, message="Invalid request URL")

     def _send_immediate_response(self, success, message=""):
         """
@@ -90,13 +94,49 @@ class StubXQueueHandler(StubHttpRequestHandler):
         else:
             self.send_response(500)

-    def _send_grade_response(self, postback_url, xqueue_header):
+    def _send_grade_response(self, postback_url, xqueue_header, xqueue_body_json):
         """
         POST the grade response back to the client
-        using the response provided by the server configuration
+        using the response provided by the server configuration.
+
+        Uses the server configuration to determine what response to send:
+        1) Specific response for submissions containing matching text in `xqueue_body`
+        2) Default response configured by the client
+        3) The stub's built-in default response
+
+        `postback_url` is the URL the client told us to post back to
+        `xqueue_header` (dict) is the full header the client sent us, which we will send back
+        to the client so it can authenticate us.
+        `xqueue_body_json` (json-encoded string) is the body of the submission the client sent us.
         """
-        # Get the grade response from the server configuration
-        grade_response = self.server.config('grade_response', default=self.DEFAULT_GRADE_RESPONSE)
+        # First check if we have a configured response that matches the submission body
+        grade_response = None
+
+        # This matches the pattern against the JSON-encoded xqueue_body
+        # This is very simplistic, but sufficient to associate a student response
+        # with a grading response.
+        # There is a danger here that a submission will match multiple response patterns.
+        # Rather than fail silently (which could cause unpredictable behavior in tests)
+        # we abort and log a debugging message.
+        for pattern, response in self.server.config.iteritems():
+
+            if pattern in xqueue_body_json:
+                if grade_response is None:
+                    grade_response = response
+
+                # Multiple matches, so abort and log an error
+                else:
+                    self.log_error(
+                        "Multiple response patterns matched '{0}'".format(xqueue_body_json)
+                    )
+                    return
+
+        # Fall back to the default grade response configured for this queue,
+        # then to the default response.
+        if grade_response is None:
+            grade_response = self.server.config.get(
+                'default', copy.deepcopy(self.DEFAULT_GRADE_RESPONSE)
+            )

         # Wrap the message in <div> tags to ensure that it is valid XML
         if isinstance(grade_response, dict) and 'msg' in grade_response:
@@ -107,8 +147,8 @@ class StubXQueueHandler(StubHttpRequestHandler):
             'xqueue_body': json.dumps(grade_response)
         }

-        requests.post(postback_url, data=data)
-        self.log_message("XQueue: sent grading response {0}".format(data))
+        post(postback_url, data=data)
+        self.log_message("XQueue: sent grading response {0} to {1}".format(data, postback_url))

     def _is_grade_request(self):
         return 'xqueue/submit' in self.path
diff --git a/common/djangoapps/terrain/stubs/youtube.py b/common/djangoapps/terrain/stubs/youtube.py
index fc68cc3da0..1facc16e83 100644
--- a/common/djangoapps/terrain/stubs/youtube.py
+++ b/common/djangoapps/terrain/stubs/youtube.py
@@ -72,7 +72,7 @@ class StubYouTubeHandler(StubHttpRequestHandler):
         Requires sending back callback id.
         """
         # Delay the response to simulate network latency
-        time.sleep(self.server.config('time_to_response', self.DEFAULT_DELAY_SEC))
+        time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))

         # Construct the response content
         callback = self.get_params['callback'][0]
diff --git a/common/test/acceptance/edxapp_pages/lms/open_response.py b/common/test/acceptance/edxapp_pages/lms/open_response.py
index 712d5577d8..c61b6017bd 100644
--- a/common/test/acceptance/edxapp_pages/lms/open_response.py
+++ b/common/test/acceptance/edxapp_pages/lms/open_response.py
@@ -224,15 +224,12 @@ class OpenResponsePage(PageObject):
         if assessment_type == 'self':
             return EmptyPromise(lambda: self.has_rubric, "Rubric has appeared")

-        elif assessment_type == 'ai':
+        elif assessment_type == 'ai' or assessment_type == 'peer':
             return EmptyPromise(
                 lambda: self.grader_status != 'Unanswered',
                 "Problem status is no longer 'unanswered'"
             )

-        elif assessment_type == 'peer':
-            return EmptyPromise(lambda: False, "Peer assessment not yet implemented")
-
         else:
             self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
             return EmptyPromise(lambda: True, "Unrecognized assessment type")
diff --git a/common/test/acceptance/edxapp_pages/lms/progress.py b/common/test/acceptance/edxapp_pages/lms/progress.py
index a0fe656e85..af496234e1 100644
--- a/common/test/acceptance/edxapp_pages/lms/progress.py
+++ b/common/test/acceptance/edxapp_pages/lms/progress.py
@@ -27,7 +27,7 @@ class ProgressPage(PageObject):
         for the section.

         Example:
-            section_scores('Week 1', 'Lesson 1', 2) --> [(2, 4), (0, 1)]
+            scores('Week 1', 'Lesson 1') --> [(2, 4), (0, 1)]

         Returns `None` if no such chapter and section can be found.
         """
diff --git a/common/test/acceptance/fixtures/__init__.py b/common/test/acceptance/fixtures/__init__.py
index 0f6c1937f4..b15d698df8 100644
--- a/common/test/acceptance/fixtures/__init__.py
+++ b/common/test/acceptance/fixtures/__init__.py
@@ -2,3 +2,6 @@ import os

 # Get the URL of the instance under test
 STUDIO_BASE_URL = os.environ.get('studio_url', 'http://localhost:8031')
+
+# Get the URL of the XQueue stub used in the tests
+XQUEUE_STUB_URL = os.environ.get('xqueue_url', 'http://localhost:8040')
diff --git a/common/test/acceptance/fixtures/xqueue.py b/common/test/acceptance/fixtures/xqueue.py
new file mode 100644
index 0000000000..4eacace76d
--- /dev/null
+++ b/common/test/acceptance/fixtures/xqueue.py
@@ -0,0 +1,42 @@
+"""
+Fixture to configure XQueue response.
+"""
+
+import requests
+import json
+from bok_choy.web_app_fixture import WebAppFixture, WebAppFixtureError
+from . import XQUEUE_STUB_URL
+
+
+class XQueueResponseFixture(WebAppFixture):
+    """
+    Configure the XQueue stub's response to submissions.
+    """
+
+    def __init__(self, queue_name, response_dict):
+        """
+        Configure XQueue stub to POST `response_dict` (a dictionary)
+        back to the LMS when it receives a submission to a queue
+        named `queue_name`.
+
+        Remember that there is one XQueue stub shared by all the tests;
+        if possible, you should have tests use unique queue names
+        to avoid conflict between tests running in parallel.
+        """
+        self._queue_name = queue_name
+        self._response_dict = response_dict
+
+    def install(self):
+        """
+        Configure the stub via HTTP.
+        """
+        url = XQUEUE_STUB_URL + "/set_config"
+
+        # Configure the stub to respond to submissions to our queue
+        payload = {self._queue_name: json.dumps(self._response_dict)}
+        response = requests.put(url, data=payload)
+
+        if not response.ok:
+            raise WebAppFixtureError(
+                "Could not configure XQueue stub for queue '{0}'. Status code: {1}".format(
+                    self._queue_name, response.status_code))
diff --git a/common/test/acceptance/tests/data/ora_peer_problem.xml b/common/test/acceptance/tests/data/ora_peer_problem.xml
new file mode 100644
index 0000000000..ff8ef01988
--- /dev/null
+++ b/common/test/acceptance/tests/data/ora_peer_problem.xml
@@ -0,0 +1,30 @@
+<combinedopenended>
+    <rubric>
+        <rubric>
+            <category>
+                <description>Writing Applications</description>
+                <option/>
+                <option/>
+            </category>
+            <category>
+                <description>Language Conventions</description>
+                <option/>
+                <option/>
+            </category>
+        </rubric>
+    </rubric>
+    <prompt>
+        <h4>Censorship in the Libraries</h4>
+        <p>"All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us." --Katherine Paterson, Author</p>
+        <p>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.</p>
+    </prompt>
+    <task>
+        <openended>
+            <openendedparam>
+                <initial_display>Enter essay here.</initial_display>
+                <answer_display>This is the answer.</answer_display>
+                <grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"}</grader_payload>
+            </openendedparam>
+        </openended>
+    </task>
+</combinedopenended>
diff --git a/common/test/acceptance/tests/data/ora_rubric.xml b/common/test/acceptance/tests/data/ora_rubric.xml
new file mode 100644
index 0000000000..5db0138ebe
--- /dev/null
+++ b/common/test/acceptance/tests/data/ora_rubric.xml
@@ -0,0 +1 @@
+<rubric><category><description>Writing Applications</description><score>0</score></category><category><description>Language Conventions</description><score>1</score></category></rubric>
diff --git a/common/test/acceptance/tests/helpers.py b/common/test/acceptance/tests/helpers.py
index ac3278f494..b5a84ed01b 100644
--- a/common/test/acceptance/tests/helpers.py
+++ b/common/test/acceptance/tests/helpers.py
@@ -1,7 +1,8 @@
 """
-Test helper functions.
+Test helper functions and base classes.
 """
 from path import path
+from bok_choy.web_app_test import WebAppTest


 def load_data_str(rel_path):
@@ -12,3 +13,32 @@ def load_data_str(rel_path):
     full_path = path(__file__).abspath().dirname() / "data" / rel_path    #pylint: disable=E1120
     with open(full_path) as data_file:
         return data_file.read()
+
+
+class UniqueCourseTest(WebAppTest):
+    """
+    Test that provides a unique course ID.
+    """
+
+    COURSE_ID_SEPARATOR = "/"
+
+    def __init__(self, *args, **kwargs):
+        """
+        Create a unique course ID.
+        """
+        self.course_info = {
+            'org': 'test_org',
+            'number': self.unique_id,
+            'run': 'test_run',
+            'display_name': 'Test Course ' + self.unique_id
+        }
+
+        super(UniqueCourseTest, self).__init__(*args, **kwargs)
+
+    @property
+    def course_id(self):
+        return self.COURSE_ID_SEPARATOR.join([
+            self.course_info['org'],
+            self.course_info['number'],
+            self.course_info['run']
+        ])
diff --git a/common/test/acceptance/tests/test_ora.py b/common/test/acceptance/tests/test_ora.py
index 29660b3c16..4459e80aad 100644
--- a/common/test/acceptance/tests/test_ora.py
+++ b/common/test/acceptance/tests/test_ora.py
@@ -2,79 +2,167 @@
 Tests for ORA (Open Response Assessment) through the LMS UI.
 """

-from bok_choy.web_app_test import WebAppTest
+import json
+from bok_choy.promise import fulfill, Promise
 from ..edxapp_pages.studio.auto_auth import AutoAuthPage
 from ..edxapp_pages.lms.course_info import CourseInfoPage
 from ..edxapp_pages.lms.tab_nav import TabNavPage
 from ..edxapp_pages.lms.course_nav import CourseNavPage
 from ..edxapp_pages.lms.open_response import OpenResponsePage
+from ..edxapp_pages.lms.progress import ProgressPage
 from ..fixtures.course import XBlockFixtureDesc, CourseFixture
+from ..fixtures.xqueue import XQueueResponseFixture

-from .helpers import load_data_str
+from .helpers import load_data_str, UniqueCourseTest


-class OpenResponseTest(WebAppTest):
+class OpenResponseTest(UniqueCourseTest):
     """
     Tests that interact with ORA (Open Response Assessment) through the LMS UI.
+    This base class sets up a course with open response problems and defines
+    some helper functions used in the ORA tests.
     """
+    page_object_classes = [
+        AutoAuthPage, CourseInfoPage, TabNavPage,
+        CourseNavPage, OpenResponsePage, ProgressPage
+    ]
+
+    # Grade response (dict) to return from the XQueue stub
+    # in response to our unique submission text.
+    XQUEUE_GRADE_RESPONSE = None

     def setUp(self):
         """
         Always start in the subsection with open response problems.
""" - - # Create a unique course ID - self.course_info = { - 'org': 'test_org', - 'number': self.unique_id, - 'run': 'test_run', - 'display_name': 'Test Course' + self.unique_id - } + # Create a unique submission + self.submission = "Test submission " + self.unique_id # Ensure fixtures are installed super(OpenResponseTest, self).setUp() # Log in and navigate to the essay problems - course_id = '{org}/{number}/{run}'.format(**self.course_info) - self.ui.visit('studio.auto_auth', course_id=course_id) - self.ui.visit('lms.course_info', course_id=course_id) + self.ui.visit('studio.auto_auth', course_id=self.course_id) + self.ui.visit('lms.course_info', course_id=self.course_id) self.ui['lms.tab_nav'].go_to_tab('Courseware') - self.ui['lms.course_nav'].go_to_section( - 'Example Week 2: Get Interactive', 'Homework - Essays' - ) - - @property - def page_object_classes(self): - return [AutoAuthPage, CourseInfoPage, TabNavPage, CourseNavPage, OpenResponsePage] @property def fixtures(self): """ Create a test course with open response problems. + Configure the XQueue stub to respond to submissions to the open-ended queue. """ + + # Configure the test course course_fix = CourseFixture( self.course_info['org'], self.course_info['number'], self.course_info['run'], self.course_info['display_name'] ) course_fix.add_children( + XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( - XBlockFixtureDesc('combinedopenended', 'Self-Assessed', data=load_data_str('ora_self_problem.xml')), - XBlockFixtureDesc('combinedopenended', 'AI-Assessed', data=load_data_str('ora_ai_problem.xml')) + + XBlockFixtureDesc('combinedopenended', 'Self-Assessed', + data=load_data_str('ora_self_problem.xml'), metadata={'graded': True}), + + XBlockFixtureDesc('combinedopenended', 'AI-Assessed', + data=load_data_str('ora_ai_problem.xml'), metadata={'graded': True}), + + XBlockFixtureDesc('combinedopenended', 'Peer-Assessed', + data=load_data_str('ora_peer_problem.xml'), metadata={'graded': True}), ) ) ) - return [course_fix] + # Configure the XQueue stub's response for the text we will submit + if self.XQUEUE_GRADE_RESPONSE is not None: + xqueue_fix = XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE) + return [course_fix, xqueue_fix] + + else: + return [course_fix] + + def submit_essay(self, expected_assessment_type, expected_prompt): + """ + Submit an essay and verify that the problem uses + the `expected_assessment_type` ("self", "ai", or "peer") and + shows the `expected_prompt` (a string). + """ + + # Check the assessment type and prompt + self.assertEqual(self.ui['lms.open_response'].assessment_type, expected_assessment_type) + self.assertIn(expected_prompt, self.ui['lms.open_response'].prompt) + + # Enter a submission, which will trigger a pre-defined response from the XQueue stub. + self.ui['lms.open_response'].set_response(self.submission) + + # Save the response and expect some UI feedback + self.ui['lms.open_response'].save_response() + self.assertEqual( + self.ui['lms.open_response'].alert_message, + "Answer saved, but not yet submitted." + ) + + # Submit the response + self.ui['lms.open_response'].submit_response() + + def get_asynch_feedback(self, assessment_type): + """ + Wait for and retrieve asynchronous feedback + (e.g. from AI, instructor, or peer grading) + `assessment_type` is either "ai" or "peer". 
+ """ + feedback_promise = Promise( + self._check_feedback_func(assessment_type), + 'Got feedback for {0} problem'.format(assessment_type) + ) + return fulfill(feedback_promise) + + def _check_feedback_func(self, assessment_type): + """ + Navigate away from, then return to, the peer problem to + receive updated feedback. + + The returned function will return a tuple `(is_success, rubric_feedback)`, + `is_success` is True iff we have received feedback for the problem; + `rubric_feedback` is a list of "correct" or "incorrect" strings. + """ + if assessment_type == 'ai': + section_name = 'AI-Assessed' + elif assessment_type == 'peer': + section_name = 'Peer-Assessed' + else: + raise ValueError('Assessment type not recognized. Must be either "ai" or "peer"') + + def _inner_check(): + self.ui['lms.course_nav'].go_to_sequential('Self-Assessed') + self.ui['lms.course_nav'].go_to_sequential(section_name) + feedback = self.ui['lms.open_response'].rubric_feedback + + # Successful if `feedback` is a non-empty list + return (bool(feedback), feedback) + + return _inner_check + + +class SelfAssessmentTest(OpenResponseTest): + """ + Test ORA self-assessment. + """ def test_self_assessment(self): """ - Test that the user can self-assess an essay. + Given I am viewing a self-assessment problem + When I submit an essay and complete a self-assessment rubric + Then I see a scored rubric + And I see my score in the progress page. """ # Navigate to the self-assessment problem and submit an essay self.ui['lms.course_nav'].go_to_sequential('Self-Assessed') - self._submit_essay('self', 'Censorship in the Libraries') + self.submit_essay('self', 'Censorship in the Libraries') # Check the rubric categories self.assertEqual( @@ -91,14 +179,42 @@ class OpenResponseTest(WebAppTest): ['incorrect', 'correct'] ) + # Verify the progress page + self.ui.visit('lms.progress', course_id=self.course_id) + scores = self.ui['lms.progress'].scores('Test Section', 'Test Subsection') + + # The first score is self-assessment, which we've answered, so it's 1/2 + # The other scores are AI- and peer-assessment, which we haven't answered so those are 0/2 + self.assertEqual(scores, [(1, 2), (0, 2), (0, 2)]) + + +class AIAssessmentTest(OpenResponseTest): + """ + Test ORA AI-assessment. + """ + + XQUEUE_GRADE_RESPONSE = { + 'score': 1, + 'feedback': {"spelling": "Ok.", "grammar": "Ok.", "markup_text": "NA"}, + 'grader_type': 'BC', + 'success': True, + 'grader_id': 1, + 'submission_id': 1, + 'rubric_scores_complete': True, + 'rubric_xml': load_data_str('ora_rubric.xml') + } + def test_ai_assessment(self): """ - Test that a user can submit an essay and receive AI feedback. + Given I am viewing an AI-assessment problem that has a trained ML model + When I submit an essay and wait for a response + Then I see a scored rubric + And I see my score in the progress page. """ # Navigate to the AI-assessment problem and submit an essay self.ui['lms.course_nav'].go_to_sequential('AI-Assessed') - self._submit_essay('ai', 'Censorship in the Libraries') + self.submit_essay('ai', 'Censorship in the Libraries') # Expect UI feedback that the response was submitted self.assertEqual( @@ -106,27 +222,85 @@ class OpenResponseTest(WebAppTest): "Your response has been submitted. Please check back later for your grade." 
) - def _submit_essay(self, expected_assessment_type, expected_prompt): + # Refresh the page to get the updated feedback + # then verify that we get the feedback sent by our stub XQueue implementation + self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct']) + + # Verify the progress page + self.ui.visit('lms.progress', course_id=self.course_id) + scores = self.ui['lms.progress'].scores('Test Section', 'Test Subsection') + + # First score is the self-assessment score, which we haven't answered, so it's 0/2 + # Second score is the AI-assessment score, which we have answered, so it's 1/2 + # Third score is peer-assessment, which we haven't answered, so it's 0/2 + self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)]) + + +class InstructorAssessmentTest(AIAssessmentTest): + """ + Test an AI-assessment that has been graded by an instructor. + This runs the exact same test as the AI-assessment test, except + that the feedback comes from an instructor instead of the machine grader. + From the student's perspective, it should look the same. + """ + + XQUEUE_GRADE_RESPONSE = { + 'score': 1, + 'feedback': {"feedback": "Good job!"}, + 'grader_type': 'IN', + 'success': True, + 'grader_id': 1, + 'submission_id': 1, + 'rubric_scores_complete': True, + 'rubric_xml': load_data_str('ora_rubric.xml') + } + + +class PeerFeedbackTest(OpenResponseTest): + """ + Test ORA peer-assessment. Note that this tests only *receiving* feedback, + not *giving* feedback -- those tests are located in another module. + """ + + # Unlike other assessment types, peer assessment has multiple scores + XQUEUE_GRADE_RESPONSE = { + 'score': [2, 2, 2], + 'feedback': [json.dumps({"feedback": ""})] * 3, + 'grader_type': 'PE', + 'success': True, + 'grader_id': [1, 2, 3], + 'submission_id': 1, + 'rubric_scores_complete': [True, True, True], + 'rubric_xml': [load_data_str('ora_rubric.xml')] * 3 + } + + def test_peer_assessment(self): """ - Submit an essay and verify that the problem uses - the `expected_assessment_type` ("self", "ai", or "peer") and - shows the `expected_prompt` (a string). + Given I have submitted an essay for peer-assessment + And enough other students have scored my essay + Then I can view the scores and written feedback + And I see my score in the progress page. """ + # Navigate to the peer-assessment problem and submit an essay + self.ui['lms.course_nav'].go_to_sequential('Peer-Assessed') + self.submit_essay('peer', 'Censorship in the Libraries') - # Check the assessment type and prompt - self.assertEqual(self.ui['lms.open_response'].assessment_type, expected_assessment_type) - self.assertIn(expected_prompt, self.ui['lms.open_response'].prompt) - - # Enter a response - essay = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut vehicula." - self.ui['lms.open_response'].set_response(essay) - - # Save the response and expect some UI feedback - self.ui['lms.open_response'].save_response() + # Expect UI feedback that the response was submitted self.assertEqual( - self.ui['lms.open_response'].alert_message, - "Answer saved, but not yet submitted." + self.ui['lms.open_response'].grader_status, + "Your response has been submitted. Please check back later for your grade." ) - # Submit the response - self.ui['lms.open_response'].submit_response() + # Refresh the page to get feedback from the stub XQueue grader. 
+ # We receive feedback from all three peers, each of which + # provide 2 scores (one for each rubric item) + self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3) + + # Verify the progress page + self.ui.visit('lms.progress', course_id=self.course_id) + scores = self.ui['lms.progress'].scores('Test Section', 'Test Subsection') + + # First score is the self-assessment score, which we haven't answered, so it's 0/2 + # Second score is the AI-assessment score, which we haven't answered, so it's 0/2 + # Third score is peer-assessment, which we have answered, so it's 2/2 + self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)]) diff --git a/common/test/acceptance/tests/test_studio.py b/common/test/acceptance/tests/test_studio.py index 9149d894a6..1d233a7bb2 100644 --- a/common/test/acceptance/tests/test_studio.py +++ b/common/test/acceptance/tests/test_studio.py @@ -22,6 +22,8 @@ from ..edxapp_pages.studio.signup import SignupPage from ..edxapp_pages.studio.textbooks import TextbooksPage from ..fixtures.course import CourseFixture +from .helpers import UniqueCourseTest + class LoggedOutTest(WebAppTest): """ @@ -59,26 +61,13 @@ class LoggedInPagesTest(WebAppTest): self.ui.visit('studio.dashboard') -class CoursePagesTest(WebAppTest): +class CoursePagesTest(UniqueCourseTest): """ Tests that verify the pages in Studio that you can get to when logged in and have a course. """ - def setUp(self): - """ - Create a unique identifier for the course used in this test. - """ - # Define a unique course identifier - self.course_info = { - 'org': 'test_org', - 'number': '101', - 'run': 'test_' + self.unique_id, - 'display_name': 'Test Course ' + self.unique_id - } - - # Ensure that the superclass sets up - super(CoursePagesTest, self).setUp() + COURSE_ID_SEPARATOR = "." 
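+
+    # Studio course URLs separate the org, number, and run with "."
+    # rather than "/", so override the UniqueCourseTest default.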
@property def page_object_classes(self): @@ -90,14 +79,13 @@ class CoursePagesTest(WebAppTest): @property def fixtures(self): - super_fixtures = super(CoursePagesTest, self).fixtures course_fix = CourseFixture( self.course_info['org'], self.course_info['number'], self.course_info['run'], self.course_info['display_name'] ) - return set(super_fixtures + [course_fix]) + return [course_fix] def test_page_existence(self): """ @@ -113,6 +101,6 @@ class CoursePagesTest(WebAppTest): # Log in self.ui.visit('studio.auto_auth', staff=True) - course_id = '{org}.{number}.{run}'.format(**self.course_info) + # Verify that each page is available for page in pages: - self.ui.visit('studio.{0}'.format(page), course_id=course_id) + self.ui.visit('studio.{0}'.format(page), course_id=self.course_id) diff --git a/lms/djangoapps/courseware/features/problems.py b/lms/djangoapps/courseware/features/problems.py index a6c6981eff..991f239f78 100644 --- a/lms/djangoapps/courseware/features/problems.py +++ b/lms/djangoapps/courseware/features/problems.py @@ -42,13 +42,15 @@ def view_problem(step, problem_type): def set_external_grader_response(step, correctness): assert(correctness in ['correct', 'incorrect']) - response_dict = {'correct': True if correctness == 'correct' else False, - 'score': 1 if correctness == 'correct' else 0, - 'msg': 'Your problem was graded %s' % correctness} + response_dict = { + 'correct': True if correctness == 'correct' else False, + 'score': 1 if correctness == 'correct' else 0, + 'msg': 'Your problem was graded {0}'.format(correctness) + } # Set the fake xqueue server to always respond # correct/incorrect when asked to grade a problem - world.xqueue.set_config('grade_response', response_dict) + world.xqueue.config['default'] = response_dict @step(u'I answer a "([^"]*)" problem "([^"]*)ly"') diff --git a/lms/djangoapps/courseware/features/video.py b/lms/djangoapps/courseware/features/video.py index c1c1437f34..65933bfc59 100644 --- a/lms/djangoapps/courseware/features/video.py +++ b/lms/djangoapps/courseware/features/video.py @@ -107,7 +107,7 @@ def add_video_to_course(course, player_mode, display_name='Video'): @step('youtube server is up and response time is (.*) seconds$') def set_youtube_response_timeout(_step, time): - world.youtube.set_config('time_to_response', float(time)) + world.youtube.config['time_to_response'] = float(time) @step('when I view the video it has rendered in (.*) mode$') diff --git a/lms/envs/bok_choy.auth.json b/lms/envs/bok_choy.auth.json index 858a6e154d..18c904d92a 100644 --- a/lms/envs/bok_choy.auth.json +++ b/lms/envs/bok_choy.auth.json @@ -107,7 +107,7 @@ "password": "password", "username": "lms" }, - "url": "http://localhost:18040" + "url": "** OVERRIDDEN **" }, "ZENDESK_API_KEY": "", "ZENDESK_USER": "" diff --git a/lms/envs/bok_choy.py b/lms/envs/bok_choy.py index cd4fac9aee..28a1d7d959 100644 --- a/lms/envs/bok_choy.py +++ b/lms/envs/bok_choy.py @@ -38,6 +38,9 @@ MONGO_MODULESTORE['OPTIONS']['fs_root'] = (TEST_ROOT / "data").abspath() XML_MODULESTORE = MODULESTORE['default']['OPTIONS']['stores']['xml'] XML_MODULESTORE['OPTIONS']['data_dir'] = (TEST_ROOT / "data").abspath() +# Configure the LMS to use our stub XQueue implementation +XQUEUE_INTERFACE['url'] = 'http://localhost:8040' + # Enable django-pipeline and staticfiles STATIC_ROOT = (TEST_ROOT / "staticfiles").abspath() PIPELINE = True diff --git a/rakelib/bok_choy.rake b/rakelib/bok_choy.rake index bd64e36173..4128fe9934 100644 --- a/rakelib/bok_choy.rake +++ b/rakelib/bok_choy.rake @@ -29,22 +29,40 
@@ BOK_CHOY_SERVERS = { :cms => { :port => 8031, :log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_studio.log") } } +BOK_CHOY_STUBS = { + :xqueue => { :port => 8040, :log => File.join(BOK_CHOY_LOG_DIR, "bok_choy_xqueue.log") } +} + +# For the time being, stubs are used by both the bok-choy and lettuce acceptance tests +# For this reason, the stubs package is currently located in the Django app called "terrain" +# where other lettuce configuration is stored. +BOK_CHOY_STUB_DIR = File.join(REPO_ROOT, "common", "djangoapps", "terrain") + BOK_CHOY_CACHE = Dalli::Client.new('localhost:11211') -# Start the server we will run tests on +# Start the servers we will run tests on def start_servers() BOK_CHOY_SERVERS.each do | service, info | address = "0.0.0.0:#{info[:port]}" cmd = "coverage run --rcfile=#{BOK_CHOY_COVERAGE_RC} -m manage #{service} --settings bok_choy runserver #{address} --traceback --noreload" singleton_process(cmd, logfile=info[:log]) end + + BOK_CHOY_STUBS.each do | service, info | + Dir.chdir(BOK_CHOY_STUB_DIR) do + singleton_process( + "python -m stubs.start #{service} #{info[:port]}", + logfile=info[:log] + ) + end + end end # Wait until we get a successful response from the servers or time out def wait_for_test_servers() - BOK_CHOY_SERVERS.each do | service, info | + BOK_CHOY_SERVERS.merge(BOK_CHOY_STUBS).each do | service, info | ready = wait_for_server("0.0.0.0", info[:port]) if not ready fail("Could not contact #{service} test server")
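
How the pieces fit together, as a minimal sketch of a bok-choy test that configures the stub grader (illustrative only: `ExampleXQueueTest`, the submission text, and the grade values are hypothetical; `XQueueResponseFixture`, the /set_config protocol, and the port assignment come from this patch):

    # The rake task starts the stub automatically on port 8040; by hand:
    #     cd common/djangoapps/terrain && python -m stubs.start xqueue 8040

    from bok_choy.web_app_test import WebAppTest
    from ..fixtures.xqueue import XQueueResponseFixture


    class ExampleXQueueTest(WebAppTest):
        """
        Hypothetical test showing how a grade response is stubbed out.
        """

        @property
        def fixtures(self):
            # The stub matches this text against each submission's xqueue_body;
            # on a match it POSTs `grade` back to the LMS callback URL after
            # the configured 'response_delay'.  The stub wraps 'msg' in
            # <div> tags so the response stays valid XML.
            grade = {'correct': True, 'score': 1, 'msg': 'Well done!'}
            return [XQueueResponseFixture('example submission text', grade)]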