diff --git a/common/djangoapps/terrain/mock_xqueue_server.py b/common/djangoapps/terrain/mock_xqueue_server.py
index 50d77a2f19..49fee5167a 100644
--- a/common/djangoapps/terrain/mock_xqueue_server.py
+++ b/common/djangoapps/terrain/mock_xqueue_server.py
@@ -2,7 +2,11 @@ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import json
import urllib
import urlparse
+import threading
+import time
+from logging import getLogger
+logger = getLogger(__name__)
class MockXQueueRequestHandler(BaseHTTPRequestHandler):
'''
@@ -16,11 +19,10 @@ class MockXQueueRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
'''
- Handle a POST request from the client, interpreted
- as either a login request or a submission for grading request.
+ Handle a POST request from the client
Sends back an immediate success/failure response.
- If grading is required, it then POSTS back to the client
+ It then POSTS back to the client
with grading results, as configured in MockXQueueServer.
'''
self._send_head()
@@ -28,21 +30,60 @@ class MockXQueueRequestHandler(BaseHTTPRequestHandler):
# Retrieve the POST data
post_dict = self._post_dict()
- # Send a response indicating success/failure
- success = self._send_immediate_response(post_dict)
+ # Log the request
+ logger.debug("XQueue received POST request %s to path %s" %
+ (str(post_dict), self.path))
+
+ # Respond only to grading requests
+ if self._is_grade_request():
+ try:
+ xqueue_header = json.loads(post_dict['xqueue_header'])
+ xqueue_body = json.loads(post_dict['xqueue_body'])
+
+ callback_url = xqueue_header['lms_callback_url']
+
+ except KeyError:
+ # If the message doesn't have a header or body,
+ # then it's malformed.
+ # Respond with failure
+ error_msg = "XQueue received invalid grade request"
+ self._send_immediate_response(False, message=error_msg)
+
+ except ValueError:
+ # If we could not decode the body or header,
+ # respond with failure
+
+ error_msg = "XQueue could not decode grade request"
+ self._send_immediate_response(False, message=error_msg)
+
+ else:
+ # Send an immediate response of success
+ # The grade request is formed correctly
+ self._send_immediate_response(True)
+
+ # Wait a bit before POSTing back to the callback url with the
+ # grade result configured by the server
+ # Otherwise, the problem will not realize it's
+ # queued and it will keep waiting for a response
+ # indefinitely
+ delayed_grade_func = lambda: self._send_grade_response(callback_url,
+ xqueue_header)
+
+ timer = threading.Timer(2, delayed_grade_func)
+ timer.start()
+
+ # If we get a request that's not to the grading submission
+ # URL, return an error
+ else:
+ error_message = "Invalid request URL"
+ self._send_immediate_response(False, message=error_message)
- # If the client submitted a valid submission request,
- # we need to post back to the callback url
- # with the grading result
- if success and self._is_grade_request():
- self._send_grade_response(post_dict['lms_callback_url'],
- post_dict['lms_key'])
def _send_head(self):
'''
Send the response code and MIME headers
'''
- if self._is_login_request() or self._is_grade_request():
+ if self._is_grade_request():
self.send_response(200)
else:
self.send_response(500)
@@ -78,47 +119,34 @@ class MockXQueueRequestHandler(BaseHTTPRequestHandler):
return post_dict
- def _send_immediate_response(self, post_dict):
+ def _send_immediate_response(self, success, message=""):
'''
- Check the post_dict for the appropriate fields
- for this request (login or grade submission)
- If it finds them, inform the client of success.
- Otherwise, inform the client of failure
+ Send an immediate success/failure message
+ back to the client
'''
- # Allow any user to log in, as long as the POST
- # dict has a username and password
- if self._is_login_request():
- success = 'username' in post_dict and 'password' in post_dict
-
- elif self._is_grade_request():
- success = ('lms_callback_url' in post_dict and
- 'lms_key' in post_dict and
- 'queue_name' in post_dict)
- else:
- success = False
-
# Send the response indicating success/failure
response_str = json.dumps({'return_code': 0 if success else 1,
- 'content': '' if success else 'Error'})
+ 'content': message})
+
+ # Log the response
+ logger.debug("XQueue: sent response %s" % response_str)
self.wfile.write(response_str)
- return success
-
- def _send_grade_response(self, postback_url, queuekey):
+ def _send_grade_response(self, postback_url, xqueue_header):
'''
POST the grade response back to the client
using the response provided by the server configuration
'''
- response_dict = {'queuekey': queuekey,
- 'xqueue_body': self.server.grade_response}
+ response_dict = {'xqueue_header': json.dumps(xqueue_header),
+ 'xqueue_body': json.dumps(self.server.grade_response())}
+
+ # Log the response
+ logger.debug("XQueue: sent grading response %s" % str(response_dict))
MockXQueueRequestHandler.post_to_url(postback_url, response_dict)
- def _is_login_request(self):
- return 'xqueue/login' in self.path
-
def _is_grade_request(self):
return 'xqueue/submit' in self.path
@@ -138,7 +166,8 @@ class MockXQueueServer(HTTPServer):
to POST requests to localhost.
'''
- def __init__(self, port_num, grade_response_dict):
+ def __init__(self, port_num,
+ grade_response_dict={'correct':True, 'score': 1, 'msg': ''}):
'''
Initialize the mock XQueue server instance.
@@ -148,18 +177,36 @@ class MockXQueueServer(HTTPServer):
and sent in response to XQueue grading requests.
'''
- self.grade_response = grade_response_dict
+ self.set_grade_response(grade_response_dict)
handler = MockXQueueRequestHandler
address = ('', port_num)
HTTPServer.__init__(self, address, handler)
- @property
+ def shutdown(self):
+ '''
+ Stop the server and free up the port
+ '''
+ # First call superclass shutdown()
+ HTTPServer.shutdown(self)
+
+ # We also need to manually close the socket
+ self.socket.close()
+
def grade_response(self):
return self._grade_response
- @grade_response.setter
- def grade_response(self, grade_response_dict):
+ def set_grade_response(self, grade_response_dict):
+
+ # Check that the grade response has the right keys
+ assert('correct' in grade_response_dict and
+ 'score' in grade_response_dict and
+ 'msg' in grade_response_dict)
+
+    # Wrap the message in <div> tags to ensure that it is valid XML
+    grade_response_dict['msg'] = "<div>%s</div>" % grade_response_dict['msg']
+
+ # Save the response dictionary
self._grade_response = grade_response_dict
@@ -190,16 +237,6 @@ class MockXQueueServerTest(unittest.TestCase):
# Stop the server, freeing up the port
self.server.shutdown()
- self.server.socket.close()
-
- def test_login_request(self):
-
- # Send a login request
- login_request = {'username': 'Test', 'password': 'Test'}
- response_handle = urllib.urlopen(self.server_url + '/xqueue/login',
- urllib.urlencode(login_request))
- response_dict = json.loads(response_handle.read())
- self.assertEqual(response_dict['return_code'], 0)
def test_grade_request(self):
@@ -209,19 +246,33 @@ class MockXQueueServerTest(unittest.TestCase):
# Send a grade request
callback_url = 'http://127.0.0.1:8000/test_callback'
- grade_request = {'lms_callback_url': callback_url,
- 'lms_key': 'test_queuekey',
- 'queue_name': 'test_queue'}
+
+ grade_header = json.dumps({'lms_callback_url': callback_url,
+ 'lms_key': 'test_queuekey',
+ 'queue_name': 'test_queue'})
+
+ grade_body = json.dumps({'student_info': 'test',
+ 'grader_payload': 'test',
+ 'student_response': 'test'})
+
+ grade_request = {'xqueue_header': grade_header,
+ 'xqueue_body': grade_body}
+
response_handle = urllib.urlopen(self.server_url + '/xqueue/submit',
urllib.urlencode(grade_request))
+
response_dict = json.loads(response_handle.read())
# Expect that the response is success
self.assertEqual(response_dict['return_code'], 0)
+ # Wait a bit before checking that the server posted back
+ time.sleep(3)
+
# Expect that the server tries to post back the grading info
- expected_callback_dict = {'queuekey': 'test_queuekey',
- 'xqueue_body': {'correct': True,
- 'score': 1, 'msg': ''}}
+    xqueue_body = json.dumps({'correct': True, 'score': 1,
+                              'msg': '<div></div>'})
+ expected_callback_dict = {'xqueue_header': grade_header,
+ 'xqueue_body': xqueue_body }
MockXQueueRequestHandler.post_to_url.assert_called_with(callback_url,
expected_callback_dict)
diff --git a/common/djangoapps/terrain/steps.py b/common/djangoapps/terrain/steps.py
index 52eeb23c4a..b58c9fa7f2 100644
--- a/common/djangoapps/terrain/steps.py
+++ b/common/djangoapps/terrain/steps.py
@@ -10,6 +10,8 @@ import time
import re
import os.path
+from .xqueue_setup import *
+
from logging import getLogger
logger = getLogger(__name__)
diff --git a/common/djangoapps/terrain/xqueue_setup.py b/common/djangoapps/terrain/xqueue_setup.py
new file mode 100644
index 0000000000..003e4ebfa3
--- /dev/null
+++ b/common/djangoapps/terrain/xqueue_setup.py
@@ -0,0 +1,31 @@
+from mock_xqueue_server import MockXQueueServer
+from lettuce import before, after, world
+from django.conf import settings
+import threading
+
+@before.all
+def setup_mock_xqueue_server():
+
+ # Retrieve the local port from settings
+ server_port = settings.XQUEUE_PORT
+
+ # Create the mock server instance
+ server = MockXQueueServer(server_port)
+
+ # Start the server running in a separate daemon thread
+ # Because the thread is a daemon, it will terminate
+ # when the main thread terminates.
+ server_thread = threading.Thread(target=server.serve_forever)
+ server_thread.daemon = True
+ server_thread.start()
+
+ # Store the server instance in lettuce's world
+ # so that other steps can access it
+ # (and we can shut it down later)
+ world.xqueue_server = server
+
+@after.all
+def teardown_mock_xqueue_server(total):
+
+ # Stop the xqueue server and free up the port
+ world.xqueue_server.shutdown()
diff --git a/lms/djangoapps/courseware/features/problems.feature b/lms/djangoapps/courseware/features/problems.feature
index a7fbac49c7..8ae03efb92 100644
--- a/lms/djangoapps/courseware/features/problems.feature
+++ b/lms/djangoapps/courseware/features/problems.feature
@@ -1,10 +1,11 @@
-Feature: Answer choice problems
+Feature: Answer problems
As a student in an edX course
In order to test my understanding of the material
- I want to answer choice based problems
+ I want to answer problems
Scenario: I can answer a problem correctly
-    Given I am viewing a "<ProblemType>" problem
+ Given External graders respond "correct"
+    And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "correctly"
Then My "<ProblemType>" answer is marked "correct"
@@ -17,9 +18,11 @@ Feature: Answer choice problems
| numerical |
| formula |
| script |
+ | code |
Scenario: I can answer a problem incorrectly
- Given I am viewing a "" problem
+ Given External graders respond "incorrect"
+    And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "incorrectly"
Then My "<ProblemType>" answer is marked "incorrect"
@@ -32,6 +35,7 @@ Feature: Answer choice problems
| numerical |
| formula |
| script |
+ | code |
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
diff --git a/lms/djangoapps/courseware/features/problems.feature.bak b/lms/djangoapps/courseware/features/problems.feature.bak
new file mode 100644
index 0000000000..1bb6eb087b
--- /dev/null
+++ b/lms/djangoapps/courseware/features/problems.feature.bak
@@ -0,0 +1,86 @@
+Feature: Answer problems
+ As a student in an edX course
+ In order to test my understanding of the material
+ I want to answer problems
+
+ Scenario: I can answer a problem correctly
+    Given I am viewing a "<ProblemType>" problem
+    When I answer a "<ProblemType>" problem "correctly"
+    Then My "<ProblemType>" answer is marked "correct"
+
+ Examples:
+ | ProblemType |
+ | drop down |
+ | multiple choice |
+ | checkbox |
+ | string |
+ | numerical |
+ | formula |
+ | script |
+
+ Scenario: I can answer a problem incorrectly
+    Given I am viewing a "<ProblemType>" problem
+    When I answer a "<ProblemType>" problem "incorrectly"
+    Then My "<ProblemType>" answer is marked "incorrect"
+
+ Examples:
+ | ProblemType |
+ | drop down |
+ | multiple choice |
+ | checkbox |
+ | string |
+ | numerical |
+ | formula |
+ | script |
+
+ Scenario: I can submit a blank answer
+    Given I am viewing a "<ProblemType>" problem
+    When I check a problem
+    Then My "<ProblemType>" answer is marked "incorrect"
+
+ Examples:
+ | ProblemType |
+ | drop down |
+ | multiple choice |
+ | checkbox |
+ | string |
+ | numerical |
+ | formula |
+ | script |
+
+
+ Scenario: I can reset a problem
+    Given I am viewing a "<ProblemType>" problem
+    And I answer a "<ProblemType>" problem "<Correctness>ly"
+    When I reset the problem
+    Then My "<ProblemType>" answer is marked "unanswered"
+
+ Examples:
+ | ProblemType | Correctness |
+ | drop down | correct |
+ | drop down | incorrect |
+ | multiple choice | correct |
+ | multiple choice | incorrect |
+ | checkbox | correct |
+ | checkbox | incorrect |
+ | string | correct |
+ | string | incorrect |
+ | numerical | correct |
+ | numerical | incorrect |
+ | formula | correct |
+ | formula | incorrect |
+ | script | correct |
+ | script | incorrect |
+
+
+ Scenario: I can answer a code-based problem
+ Given I am viewing a "code" problem
+    And External graders respond "<Correctness>" with message "Test Message"
+    When I answer a "code" problem "<Correctness>ly"
+    Then My "code" answer is marked "<Correctness>"
+ And I should see "Test Message" somewhere in the page
+
+ Examples:
+ | Correctness |
+ | correct |
+ | incorrect |
diff --git a/lms/djangoapps/courseware/features/problems.py b/lms/djangoapps/courseware/features/problems.py
index a6575c3d22..96c2f446d3 100644
--- a/lms/djangoapps/courseware/features/problems.py
+++ b/lms/djangoapps/courseware/features/problems.py
@@ -1,14 +1,17 @@
from lettuce import world, step
from lettuce.django import django_url
from selenium.webdriver.support.ui import Select
+from selenium.common.exceptions import WebDriverException
import random
import textwrap
+import time
from common import i_am_registered_for_the_course, TEST_SECTION_NAME, section_location
from terrain.factories import ItemFactory
from capa.tests.response_xml_factory import OptionResponseXMLFactory, \
ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \
StringResponseXMLFactory, NumericalResponseXMLFactory, \
- FormulaResponseXMLFactory, CustomResponseXMLFactory
+ FormulaResponseXMLFactory, CustomResponseXMLFactory, \
+ CodeResponseXMLFactory
# Factories from capa.tests.response_xml_factory that we will use
# to generate the problem XML, with the keyword args used to configure
@@ -77,7 +80,13 @@ PROBLEM_FACTORY_DICT = {
a1=0
a2=0
return (a1+a2)==int(expect)
- """)}},
+ """) }},
+ 'code': {
+ 'factory': CodeResponseXMLFactory(),
+ 'kwargs': {
+ 'question_text': 'Submit code to an external grader',
+ 'initial_display': 'print "Hello world!"',
+ 'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', }},
}
@@ -115,6 +124,18 @@ def view_problem(step, problem_type):
world.browser.visit(url)
+@step(u'External graders respond "([^"]*)"')
+def set_external_grader_response(step, correctness):
+ assert(correctness in ['correct', 'incorrect'])
+
+ response_dict = {'correct': True if correctness == 'correct' else False,
+ 'score': 1 if correctness == 'correct' else 0,
+ 'msg': 'Your problem was graded %s' % correctness}
+
+ # Set the fake xqueue server to always respond
+ # correct/incorrect when asked to grade a problem
+ world.xqueue_server.set_grade_response(response_dict)
+
@step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
def answer_problem(step, problem_type, correctness):
@@ -169,13 +190,32 @@ def answer_problem(step, problem_type, correctness):
inputfield('script', input_num=1).fill(str(first_addend))
inputfield('script', input_num=2).fill(str(second_addend))
+ elif problem_type == 'code':
+ # The fake xqueue server is configured to respond
+ # correct / incorrect no matter what we submit.
+ # Furthermore, since the inline code response uses
+ # JavaScript to make the code display nicely, it's difficult
+ # to programatically input text
+ # (there's not