diff --git a/common/djangoapps/terrain/steps.py b/common/djangoapps/terrain/steps.py
index 52eeb23c4a..371496f823 100644
--- a/common/djangoapps/terrain/steps.py
+++ b/common/djangoapps/terrain/steps.py
@@ -9,6 +9,7 @@ from bs4 import BeautifulSoup
 import time
 import re
 import os.path
+from selenium.common.exceptions import WebDriverException
 from logging import getLogger
 
 logger = getLogger(__name__)
@@ -214,3 +215,15 @@ def save_the_course_content(path='/tmp'):
     f = open('%s/%s' % (path, filename), 'w')
     f.write(output)
     f.close
+
+@world.absorb
+def css_click(css_selector):
+    try:
+        world.browser.find_by_css(css_selector).click()
+
+    except WebDriverException:
+        # Occasionally, MathJax or other JavaScript can cover up
+        # an element temporarily.
+        # If this happens, wait a second, then try again
+        time.sleep(1)
+        world.browser.find_by_css(css_selector).click()
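Note: the `css_click` helper added above retries a click that was intercepted by late-running JavaScript. A minimal standalone sketch of the same pattern, assuming a Splinter-style browser object with `find_by_css`; the helper name and the `attempts`/`delay` parameters are illustrative, not part of this diff:

```python
import time

from selenium.common.exceptions import WebDriverException


def css_click_with_retry(browser, css_selector, attempts=2, delay=1):
    """Click the first element matching css_selector, retrying briefly
    when overlapping JavaScript (e.g. MathJax) intercepts the click.
    """
    for attempt in range(attempts):
        try:
            browser.find_by_css(css_selector).click()
            return
        except WebDriverException:
            if attempt == attempts - 1:
                raise  # out of retries; let the failure surface
            time.sleep(delay)
```

A fixed `time.sleep` is the simplest fix; polling at a shorter interval inside an explicit wait would make the same test both faster and less flaky.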
diff --git a/lms/djangoapps/courseware/features/problems.feature b/lms/djangoapps/courseware/features/problems.feature
index a7fbac49c7..efeb338c45 100644
--- a/lms/djangoapps/courseware/features/problems.feature
+++ b/lms/djangoapps/courseware/features/problems.feature
@@ -1,10 +1,11 @@
-Feature: Answer choice problems
+Feature: Answer problems
     As a student in an edX course
     In order to test my understanding of the material
-    I want to answer choice based problems
+    I want to answer problems
 
     Scenario: I can answer a problem correctly
-        Given I am viewing a "<ProblemType>" problem
+        Given External graders respond "correct"
+        And I am viewing a "<ProblemType>" problem
         When I answer a "<ProblemType>" problem "correctly"
         Then My "<ProblemType>" answer is marked "correct"
 
@@ -17,9 +18,11 @@ Feature: Answer choice problems
         | numerical |
         | formula |
         | script |
+        | code |
 
     Scenario: I can answer a problem incorrectly
-        Given I am viewing a "<ProblemType>" problem
+        Given External graders respond "incorrect"
+        And I am viewing a "<ProblemType>" problem
         When I answer a "<ProblemType>" problem "incorrectly"
         Then My "<ProblemType>" answer is marked "incorrect"
 
@@ -32,6 +35,7 @@ Feature: Answer choice problems
         | numerical |
         | formula |
         | script |
+        | code |
 
     Scenario: I can submit a blank answer
         Given I am viewing a "<ProblemType>" problem
diff --git a/lms/djangoapps/courseware/features/problems.py b/lms/djangoapps/courseware/features/problems.py
index a6575c3d22..715e2689fb 100644
--- a/lms/djangoapps/courseware/features/problems.py
+++ b/lms/djangoapps/courseware/features/problems.py
@@ -1,14 +1,15 @@
 from lettuce import world, step
 from lettuce.django import django_url
-from selenium.webdriver.support.ui import Select
 import random
 import textwrap
+import time
 from common import i_am_registered_for_the_course, TEST_SECTION_NAME, section_location
 from terrain.factories import ItemFactory
 from capa.tests.response_xml_factory import OptionResponseXMLFactory, \
     ChoiceResponseXMLFactory, MultipleChoiceResponseXMLFactory, \
     StringResponseXMLFactory, NumericalResponseXMLFactory, \
-    FormulaResponseXMLFactory, CustomResponseXMLFactory
+    FormulaResponseXMLFactory, CustomResponseXMLFactory, \
+    CodeResponseXMLFactory
 
 # Factories from capa.tests.response_xml_factory that we will use
 # to generate the problem XML, with the keyword args used to configure
@@ -78,6 +79,12 @@
                 a2=0
                 return (a1+a2)==int(expect)
                 """)}},
+    'code': {
+        'factory': CodeResponseXMLFactory(),
+        'kwargs': {
+            'question_text': 'Submit code to an external grader',
+            'initial_display': 'print "Hello world!"',
+            'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', }},
 }
 
 
@@ -116,6 +123,19 @@
     world.browser.visit(url)
 
 
+@step(u'External graders respond "([^"]*)"')
+def set_external_grader_response(step, correctness):
+    assert(correctness in ['correct', 'incorrect'])
+
+    response_dict = {'correct': True if correctness == 'correct' else False,
+                     'score': 1 if correctness == 'correct' else 0,
+                     'msg': 'Your problem was graded %s' % correctness}
+
+    # Set the fake xqueue server to always respond
+    # correct/incorrect when asked to grade a problem
+    world.xqueue_server.set_grade_response(response_dict)
+
+
 @step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
 def answer_problem(step, problem_type, correctness):
     """ Mark a given problem type correct or incorrect, then submit it.
@@ -169,18 +189,29 @@ def answer_problem(step, problem_type, correctness):
         inputfield('script', input_num=1).fill(str(first_addend))
         inputfield('script', input_num=2).fill(str(second_addend))
 
+    elif problem_type == 'code':
+        # The fake xqueue server is configured to respond
+        # correct / incorrect no matter what we submit.
+        # Furthermore, since the inline code response uses
+        # JavaScript to make the code display nicely, it's difficult
+        # to programmatically input text
+        # (there's not
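Note: the `External graders respond` step above drives `world.xqueue_server.set_grade_response(...)`, a fake XQueue server assumed to be started elsewhere in the test harness. A minimal sketch of what such a stub could look like, written in modern Python for brevity; only the `set_grade_response` name and the `correct`/`score`/`msg` fields come from this diff, the HTTP plumbing and class names are assumptions, and the real XQueue protocol wraps replies in an additional envelope:

```python
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread


class FakeXQueueHandler(BaseHTTPRequestHandler):
    # Canned reply, installed via FakeXQueueServer.set_grade_response().
    grade_response = {'correct': True, 'score': 1, 'msg': 'ok'}

    def do_POST(self):
        # Consume the submission, then reply with the canned grade
        # regardless of what was submitted.
        self.rfile.read(int(self.headers.get('Content-Length', 0)))
        body = json.dumps(self.grade_response).encode('utf-8')
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Content-Length', str(len(body)))
        self.end_headers()
        self.wfile.write(body)


class FakeXQueueServer(object):
    """Serve canned grades on a background thread."""

    def __init__(self, port=0):
        self._server = HTTPServer(('localhost', port), FakeXQueueHandler)
        Thread(target=self._server.serve_forever, daemon=True).start()

    def set_grade_response(self, response_dict):
        # Called by the lettuce step to force correct/incorrect grading.
        FakeXQueueHandler.grade_response = response_dict
```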