diff --git a/.gitignore b/.gitignore index 2fd1ca0181..8fb170c30f 100644 --- a/.gitignore +++ b/.gitignore @@ -27,4 +27,7 @@ lms/lib/comment_client/python nosetests.xml cover_html/ .idea/ -chromedriver.log \ No newline at end of file +.redcar/ +chromedriver.log +/nbproject +ghostdriver.log diff --git a/.pylintrc b/.pylintrc index ce2f2e3b87..6690bb7df0 100644 --- a/.pylintrc +++ b/.pylintrc @@ -12,7 +12,7 @@ profile=no # Add files or directories to the blacklist. They should be base names, not # paths. -ignore=CVS +ignore=CVS, migrations # Pickle collected data for later comparisons. persistent=yes @@ -33,7 +33,15 @@ load-plugins= # can either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). -disable=E1102,W0142 +disable= +# W0141: Used builtin function 'map' +# W0142: Used * or ** magic +# R0201: Method could be a function +# R0901: Too many ancestors +# R0902: Too many instance attributes +# R0903: Too few public methods (1/2) +# R0904: Too many public methods + W0141,W0142,R0201,R0901,R0902,R0903,R0904 [REPORTS] @@ -43,7 +51,7 @@ disable=E1102,W0142 output-format=text # Include message's id in output -include-ids=no +include-ids=yes # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. 
Reports (if any) will be @@ -97,7 +105,7 @@ bad-functions=map,filter,apply,input module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|log|urlpatterns)$ # Regular expression which should only match correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ @@ -106,7 +114,7 @@ class-rgx=[A-Z_][a-zA-Z0-9]+$ function-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ +method-rgx=([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$ # Regular expression which should only match correct instance attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ diff --git a/.ruby-version b/.ruby-version index dd472cffa2..311baaf3e2 100644 --- a/.ruby-version +++ b/.ruby-version @@ -1 +1 @@ -1.8.7-p371 \ No newline at end of file +1.9.3-p374 diff --git a/Gemfile b/Gemfile index 43a9f6e2b1..7f7b146978 100644 --- a/Gemfile +++ b/Gemfile @@ -1,4 +1,4 @@ -source :rubygems +source 'https://rubygems.org' gem 'rake', '~> 10.0.3' gem 'sass', '3.1.15' gem 'bourbon', '~> 1.3.6' diff --git a/cms/.coveragerc b/cms/.coveragerc index dbc6203c87..4f0dbebe79 100644 --- a/cms/.coveragerc +++ b/cms/.coveragerc @@ -2,7 +2,7 @@ [run] data_file = reports/cms/.coverage source = cms,common/djangoapps -omit = cms/envs/*, cms/manage.py, common/djangoapps/*/migrations/* +omit = cms/envs/*, cms/manage.py, common/djangoapps/terrain/*, common/djangoapps/*/migrations/* [report] ignore_errors = True diff --git a/cms/djangoapps/contentstore/__init__.py b/cms/djangoapps/contentstore/__init__.py index e8dccbbf60..8b13789179 100644 --- a/cms/djangoapps/contentstore/__init__.py +++ b/cms/djangoapps/contentstore/__init__.py @@ -1,3 +1 @@ -from xmodule.templates import update_templates -update_templates() diff --git a/cms/djangoapps/contentstore/course_info_model.py 
b/cms/djangoapps/contentstore/course_info_model.py index 153d13dd13..8c8aed549d 100644 --- a/cms/djangoapps/contentstore/course_info_model.py +++ b/cms/djangoapps/contentstore/course_info_model.py @@ -1,7 +1,7 @@ from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore -from lxml import html +from lxml import html, etree import re from django.http import HttpResponseBadRequest import logging @@ -26,9 +26,9 @@ def get_course_updates(location): # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break. try: - course_html_parsed = html.fromstring(course_updates.definition['data']) - except: - course_html_parsed = html.fromstring("
    ") + course_html_parsed = etree.fromstring(course_updates.data) + except etree.XMLSyntaxError: + course_html_parsed = etree.fromstring("
      ") # Confirm that root is
        , iterate over
      1. , pull out

        subs and then rest of val course_upd_collection = [] @@ -60,13 +60,13 @@ def update_course_updates(location, update, passed_id=None): try: course_updates = modulestore('direct').get_item(location) except ItemNotFoundError: - return HttpResponseBadRequest + return HttpResponseBadRequest() # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break. try: - course_html_parsed = html.fromstring(course_updates.definition['data']) - except: - course_html_parsed = html.fromstring("
          ") + course_html_parsed = etree.fromstring(course_updates.data) + except etree.XMLSyntaxError: + course_html_parsed = etree.fromstring("
            ") # No try/catch b/c failure generates an error back to client new_html_parsed = html.fromstring('
          1. ' + update['date'] + '

            ' + update['content'] + '
          2. ') @@ -85,13 +85,12 @@ def update_course_updates(location, update, passed_id=None): passed_id = course_updates.location.url() + "/" + str(idx) # update db record - course_updates.definition['data'] = html.tostring(course_html_parsed) - modulestore('direct').update_item(location, course_updates.definition['data']) - - return {"id": passed_id, - "date": update['date'], - "content": update['content']} + course_updates.data = etree.tostring(course_html_parsed) + modulestore('direct').update_item(location, course_updates.data) + return {"id" : passed_id, + "date" : update['date'], + "content" :update['content']} def delete_course_update(location, update, passed_id): """ @@ -99,19 +98,19 @@ def delete_course_update(location, update, passed_id): Returns the resulting course_updates b/c their ids change. """ if not passed_id: - return HttpResponseBadRequest + return HttpResponseBadRequest() try: course_updates = modulestore('direct').get_item(location) except ItemNotFoundError: - return HttpResponseBadRequest + return HttpResponseBadRequest() # TODO use delete_blank_text parser throughout and cache as a static var in a class # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break. try: - course_html_parsed = html.fromstring(course_updates.definition['data']) - except: - course_html_parsed = html.fromstring("
              ") + course_html_parsed = etree.fromstring(course_updates.data) + except etree.XMLSyntaxError: + course_html_parsed = etree.fromstring("
                ") if course_html_parsed.tag == 'ol': # ??? Should this use the id in the json or in the url or does it matter? @@ -122,9 +121,9 @@ def delete_course_update(location, update, passed_id): course_html_parsed.remove(element_to_delete) # update db record - course_updates.definition['data'] = html.tostring(course_html_parsed) + course_updates.data = etree.tostring(course_html_parsed) store = modulestore('direct') - store.update_item(location, course_updates.definition['data']) + store.update_item(location, course_updates.data) return get_course_updates(location) diff --git a/cms/djangoapps/contentstore/features/advanced-settings.feature b/cms/djangoapps/contentstore/features/advanced-settings.feature new file mode 100644 index 0000000000..af97709ad0 --- /dev/null +++ b/cms/djangoapps/contentstore/features/advanced-settings.feature @@ -0,0 +1,42 @@ +Feature: Advanced (manual) course policy + In order to specify course policy settings for which no custom user interface exists + I want to be able to manually enter JSON key/value pairs + + Scenario: A course author sees default advanced settings + Given I have opened a new course in Studio + When I select the Advanced Settings + Then I see default advanced settings + + Scenario: Add new entries, and they appear alphabetically after save + Given I am on the Advanced Course Settings page in Studio + Then the settings are alphabetized + + Scenario: Test cancel editing key value + Given I am on the Advanced Course Settings page in Studio + When I edit the value of a policy key + And I press the "Cancel" notification button + Then the policy key value is unchanged + And I reload the page + Then the policy key value is unchanged + + Scenario: Test editing key value + Given I am on the Advanced Course Settings page in Studio + When I edit the value of a policy key + And I press the "Save" notification button + Then the policy key value is changed + And I reload the page + Then the policy key value is changed + + 
Scenario: Test how multi-line input appears + Given I am on the Advanced Course Settings page in Studio + When I create a JSON object as a value + Then it is displayed as formatted + And I reload the page + Then it is displayed as formatted + + Scenario: Test automatic quoting of non-JSON values + Given I am on the Advanced Course Settings page in Studio + When I create a non-JSON value not in quotes + Then it is displayed as a string + And I reload the page + Then it is displayed as a string diff --git a/cms/djangoapps/contentstore/features/advanced-settings.py b/cms/djangoapps/contentstore/features/advanced-settings.py new file mode 100644 index 0000000000..7e86e94a31 --- /dev/null +++ b/cms/djangoapps/contentstore/features/advanced-settings.py @@ -0,0 +1,146 @@ +from lettuce import world, step +from common import * +import time +from terrain.steps import reload_the_page +from selenium.common.exceptions import WebDriverException +from selenium.webdriver.support import expected_conditions as EC + +from nose.tools import assert_true, assert_false, assert_equal + +""" +http://selenium.googlecode.com/svn/trunk/docs/api/py/webdriver/selenium.webdriver.common.keys.html +""" +from selenium.webdriver.common.keys import Keys + +KEY_CSS = '.key input.policy-key' +VALUE_CSS = 'textarea.json' +DISPLAY_NAME_KEY = "display_name" +DISPLAY_NAME_VALUE = '"Robot Super Course"' + +############### ACTIONS #################### +@step('I select the Advanced Settings$') +def i_select_advanced_settings(step): + expand_icon_css = 'li.nav-course-settings i.icon-expand' + if world.browser.is_element_present_by_css(expand_icon_css): + css_click(expand_icon_css) + link_css = 'li.nav-course-settings-advanced a' + css_click(link_css) + + +@step('I am on the Advanced Course Settings page in Studio$') +def i_am_on_advanced_course_settings(step): + step.given('I have opened a new course in Studio') + step.given('I select the Advanced Settings') + + +@step(u'I press the "([^"]*)" notification 
button$') +def press_the_notification_button(step, name): + def is_visible(driver): + return EC.visibility_of_element_located((By.CSS_SELECTOR, css,)) + + # def is_invisible(driver): + # return EC.invisibility_of_element_located((By.CSS_SELECTOR,css,)) + + css = 'a.%s-button' % name.lower() + wait_for(is_visible) + time.sleep(float(1)) + css_click_at(css) + +# is_invisible is not returning a boolean, not working +# try: +# css_click_at(css) +# wait_for(is_invisible) +# except WebDriverException, e: +# css_click_at(css) +# wait_for(is_invisible) + + +@step(u'I edit the value of a policy key$') +def edit_the_value_of_a_policy_key(step): + """ + It is hard to figure out how to get into the CodeMirror + area, so cheat and do it from the policy key field :) + """ + e = css_find(KEY_CSS)[get_index_of(DISPLAY_NAME_KEY)] + e._element.send_keys(Keys.TAB, Keys.END, Keys.ARROW_LEFT, ' ', 'X') + + +@step('I create a JSON object as a value$') +def create_JSON_object(step): + change_display_name_value(step, '{"key": "value", "key_2": "value_2"}') + + +@step('I create a non-JSON value not in quotes$') +def create_value_not_in_quotes(step): + change_display_name_value(step, 'quote me') + + +############### RESULTS #################### +@step('I see default advanced settings$') +def i_see_default_advanced_settings(step): + # Test only a few of the existing properties (there are around 34 of them) + assert_policy_entries( + ["advanced_modules", DISPLAY_NAME_KEY, "show_calculator"], ["[]", DISPLAY_NAME_VALUE, "false"]) + + +@step('the settings are alphabetized$') +def they_are_alphabetized(step): + key_elements = css_find(KEY_CSS) + all_keys = [] + for key in key_elements: + all_keys.append(key.value) + + assert_equal(sorted(all_keys), all_keys, "policy keys were not sorted") + + +@step('it is displayed as formatted$') +def it_is_formatted(step): + assert_policy_entries([DISPLAY_NAME_KEY], ['{\n "key": "value",\n "key_2": "value_2"\n}']) + + +@step('it is displayed as a string') +def 
it_is_formatted(step): + assert_policy_entries([DISPLAY_NAME_KEY], ['"quote me"']) + + +@step(u'the policy key value is unchanged$') +def the_policy_key_value_is_unchanged(step): + assert_equal(get_display_name_value(), DISPLAY_NAME_VALUE) + + +@step(u'the policy key value is changed$') +def the_policy_key_value_is_changed(step): + assert_equal(get_display_name_value(), '"Robot Super Course X"') + + +############# HELPERS ############### +def assert_policy_entries(expected_keys, expected_values): + for counter in range(len(expected_keys)): + index = get_index_of(expected_keys[counter]) + assert_false(index == -1, "Could not find key: " + expected_keys[counter]) + assert_equal(expected_values[counter], css_find(VALUE_CSS)[index].value, "value is incorrect") + + +def get_index_of(expected_key): + for counter in range(len(css_find(KEY_CSS))): + # Sometimes get stale reference if I hold on to the array of elements + key = css_find(KEY_CSS)[counter].value + if key == expected_key: + return counter + + return -1 + + +def get_display_name_value(): + index = get_index_of(DISPLAY_NAME_KEY) + return css_find(VALUE_CSS)[index].value + + +def change_display_name_value(step, new_value): + e = css_find(KEY_CSS)[get_index_of(DISPLAY_NAME_KEY)] + display_name = get_display_name_value() + for count in range(len(display_name)): + e._element.send_keys(Keys.TAB, Keys.END, Keys.BACK_SPACE) + # Must delete "" before typing the JSON value + e._element.send_keys(Keys.TAB, Keys.END, Keys.BACK_SPACE, Keys.BACK_SPACE, new_value) + press_the_notification_button(step, "Save") \ No newline at end of file diff --git a/cms/djangoapps/contentstore/features/common.py b/cms/djangoapps/contentstore/features/common.py index d3364fcc3c..2ec0427e1d 100644 --- a/cms/djangoapps/contentstore/features/common.py +++ b/cms/djangoapps/contentstore/features/common.py @@ -1,19 +1,22 @@ from lettuce import world, step -from factories import * -from django.core.management import call_command from lettuce.django 
import django_url -from django.conf import settings -from django.core.management import call_command from nose.tools import assert_true from nose.tools import assert_equal -import xmodule.modulestore.django +from selenium.webdriver.support.ui import WebDriverWait +from selenium.common.exceptions import WebDriverException, StaleElementReferenceException +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.common.by import By + +from terrain.factories import UserFactory, RegistrationFactory, UserProfileFactory +from terrain.factories import CourseFactory, GroupFactory +from xmodule.modulestore.django import _MODULESTORES, modulestore +from xmodule.templates import update_templates +from auth.authz import get_user_by_email from logging import getLogger logger = getLogger(__name__) ########### STEP HELPERS ############## - - @step('I (?:visit|access|open) the Studio homepage$') def i_visit_the_studio_homepage(step): # To make this go to port 8001, put @@ -44,9 +47,15 @@ def i_press_the_category_delete_icon(step, category): assert False, 'Invalid category: %s' % category css_click(css) + +@step('I have opened a new course in Studio$') +def i_have_opened_a_new_course(step): + clear_courses() + log_into_studio() + create_a_course() + + ####### HELPER FUNCTIONS ############## - - def create_studio_user( uname='robot', email='robot+studio@edx.org', @@ -75,9 +84,9 @@ def flush_xmodule_store(): # (though it shouldn't), do this manually # from the bash shell to drop it: # $ mongo test_xmodule --eval "db.dropDatabase()" - xmodule.modulestore.django._MODULESTORES = {} - xmodule.modulestore.django.modulestore().collection.drop() - xmodule.templates.update_templates() + _MODULESTORES = {} + modulestore().collection.drop() + update_templates() def assert_css_with_text(css, text): @@ -86,13 +95,50 @@ def assert_css_with_text(css, text): def css_click(css): - world.browser.find_by_css(css).first.click() + ''' + First try to use the regular click 
method, + but if clicking in the middle of an element + doesn't work it might be that it thinks some other + element is on top of it there so click in the upper left + ''' + try: + css_find(css).first.click() + except WebDriverException, e: + css_click_at(css) + + +def css_click_at(css, x=10, y=10): + ''' + A method to click at x,y coordinates of the element + rather than in the center of the element + ''' + e = css_find(css).first + e.action_chains.move_to_element_with_offset(e._element, x, y) + e.action_chains.click() + e.action_chains.perform() def css_fill(css, value): world.browser.find_by_css(css).first.fill(value) +def css_find(css): + def is_visible(driver): + return EC.visibility_of_element_located((By.CSS_SELECTOR,css,)) + + world.browser.is_element_present_by_css(css, 5) + wait_for(is_visible) + return world.browser.find_by_css(css) + + +def wait_for(func): + WebDriverWait(world.browser.driver, 5).until(func) + + +def id_find(id): + return world.browser.find_by_id(id) + + def clear_courses(): flush_xmodule_store() @@ -129,9 +175,18 @@ def log_into_studio( def create_a_course(): - css_click('a.new-course-button') - fill_in_course_info() - css_click('input.new-course-save') + c = CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course') + + # Add the user to the instructor group of the course + # so they will have the permissions to see it in studio + g = GroupFactory.create(name='instructor_MITx/999/Robot_Super_Course') + u = get_user_by_email('robot+studio@edx.org') + u.groups.add(g) + u.save() + world.browser.reload() + + course_link_css = 'span.class-name' + css_click(course_link_css) course_title_css = 'span.course-title' assert_true(world.browser.is_element_present_by_css(course_title_css, 5)) @@ -146,6 +201,7 @@ def add_section(name='My Section'): span_css = 'span.section-name-span' assert_true(world.browser.is_element_present_by_css(span_css, 5)) + def add_subsection(name='Subsection One'): css = 'a.new-subsection-item' 
css_click(css) diff --git a/cms/djangoapps/contentstore/features/courses.py b/cms/djangoapps/contentstore/features/courses.py index db8e20722a..e394165f08 100644 --- a/cms/djangoapps/contentstore/features/courses.py +++ b/cms/djangoapps/contentstore/features/courses.py @@ -59,4 +59,4 @@ def i_am_on_tab(step, tab_name): @step('I see a link for adding a new section$') def i_see_new_section_link(step): link_css = 'a.new-courseware-section-button' - assert_css_with_text(link_css, 'New Section') + assert_css_with_text(link_css, '+ New Section') diff --git a/cms/djangoapps/contentstore/features/section.feature b/cms/djangoapps/contentstore/features/section.feature index ad00ba2911..08d38367bc 100644 --- a/cms/djangoapps/contentstore/features/section.feature +++ b/cms/djangoapps/contentstore/features/section.feature @@ -11,6 +11,14 @@ Feature: Create Section And I see a release date for my section And I see a link to create a new subsection + Scenario: Add a new section (with a quote in the name) to a course (bug #216) + Given I have opened a new course in Studio + When I click the New Section link + And I enter a section name with a quote and click save + Then I see my section name with a quote on the Courseware page + And I click to edit the section name + Then I see the complete section name with a quote in the editor + Scenario: Edit section release date Given I have opened a new course in Studio And I have added a new section @@ -18,9 +26,10 @@ Feature: Create Section And I save a new section release date Then the section release date is updated + @skip-phantom Scenario: Delete section Given I have opened a new course in Studio And I have added a new section When I press the "section" delete icon And I confirm the alert - Then the section does not exist \ No newline at end of file + Then the section does not exist diff --git a/cms/djangoapps/contentstore/features/section.py b/cms/djangoapps/contentstore/features/section.py index 3bcaeab6c4..b5ddb48a09 100644 --- 
a/cms/djangoapps/contentstore/features/section.py +++ b/cms/djangoapps/contentstore/features/section.py @@ -1,16 +1,12 @@ from lettuce import world, step from common import * +from nose.tools import assert_equal +from selenium.webdriver.common.keys import Keys +import time ############### ACTIONS #################### -@step('I have opened a new course in Studio$') -def i_have_opened_a_new_course(step): - clear_courses() - log_into_studio() - create_a_course() - - @step('I click the new section link$') def i_click_new_section_link(step): link_css = 'a.new-courseware-section-button' @@ -19,10 +15,12 @@ def i_click_new_section_link(step): @step('I enter the section name and click save$') def i_save_section_name(step): - name_css = '.new-section-name' - save_css = '.new-section-name-save' - css_fill(name_css, 'My Section') - css_click(save_css) + save_section_name('My Section') + + +@step('I enter a section name with a quote and click save$') +def i_save_section_name_with_quote(step): + save_section_name('Section with "Quote"') @step('I have added a new section$') @@ -41,18 +39,39 @@ def i_save_a_new_section_release_date(step): date_css = 'input.start-date.date.hasDatepicker' time_css = 'input.start-time.time.ui-timepicker-input' css_fill(date_css, '12/25/2013') - # click here to make the calendar go away - css_click(time_css) + # hit TAB to get to the time field + e = css_find(date_css).first + e._element.send_keys(Keys.TAB) css_fill(time_css, '12:00am') - css_click('a.save-button') + e = css_find(time_css).first + e._element.send_keys(Keys.TAB) + time.sleep(float(1)) + world.browser.click_link_by_text('Save') + ############ ASSERTIONS ################### @step('I see my section on the Courseware page$') def i_see_my_section_on_the_courseware_page(step): - section_css = 'span.section-name-span' - assert_css_with_text(section_css, 'My Section') + see_my_section_on_the_courseware_page('My Section') + + +@step('I see my section name with a quote on the Courseware page$') 
+def i_see_my_section_name_with_quote_on_the_courseware_page(step): + see_my_section_on_the_courseware_page('Section with "Quote"') + + +@step('I click to edit the section name$') +def i_click_to_edit_section_name(step): + css_click('span.section-name-span') + + +@step('I see the complete section name with a quote in the editor$') +def i_see_complete_section_name_with_quote_in_editor(step): + css = '.edit-section-name' + assert world.browser.is_element_present_by_css(css, 5) + assert_equal(world.browser.find_by_css(css).value, 'Section with "Quote"') @step('the section does not exist$') @@ -93,4 +112,18 @@ def the_section_release_date_picker_not_visible(step): def the_section_release_date_is_updated(step): css = 'span.published-status' status_text = world.browser.find_by_css(css).text - assert status_text == 'Will Release: 12/25/2013 at 12:00am' + assert_equal(status_text,'Will Release: 12/25/2013 at 12:00am') + + +############ HELPER METHODS ################### + +def save_section_name(name): + name_css = '.new-section-name' + save_css = '.new-section-name-save' + css_fill(name_css, name) + css_click(save_css) + + +def see_my_section_on_the_courseware_page(name): + section_css = 'span.section-name-span' + assert_css_with_text(section_css, name) diff --git a/cms/djangoapps/contentstore/features/signup.py b/cms/djangoapps/contentstore/features/signup.py index a786225ead..e8d0dd8229 100644 --- a/cms/djangoapps/contentstore/features/signup.py +++ b/cms/djangoapps/contentstore/features/signup.py @@ -1,4 +1,5 @@ from lettuce import world, step +from common import * @step('I fill in the registration form$') @@ -13,10 +14,11 @@ def i_fill_in_the_registration_form(step): @step('I press the Create My Account button on the registration form$') def i_press_the_button_on_the_registration_form(step): - register_form = world.browser.find_by_css('form#register_form') - submit_css = 'button#submit' - register_form.find_by_css(submit_css).click() - + submit_css = 
'form#register_form button#submit' + # Workaround for click not working on ubuntu + # for some unknown reason. + e = css_find(submit_css) + e.type(' ') @step('I should see be on the studio home page$') def i_should_see_be_on_the_studio_home_page(step): diff --git a/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature b/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature index 5276b90d12..52c10e41a8 100644 --- a/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature +++ b/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature @@ -21,6 +21,7 @@ Feature: Overview Toggle Section Then I see the "Collapse All Sections" link And all sections are expanded + @skip-phantom Scenario: Collapse link is not removed after last section of a course is deleted Given I have a course with 1 section And I navigate to the course overview page diff --git a/cms/djangoapps/contentstore/features/subsection.feature b/cms/djangoapps/contentstore/features/subsection.feature index 5acb5bfe44..1be5f4aeb9 100644 --- a/cms/djangoapps/contentstore/features/subsection.feature +++ b/cms/djangoapps/contentstore/features/subsection.feature @@ -9,6 +9,15 @@ Feature: Create Subsection And I enter the subsection name and click save Then I see my subsection on the Courseware page + Scenario: Add a new subsection (with a name containing a quote) to a section (bug #216) + Given I have opened a new course section in Studio + When I click the New Subsection link + And I enter a subsection name with a quote and click save + Then I see my subsection name with a quote on the Courseware page + And I click to edit the subsection name + Then I see the complete subsection name with a quote in the editor + + @skip-phantom Scenario: Delete a subsection Given I have opened a new course section in Studio And I have added a new subsection diff --git a/cms/djangoapps/contentstore/features/subsection.py 
b/cms/djangoapps/contentstore/features/subsection.py index e2041b8dbf..88e1424898 100644 --- a/cms/djangoapps/contentstore/features/subsection.py +++ b/cms/djangoapps/contentstore/features/subsection.py @@ -1,5 +1,6 @@ from lettuce import world, step from common import * +from nose.tools import assert_equal ############### ACTIONS #################### @@ -20,28 +21,60 @@ def i_click_the_new_subsection_link(step): @step('I enter the subsection name and click save$') def i_save_subsection_name(step): - name_css = 'input.new-subsection-name-input' - save_css = 'input.new-subsection-name-save' - css_fill(name_css, 'Subsection One') - css_click(save_css) + save_subsection_name('Subsection One') + + +@step('I enter a subsection name with a quote and click save$') +def i_save_subsection_name_with_quote(step): + save_subsection_name('Subsection With "Quote"') + + +@step('I click to edit the subsection name$') +def i_click_to_edit_subsection_name(step): + css_click('span.subsection-name-value') + + +@step('I see the complete subsection name with a quote in the editor$') +def i_see_complete_subsection_name_with_quote_in_editor(step): + css = '.subsection-display-name-input' + assert world.browser.is_element_present_by_css(css, 5) + assert_equal(world.browser.find_by_css(css).value, 'Subsection With "Quote"') @step('I have added a new subsection$') def i_have_added_a_new_subsection(step): add_subsection() + ############ ASSERTIONS ################### @step('I see my subsection on the Courseware page$') def i_see_my_subsection_on_the_courseware_page(step): - css = 'span.subsection-name' - assert world.browser.is_element_present_by_css(css) - css = 'span.subsection-name-value' - assert_css_with_text(css, 'Subsection One') + see_subsection_name('Subsection One') + + +@step('I see my subsection name with a quote on the Courseware page$') +def i_see_my_subsection_name_with_quote_on_the_courseware_page(step): + see_subsection_name('Subsection With "Quote"') @step('the subsection 
does not exist$') def the_subsection_does_not_exist(step): css = 'span.subsection-name' assert world.browser.is_element_not_present_by_css(css) + + +############ HELPER METHODS ################### + +def save_subsection_name(name): + name_css = 'input.new-subsection-name-input' + save_css = 'input.new-subsection-name-save' + css_fill(name_css, name) + css_click(save_css) + +def see_subsection_name(name): + css = 'span.subsection-name' + assert world.browser.is_element_present_by_css(css) + css = 'span.subsection-name-value' + assert_css_with_text(css, name) diff --git a/cms/djangoapps/contentstore/management/commands/delete_course.py b/cms/djangoapps/contentstore/management/commands/delete_course.py index bb38e72d44..fc92205030 100644 --- a/cms/djangoapps/contentstore/management/commands/delete_course.py +++ b/cms/djangoapps/contentstore/management/commands/delete_course.py @@ -7,7 +7,7 @@ from xmodule.modulestore.django import modulestore from xmodule.contentstore.django import contentstore from xmodule.modulestore import Location from xmodule.course_module import CourseDescriptor -from prompt import query_yes_no +from .prompt import query_yes_no from auth.authz import _delete_course_group @@ -17,22 +17,29 @@ from auth.authz import _delete_course_group class Command(BaseCommand): - help = \ -'''Delete a MongoDB backed course''' + help = '''Delete a MongoDB backed course''' def handle(self, *args, **options): - if len(args) != 1: - raise CommandError("delete_course requires one argument: ") + if len(args) != 1 and len(args) != 2: + raise CommandError("delete_course requires one or more arguments: |commit|") loc_str = args[0] + commit = False + if len(args) == 2: + commit = args[1] == 'commit' + + if commit: + print 'Actually going to delete the course from DB....' + ms = modulestore('direct') cs = contentstore() if query_yes_no("Deleting course {0}. Confirm?".format(loc_str), default="no"): - if query_yes_no("Are you sure. 
This action cannot be undone!", default="no"): - loc = CourseDescriptor.id_to_location(loc_str) - if delete_course(ms, cs, loc) == True: - print 'removing User permissions from course....' - # in the django layer, we need to remove all the user permissions groups associated with this course - _delete_course_group(loc) + if query_yes_no("Are you sure. This action cannot be undone!", default="no"): + loc = CourseDescriptor.id_to_location(loc_str) + if delete_course(ms, cs, loc, commit) == True: + print 'removing User permissions from course....' + # in the django layer, we need to remove all the user permissions groups associated with this course + if commit: + _delete_course_group(loc) diff --git a/cms/djangoapps/contentstore/management/commands/prompt.py b/cms/djangoapps/contentstore/management/commands/prompt.py index 211c48406c..40a39d0a11 100644 --- a/cms/djangoapps/contentstore/management/commands/prompt.py +++ b/cms/djangoapps/contentstore/management/commands/prompt.py @@ -13,7 +13,7 @@ def query_yes_no(question, default="yes"): """ valid = {"yes":True, "y":True, "ye":True, "no":False, "n":False} - if default == None: + if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " diff --git a/cms/djangoapps/contentstore/management/commands/update_templates.py b/cms/djangoapps/contentstore/management/commands/update_templates.py new file mode 100644 index 0000000000..b30d30480a --- /dev/null +++ b/cms/djangoapps/contentstore/management/commands/update_templates.py @@ -0,0 +1,9 @@ +from xmodule.templates import update_templates +from django.core.management.base import BaseCommand + +class Command(BaseCommand): + help = \ +'''Imports and updates the Studio component templates from the code pack and put in the DB''' + + def handle(self, *args, **options): + update_templates() \ No newline at end of file diff --git a/cms/djangoapps/contentstore/module_info_model.py b/cms/djangoapps/contentstore/module_info_model.py index 7ed4505c94..8ea6add88d 
100644 --- a/cms/djangoapps/contentstore/module_info_model.py +++ b/cms/djangoapps/contentstore/module_info_model.py @@ -15,10 +15,10 @@ def get_module_info(store, location, parent_location=None, rewrite_static_links= template_location = Location(['i4x', 'edx', 'templates', location.category, 'Empty']) module = store.clone_item(template_location, location) - data = module.definition['data'] + data = module.data if rewrite_static_links: data = replace_static_urls( - module.definition['data'], + module.data, None, course_namespace=Location([ module.location.tag, @@ -32,7 +32,8 @@ def get_module_info(store, location, parent_location=None, rewrite_static_links= return { 'id': module.location.url(), 'data': data, - 'metadata': module.metadata + # TODO (cpennington): This really shouldn't have to do this much reaching in to get the metadata + 'metadata': module._model_data._kvs._metadata } @@ -70,23 +71,23 @@ def set_module_info(store, location, post_data): # 'apply' the submitted metadata, so we don't end up deleting system metadata if post_data.get('metadata') is not None: posted_metadata = post_data['metadata'] - + # update existing metadata with submitted metadata (which can be partial) # IMPORTANT NOTE: if the client passed pack 'null' (None) for a piece of metadata that means 'remove it' - for metadata_key in posted_metadata.keys(): - + for metadata_key, value in posted_metadata.items(): + # let's strip out any metadata fields from the postback which have been identified as system metadata # and therefore should not be user-editable, so we should accept them back from the client if metadata_key in module.system_metadata_fields: del posted_metadata[metadata_key] elif posted_metadata[metadata_key] is None: # remove both from passed in collection as well as the collection read in from the modulestore - if metadata_key in module.metadata: - del module.metadata[metadata_key] + if metadata_key in module._model_data: + del module._model_data[metadata_key] del 
posted_metadata[metadata_key] - - # overlay the new metadata over the modulestore sourced collection to support partial updates - module.metadata.update(posted_metadata) - + else: + module._model_data[metadata_key] = value + # commit to datastore - store.update_metadata(location, module.metadata) + # TODO (cpennington): This really shouldn't have to do this much reaching in to get the metadata + store.update_metadata(location, module._model_data._kvs._metadata) diff --git a/cms/djangoapps/contentstore/tests/factories.py b/cms/djangoapps/contentstore/tests/factories.py deleted file mode 100644 index d15610f11c..0000000000 --- a/cms/djangoapps/contentstore/tests/factories.py +++ /dev/null @@ -1,49 +0,0 @@ -from factory import Factory -from datetime import datetime -from uuid import uuid4 -from student.models import (User, UserProfile, Registration, - CourseEnrollmentAllowed) -from django.contrib.auth.models import Group - - -class UserProfileFactory(Factory): - FACTORY_FOR = UserProfile - - user = None - name = 'Robot Studio' - courseware = 'course.xml' - - -class RegistrationFactory(Factory): - FACTORY_FOR = Registration - - user = None - activation_key = uuid4().hex - - -class UserFactory(Factory): - FACTORY_FOR = User - - username = 'robot' - email = 'robot@edx.org' - password = 'test' - first_name = 'Robot' - last_name = 'Tester' - is_staff = False - is_active = True - is_superuser = False - last_login = datetime.now() - date_joined = datetime.now() - - -class GroupFactory(Factory): - FACTORY_FOR = Group - - name = 'test_group' - - -class CourseEnrollmentAllowedFactory(Factory): - FACTORY_FOR = CourseEnrollmentAllowed - - email = 'test@edx.org' - course_id = 'edX/test/2012_Fall' diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py index a4ce54f950..d04e1a6332 100644 --- a/cms/djangoapps/contentstore/tests/test_contentstore.py +++ b/cms/djangoapps/contentstore/tests/test_contentstore.py @@ 
-5,29 +5,28 @@ from django.test.utils import override_settings from django.conf import settings from django.core.urlresolvers import reverse from path import path -from tempfile import mkdtemp +from tempdir import mkdtemp_clean +from datetime import timedelta import json from fs.osfs import OSFS import copy -from mock import Mock -from json import dumps, loads +from json import loads -from student.models import Registration from django.contrib.auth.models import User -from cms.djangoapps.contentstore.utils import get_modulestore +from contentstore.utils import get_modulestore -from utils import ModuleStoreTestCase, parse_json +from .utils import ModuleStoreTestCase, parse_json from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.modulestore import Location from xmodule.modulestore.store_utilities import clone_course from xmodule.modulestore.store_utilities import delete_course -from xmodule.modulestore.django import modulestore, _MODULESTORES +from xmodule.modulestore.django import modulestore from xmodule.contentstore.django import contentstore from xmodule.templates import update_templates from xmodule.modulestore.xml_exporter import export_to_xml from xmodule.modulestore.xml_importer import import_from_xml -from xmodule.templates import update_templates +from xmodule.modulestore.inheritance import own_metadata from xmodule.capa_module import CapaDescriptor from xmodule.course_module import CourseDescriptor @@ -63,7 +62,6 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.client = Client() self.client.login(username=uname, password=password) - def check_edit_unit(self, test_course_name): import_from_xml(modulestore(), 'common/test/data/', [test_course_name]) @@ -82,8 +80,8 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): def test_static_tab_reordering(self): import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - course = ms.get_item(Location(['i4x', 'edX', 'full', 
'course', '6.002_Spring_2012', None])) + module_store = modulestore('direct') + course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) # reverse the ordering reverse_tabs = [] @@ -91,9 +89,9 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): if tab['type'] == 'static_tab': reverse_tabs.insert(0, 'i4x://edX/full/static_tab/{0}'.format(tab['url_slug'])) - resp = self.client.post(reverse('reorder_static_tabs'), json.dumps({'tabs': reverse_tabs}), "application/json") + self.client.post(reverse('reorder_static_tabs'), json.dumps({'tabs': reverse_tabs}), "application/json") - course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) + course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) # compare to make sure that the tabs information is in the expected order after the server call course_tabs = [] @@ -103,29 +101,61 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.assertEqual(reverse_tabs, course_tabs) + def test_delete(self): + import_from_xml(modulestore(), 'common/test/data/', ['full']) + + module_store = modulestore('direct') + + sequential = module_store.get_item(Location(['i4x', 'edX', 'full', 'sequential', 'Administrivia_and_Circuit_Elements', None])) + + chapter = module_store.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None])) + + # make sure the parent no longer points to the child object which was deleted + self.assertTrue(sequential.location.url() in chapter.children) + + self.client.post(reverse('delete_item'), + json.dumps({'id': sequential.location.url(), 'delete_children': 'true', 'delete_all_versions': 'true'}), + "application/json") + + found = False + try: + module_store.get_item(Location(['i4x', 'edX', 'full', 'sequential', 'Administrivia_and_Circuit_Elements', None])) + found = True + except ItemNotFoundError: + pass + + self.assertFalse(found) + + chapter = 
module_store.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None])) + + # make sure the parent no longer points to the child object which was deleted + self.assertFalse(sequential.location.url() in chapter.children) + + + def test_about_overrides(self): ''' This test case verifies that a course can use specialized override for about data, e.g. /about/Fall_2012/effort.html while there is a base definition in /about/effort.html ''' import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - effort = ms.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None])) - self.assertEqual(effort.definition['data'], '6 hours') + module_store = modulestore('direct') + effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None])) + self.assertEqual(effort.data, '6 hours') # this one should be in a non-override folder - effort = ms.get_item(Location(['i4x', 'edX', 'full', 'about', 'end_date', None])) - self.assertEqual(effort.definition['data'], 'TBD') + effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'end_date', None])) + self.assertEqual(effort.data, 'TBD') def test_remove_hide_progress_tab(self): import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() source_location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') - course = ms.get_item(source_location) - self.assertNotIn('hide_progress_tab', course.metadata) + course = module_store.get_item(source_location) + self.assertFalse(course.hide_progress_tab) def test_clone_course(self): @@ -143,19 +173,19 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): data = parse_json(resp) self.assertEqual(data['id'], 'i4x://MITx/999/course/Robot_Super_Course') - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() 
source_location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') dest_location = CourseDescriptor.id_to_location('MITx/999/Robot_Super_Course') - clone_course(ms, cs, source_location, dest_location) + clone_course(module_store, content_store, source_location, dest_location) # now loop through all the units in the course and verify that the clone can render them, which # means the objects are at least present - items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) + items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) self.assertGreater(len(items), 0) - clone_items = ms.get_items(Location(['i4x', 'MITx', '999', 'vertical', None])) + clone_items = module_store.get_items(Location(['i4x', 'MITx', '999', 'vertical', None])) self.assertGreater(len(clone_items), 0) for descriptor in items: new_loc = descriptor.location._replace(org='MITx', course='999') @@ -166,14 +196,14 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): def test_delete_course(self): import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') - delete_course(ms, cs, location) + delete_course(module_store, content_store, location, commit=True) - items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) + items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) self.assertEqual(len(items), 0) def verify_content_existence(self, modulestore, root_dir, location, dirname, category_name, filename_suffix=''): @@ -188,54 +218,54 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.assertTrue(fs.exists(item.location.name + filename_suffix)) def test_export_course(self): - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() - 
import_from_xml(ms, 'common/test/data/', ['full']) + import_from_xml(module_store, 'common/test/data/', ['full']) location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') - root_dir = path(mkdtemp()) + root_dir = path(mkdtemp_clean()) print 'Exporting to tempdir = {0}'.format(root_dir) # export out to a tempdir - export_to_xml(ms, cs, location, root_dir, 'test_export') + export_to_xml(module_store, content_store, location, root_dir, 'test_export') # check for static tabs - self.verify_content_existence(ms, root_dir, location, 'tabs', 'static_tab', '.html') + self.verify_content_existence(module_store, root_dir, location, 'tabs', 'static_tab', '.html') # check for custom_tags - self.verify_content_existence(ms, root_dir, location, 'info', 'course_info', '.html') + self.verify_content_existence(module_store, root_dir, location, 'info', 'course_info', '.html') # check for custom_tags - self.verify_content_existence(ms, root_dir, location, 'custom_tags', 'custom_tag_template') + self.verify_content_existence(module_store, root_dir, location, 'custom_tags', 'custom_tag_template') # check for graiding_policy.json fs = OSFS(root_dir / 'test_export/policies/6.002_Spring_2012') self.assertTrue(fs.exists('grading_policy.json')) - course = ms.get_item(location) + course = module_store.get_item(location) # compare what's on disk compared to what we have in our course - with fs.open('grading_policy.json','r') as grading_policy: - on_disk = loads(grading_policy.read()) - self.assertEqual(on_disk, course.definition['data']['grading_policy']) + with fs.open('grading_policy.json', 'r') as grading_policy: + on_disk = loads(grading_policy.read()) + self.assertEqual(on_disk, course.grading_policy) #check for policy.json self.assertTrue(fs.exists('policy.json')) # compare what's on disk to what we have in the course module - with fs.open('policy.json','r') as course_policy: + with fs.open('policy.json', 'r') as course_policy: on_disk = loads(course_policy.read()) 
self.assertIn('course/6.002_Spring_2012', on_disk) - self.assertEqual(on_disk['course/6.002_Spring_2012'], course.metadata) + self.assertEqual(on_disk['course/6.002_Spring_2012'], own_metadata(course)) # remove old course - delete_course(ms, cs, location) + delete_course(module_store, content_store, location) # reimport - import_from_xml(ms, root_dir, ['test_export']) + import_from_xml(module_store, root_dir, ['test_export']) - items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) + items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) self.assertGreater(len(items), 0) for descriptor in items: print "Checking {0}....".format(descriptor.location.url()) @@ -245,11 +275,11 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): shutil.rmtree(root_dir) def test_course_handouts_rewrites(self): - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() # import a test course - import_from_xml(ms, 'common/test/data/', ['full']) + import_from_xml(module_store, 'common/test/data/', ['full']) handout_location = Location(['i4x', 'edX', 'full', 'course_info', 'handouts']) @@ -263,6 +293,34 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): # note, we know the link it should be because that's what in the 'full' course in the test data self.assertContains(resp, '/c4x/edX/full/asset/handouts_schematic_tutorial.pdf') + def test_export_course_with_unknown_metadata(self): + module_store = modulestore('direct') + content_store = contentstore() + + import_from_xml(module_store, 'common/test/data/', ['full']) + location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') + + root_dir = path(mkdtemp_clean()) + + course = module_store.get_item(location) + + metadata = own_metadata(course) + # add a bool piece of unknown metadata so we can verify we don't throw an exception + metadata['new_metadata'] = True + + module_store.update_metadata(location, metadata) 
+ + print 'Exporting to tempdir = {0}'.format(root_dir) + + # export out to a tempdir + exported = False + try: + export_to_xml(module_store, content_store, location, root_dir, 'test_export') + exported = True + except Exception: + pass + + self.assertTrue(exported) class ContentStoreTest(ModuleStoreTestCase): """ @@ -342,7 +400,7 @@ class ContentStoreTest(ModuleStoreTestCase): # Create a course so there is something to view resp = self.client.get(reverse('index')) self.assertContains(resp, - '

                My Courses

                ', + '

                My Courses

                ', status_code=200, html=True) @@ -401,7 +459,7 @@ class ContentStoreTest(ModuleStoreTestCase): def test_capa_module(self): """Test that a problem treats markdown specially.""" - course = CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course') + CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course') problem_data = { 'parent_location': 'i4x://MITx/999/course/Robot_Super_Course', @@ -418,22 +476,77 @@ class ContentStoreTest(ModuleStoreTestCase): self.assertIsInstance(problem, CapaDescriptor, "New problem is not a CapaDescriptor") context = problem.get_context() self.assertIn('markdown', context, "markdown is missing from context") - self.assertIn('markdown', problem.metadata, "markdown is missing from metadata") self.assertNotIn('markdown', problem.editable_metadata_fields, "Markdown slipped into the editable metadata fields") + def test_import_metadata_with_attempts_empty_string(self): + import_from_xml(modulestore(), 'common/test/data/', ['simple']) + module_store = modulestore('direct') + did_load_item = False + try: + module_store.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None])) + did_load_item = True + except ItemNotFoundError: + pass + + # make sure we found the item (e.g. 
it didn't error while loading) + self.assertTrue(did_load_item) + + def test_metadata_inheritance(self): + import_from_xml(modulestore(), 'common/test/data/', ['full']) + + module_store = modulestore('direct') + course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) + + verticals = module_store.get_items(['i4x', 'edX', 'full', 'vertical', None, None]) + + # let's assert on the metadata_inheritance on an existing vertical + for vertical in verticals: + self.assertEqual(course.lms.xqa_key, vertical.lms.xqa_key) + + self.assertGreater(len(verticals), 0) + + new_component_location = Location('i4x', 'edX', 'full', 'html', 'new_component') + source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Blank_HTML_Page') + + # crate a new module and add it as a child to a vertical + module_store.clone_item(source_template_location, new_component_location) + parent = verticals[0] + module_store.update_children(parent.location, parent.children + [new_component_location.url()]) + + # flush the cache + module_store.get_cached_metadata_inheritance_tree(new_component_location, -1) + new_module = module_store.get_item(new_component_location) + + # check for grace period definition which should be defined at the course level + self.assertEqual(parent.lms.graceperiod, new_module.lms.graceperiod) + + self.assertEqual(course.lms.xqa_key, new_module.lms.xqa_key) + + # + # now let's define an override at the leaf node level + # + new_module.lms.graceperiod = timedelta(1) + module_store.update_metadata(new_module.location, own_metadata(new_module)) + + # flush the cache and refetch + module_store.get_cached_metadata_inheritance_tree(new_component_location, -1) + new_module = module_store.get_item(new_component_location) + + self.assertEqual(timedelta(1), new_module.lms.graceperiod) + class TemplateTestCase(ModuleStoreTestCase): - def test_template_cleanup(self): - ms = modulestore('direct') + def test_template_cleanup(self): + 
module_store = modulestore('direct') # insert a bogus template in the store bogus_template_location = Location('i4x', 'edx', 'templates', 'html', 'bogus') source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Blank_HTML_Page') - - ms.clone_item(source_template_location, bogus_template_location) - verify_create = ms.get_item(bogus_template_location) + module_store.clone_item(source_template_location, bogus_template_location) + + verify_create = module_store.get_item(bogus_template_location) self.assertIsNotNone(verify_create) # now run cleanup @@ -442,10 +555,8 @@ class TemplateTestCase(ModuleStoreTestCase): # now try to find dangling template, it should not be in DB any longer asserted = False try: - verify_create = ms.get_item(bogus_template_location) + verify_create = module_store.get_item(bogus_template_location) except ItemNotFoundError: asserted = True - self.assertTrue(asserted) - - + self.assertTrue(asserted) diff --git a/cms/djangoapps/contentstore/tests/test_course_settings.py b/cms/djangoapps/contentstore/tests/test_course_settings.py index 86503d2136..ecdeca29e7 100644 --- a/cms/djangoapps/contentstore/tests/test_course_settings.py +++ b/cms/djangoapps/contentstore/tests/test_course_settings.py @@ -1,7 +1,5 @@ import datetime -import time import json -import calendar import copy from util import converters from util.converters import jsdate_to_time @@ -11,17 +9,20 @@ from django.test.client import Client from django.core.urlresolvers import reverse from django.utils.timezone import UTC -import xmodule from xmodule.modulestore import Location -from cms.djangoapps.models.settings.course_details import (CourseDetails, +from models.settings.course_details import (CourseDetails, CourseSettingsEncoder) -from cms.djangoapps.models.settings.course_grading import CourseGradingModel -from cms.djangoapps.contentstore.utils import get_modulestore +from models.settings.course_grading import CourseGradingModel +from contentstore.utils import 
get_modulestore from django.test import TestCase -from utils import ModuleStoreTestCase +from .utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory +from models.settings.course_metadata import CourseMetadata +from xmodule.modulestore.xml_importer import import_from_xml +from xmodule.modulestore.django import modulestore + # YYYY-MM-DDThh:mm:ss.s+/-HH:MM class ConvertersTestCase(TestCase): @@ -245,8 +246,9 @@ class CourseGradingTest(CourseTestCase): altered_grader = CourseGradingModel.update_from_json(test_grader.__dict__) self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "cutoff add D") - test_grader.grace_period = {'hours' : 4, 'minutes' : 5, 'seconds': 0} + test_grader.grace_period = {'hours': 4, 'minutes': 5, 'seconds': 0} altered_grader = CourseGradingModel.update_from_json(test_grader.__dict__) + print test_grader.grace_period, altered_grader.grace_period self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "4 hour grace period") def test_update_grader_from_json(self): @@ -261,3 +263,64 @@ class CourseGradingTest(CourseTestCase): test_grader.graders[1]['drop_count'] = test_grader.graders[1].get('drop_count') + 1 altered_grader = CourseGradingModel.update_grader_from_json(test_grader.course_location, test_grader.graders[1]) self.assertDictEqual(test_grader.graders[1], altered_grader, "drop_count[1] + 2") + +class CourseMetadataEditingTest(CourseTestCase): + def setUp(self): + CourseTestCase.setUp(self) + # add in the full class too + import_from_xml(modulestore(), 'common/test/data/', ['full']) + self.fullcourse_location = Location(['i4x','edX','full','course','6.002_Spring_2012', None]) + + + def test_fetch_initial_fields(self): + test_model = CourseMetadata.fetch(self.course_location) + self.assertIn('display_name', test_model, 'Missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Robot Super Course', "not expected value") + + test_model = 
CourseMetadata.fetch(self.fullcourse_location) + self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in') + self.assertIn('display_name', test_model, 'full missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Testing', "not expected value") + self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field') + self.assertIn('showanswer', test_model, 'showanswer field ') + self.assertIn('xqa_key', test_model, 'xqa_key field ') + + def test_update_from_json(self): + test_model = CourseMetadata.update_from_json(self.course_location, + { "advertised_start" : "start A", + "testcenter_info" : { "c" : "test" }, + "days_early_for_beta" : 2}) + self.update_check(test_model) + # try fresh fetch to ensure persistence + test_model = CourseMetadata.fetch(self.course_location) + self.update_check(test_model) + # now change some of the existing metadata + test_model = CourseMetadata.update_from_json(self.course_location, + { "advertised_start" : "start B", + "display_name" : "jolly roger"}) + self.assertIn('display_name', test_model, 'Missing editable metadata field') + self.assertEqual(test_model['display_name'], 'jolly roger', "not expected value") + self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field') + self.assertEqual(test_model['advertised_start'], 'start B', "advertised_start not expected value") + + def update_check(self, test_model): + self.assertIn('display_name', test_model, 'Missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Robot Super Course', "not expected value") + self.assertIn('advertised_start', test_model, 'Missing new advertised_start metadata field') + self.assertEqual(test_model['advertised_start'], 'start A', "advertised_start not expected value") + self.assertIn('testcenter_info', test_model, 'Missing testcenter_info metadata field') + self.assertDictEqual(test_model['testcenter_info'], { "c" : "test" }, "testcenter_info 
not expected value") + self.assertIn('days_early_for_beta', test_model, 'Missing days_early_for_beta metadata field') + self.assertEqual(test_model['days_early_for_beta'], 2, "days_early_for_beta not expected value") + + + def test_delete_key(self): + test_model = CourseMetadata.delete_key(self.fullcourse_location, { 'deleteKeys' : ['doesnt_exist', 'showanswer', 'xqa_key']}) + # ensure no harm + self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in') + self.assertIn('display_name', test_model, 'full missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Testing', "not expected value") + self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field') + # check for deletion effectiveness + self.assertEqual('closed', test_model['showanswer'], 'showanswer field still in') + self.assertEqual(None, test_model['xqa_key'], 'xqa_key field still in') diff --git a/cms/djangoapps/contentstore/tests/test_course_updates.py b/cms/djangoapps/contentstore/tests/test_course_updates.py index c57f1322f5..6a3a1e21f7 100644 --- a/cms/djangoapps/contentstore/tests/test_course_updates.py +++ b/cms/djangoapps/contentstore/tests/test_course_updates.py @@ -1,4 +1,4 @@ -from cms.djangoapps.contentstore.tests.test_course_settings import CourseTestCase +from contentstore.tests.test_course_settings import CourseTestCase from django.core.urlresolvers import reverse import json diff --git a/cms/djangoapps/contentstore/tests/test_utils.py b/cms/djangoapps/contentstore/tests/test_utils.py index 09e3b045f9..4ab40d17a8 100644 --- a/cms/djangoapps/contentstore/tests/test_utils.py +++ b/cms/djangoapps/contentstore/tests/test_utils.py @@ -1,4 +1,4 @@ -from cms.djangoapps.contentstore import utils +from contentstore import utils import mock from django.test import TestCase diff --git a/cms/djangoapps/contentstore/tests/tests.py b/cms/djangoapps/contentstore/tests/tests.py index 166982e35f..e43a95fccd 100644 --- 
a/cms/djangoapps/contentstore/tests/tests.py +++ b/cms/djangoapps/contentstore/tests/tests.py @@ -4,12 +4,11 @@ from django.test.client import Client from django.conf import settings from django.core.urlresolvers import reverse from path import path -from tempfile import mkdtemp import json from fs.osfs import OSFS import copy -from cms.djangoapps.contentstore.utils import get_modulestore +from contentstore.utils import get_modulestore from xmodule.modulestore import Location from xmodule.modulestore.store_utilities import clone_course @@ -25,7 +24,7 @@ from xmodule.course_module import CourseDescriptor from xmodule.seq_module import SequenceDescriptor from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory -from utils import ModuleStoreTestCase, parse_json, user, registration +from .utils import ModuleStoreTestCase, parse_json, user, registration class ContentStoreTestCase(ModuleStoreTestCase): diff --git a/cms/djangoapps/contentstore/tests/utils.py b/cms/djangoapps/contentstore/tests/utils.py index be028b2836..b6b8cd5023 100644 --- a/cms/djangoapps/contentstore/tests/utils.py +++ b/cms/djangoapps/contentstore/tests/utils.py @@ -1,6 +1,6 @@ import json import copy -from time import time +from uuid import uuid4 from django.test import TestCase from django.conf import settings @@ -20,13 +20,12 @@ class ModuleStoreTestCase(TestCase): def _pre_setup(self): super(ModuleStoreTestCase, self)._pre_setup() - # Use the current seconds since epoch to differentiate + # Use a uuid to differentiate # the mongo collections on jenkins. 
- sec_since_epoch = '%s' % int(time() * 100) self.orig_MODULESTORE = copy.deepcopy(settings.MODULESTORE) self.test_MODULESTORE = self.orig_MODULESTORE - self.test_MODULESTORE['default']['OPTIONS']['collection'] = 'modulestore_%s' % sec_since_epoch - self.test_MODULESTORE['direct']['OPTIONS']['collection'] = 'modulestore_%s' % sec_since_epoch + self.test_MODULESTORE['default']['OPTIONS']['collection'] = 'modulestore_%s' % uuid4().hex + self.test_MODULESTORE['direct']['OPTIONS']['collection'] = 'modulestore_%s' % uuid4().hex settings.MODULESTORE = self.test_MODULESTORE # Flush and initialize the module store diff --git a/cms/djangoapps/contentstore/utils.py b/cms/djangoapps/contentstore/utils.py index b14dd8b353..0a99441fe9 100644 --- a/cms/djangoapps/contentstore/utils.py +++ b/cms/djangoapps/contentstore/utils.py @@ -39,10 +39,10 @@ def get_course_location_for_item(location): # make sure we found exactly one match on this above course search found_cnt = len(courses) if found_cnt == 0: - raise BaseException('Could not find course at {0}'.format(course_search_location)) + raise Exception('Could not find course at {0}'.format(course_search_location)) if found_cnt > 1: - raise BaseException('Found more than one course at {0}. There should only be one!!! Dump = {1}'.format(course_search_location, courses)) + raise Exception('Found more than one course at {0}. There should only be one!!! Dump = {1}'.format(course_search_location, courses)) location = courses[0].location @@ -75,12 +75,20 @@ def get_course_for_item(location): return courses[0] -def get_lms_link_for_item(location, preview=False): +def get_lms_link_for_item(location, preview=False, course_id=None): + if course_id is None: + course_id = get_course_id(location) + if settings.LMS_BASE is not None: - lms_link = "//{preview}{lms_base}/courses/{course_id}/jump_to/{location}".format( - preview='preview.' 
if preview else '', - lms_base=settings.LMS_BASE, - course_id=get_course_id(location), + if preview: + lms_base = settings.MITX_FEATURES.get('PREVIEW_LMS_BASE', + 'preview.' + settings.LMS_BASE) + else: + lms_base = settings.LMS_BASE + + lms_link = "//{lms_base}/courses/{course_id}/jump_to/{location}".format( + lms_base=lms_base, + course_id=course_id, location=Location(location) ) else: @@ -128,7 +136,7 @@ def compute_unit_state(unit): 'private' content is editabled and not visible in the LMS """ - if unit.metadata.get('is_draft', False): + if unit.cms.is_draft: try: modulestore('direct').get_item(unit.location) return UnitState.draft diff --git a/cms/djangoapps/contentstore/views.py b/cms/djangoapps/contentstore/views.py index 926fd05d68..eb634b0cdd 100644 --- a/cms/djangoapps/contentstore/views.py +++ b/cms/djangoapps/contentstore/views.py @@ -18,7 +18,8 @@ from django.core.files.temp import NamedTemporaryFile # to install PIL on MacOSX: 'easy_install http://dist.repoze.org/PIL-1.1.6.tar.gz' from PIL import Image -from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden +from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError +from django.http import HttpResponseNotFound from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.core.context_processors import csrf @@ -28,11 +29,15 @@ from django.conf import settings from xmodule.modulestore import Location from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError +from xmodule.modulestore.inheritance import own_metadata +from xblock.core import Scope +from xblock.runtime import KeyValueStore, DbModel, InvalidScopeError from xmodule.x_module import ModuleSystem from xmodule.error_module import ErrorDescriptor from xmodule.errortracker import exc_info_to_str import static_replace from external_auth.views import ssl_login_shortcut 
+from xmodule.modulestore.mongo import MongoUsage from mitxmako.shortcuts import render_to_response, render_to_string from xmodule.modulestore.django import modulestore @@ -54,12 +59,12 @@ from contentstore.course_info_model import get_course_updates,\ from cache_toolbox.core import del_cached_content from xmodule.timeparse import stringify_time from contentstore.module_info_model import get_module_info, set_module_info -from cms.djangoapps.models.settings.course_details import CourseDetails,\ +from models.settings.course_details import CourseDetails,\ CourseSettingsEncoder -from cms.djangoapps.models.settings.course_grading import CourseGradingModel -from cms.djangoapps.contentstore.utils import get_modulestore -from lxml import etree +from models.settings.course_grading import CourseGradingModel +from contentstore.utils import get_modulestore from django.shortcuts import redirect +from models.settings.course_metadata import CourseMetadata # to install PIL on MacOSX: 'easy_install http://dist.repoze.org/PIL-1.1.6.tar.gz' @@ -68,6 +73,10 @@ log = logging.getLogger(__name__) COMPONENT_TYPES = ['customtag', 'discussion', 'html', 'problem', 'video'] +ADVANCED_COMPONENT_TYPES = ['annotatable','combinedopenended', 'peergrading'] +ADVANCED_COMPONENT_CATEGORY = 'advanced' +ADVANCED_COMPONENT_POLICY_KEY = 'advanced_modules' + # cdodge: these are categories which should not be parented, they are detached from the hierarchy DETACHED_CATEGORIES = ['about', 'static_tab', 'course_info'] @@ -82,12 +91,14 @@ def signup(request): csrf_token = csrf(request)['csrf_token'] return render_to_response('signup.html', {'csrf': csrf_token}) + def old_login_redirect(request): ''' Redirect to the active login url. 
''' return redirect('login', permanent=True) + @ssl_login_shortcut @ensure_csrf_cookie def login_page(request): @@ -100,21 +111,23 @@ def login_page(request): 'forgot_password_link': "//{base}/#forgot-password-modal".format(base=settings.LMS_BASE), }) + def howitworks(request): if request.user.is_authenticated(): return index(request) - else: + else: return render_to_response('howitworks.html', {}) # ==== Views for any logged-in user ================================== + @login_required @ensure_csrf_cookie def index(request): """ List all courses available to the logged in user """ - courses = modulestore().get_items(['i4x', None, None, 'course', None]) + courses = modulestore('direct').get_items(['i4x', None, None, 'course', None]) # filter out courses that we don't have access too def course_filter(course): @@ -127,11 +140,12 @@ def index(request): return render_to_response('index.html', { 'new_course_template': Location('i4x', 'edx', 'templates', 'course', 'Empty'), - 'courses': [(course.metadata.get('display_name'), + 'courses': [(course.display_name, reverse('course_index', args=[ course.location.org, course.location.course, - course.location.name])) + course.location.name]), + get_lms_link_for_item(course.location, course_id=course.location.course_id)) for course in courses], 'user': request.user, 'disable_course_creation': settings.MITX_FEATURES.get('DISABLE_COURSE_CREATION', False) and not request.user.is_staff @@ -140,6 +154,7 @@ def index(request): # ==== Views with per-item permissions================================ + def has_access(user, location, role=STAFF_ROLE_NAME): ''' Return True if user allowed to access this piece of data @@ -172,6 +187,8 @@ def course_index(request, org, course, name): if not has_access(request.user, location): raise PermissionDenied() + lms_link = get_lms_link_for_item(location) + upload_asset_callback_url = reverse('upload_asset', kwargs={ 'org': org, 'course': course, @@ -184,6 +201,7 @@ def course_index(request, org, 
course, name): return render_to_response('overview.html', { 'active_tab': 'courseware', 'context_course': course, + 'lms_link': lms_link, 'sections': sections, 'course_graders': json.dumps(CourseGradingModel.fetch(course.location).graders), 'parent_location': course.location, @@ -226,8 +244,13 @@ def edit_subsection(request, location): # remove all metadata from the generic dictionary that is presented in a more normalized UI - policy_metadata = dict((key, value) for key, value in item.metadata.iteritems() - if key not in ['display_name', 'start', 'due', 'format'] and key not in item.system_metadata_fields) + policy_metadata = dict( + (field.name, field.read_from(item)) + for field + in item.fields + if field.name not in ['display_name', 'start', 'due', 'format'] and + field.scope == Scope.settings + ) can_view_live = False subsection_units = item.get_children() @@ -277,14 +300,34 @@ def edit_unit(request, location): component_templates = defaultdict(list) + # Check if there are any advanced modules specified in the course policy. These modules + # should be specified as a list of strings, where the strings are the names of the modules + # in ADVANCED_COMPONENT_TYPES that should be enabled for the course. + course_advanced_keys = course.advanced_modules + + # Set component types according to course policy file + component_types = list(COMPONENT_TYPES) + if isinstance(course_advanced_keys, list): + course_advanced_keys = [c for c in course_advanced_keys if c in ADVANCED_COMPONENT_TYPES] + if len(course_advanced_keys) > 0: + component_types.append(ADVANCED_COMPONENT_CATEGORY) + else: + log.error("Improper format for course advanced keys! 
{0}".format(course_advanced_keys)) + templates = modulestore().get_items(Location('i4x', 'edx', 'templates')) for template in templates: - if template.location.category in COMPONENT_TYPES: - component_templates[template.location.category].append(( - template.display_name, + category = template.location.category + + if category in course_advanced_keys: + category = ADVANCED_COMPONENT_CATEGORY + + if category in component_types: + #This is a hack to create categories for different xmodules + component_templates[category].append(( + template.display_name_with_default, template.location.url(), - 'markdown' in template.metadata, - 'empty' in template.metadata + hasattr(template, 'markdown') and template.markdown is not None, + template.cms.empty, )) components = [ @@ -313,8 +356,11 @@ def edit_unit(request, location): break index = index + 1 - preview_lms_link = '//{preview}{lms_base}/courses/{org}/{course}/{course_name}/courseware/{section}/{subsection}/{index}'.format( - preview='preview.', + preview_lms_base = settings.MITX_FEATURES.get('PREVIEW_LMS_BASE', + 'preview.' 
+ settings.LMS_BASE) + + preview_lms_link = '//{preview_lms_base}/courses/{org}/{course}/{course_name}/courseware/{section}/{subsection}/{index}'.format( + preview_lms_base=preview_lms_base, lms_base=settings.LMS_BASE, org=course.location.org, course=course.location.course, @@ -325,11 +371,6 @@ def edit_unit(request, location): unit_state = compute_unit_state(item) - try: - published_date = time.strftime('%B %d, %Y', item.metadata.get('published_date')) - except TypeError: - published_date = None - return render_to_response('unit.html', { 'context_course': course, 'active_tab': 'courseware', @@ -340,11 +381,11 @@ def edit_unit(request, location): 'draft_preview_link': preview_lms_link, 'published_preview_link': lms_link, 'subsection': containing_subsection, - 'release_date': get_date_display(datetime.fromtimestamp(time.mktime(containing_subsection.start))) if containing_subsection.start is not None else None, + 'release_date': get_date_display(datetime.fromtimestamp(time.mktime(containing_subsection.lms.start))) if containing_subsection.lms.start is not None else None, 'section': containing_section, 'create_new_unit_template': Location('i4x', 'edx', 'templates', 'vertical', 'Empty'), 'unit_state': unit_state, - 'published_date': published_date, + 'published_date': item.cms.published_date.strftime('%B %d, %Y') if item.cms.published_date is not None else None, }) @@ -409,9 +450,8 @@ def preview_dispatch(request, preview_id, location, dispatch=None): dispatch: The action to execute """ - instance_state, shared_state = load_preview_state(request, preview_id, location) descriptor = modulestore().get_item(location) - instance = load_preview_module(request, preview_id, descriptor, instance_state, shared_state) + instance = load_preview_module(request, preview_id, descriptor) # Let the module handle the AJAX try: ajax_return = instance.handle_ajax(dispatch, request.POST) @@ -422,46 +462,9 @@ def preview_dispatch(request, preview_id, location, dispatch=None): 
log.exception("error processing ajax call") raise - save_preview_state(request, preview_id, location, instance.get_instance_state(), instance.get_shared_state()) return HttpResponse(ajax_return) -def load_preview_state(request, preview_id, location): - """ - Load the state of a preview module from the request - - preview_id (str): An identifier specifying which preview this module is used for - location: The Location of the module to dispatch to - """ - if 'preview_states' not in request.session: - request.session['preview_states'] = defaultdict(dict) - - instance_state = request.session['preview_states'][preview_id, location].get('instance') - shared_state = request.session['preview_states'][preview_id, location].get('shared') - - return instance_state, shared_state - - -def save_preview_state(request, preview_id, location, instance_state, shared_state): - """ - Save the state of a preview module to the request - - preview_id (str): An identifier specifying which preview this module is used for - location: The Location of the module to dispatch to - instance_state: The instance state to save - shared_state: The shared state to save - """ - if 'preview_states' not in request.session: - request.session['preview_states'] = defaultdict(dict) - - # request.session doesn't notice indirect changes; so, must set its dict w/ every change to get - # it to persist: http://www.djangobook.com/en/2.0/chapter14.html - preview_states = request.session['preview_states'] - preview_states[preview_id, location]['instance'] = instance_state - preview_states[preview_id, location]['shared'] = shared_state - request.session['preview_states'] = preview_states # make session mgmt notice the update - - def render_from_lms(template_name, dictionary, context=None, namespace='main'): """ Render a template using the LMS MAKO_TEMPLATES @@ -469,6 +472,33 @@ def render_from_lms(template_name, dictionary, context=None, namespace='main'): return render_to_string(template_name, dictionary, context, 
namespace="lms." + namespace) +class SessionKeyValueStore(KeyValueStore): + def __init__(self, request, model_data): + self._model_data = model_data + self._session = request.session + + def get(self, key): + try: + return self._model_data[key.field_name] + except (KeyError, InvalidScopeError): + return self._session[tuple(key)] + + def set(self, key, value): + try: + self._model_data[key.field_name] = value + except (KeyError, InvalidScopeError): + self._session[tuple(key)] = value + + def delete(self, key): + try: + del self._model_data[key.field_name] + except (KeyError, InvalidScopeError): + del self._session[tuple(key)] + + def has(self, key): + return key in self._model_data or key in self._session + + def preview_module_system(request, preview_id, descriptor): """ Returns a ModuleSystem for the specified descriptor that is specialized for @@ -479,6 +509,14 @@ def preview_module_system(request, preview_id, descriptor): descriptor: An XModuleDescriptor """ + def preview_model_data(descriptor): + return DbModel( + SessionKeyValueStore(request, descriptor._model_data), + descriptor.module_class, + preview_id, + MongoUsage(preview_id, descriptor.location.url()), + ) + return ModuleSystem( ajax_url=reverse('preview_dispatch', args=[preview_id, descriptor.location.url(), '']).rstrip('/'), # TODO (cpennington): Do we want to track how instructors are using the preview problems? 
@@ -489,6 +527,7 @@ def preview_module_system(request, preview_id, descriptor): debug=True, replace_urls=partial(static_replace.replace_static_urls, data_directory=None, course_namespace=descriptor.location), user=request.user, + xblock_model_data=preview_model_data, ) @@ -501,11 +540,11 @@ def get_preview_module(request, preview_id, descriptor): preview_id (str): An identifier specifying which preview this module is used for location: A Location """ - instance_state, shared_state = descriptor.get_sample_state()[0] - return load_preview_module(request, preview_id, descriptor, instance_state, shared_state) + + return load_preview_module(request, preview_id, descriptor) -def load_preview_module(request, preview_id, descriptor, instance_state, shared_state): +def load_preview_module(request, preview_id, descriptor): """ Return a preview XModule instantiated from the supplied descriptor, instance_state, and shared_state @@ -517,12 +556,13 @@ def load_preview_module(request, preview_id, descriptor, instance_state, shared_ """ system = preview_module_system(request, preview_id, descriptor) try: - module = descriptor.xmodule_constructor(system)(instance_state, shared_state) + module = descriptor.xmodule(system) except: + log.debug("Unable to load preview module", exc_info=True) module = ErrorDescriptor.from_descriptor( descriptor, error_msg=exc_info_to_str(sys.exc_info()) - ).xmodule_constructor(system)(None, None) + ).xmodule(system) # cdodge: Special case if module.location.category == 'static_tab': @@ -540,11 +580,9 @@ def load_preview_module(request, preview_id, descriptor, instance_state, shared_ module.get_html = replace_static_urls( module.get_html, - module.metadata.get('data_dir', module.location.course), + getattr(module, 'data_dir', module.location.course), course_namespace=Location([module.location.tag, module.location.org, module.location.course, None, None]) ) - save_preview_state(request, preview_id, descriptor.location.url(), - module.get_instance_state(), 
module.get_shared_state()) return module @@ -558,7 +596,7 @@ def get_module_previews(request, descriptor): """ preview_html = [] for idx, (instance_state, shared_state) in enumerate(descriptor.get_sample_state()): - module = load_preview_module(request, str(idx), descriptor, instance_state, shared_state) + module = load_preview_module(request, str(idx), descriptor) preview_html.append(module.get_html()) return preview_html @@ -605,6 +643,19 @@ def delete_item(request): if item.location.revision is None and item.location.category == 'vertical' and delete_all_versions: modulestore('direct').delete_item(item.location) + # cdodge: we need to remove our parent's pointer to us so that it is no longer dangling + if delete_all_versions: + parent_locs = modulestore('direct').get_parent_locations(item_loc, None) + + for parent_loc in parent_locs: + parent = modulestore('direct').get_item(parent_loc) + item_url = item_loc.url() + if item_url in parent.children: + children = parent.children + children.remove(item_url) + parent.children = children + modulestore('direct').update_children(parent.location, parent.children) + return HttpResponse() @@ -642,7 +693,7 @@ def save_item(request): # update existing metadata with submitted metadata (which can be partial) # IMPORTANT NOTE: if the client passed pack 'null' (None) for a piece of metadata that means 'remove it' - for metadata_key in posted_metadata.keys(): + for metadata_key, value in posted_metadata.items(): # let's strip out any metadata fields from the postback which have been identified as system metadata # and therefore should not be user-editable, so we should accept them back from the client @@ -650,15 +701,15 @@ def save_item(request): del posted_metadata[metadata_key] elif posted_metadata[metadata_key] is None: # remove both from passed in collection as well as the collection read in from the modulestore - if metadata_key in existing_item.metadata: - del existing_item.metadata[metadata_key] + if metadata_key in 
existing_item._model_data: + del existing_item._model_data[metadata_key] del posted_metadata[metadata_key] - - # overlay the new metadata over the modulestore sourced collection to support partial updates - existing_item.metadata.update(posted_metadata) + else: + existing_item._model_data[metadata_key] = value # commit to datastore - store.update_metadata(item_location, existing_item.metadata) + # TODO (cpennington): This really shouldn't have to do this much reaching in to get the metadata + store.update_metadata(item_location, own_metadata(existing_item)) return HttpResponse() @@ -725,22 +776,18 @@ def clone_item(request): new_item = get_modulestore(template).clone_item(template, dest_location) - # TODO: This needs to be deleted when we have proper storage for static content - new_item.metadata['data_dir'] = parent.metadata['data_dir'] - # replace the display name with an optional parameter passed in from the caller if display_name is not None: - new_item.metadata['display_name'] = display_name + new_item.display_name = display_name - get_modulestore(template).update_metadata(new_item.location.url(), new_item.own_metadata) + get_modulestore(template).update_metadata(new_item.location.url(), own_metadata(new_item)) if new_item.location.category not in DETACHED_CATEGORIES: - get_modulestore(parent.location).update_children(parent_location, parent.definition.get('children', []) + [new_item.location.url()]) + get_modulestore(parent.location).update_children(parent_location, parent.children + [new_item.location.url()]) return HttpResponse(json.dumps({'id': dest_location.url()})) -#@login_required -#@ensure_csrf_cookie + def upload_asset(request, org, course, coursename): ''' cdodge: this method allows for POST uploading of files into the course asset library, which will @@ -802,6 +849,7 @@ def upload_asset(request, org, course, coursename): response['asset_url'] = StaticContent.get_url_path_from_location(content.location) return response + ''' This view will return 
all CMS users who are editors for the specified course ''' @@ -834,6 +882,7 @@ def create_json_response(errmsg = None): return resp + ''' This POST-back view will add a user - specified by email - to the list of editors for the specified course @@ -866,6 +915,7 @@ def add_user(request, location): return create_json_response() + ''' This POST-back view will remove a user - specified by email - from the list of editors for the specified course @@ -952,7 +1002,7 @@ def reorder_static_tabs(request): for tab in course.tabs: if tab['type'] == 'static_tab': reordered_tabs.append({'type': 'static_tab', - 'name': tab_items[static_tab_idx].metadata.get('display_name'), + 'name': tab_items[static_tab_idx].display_name, 'url_slug': tab_items[static_tab_idx].location.name}) static_tab_idx += 1 else: @@ -961,7 +1011,7 @@ def reorder_static_tabs(request): # OK, re-assemble the static tabs in the new order course.tabs = reordered_tabs - modulestore('direct').update_metadata(course.location, course.metadata) + modulestore('direct').update_metadata(course.location, own_metadata(course)) return HttpResponse() @@ -1124,14 +1174,18 @@ def get_course_settings(request, org, course, name): raise PermissionDenied() course_module = modulestore().get_item(location) - course_details = CourseDetails.fetch(location) return render_to_response('settings.html', { 'context_course': course_module, - 'course_location' : location, - 'course_details' : json.dumps(course_details, cls=CourseSettingsEncoder) + 'course_location': location, + 'details_url': reverse(course_settings_updates, + kwargs={"org": org, + "course": course, + "name": name, + "section": "details"}) }) + @login_required @ensure_csrf_cookie def course_config_graders_page(request, org, course, name): @@ -1156,6 +1210,29 @@ def course_config_graders_page(request, org, course, name): }) +@login_required +@ensure_csrf_cookie +def course_config_advanced_page(request, org, course, name): + """ + Send models and views as well as html for 
editing the advanced course settings to the client. + + org, course, name: Attributes of the Location for the item to edit + """ + location = ['i4x', org, course, 'course', name] + + # check that logged in user has permissions to this item + if not has_access(request.user, location): + raise PermissionDenied() + + course_module = modulestore().get_item(location) + + return render_to_response('settings_advanced.html', { + 'context_course': course_module, + 'course_location' : location, + 'advanced_dict' : json.dumps(CourseMetadata.fetch(location)), + }) + + @expect_json @login_required @ensure_csrf_cookie @@ -1223,6 +1300,37 @@ def course_grader_updates(request, org, course, name, grader_index=None): mimetype="application/json") +## NB: expect_json failed on ["key", "key2"] and json payload +@login_required +@ensure_csrf_cookie +def course_advanced_updates(request, org, course, name): + """ + restful CRUD operations on metadata. The payload is a json rep of the metadata dicts. For delete, otoh, + the payload is either a key or a list of keys to delete. + + org, course: Attributes of the Location for the item to edit + """ + location = ['i4x', org, course, 'course', name] + + # check that logged in user has permissions to this item + if not has_access(request.user, location): + raise PermissionDenied() + + # NB: we're setting Backbone.emulateHTTP to true on the client so everything comes as a post!!! 
+ if request.method == 'POST' and 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META: + real_method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] + else: + real_method = request.method + + if real_method == 'GET': + return HttpResponse(json.dumps(CourseMetadata.fetch(location)), mimetype="application/json") + elif real_method == 'DELETE': + return HttpResponse(json.dumps(CourseMetadata.delete_key(location, json.loads(request.body))), mimetype="application/json") + elif real_method == 'POST' or real_method == 'PUT': + # NOTE: request.POST is messed up because expect_json cloned_request.POST.copy() is creating a defective entry w/ the whole payload as the key + return HttpResponse(json.dumps(CourseMetadata.update_from_json(location, json.loads(request.body))), mimetype="application/json") + + @login_required @ensure_csrf_cookie def asset_index(request, org, course, name): @@ -1324,13 +1432,10 @@ def create_new_course(request): new_course = modulestore('direct').clone_item(template, dest_location) if display_name is not None: - new_course.metadata['display_name'] = display_name - - # we need a 'data_dir' for legacy reasons - new_course.metadata['data_dir'] = uuid4().hex + new_course.display_name = display_name # set a default start date to now - new_course.metadata['start'] = stringify_time(time.gmtime()) + new_course.start = time.gmtime() initialize_course_tabs(new_course) @@ -1349,12 +1454,12 @@ def initialize_course_tabs(course): # This logic is repeated in xmodule/modulestore/tests/factories.py # so if you change anything here, you need to also change it there. 
course.tabs = [{"type": "courseware"}, - {"type": "course_info", "name": "Course Info"}, + {"type": "course_info", "name": "Course Info"}, {"type": "discussion", "name": "Discussion"}, {"type": "wiki", "name": "Wiki"}, {"type": "progress", "name": "Progress"}] - modulestore('direct').update_metadata(course.location.url(), course.own_metadata) + modulestore('direct').update_metadata(course.location.url(), own_metadata(course)) @ensure_csrf_cookie @@ -1493,3 +1598,11 @@ def event(request): console logs don't get distracted :-) ''' return HttpResponse(True) + + +def render_404(request): + return HttpResponseNotFound(render_to_string('404.html', {})) + + +def render_500(request): + return HttpResponseServerError(render_to_string('500.html', {})) diff --git a/cms/djangoapps/models/settings/course_details.py b/cms/djangoapps/models/settings/course_details.py index b27f4e3804..d3cd5fe164 100644 --- a/cms/djangoapps/models/settings/course_details.py +++ b/cms/djangoapps/models/settings/course_details.py @@ -1,13 +1,14 @@ from xmodule.modulestore.django import modulestore from xmodule.modulestore import Location from xmodule.modulestore.exceptions import ItemNotFoundError +from xmodule.modulestore.inheritance import own_metadata import json from json.encoder import JSONEncoder import time from contentstore.utils import get_modulestore from util.converters import jsdate_to_time, time_to_date -from cms.djangoapps.models.settings import course_grading -from cms.djangoapps.contentstore.utils import update_item +from models.settings import course_grading +from contentstore.utils import update_item import re import logging @@ -43,25 +44,25 @@ class CourseDetails(object): temploc = course_location._replace(category='about', name='syllabus') try: - course.syllabus = get_modulestore(temploc).get_item(temploc).definition['data'] + course.syllabus = get_modulestore(temploc).get_item(temploc).data except ItemNotFoundError: pass temploc = temploc._replace(name='overview') try: - 
course.overview = get_modulestore(temploc).get_item(temploc).definition['data'] + course.overview = get_modulestore(temploc).get_item(temploc).data except ItemNotFoundError: pass temploc = temploc._replace(name='effort') try: - course.effort = get_modulestore(temploc).get_item(temploc).definition['data'] + course.effort = get_modulestore(temploc).get_item(temploc).data except ItemNotFoundError: pass temploc = temploc._replace(name='video') try: - raw_video = get_modulestore(temploc).get_item(temploc).definition['data'] + raw_video = get_modulestore(temploc).get_item(temploc).data course.intro_video = CourseDetails.parse_video_tag(raw_video) except ItemNotFoundError: pass @@ -116,7 +117,7 @@ class CourseDetails(object): descriptor.enrollment_end = converted if dirty: - get_modulestore(course_location).update_metadata(course_location, descriptor.metadata) + get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor)) # NOTE: below auto writes to the db w/o verifying that any of the fields actually changed # to make faster, could compare against db or could have client send over a list of which fields changed. 
@@ -133,7 +134,6 @@ class CourseDetails(object): recomposed_video_tag = CourseDetails.recompose_video_tag(jsondict['intro_video']) update_item(temploc, recomposed_video_tag) - # Could just generate and return a course obj w/o doing any db reads, but I put the reads in as a means to confirm # it persisted correctly return CourseDetails.fetch(course_location) diff --git a/cms/djangoapps/models/settings/course_grading.py b/cms/djangoapps/models/settings/course_grading.py index 3d0b8f78af..b20fb71f66 100644 --- a/cms/djangoapps/models/settings/course_grading.py +++ b/cms/djangoapps/models/settings/course_grading.py @@ -2,6 +2,7 @@ from xmodule.modulestore import Location from contentstore.utils import get_modulestore import re from util import converters +from datetime import timedelta class CourseGradingModel(object): @@ -91,7 +92,7 @@ class CourseGradingModel(object): descriptor.raw_grader = graders_parsed descriptor.grade_cutoffs = jsondict['grade_cutoffs'] - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) CourseGradingModel.update_grace_period_from_json(course_location, jsondict['grace_period']) return CourseGradingModel.fetch(course_location) @@ -119,7 +120,7 @@ class CourseGradingModel(object): else: descriptor.raw_grader.append(grader) - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index]) @@ -134,7 +135,7 @@ class CourseGradingModel(object): descriptor = get_modulestore(course_location).get_item(course_location) descriptor.grade_cutoffs = cutoffs - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, 
descriptor._model_data._kvs._data) return cutoffs @@ -156,11 +157,11 @@ class CourseGradingModel(object): graceperiodjson = graceperiodjson['grace_period'] # lms requires these to be in a fixed order - grace_rep = "{0[hours]:d} hours {0[minutes]:d} minutes {0[seconds]:d} seconds".format(graceperiodjson) + grace_timedelta = timedelta(**graceperiodjson) descriptor = get_modulestore(course_location).get_item(course_location) - descriptor.metadata['graceperiod'] = grace_rep - get_modulestore(course_location).update_metadata(course_location, descriptor.metadata) + descriptor.lms.graceperiod = grace_timedelta + get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata) @staticmethod def delete_grader(course_location, index): @@ -176,7 +177,7 @@ class CourseGradingModel(object): del descriptor.raw_grader[index] # force propagation to definition descriptor.raw_grader = descriptor.raw_grader - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) # NOTE cannot delete cutoffs. 
May be useful to reset @staticmethod @@ -189,7 +190,7 @@ class CourseGradingModel(object): descriptor = get_modulestore(course_location).get_item(course_location) descriptor.grade_cutoffs = descriptor.defaut_grading_policy['GRADE_CUTOFFS'] - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) return descriptor.grade_cutoffs @@ -202,8 +203,8 @@ class CourseGradingModel(object): course_location = Location(course_location) descriptor = get_modulestore(course_location).get_item(course_location) - if 'graceperiod' in descriptor.metadata: del descriptor.metadata['graceperiod'] - get_modulestore(course_location).update_metadata(course_location, descriptor.metadata) + del descriptor.lms.graceperiod + get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata) @staticmethod def get_section_grader_type(location): @@ -212,7 +213,7 @@ class CourseGradingModel(object): descriptor = get_modulestore(location).get_item(location) return { - "graderType": descriptor.metadata.get('format', u"Not Graded"), + "graderType": descriptor.lms.format if descriptor.lms.format is not None else 'Not Graded', "location": location, "id": 99 # just an arbitrary value to } @@ -224,23 +225,41 @@ class CourseGradingModel(object): descriptor = get_modulestore(location).get_item(location) if 'graderType' in jsondict and jsondict['graderType'] != u"Not Graded": - descriptor.metadata['format'] = jsondict.get('graderType') - descriptor.metadata['graded'] = True + descriptor.lms.format = jsondict.get('graderType') + descriptor.lms.graded = True else: - if 'format' in descriptor.metadata: del descriptor.metadata['format'] - if 'graded' in descriptor.metadata: del descriptor.metadata['graded'] + del descriptor.lms.format + del descriptor.lms.graded - get_modulestore(location).update_metadata(location, descriptor.metadata) + 
get_modulestore(location).update_metadata(location, descriptor._model_data._kvs._metadata) @staticmethod def convert_set_grace_period(descriptor): - # 5 hours 59 minutes 59 seconds => { hours: 5, minutes : 59, seconds : 59} - rawgrace = descriptor.metadata.get('graceperiod', None) + # 5 hours 59 minutes 59 seconds => converted to iso format + rawgrace = descriptor.lms.graceperiod if rawgrace: - parsedgrace = {str(key): int(val) for (val, key) in re.findall('\s*(\d+)\s*(\w+)', rawgrace)} - return parsedgrace - else: return None + hours_from_days = rawgrace.days*24 + seconds = rawgrace.seconds + hours_from_seconds = int(seconds / 3600) + hours = hours_from_days + hours_from_seconds + seconds -= hours_from_seconds * 3600 + minutes = int(seconds / 60) + seconds -= minutes * 60 + + graceperiod = {'hours': 0, 'minutes': 0, 'seconds': 0} + if hours > 0: + graceperiod['hours'] = hours + + if minutes > 0: + graceperiod['minutes'] = minutes + + if seconds > 0: + graceperiod['seconds'] = seconds + + return graceperiod + else: + return None @staticmethod def parse_grader(json_grader): diff --git a/cms/djangoapps/models/settings/course_metadata.py b/cms/djangoapps/models/settings/course_metadata.py new file mode 100644 index 0000000000..ed11a6d7a4 --- /dev/null +++ b/cms/djangoapps/models/settings/course_metadata.py @@ -0,0 +1,81 @@ +from xmodule.modulestore import Location +from contentstore.utils import get_modulestore +from xmodule.x_module import XModuleDescriptor +from xmodule.modulestore.inheritance import own_metadata +from xblock.core import Scope + + +class CourseMetadata(object): + ''' + For CRUD operations on metadata fields which do not have specific editors on the other pages including any user generated ones. + The objects have no predefined attrs but instead are obj encodings of the editable metadata. 
+ ''' + FILTERED_LIST = XModuleDescriptor.system_metadata_fields + ['start', 'end', 'enrollment_start', 'enrollment_end', 'tabs', 'graceperiod'] + + @classmethod + def fetch(cls, course_location): + """ + Fetch the key:value editable course details for the given course from persistence and return a CourseMetadata model. + """ + if not isinstance(course_location, Location): + course_location = Location(course_location) + + course = {} + + descriptor = get_modulestore(course_location).get_item(course_location) + + for field in descriptor.fields + descriptor.lms.fields: + if field.scope != Scope.settings: + continue + + if field.name not in cls.FILTERED_LIST: + course[field.name] = field.read_from(descriptor) + + return course + + @classmethod + def update_from_json(cls, course_location, jsondict): + """ + Decode the json into CourseMetadata and save any changed attrs to the db. + + Ensures none of the fields are in the blacklist. + """ + descriptor = get_modulestore(course_location).get_item(course_location) + + dirty = False + + for k, v in jsondict.iteritems(): + # should it be an error if one of the filtered list items is in the payload? + if k in cls.FILTERED_LIST: + continue + + if hasattr(descriptor, k) and getattr(descriptor, k) != v: + dirty = True + setattr(descriptor, k, v) + elif hasattr(descriptor.lms, k) and getattr(descriptor.lms, k) != k: + dirty = True + setattr(descriptor.lms, k, v) + + if dirty: + get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor)) + + # Could just generate and return a course obj w/o doing any db reads, but I put the reads in as a means to confirm + # it persisted correctly + return cls.fetch(course_location) + + @classmethod + def delete_key(cls, course_location, payload): + ''' + Remove the given metadata key(s) from the course. payload can be a single key or [key..] 
+ ''' + descriptor = get_modulestore(course_location).get_item(course_location) + + for key in payload['deleteKeys']: + if hasattr(descriptor, key): + delattr(descriptor, key) + elif hasattr(descriptor.lms, key): + delattr(descriptor.lms, key) + + get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor)) + + return cls.fetch(course_location) diff --git a/cms/envs/aws.py b/cms/envs/aws.py index a147f84531..be7816d21f 100644 --- a/cms/envs/aws.py +++ b/cms/envs/aws.py @@ -62,3 +62,6 @@ AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"] DATABASES = AUTH_TOKENS['DATABASES'] MODULESTORE = AUTH_TOKENS['MODULESTORE'] CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE'] + +# Datadog for events! +DATADOG_API = AUTH_TOKENS.get("DATADOG_API") \ No newline at end of file diff --git a/cms/envs/common.py b/cms/envs/common.py index 281dd97f20..a83f61d8f9 100644 --- a/cms/envs/common.py +++ b/cms/envs/common.py @@ -20,7 +20,6 @@ Longer TODO: """ import sys -import tempfile import os.path import os import lms.envs.common @@ -59,7 +58,8 @@ sys.path.append(COMMON_ROOT / 'lib') ############################# WEB CONFIGURATION ############################# # This is where we stick our compiled template files. 
-MAKO_MODULE_DIR = tempfile.mkdtemp('mako') +from tempdir import mkdtemp_clean +MAKO_MODULE_DIR = mkdtemp_clean('mako') MAKO_TEMPLATES = {} MAKO_TEMPLATES['main'] = [ PROJECT_ROOT / 'templates', @@ -172,6 +172,9 @@ LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identi USE_I18N = True USE_L10N = True +# Tracking +TRACK_MAX_EVENT = 10000 + # Messages MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' @@ -275,6 +278,10 @@ INSTALLED_APPS = ( 'auth', 'student', # misleading name due to sharing with lms 'course_groups', # not used in cms (yet), but tests run + + # tracking + 'track', + # For asset pipelining 'pipeline', 'staticfiles', diff --git a/cms/envs/dev.py b/cms/envs/dev.py index 3dee93a398..f70f22512e 100644 --- a/cms/envs/dev.py +++ b/cms/envs/dev.py @@ -4,9 +4,6 @@ This config file runs the simplest dev environment""" from .common import * from logsettings import get_logger_config -import logging -import sys - DEBUG = True TEMPLATE_DEBUG = DEBUG LOGGING = get_logger_config(ENV_ROOT / "log", @@ -99,6 +96,13 @@ CACHES = { 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', + }, + + 'mongo_metadata_inheritance': { + 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', + 'LOCATION': '/var/tmp/mongo_metadata_inheritance', + 'TIMEOUT': 300, + 'KEY_FUNCTION': 'util.memcache.safe_key', } } @@ -107,3 +111,36 @@ CACHE_TIMEOUT = 0 # Dummy secret key for dev SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd' + +################################ DEBUG TOOLBAR ################################# +INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo') +MIDDLEWARE_CLASSES += ('django_comment_client.utils.QueryCountDebugMiddleware', + 'debug_toolbar.middleware.DebugToolbarMiddleware',) +INTERNAL_IPS = ('127.0.0.1',) + +DEBUG_TOOLBAR_PANELS = ( + 'debug_toolbar.panels.version.VersionDebugPanel', + 'debug_toolbar.panels.timer.TimerDebugPanel', + 
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel', + 'debug_toolbar.panels.headers.HeaderDebugPanel', + 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel', + 'debug_toolbar.panels.sql.SQLDebugPanel', + 'debug_toolbar.panels.signals.SignalDebugPanel', + 'debug_toolbar.panels.logger.LoggingPanel', +# This is breaking Mongo updates-- Christina is investigating. +# 'debug_toolbar_mongo.panel.MongoDebugPanel', + + # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and + # Django=1.3.1/1.4 where requests to views get duplicated (your method gets + # hit twice). So you can uncomment when you need to diagnose performance + # problems, but you shouldn't leave it on. + # 'debug_toolbar.panels.profiling.ProfilingDebugPanel', + ) + +DEBUG_TOOLBAR_CONFIG = { + 'INTERCEPT_REDIRECTS': False +} + +# To see stacktraces for MongoDB queries, set this to True. +# Stacktraces slow down page loads drastically (for pages with lots of queries). +# DEBUG_TOOLBAR_MONGO_STACKTRACES = False diff --git a/cms/envs/test.py b/cms/envs/test.py index 7f39e6818b..d7992cb471 100644 --- a/cms/envs/test.py +++ b/cms/envs/test.py @@ -27,6 +27,9 @@ STATIC_ROOT = TEST_ROOT / "staticfiles" GITHUB_REPO_ROOT = TEST_ROOT / "data" COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data" +# Makes the tests run much faster... 
+SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead + # TODO (cpennington): We need to figure out how envs/test.py can inject things into common.py so that we don't have to repeat this sort of thing STATICFILES_DIRS = [ COMMON_ROOT / "static", @@ -95,6 +98,13 @@ CACHES = { 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', + }, + + 'mongo_metadata_inheritance': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'LOCATION': '/var/tmp/mongo_metadata_inheritance', + 'TIMEOUT': 300, + 'KEY_FUNCTION': 'util.memcache.safe_key', } } diff --git a/cms/one_time_startup.py b/cms/one_time_startup.py new file mode 100644 index 0000000000..38a2fef847 --- /dev/null +++ b/cms/one_time_startup.py @@ -0,0 +1,14 @@ +from dogapi import dog_http_api, dog_stats_api +from django.conf import settings +from xmodule.modulestore.django import modulestore + +from django.core.cache import get_cache, InvalidCacheBackendError + +cache = get_cache('mongo_metadata_inheritance') +for store_name in settings.MODULESTORE: + store = modulestore(store_name) + store.metadata_inheritance_cache = cache + +if hasattr(settings, 'DATADOG_API'): + dog_http_api.api_key = settings.DATADOG_API + dog_stats_api.start(api_key=settings.DATADOG_API, statsd=True) diff --git a/cms/static/client_templates/advanced_entry.html b/cms/static/client_templates/advanced_entry.html new file mode 100644 index 0000000000..6be22e2116 --- /dev/null +++ b/cms/static/client_templates/advanced_entry.html @@ -0,0 +1,11 @@ +
              1. +
                + + +
                + +
                + + +
                +
              2. \ No newline at end of file diff --git a/cms/static/coffee/files.json b/cms/static/coffee/files.json index 2249813b04..e7a66b5bc0 100644 --- a/cms/static/coffee/files.json +++ b/cms/static/coffee/files.json @@ -1,12 +1,12 @@ { - "js_files": [ - "/static/js/vendor/RequireJS.js", - "/static/js/vendor/jquery.min.js", - "/static/js/vendor/jquery-ui.min.js", - "/static/js/vendor/jquery.ui.draggable.js", - "/static/js/vendor/jquery.cookie.js", - "/static/js/vendor/json2.js", - "/static/js/vendor/underscore-min.js", - "/static/js/vendor/backbone-min.js" + "static_files": [ + "js/vendor/RequireJS.js", + "js/vendor/jquery.min.js", + "js/vendor/jquery-ui.min.js", + "js/vendor/jquery.ui.draggable.js", + "js/vendor/jquery.cookie.js", + "js/vendor/json2.js", + "js/vendor/underscore-min.js", + "js/vendor/backbone-min.js" ] } diff --git a/cms/static/coffee/src/views/tabs.coffee b/cms/static/coffee/src/views/tabs.coffee index 5a826c1794..9fbe4e5789 100644 --- a/cms/static/coffee/src/views/tabs.coffee +++ b/cms/static/coffee/src/views/tabs.coffee @@ -1,6 +1,4 @@ class CMS.Views.TabsEdit extends Backbone.View - events: - 'click .new-tab': 'addNewTab' initialize: => @$('.component').each((idx, element) => @@ -13,6 +11,7 @@ class CMS.Views.TabsEdit extends Backbone.View ) ) + @options.mast.find('.new-tab').on('click', @addNewTab) @$('.components').sortable( handle: '.drag-handle' update: @tabMoved diff --git a/cms/static/img/large-advanced-icon.png b/cms/static/img/large-advanced-icon.png new file mode 100644 index 0000000000..c6a19ea5a9 Binary files /dev/null and b/cms/static/img/large-advanced-icon.png differ diff --git a/cms/static/img/large-annotations-icon.png b/cms/static/img/large-annotations-icon.png new file mode 100644 index 0000000000..249193521f Binary files /dev/null and b/cms/static/img/large-annotations-icon.png differ diff --git a/cms/static/img/large-openended-icon.png b/cms/static/img/large-openended-icon.png new file mode 100644 index 
0000000000..4d31815413 Binary files /dev/null and b/cms/static/img/large-openended-icon.png differ diff --git a/cms/static/img/preview-lms-staticpages.png b/cms/static/img/preview-lms-staticpages.png new file mode 100644 index 0000000000..05a62f7c7f Binary files /dev/null and b/cms/static/img/preview-lms-staticpages.png differ diff --git a/cms/static/js/base.js b/cms/static/js/base.js index f9a3f9e80d..d8b32cb0e8 100644 --- a/cms/static/js/base.js +++ b/cms/static/js/base.js @@ -43,6 +43,12 @@ $(document).ready(function () { $('body').addClass('js'); + // lean/simple modal + $('a[rel*=modal]').leanModal({overlay : 0.80, closeButton: '.action-modal-close' }); + $('a.action-modal-close').click(function(e){ + (e).preventDefault(); + }); + // nav - dropdown related $body.click(function (e) { $('.nav-dropdown .nav-item .wrapper-nav-sub').removeClass('is-shown'); @@ -638,7 +644,7 @@ function addNewCourse(e) { $(e.target).hide(); var $newCourse = $($('#new-course-template').html()); var $cancelButton = $newCourse.find('.new-course-cancel'); - $('.new-course-button').after($newCourse); + $('.inner-wrapper').prepend($newCourse); $newCourse.find('.new-course-name').focus().select(); $newCourse.find('form').bind('submit', saveNewCourse); $cancelButton.bind('click', cancelNewCourse); @@ -822,4 +828,4 @@ function saveSetSectionScheduleDate(e) { hideModal(); }); -} +} \ No newline at end of file diff --git a/cms/static/js/models/settings/advanced.js b/cms/static/js/models/settings/advanced.js new file mode 100644 index 0000000000..adc259239d --- /dev/null +++ b/cms/static/js/models/settings/advanced.js @@ -0,0 +1,50 @@ +if (!CMS.Models['Settings']) CMS.Models.Settings = {}; + +CMS.Models.Settings.Advanced = Backbone.Model.extend({ + + defaults: { + // the properties are whatever the user types in (in addition to whatever comes originally from the server) + }, + // which keys to send as the deleted keys on next save + deleteKeys : [], + + validate: function (attrs) { + // Keys 
can no longer be edited. We are currently not validating values. + }, + + save : function (attrs, options) { + // wraps the save call w/ the deletion of the removed keys after we know the saved ones worked + options = options ? _.clone(options) : {}; + // add saveSuccess to the success + var success = options.success; + options.success = function(model, resp, options) { + model.afterSave(model); + if (success) success(model, resp, options); + }; + Backbone.Model.prototype.save.call(this, attrs, options); + }, + + afterSave : function(self) { + // remove deleted attrs + if (!_.isEmpty(self.deleteKeys)) { + // remove the to be deleted keys from the returned model + _.each(self.deleteKeys, function(key) { self.unset(key); }); + // not able to do via backbone since we're not destroying the model + $.ajax({ + url : self.url, + // json to and fro + contentType : "application/json", + dataType : "json", + // delete + type : 'DELETE', + // data + data : JSON.stringify({ deleteKeys : self.deleteKeys}) + }) + .fail(function(hdr, status, error) { CMS.ServerError(self, "Deleting keys:" + status); }) + .done(function(data, status, error) { + // clear deleteKeys on success + self.deleteKeys = []; + }); + } + } +}); diff --git a/cms/static/js/models/settings/course_details.js b/cms/static/js/models/settings/course_details.js index 168cb960be..148df7a325 100644 --- a/cms/static/js/models/settings/course_details.js +++ b/cms/static/js/models/settings/course_details.js @@ -59,19 +59,14 @@ CMS.Models.Settings.CourseDetails = Backbone.Model.extend({ // NOTE don't return empty errors as that will be interpreted as an error state }, - url: function() { - var location = this.get('location'); - return '/' + location.get('org') + "/" + location.get('course') + '/settings-details/' + location.get('name') + '/section/details'; - }, - _videokey_illegal_chars : /[^a-zA-Z0-9_-]/g, save_videosource: function(newsource) { // newsource either is

              3. + - +
                - -
                + +
                - +
                Set a due date @@ -80,9 +80,9 @@

                <% # due date uses it own formatting for stringifying the date. As with capa_module.py, there's a utility module available for us to use - due_date = dateutil.parser.parse(subsection.metadata.get('due')) if 'due' in subsection.metadata else None + due_date = dateutil.parser.parse(subsection.lms.due) if subsection.lms.due else None %> - + Remove due date

                @@ -110,7 +110,7 @@ - + \ No newline at end of file diff --git a/cms/templates/import.html b/cms/templates/import.html index ab06f17787..add31597cd 100644 --- a/cms/templates/import.html +++ b/cms/templates/import.html @@ -6,21 +6,29 @@ <%block name="bodyclass">is-signedin course tools import <%block name="content"> +
                +
                +
                + Tools +

                Course Import

                +
                +
                +
                +
                -

                Please read the documentation before attempting an import!

                Importing a new course will delete all content currently associated with your course and replace it with the contents of the uploaded file.

                -

                File uploads must be gzipped tar files (.tar.gz or .tgz) containing, at a minimum, a course.xml file.

                +

                File uploads must be gzipped tar files (.tar.gz) containing, at a minimum, a course.xml file.

                Please note that if your course has any problems with auto-generated url_name nodes, re-importing your course could cause the loss of student data associated with those problems.

                Course to import:

                - Choose File + Choose File

                change

                @@ -37,13 +45,13 @@ <%block name="jsextra"> \ No newline at end of file diff --git a/cms/templates/index.html b/cms/templates/index.html index ed50b8ccb3..fdb46612a0 100644 --- a/cms/templates/index.html +++ b/cms/templates/index.html @@ -33,35 +33,57 @@ <%block name="content"> -
                -
                -

                My Courses

                -
                - % if user.is_active: - % if not disable_course_creation: - New Course - %endif - - % else: -
                -

                - In order to start authoring courses using edX studio, please click on the activation link in your email. -

                -
                - % endif -
                -
                -
                - +
                +
                +
                +

                My Courses

                +
                + + % if user.is_active: + + % endif +
                +
                + +
                +
                +
                +

                Welcome, ${ user.username }. Here are all of the courses you are currently authoring in Studio:

                +
                +
                +
                + +
                +
                +
                + % if user.is_active: + + % else: +
                +

                + In order to start authoring courses using edX Studio, please click on the activation link in your email. +

                +
                + % endif +
                +
                +
                + \ No newline at end of file diff --git a/cms/templates/manage_users.html b/cms/templates/manage_users.html index b424f030ca..722e756203 100644 --- a/cms/templates/manage_users.html +++ b/cms/templates/manage_users.html @@ -4,15 +4,28 @@ <%block name="content"> +
                +
                +
                + Course Settings +

                Course Team

                +
                + + +
                +
                +
                -
                - %if allow_actions: - - New User - - %endif -

                The following list of users have been designated as course staff. This means that these users will have permissions to modify course content. You may add additional course staff below, if you are the course instructor. Please note that they must have already registered and verified their account.

                diff --git a/cms/templates/new_item.html b/cms/templates/new_item.html index 60da39fd2a..45cb157845 100644 --- a/cms/templates/new_item.html +++ b/cms/templates/new_item.html @@ -8,7 +8,7 @@
                ${module_type}
                % for template in module_templates: - ${template.display_name} + ${template.display_name_with_default} % endfor
                diff --git a/cms/templates/overview.html b/cms/templates/overview.html index ca53c456a2..904f654717 100644 --- a/cms/templates/overview.html +++ b/cms/templates/overview.html @@ -32,7 +32,7 @@ window.graderTypes.course_location = new CMS.Models.Location('${parent_location}'); window.graderTypes.reset(${course_graders|n}); } - + $(".gradable-status").each(function(index, ele) { var gradeView = new CMS.Views.OverviewAssignmentGrader({ el : ele, @@ -40,7 +40,7 @@ }); }); }); - + @@ -120,13 +120,33 @@
                +
                +
                +
                + Course Content +

                Course Outline

                +
                + + +
                +
                +
                - -
                +
                % for section in sections:
                @@ -134,16 +154,16 @@

                - ${section.display_name} + ${section.display_name_with_default} - +

                +
                - + - -
                + +
                - +
                - +
                diff --git a/cms/templates/settings.html b/cms/templates/settings.html index 3b10f76afd..b26a17125b 100644 --- a/cms/templates/settings.html +++ b/cms/templates/settings.html @@ -23,36 +23,42 @@ from contentstore import utils <%block name="content"> -
                -
                -
                +
                +
                +
                Settings

                Schedule & Details

                -
                - - +
                +
                +
                +
                +
                @@ -64,17 +70,17 @@ from contentstore import utils
                1. - +
                2. - +
                3. - +
                These are used in your course URL, and cannot be changed @@ -205,7 +211,7 @@ from contentstore import utils
                diff --git a/cms/templates/widgets/metadata-edit.html b/cms/templates/widgets/metadata-edit.html index 590baec3c9..51fe400f88 100644 --- a/cms/templates/widgets/metadata-edit.html +++ b/cms/templates/widgets/metadata-edit.html @@ -1,18 +1,17 @@ -% if metadata: <% import hashlib hlskey = hashlib.md5(module.location.url()).hexdigest() %> -% endif diff --git a/cms/templates/widgets/navigation.html b/cms/templates/widgets/navigation.html deleted file mode 100644 index f7e79bceb3..0000000000 --- a/cms/templates/widgets/navigation.html +++ /dev/null @@ -1,101 +0,0 @@ -
                -
                - - -
                  -
                • -

                  Sort:

                  - -
                • - -
                • -

                  Filter:

                  - - More -
                • -
                • - Hide goals -
                • - -
                -
                - -
                  - % for week in weeks: -
                1. -
                  -

                  ${week.url_name}

                  -
                    - % if 'goals' in week.metadata: - % for goal in week.metadata['goals']: -
                  • ${goal}
                  • - % endfor - % else: -
                  • Please create a learning goal for this week
                  • - % endif -
                  -
                  - -
                    - % for module in week.get_children(): -
                  • - - ${module.display_name} -
                  • - % endfor - <%include file="module-dropdown.html"/> -
                  -
                2. - %endfor -
                - -
                - + Add New Section - - -
                -
                - diff --git a/cms/templates/widgets/sequence-edit.html b/cms/templates/widgets/sequence-edit.html index e9d796784d..c70f2568fa 100644 --- a/cms/templates/widgets/sequence-edit.html +++ b/cms/templates/widgets/sequence-edit.html @@ -40,7 +40,7 @@ ${child.display_name} + data-preview-type="${child.module_class.js_module_name}">${child.display_name_with_default} handle %endfor diff --git a/cms/templates/widgets/source-edit.html b/cms/templates/widgets/source-edit.html index f0922831e1..c7460c9cf7 100644 --- a/cms/templates/widgets/source-edit.html +++ b/cms/templates/widgets/source-edit.html @@ -10,7 +10,7 @@

                High Level Source Editing

                -
                +
                @@ -18,6 +18,9 @@ + + +
                @@ -25,88 +28,148 @@ - diff --git a/cms/templates/widgets/units.html b/cms/templates/widgets/units.html index 8e23b05bf8..5ac05e79eb 100644 --- a/cms/templates/widgets/units.html +++ b/cms/templates/widgets/units.html @@ -22,7 +22,7 @@ This def will enumerate through a passed in subsection and list all of the units
                - ${unit.display_name} + ${unit.display_name_with_default} % if actions:
                @@ -39,7 +39,7 @@ This def will enumerate through a passed in subsection and list all of the units
              - + diff --git a/cms/urls.py b/cms/urls.py index 35b2707241..69ce4a540d 100644 --- a/cms/urls.py +++ b/cms/urls.py @@ -1,5 +1,6 @@ from django.conf import settings from django.conf.urls import patterns, include, url +from . import one_time_startup # Uncomment the next two lines to enable the admin: # from django.contrib import admin @@ -47,6 +48,10 @@ urlpatterns = ('', url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)$', 'contentstore.views.course_config_graders_page', name='course_settings'), url(r'^(?P[^/]+)/(?P[^/]+)/settings-details/(?P[^/]+)/section/(?P
              [^/]+).*$', 'contentstore.views.course_settings_updates', name='course_settings'), url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)/(?P.*)$', 'contentstore.views.course_grader_updates', name='course_settings'), + # This is the URL to initially render the course advanced settings. + url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)$', 'contentstore.views.course_config_advanced_page', name='course_advanced_settings'), + # This is the URL used by BackBone for updating and re-fetching the model. + url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)/update.*$', 'contentstore.views.course_advanced_updates', name='course_advanced_settings_updates'), url(r'^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/gradeas.*$', 'contentstore.views.assignment_type_update', name='assignment_type_update'), @@ -99,3 +104,9 @@ if settings.ENABLE_JASMINE: urlpatterns = urlpatterns + (url(r'^_jasmine/', include('django_jasmine.urls')),) urlpatterns = patterns(*urlpatterns) + +#Custom error pages +handler404 = 'contentstore.views.render_404' +handler500 = 'contentstore.views.render_500' + + diff --git a/cms/xmodule_namespace.py b/cms/xmodule_namespace.py new file mode 100644 index 0000000000..cad3110574 --- /dev/null +++ b/cms/xmodule_namespace.py @@ -0,0 +1,46 @@ +""" +Namespace defining common fields used by Studio for all blocks +""" + +import datetime + +from xblock.core import Namespace, Boolean, Scope, ModelType, String + + +class StringyBoolean(Boolean): + """ + Reads strings from JSON as booleans. + + If the string is 'true' (case insensitive), then return True, + otherwise False. 
+ + JSON values that aren't strings are returned as is + """ + def from_json(self, value): + if isinstance(value, basestring): + return value.lower() == 'true' + return value + + +class DateTuple(ModelType): + """ + ModelType that stores datetime objects as time tuples + """ + def from_json(self, value): + return datetime.datetime(*value[0:6]) + + def to_json(self, value): + if value is None: + return None + + return list(value.timetuple()) + + +class CmsNamespace(Namespace): + """ + Namespace with fields common to all blocks in Studio + """ + is_draft = Boolean(help="Whether this module is a draft", default=False, scope=Scope.settings) + published_date = DateTuple(help="Date when the module was published", scope=Scope.settings) + published_by = String(help="Id of the user who published this module", scope=Scope.settings) + empty = StringyBoolean(help="Whether this is an empty template", scope=Scope.settings, default=False) diff --git a/common/djangoapps/course_groups/cohorts.py b/common/djangoapps/course_groups/cohorts.py index 155f82e0c7..c362ed4e89 100644 --- a/common/djangoapps/course_groups/cohorts.py +++ b/common/djangoapps/course_groups/cohorts.py @@ -6,6 +6,7 @@ forums, and to the cohort admin views. 
from django.contrib.auth.models import User from django.http import Http404 import logging +import random from courseware import courses from student.models import get_user_by_username_or_email @@ -65,6 +66,22 @@ def is_commentable_cohorted(course_id, commentable_id): return ans +def get_cohorted_commentables(course_id): + """ + Given a course_id return a list of strings representing cohorted commentables + """ + + course = courses.get_course_by_id(course_id) + + if not course.is_cohorted: + # this is the easy case :) + ans = [] + else: + ans = course.cohorted_discussions + + return ans + + def get_cohort(user, course_id): """ Given a django User and a course_id, return the user's cohort in that @@ -96,9 +113,38 @@ def get_cohort(user, course_id): group_type=CourseUserGroup.COHORT, users__id=user.id) except CourseUserGroup.DoesNotExist: - # TODO: add auto-cohorting logic here once we know what that will be. + # Didn't find the group. We'll go on to create one if needed. + pass + + if not course.auto_cohort: return None + choices = course.auto_cohort_groups + n = len(choices) + if n == 0: + # Nowhere to put user + log.warning("Course %s is auto-cohorted, but there are no" + " auto_cohort_groups specified", + course_id) + return None + + # Put user in a random group, creating it if needed + choice = random.randrange(0, n) + group_name = choices[choice] + + # Victor: we are seeing very strange behavior on prod, where almost all users + # end up in the same group. Log at INFO to try to figure out what's going on. + log.info("DEBUG: adding user {0} to cohort {1}. 
choice={2}".format( + user, group_name,choice)) + + group, created = CourseUserGroup.objects.get_or_create( + course_id=course_id, + group_type=CourseUserGroup.COHORT, + name=group_name) + + user.course_groups.add(group) + return group + def get_course_cohorts(course_id): """ diff --git a/common/djangoapps/course_groups/tests/tests.py b/common/djangoapps/course_groups/tests/tests.py index b3ad928b39..94d52ff6df 100644 --- a/common/djangoapps/course_groups/tests/tests.py +++ b/common/djangoapps/course_groups/tests/tests.py @@ -6,7 +6,7 @@ from django.test.utils import override_settings from course_groups.models import CourseUserGroup from course_groups.cohorts import (get_cohort, get_course_cohorts, - is_commentable_cohorted) + is_commentable_cohorted, get_cohort_by_name) from xmodule.modulestore.django import modulestore, _MODULESTORES @@ -47,7 +47,10 @@ class TestCohorts(django.test.TestCase): @staticmethod def config_course_cohorts(course, discussions, - cohorted, cohorted_discussions=None): + cohorted, + cohorted_discussions=None, + auto_cohort=None, + auto_cohort_groups=None): """ Given a course with no discussion set up, add the discussions and set the cohort config appropriately. @@ -59,6 +62,9 @@ class TestCohorts(django.test.TestCase): cohorted: bool. cohorted_discussions: optional list of topic names. If specified, converts them to use the same ids as topic names. + auto_cohort: optional bool. + auto_cohort_groups: optional list of strings + (names of groups to put students into). Returns: Nothing -- modifies course in place. 
@@ -70,13 +76,19 @@ class TestCohorts(django.test.TestCase): "id": to_id(name)}) for name in discussions) - course.metadata["discussion_topics"] = topics + course.discussion_topics = topics d = {"cohorted": cohorted} if cohorted_discussions is not None: d["cohorted_discussions"] = [to_id(name) for name in cohorted_discussions] - course.metadata["cohort_config"] = d + + if auto_cohort is not None: + d["auto_cohort"] = auto_cohort + if auto_cohort_groups is not None: + d["auto_cohort_groups"] = auto_cohort_groups + + course.cohort_config = d def setUp(self): @@ -89,12 +101,9 @@ class TestCohorts(django.test.TestCase): def test_get_cohort(self): - # Need to fix this, but after we're testing on staging. (Looks like - # problem is that when get_cohort internally tries to look up the - # course.id, it fails, even though we loaded it through the modulestore. - - # Proper fix: give all tests a standard modulestore that uses the test - # dir. + """ + Make sure get_cohort() does the right thing when the course is cohorted + """ course = modulestore().get_course("edX/toy/2012_Fall") self.assertEqual(course.id, "edX/toy/2012_Fall") self.assertFalse(course.is_cohorted) @@ -122,6 +131,85 @@ class TestCohorts(django.test.TestCase): self.assertEquals(get_cohort(other_user, course.id), None, "other_user shouldn't have a cohort") + def test_auto_cohorting(self): + """ + Make sure get_cohort() does the right thing when the course is auto_cohorted + """ + course = modulestore().get_course("edX/toy/2012_Fall") + self.assertEqual(course.id, "edX/toy/2012_Fall") + self.assertFalse(course.is_cohorted) + + user1 = User.objects.create(username="test", email="a@b.com") + user2 = User.objects.create(username="test2", email="a2@b.com") + user3 = User.objects.create(username="test3", email="a3@b.com") + + cohort = CourseUserGroup.objects.create(name="TestCohort", + course_id=course.id, + group_type=CourseUserGroup.COHORT) + + # user1 manually added to a cohort + cohort.users.add(user1) + + # 
Make the course auto cohorted... + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=["AutoGroup"]) + + self.assertEquals(get_cohort(user1, course.id).id, cohort.id, + "user1 should stay put") + + self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup", + "user2 should be auto-cohorted") + + # Now make the group list empty + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=[]) + + self.assertEquals(get_cohort(user3, course.id), None, + "No groups->no auto-cohorting") + + # Now make it different + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=["OtherGroup"]) + + self.assertEquals(get_cohort(user3, course.id).name, "OtherGroup", + "New list->new group") + self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup", + "user2 should still be in originally placed cohort") + + + def test_auto_cohorting_randomization(self): + """ + Make sure get_cohort() randomizes properly. + """ + course = modulestore().get_course("edX/toy/2012_Fall") + self.assertEqual(course.id, "edX/toy/2012_Fall") + self.assertFalse(course.is_cohorted) + + groups = ["group_{0}".format(n) for n in range(5)] + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=groups) + + # Assign 100 users to cohorts + for i in range(100): + user = User.objects.create(username="test_{0}".format(i), + email="a@b{0}.com".format(i)) + get_cohort(user, course.id) + + # Now make sure that the assignment was at least vaguely random: + # each cohort should have at least 1, and fewer than 50 students. 
+ # (with 5 groups, probability of 0 users in any group is about + # .8**100= 2.0e-10) + for cohort_name in groups: + cohort = get_cohort_by_name(course.id, cohort_name) + num_users = cohort.users.count() + self.assertGreater(num_users, 1) + self.assertLess(num_users, 50) + + def test_get_course_cohorts(self): course1_id = 'a/b/c' diff --git a/common/djangoapps/heartbeat/views.py b/common/djangoapps/heartbeat/views.py index 956504407b..d7c3a32192 100644 --- a/common/djangoapps/heartbeat/views.py +++ b/common/djangoapps/heartbeat/views.py @@ -2,8 +2,9 @@ import json from datetime import datetime from django.http import HttpResponse from xmodule.modulestore.django import modulestore +from dogapi import dog_stats_api - +@dog_stats_api.timed('edxapp.heartbeat') def heartbeat(request): """ Simple view that a loadbalancer can check to verify that the app is up diff --git a/common/djangoapps/mitxmako/makoloader.py b/common/djangoapps/mitxmako/makoloader.py index 29184299b6..d623e8bcff 100644 --- a/common/djangoapps/mitxmako/makoloader.py +++ b/common/djangoapps/mitxmako/makoloader.py @@ -9,6 +9,7 @@ from django.template.loaders.app_directories import Loader as AppDirectoriesLoad from mitxmako.template import Template import mitxmako.middleware +import tempdir log = logging.getLogger(__name__) @@ -30,7 +31,7 @@ class MakoLoader(object): if module_directory is None: log.warning("For more caching of mako templates, set the MAKO_MODULE_DIR in settings!") - module_directory = tempfile.mkdtemp() + module_directory = tempdir.mkdtemp_clean() self.module_directory = module_directory diff --git a/common/djangoapps/mitxmako/middleware.py b/common/djangoapps/mitxmako/middleware.py index 64cb2e5415..3f66f8cc48 100644 --- a/common/djangoapps/mitxmako/middleware.py +++ b/common/djangoapps/mitxmako/middleware.py @@ -13,7 +13,7 @@ # limitations under the License. 
from mako.lookup import TemplateLookup -import tempfile +import tempdir from django.template import RequestContext from django.conf import settings @@ -29,7 +29,7 @@ class MakoMiddleware(object): module_directory = getattr(settings, 'MAKO_MODULE_DIR', None) if module_directory is None: - module_directory = tempfile.mkdtemp() + module_directory = tempdir.mkdtemp_clean() for location in template_locations: lookup[location] = TemplateLookup(directories=template_locations[location], diff --git a/common/djangoapps/static_replace/__init__.py b/common/djangoapps/static_replace/__init__.py index fb1f48d143..b73a658c5f 100644 --- a/common/djangoapps/static_replace/__init__.py +++ b/common/djangoapps/static_replace/__init__.py @@ -84,12 +84,19 @@ def replace_static_urls(text, data_directory, course_namespace=None): if rest.endswith('?raw'): return original - # course_namespace is not None, then use studio style urls - if course_namespace is not None and not isinstance(modulestore(), XMLModuleStore): - url = StaticContent.convert_legacy_static_url(rest, course_namespace) # In debug mode, if we can find the url as is, - elif settings.DEBUG and finders.find(rest, True): + if settings.DEBUG and finders.find(rest, True): return original + # if we're running with a MongoBacked store course_namespace is not None, then use studio style urls + elif course_namespace is not None and not isinstance(modulestore(), XMLModuleStore): + # first look in the static file pipeline and see if we are trying to reference + # a piece of static content which is in the mitx repo (e.g. 
JS associated with an xmodule) + if staticfiles_storage.exists(rest): + url = staticfiles_storage.url(rest) + else: + # if not, then assume it's courseware specific content and then look in the + # Mongo-backed database + url = StaticContent.convert_legacy_static_url(rest, course_namespace) # Otherwise, look the file up in staticfiles_storage, and append the data directory if needed else: course_path = "/".join((data_directory, rest)) diff --git a/common/djangoapps/status/tests.py b/common/djangoapps/status/tests.py index 1695663ac5..bf60017036 100644 --- a/common/djangoapps/status/tests.py +++ b/common/djangoapps/status/tests.py @@ -4,7 +4,7 @@ import os from django.test.utils import override_settings from tempfile import NamedTemporaryFile -from status import get_site_status_msg +from .status import get_site_status_msg # Get a name where we can put test files TMP_FILE = NamedTemporaryFile(delete=False) diff --git a/common/djangoapps/student/management/commands/pearson_transfer.py b/common/djangoapps/student/management/commands/pearson_transfer.py index 5eded6484a..75716c7443 100644 --- a/common/djangoapps/student/management/commands/pearson_transfer.py +++ b/common/djangoapps/student/management/commands/pearson_transfer.py @@ -10,6 +10,7 @@ import paramiko import boto dog_http_api.api_key = settings.DATADOG_API +dog_stats_api.start(api_key=settings.DATADOG_API, statsd=True) class Command(BaseCommand): diff --git a/common/djangoapps/student/management/commands/tests/test_pearson.py b/common/djangoapps/student/management/commands/tests/test_pearson.py index 12969405de..65d628fba0 100644 --- a/common/djangoapps/student/management/commands/tests/test_pearson.py +++ b/common/djangoapps/student/management/commands/tests/test_pearson.py @@ -7,6 +7,7 @@ import logging import os from tempfile import mkdtemp import cStringIO +import shutil import sys from django.test import TestCase @@ -143,23 +144,18 @@ class PearsonTestCase(TestCase): ''' Base class for tests running 
Pearson-related commands ''' - import_dir = mkdtemp(prefix="import") - export_dir = mkdtemp(prefix="export") def assertErrorContains(self, error_message, expected): self.assertTrue(error_message.find(expected) >= 0, 'error message "{}" did not contain "{}"'.format(error_message, expected)) + def setUp(self): + self.import_dir = mkdtemp(prefix="import") + self.addCleanup(shutil.rmtree, self.import_dir) + self.export_dir = mkdtemp(prefix="export") + self.addCleanup(shutil.rmtree, self.export_dir) + def tearDown(self): - def delete_temp_dir(dirname): - if os.path.exists(dirname): - for filename in os.listdir(dirname): - os.remove(os.path.join(dirname, filename)) - os.rmdir(dirname) - - # clean up after any test data was dumped to temp directory - delete_temp_dir(self.import_dir) - delete_temp_dir(self.export_dir) - + pass # and clean up the database: # TestCenterUser.objects.all().delete() # TestCenterRegistration.objects.all().delete() diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py index 040dd56c69..7d96cafa28 100644 --- a/common/djangoapps/student/views.py +++ b/common/djangoapps/student/views.py @@ -44,9 +44,8 @@ from collections import namedtuple from courseware.courses import get_courses, sort_by_announcement from courseware.access import has_access -from courseware.models import StudentModuleCache from courseware.views import get_module_for_descriptor, jump_to -from courseware.module_render import get_instance_module +from courseware.model_data import ModelDataCache from statsd import statsd @@ -115,7 +114,7 @@ def get_date_for_press(publish_date): def press(request): json_articles = cache.get("student_press_json_articles") - if json_articles == None: + if json_articles is None: if hasattr(settings, 'RSS_URL'): content = urllib.urlopen(settings.PRESS_URL).read() json_articles = json.loads(content) @@ -316,7 +315,7 @@ def change_enrollment(request): action = request.POST.get("enrollment_action", "") course_id = 
request.POST.get("course_id", None) - if course_id == None: + if course_id is None: return HttpResponse(json.dumps({'success': False, 'error': 'There was an error receiving the course id.'})) @@ -333,7 +332,7 @@ def change_enrollment(request): if not has_access(user, course, 'enroll'): return {'success': False, 'error': 'enrollment in {} not allowed at this time' - .format(course.display_name)} + .format(course.display_name_with_default)} org, course_num, run = course_id.split("/") statsd.increment("common.student.enrollment", @@ -398,7 +397,7 @@ def login_user(request, error=""): try: login(request, user) if request.POST.get('remember') == 'true': - request.session.set_expiry(None) # or change to 604800 for 7 days + request.session.set_expiry(604800) log.debug("Setting user session to never expire") else: request.session.set_expiry(0) @@ -567,7 +566,7 @@ def create_account(request, post_override=None): try: validate_slug(post_vars['username']) except ValidationError: - js['value'] = "Username should only consist of A-Z and 0-9.".format(field=a) + js['value'] = "Username should only consist of A-Z and 0-9, with no spaces.".format(field=a) js['field'] = 'username' return HttpResponse(json.dumps(js)) @@ -1161,10 +1160,10 @@ def test_center_login(request): if not timelimit_descriptor: log.error("cand {} on exam {} for course {}: descriptor not found for location {}".format(client_candidate_id, exam_series_code, course_id, location)) return HttpResponseRedirect(makeErrorURL(error_url, "missingClientProgram")); - - timelimit_module_cache = StudentModuleCache.cache_for_descriptor_descendents(course_id, testcenteruser.user, + + timelimit_module_cache = StudentModuleCache.cache_for_descriptor_descendents(course_id, testcenteruser.user, timelimit_descriptor, depth=None) - timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor, + timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor, timelimit_module_cache, 
course_id, position=None) if not timelimit_module.category == 'timelimit': log.error("cand {} on exam {} for course {}: non-timelimit module at location {}".format(client_candidate_id, exam_series_code, course_id, location)) @@ -1189,9 +1188,6 @@ def test_center_login(request): if time_accommodation_code: timelimit_module.accommodation_code = time_accommodation_code - instance_module = get_instance_module(course_id, testcenteruser.user, timelimit_module, timelimit_module_cache) - instance_module.state = timelimit_module.get_instance_state() - instance_module.save() log.info("cand {} on exam {} for course {}: receiving accommodation {}".format(client_candidate_id, exam_series_code, course_id, time_accommodation_code)) # UGLY HACK!!! @@ -1215,7 +1211,7 @@ def _get_news(top=None): "Return the n top news items on settings.RSS_URL" feed_data = cache.get("students_index_rss_feed_data") - if feed_data == None: + if feed_data is None: if hasattr(settings, 'RSS_URL'): feed_data = urllib.urlopen(settings.RSS_URL).read() else: diff --git a/lms/djangoapps/terrain/__init__.py b/common/djangoapps/terrain/__init__.py similarity index 100% rename from lms/djangoapps/terrain/__init__.py rename to common/djangoapps/terrain/__init__.py diff --git a/lms/djangoapps/terrain/browser.py b/common/djangoapps/terrain/browser.py similarity index 80% rename from lms/djangoapps/terrain/browser.py rename to common/djangoapps/terrain/browser.py index 8c2a8ba7a5..6394959532 100644 --- a/lms/djangoapps/terrain/browser.py +++ b/common/djangoapps/terrain/browser.py @@ -3,6 +3,11 @@ from splinter.browser import Browser from logging import getLogger import time +# Let the LMS and CMS do their one-time setup +# For example, setting up mongo caches +from lms import one_time_startup +from cms import one_time_startup + logger = getLogger(__name__) logger.info("Loading the lettuce acceptance testing terrain file...") @@ -13,6 +18,7 @@ from django.core.management import call_command def 
initial_setup(server): # Launch the browser app (choose one of these below) world.browser = Browser('chrome') + # world.browser = Browser('phantomjs') # world.browser = Browser('firefox') diff --git a/lms/djangoapps/terrain/factories.py b/common/djangoapps/terrain/factories.py similarity index 67% rename from lms/djangoapps/terrain/factories.py rename to common/djangoapps/terrain/factories.py index 896f115df5..c36bf935f1 100644 --- a/lms/djangoapps/terrain/factories.py +++ b/common/djangoapps/terrain/factories.py @@ -1,4 +1,5 @@ from student.models import User, UserProfile, Registration +from django.contrib.auth.models import Group from datetime import datetime from factory import Factory from xmodule.modulestore import Location @@ -6,6 +7,13 @@ from xmodule.modulestore.django import modulestore from time import gmtime from uuid import uuid4 from xmodule.timeparse import stringify_time +from xmodule.modulestore.inheritance import own_metadata + + +class GroupFactory(Factory): + FACTORY_FOR = Group + + name = 'staff_MITx/999/Robot_Super_Course' class UserProfileFactory(Factory): @@ -74,18 +82,17 @@ class XModuleCourseFactory(Factory): # This metadata code was copied from cms/djangoapps/contentstore/views.py if display_name is not None: - new_course.metadata['display_name'] = display_name + new_course.display_name = display_name - new_course.metadata['data_dir'] = uuid4().hex - new_course.metadata['start'] = stringify_time(gmtime()) + new_course.lms.start = gmtime() new_course.tabs = [{"type": "courseware"}, - {"type": "course_info", "name": "Course Info"}, + {"type": "course_info", "name": "Course Info"}, {"type": "discussion", "name": "Discussion"}, {"type": "wiki", "name": "Wiki"}, {"type": "progress", "name": "Progress"}] # Update the data in the mongo datastore - store.update_metadata(new_course.location.url(), new_course.own_metadata) + store.update_metadata(new_course.location.url(), own_metadata(new_course)) return new_course @@ -114,35 +121,59 @@ class 
XModuleItemFactory(Factory): @classmethod def _create(cls, target_class, *args, **kwargs): """ - kwargs must include parent_location, template. Can contain display_name - target_class is ignored + Uses *kwargs*: + + *parent_location* (required): the location of the parent module + (e.g. the parent course or section) + + *template* (required): the template to create the item from + (e.g. i4x://templates/section/Empty) + + *data* (optional): the data for the item + (e.g. XML problem definition for a problem item) + + *display_name* (optional): the display name of the item + + *metadata* (optional): dictionary of metadata attributes + + *target_class* is ignored """ DETACHED_CATEGORIES = ['about', 'static_tab', 'course_info'] parent_location = Location(kwargs.get('parent_location')) template = Location(kwargs.get('template')) + data = kwargs.get('data') display_name = kwargs.get('display_name') + metadata = kwargs.get('metadata', {}) store = modulestore('direct') # This code was based off that in cms/djangoapps/contentstore/views.py parent = store.get_item(parent_location) - dest_location = parent_location._replace(category=template.category, name=uuid4().hex) + + # If a display name is set, use that + dest_name = display_name.replace(" ", "_") if display_name is not None else uuid4().hex + dest_location = parent_location._replace(category=template.category, + name=dest_name) new_item = store.clone_item(template, dest_location) - # TODO: This needs to be deleted when we have proper storage for static content - new_item.metadata['data_dir'] = parent.metadata['data_dir'] - # replace the display name with an optional parameter passed in from the caller if display_name is not None: - new_item.metadata['display_name'] = display_name + new_item.display_name = display_name - store.update_metadata(new_item.location.url(), new_item.own_metadata) + # Add additional metadata or override current metadata + item_metadata = own_metadata(new_item) + item_metadata.update(metadata) + 
store.update_metadata(new_item.location.url(), item_metadata) + + # replace the data with the optional *data* parameter + if data is not None: + store.update_item(new_item.location, data) if new_item.location.category not in DETACHED_CATEGORIES: - store.update_children(parent_location, parent.definition.get('children', []) + [new_item.location.url()]) + store.update_children(parent_location, parent.children + [new_item.location.url()]) return new_item diff --git a/lms/djangoapps/terrain/steps.py b/common/djangoapps/terrain/steps.py similarity index 81% rename from lms/djangoapps/terrain/steps.py rename to common/djangoapps/terrain/steps.py index 6b2a813d8d..52eeb23c4a 100644 --- a/lms/djangoapps/terrain/steps.py +++ b/common/djangoapps/terrain/steps.py @@ -1,8 +1,6 @@ from lettuce import world, step -from factories import * -from django.core.management import call_command +from .factories import * from lettuce.django import django_url -from django.conf import settings from django.contrib.auth.models import User from student.models import CourseEnrollment from urllib import quote_plus @@ -21,6 +19,11 @@ def wait(step, seconds): time.sleep(float(seconds)) +@step('I reload the page$') +def reload_the_page(step): + world.browser.reload() + + @step('I (?:visit|access|open) the homepage$') def i_visit_the_homepage(step): world.browser.visit(django_url('/')) @@ -66,6 +69,11 @@ def the_page_title_should_be(step, title): assert_equals(world.browser.title, title) +@step(u'the page title should contain "([^"]*)"$') +def the_page_title_should_contain(step, title): + assert(title in world.browser.title) + + @step('I am a logged in user$') def i_am_logged_in_user(step): create_user('robot') @@ -77,18 +85,6 @@ def i_am_not_logged_in(step): world.browser.cookies.delete() -@step('I am registered for a course$') -def i_am_registered_for_a_course(step): - create_user('robot') - u = User.objects.get(username='robot') - CourseEnrollment.objects.get_or_create(user=u, 
course_id='MITx/6.002x/2012_Fall') - - -@step('I am registered for course "([^"]*)"$') -def i_am_registered_for_course_by_id(step, course_id): - register_by_course_id(course_id) - - @step('I am staff for course "([^"]*)"$') def i_am_staff_for_course_by_id(step, course_id): register_by_course_id(course_id, True) @@ -106,8 +102,19 @@ def i_am_an_edx_user(step): #### helper functions +@world.absorb +def scroll_to_bottom(): + # Maximize the browser + world.browser.execute_script("window.scrollTo(0, screen.height);") + + @world.absorb def create_user(uname): + + # If the user already exists, don't try to create it again + if len(User.objects.filter(username=uname)) > 0: + return + portal_user = UserFactory.build(username=uname, email=uname + '@edx.org') portal_user.set_password('test') portal_user.save() @@ -125,13 +132,25 @@ def log_in(email, password): world.browser.visit(django_url('/')) world.browser.is_element_present_by_css('header.global', 10) world.browser.click_link_by_href('#login-modal') - login_form = world.browser.find_by_css('form#login_form') + + # Wait for the login dialog to load + # This is complicated by the fact that sometimes a second #login_form + # dialog loads, while the first one remains hidden. + # We give them both time to load, starting with the second one. + world.browser.is_element_present_by_css('section.content-wrapper form#login_form', wait_time=4) + world.browser.is_element_present_by_css('form#login_form', wait_time=2) + + # For some reason, the page sometimes includes two #login_form + # elements, the first of which is not visible. 
+ # To avoid this, we always select the last of the two #login_form dialogs + login_form = world.browser.find_by_css('form#login_form').last + login_form.find_by_name('email').fill(email) login_form.find_by_name('password').fill(password) login_form.find_by_name('submit').click() # wait for the page to redraw - assert world.browser.is_element_present_by_css('.content-wrapper', 10) + assert world.browser.is_element_present_by_css('.content-wrapper', wait_time=10) @world.absorb diff --git a/common/djangoapps/xmodule_modifiers.py b/common/djangoapps/xmodule_modifiers.py index 7b19c27553..d398dfef0d 100644 --- a/common/djangoapps/xmodule_modifiers.py +++ b/common/djangoapps/xmodule_modifiers.py @@ -33,7 +33,7 @@ def wrap_xmodule(get_html, module, template, context=None): def _get_html(): context.update({ 'content': get_html(), - 'display_name': module.metadata.get('display_name') if module.metadata is not None else None, + 'display_name': module.display_name, 'class_': module.__class__.__name__, 'module_name': module.js_module_name }) @@ -108,42 +108,25 @@ def add_histogram(get_html, module, user): histogram = grade_histogram(module_id) render_histogram = len(histogram) > 0 - # TODO (ichuang): Remove after fall 2012 LMS migration done - if settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION'): - [filepath, filename] = module.definition.get('filename', ['', None]) - osfs = module.system.filestore - if filename is not None and osfs.exists(filename): - # if original, unmangled filename exists then use it (github - # doesn't like symlinks) - filepath = filename - data_dir = osfs.root_path.rsplit('/')[-1] - giturl = module.metadata.get('giturl', 'https://github.com/MITx') - edit_link = "%s/%s/tree/master/%s" % (giturl, data_dir, filepath) - else: - edit_link = False - # Need to define all the variables that are about to be used - giturl = "" - data_dir = "" - source_file = module.metadata.get('source_file', '') # source used to generate the problem XML, eg latex or word + 
source_file = module.lms.source_file # source used to generate the problem XML, eg latex or word # useful to indicate to staff if problem has been released or not # TODO (ichuang): use _has_access_descriptor.can_load in lms.courseware.access, instead of now>mstart comparison here now = time.gmtime() is_released = "unknown" - mstart = getattr(module.descriptor, 'start') + mstart = module.descriptor.lms.start + if mstart is not None: is_released = "Yes!" if (now > mstart) else "Not yet" - staff_context = {'definition': module.definition.get('data'), - 'metadata': json.dumps(module.metadata, indent=4), + staff_context = {'fields': [(field.name, getattr(module, field.name)) for field in module.fields], + 'lms_fields': [(field.name, getattr(module.lms, field.name)) for field in module.lms.fields], 'location': module.location, - 'xqa_key': module.metadata.get('xqa_key', ''), + 'xqa_key': module.lms.xqa_key, 'source_file': source_file, - 'source_url': '%s/%s/tree/master/%s' % (giturl, data_dir, source_file), 'category': str(module.__class__.__name__), # Template uses element_id in js function names, so can't allow dashes 'element_id': module.location.html_id().replace('-', '_'), - 'edit_link': edit_link, 'user': user, 'xqa_server': settings.MITX_FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'), 'histogram': json.dumps(histogram), diff --git a/common/lib/capa/capa/calc.py b/common/lib/capa/capa/calc.py index 0f062d17d5..c3fe6b656b 100644 --- a/common/lib/capa/capa/calc.py +++ b/common/lib/capa/capa/calc.py @@ -183,7 +183,7 @@ def evaluator(variables, functions, string, cs=False): # 0.33k or -17 number = (Optional(minus | plus) + inner_number - + Optional(CaselessLiteral("E") + Optional("-") + number_part) + + Optional(CaselessLiteral("E") + Optional((plus | minus)) + number_part) + Optional(number_suffix)) number = number.setParseAction(number_parse_action) # Convert to number diff --git a/common/lib/capa/capa/capa_problem.py 
b/common/lib/capa/capa/capa_problem.py index 9b8bbd7288..42753fc90b 100644 --- a/common/lib/capa/capa/capa_problem.py +++ b/common/lib/capa/capa/capa_problem.py @@ -29,6 +29,7 @@ import sys from lxml import etree from xml.sax.saxutils import unescape +from copy import deepcopy import chem import chem.chemcalc @@ -38,11 +39,11 @@ import verifiers import verifiers.draganddrop import calc -from correctmap import CorrectMap +from .correctmap import CorrectMap import eia import inputtypes import customrender -from util import contextualize_text, convert_files_to_filenames +from .util import contextualize_text, convert_files_to_filenames import xqueue_interface # to be replaced with auto-registering @@ -77,7 +78,7 @@ global_context = {'random': random, # These should be removed from HTML output, including all subelements html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric"] -log = logging.getLogger('mitx.' + __name__) +log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # main class for this module @@ -107,6 +108,8 @@ class LoncapaProblem(object): self.do_reset() self.problem_id = id self.system = system + if self.system is None: + raise Exception() self.seed = seed if state: @@ -146,6 +149,13 @@ class LoncapaProblem(object): if not self.student_answers: # True when student_answers is an empty dict self.set_initial_display() + # dictionary of InputType objects associated with this problem + # input_id string -> InputType object + self.inputs = {} + + self.extracted_tree = self._extract_html(self.tree) + + def do_reset(self): ''' Reset internal state to unfinished, with no answers @@ -324,7 +334,27 @@ class LoncapaProblem(object): ''' Main method called externally to get the HTML to be rendered for this capa Problem. 
''' - return contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context) + html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context) + return html + + + def handle_input_ajax(self, get): + ''' + InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data + + Also, parse out the dispatch from the get so that it can be passed onto the input type nicely + ''' + + # pull out the id + input_id = get['input_id'] + if input_id in self.inputs: + dispatch = get['dispatch'] + return self.inputs[input_id].handle_ajax(dispatch, get) + else: + log.warning("Could not find matching input for id: %s" % input_id) + return {} + + + # ======= Private Methods Below ======== @@ -458,6 +488,8 @@ class LoncapaProblem(object): finally: sys.path = original_path + + def _extract_html(self, problemtree): # private ''' Main (private) function which converts Problem XML tree to HTML. 
- return problemtree + return deepcopy(problemtree) if problemtree.tag in html_problem_semantics: return @@ -484,8 +516,9 @@ class LoncapaProblem(object): msg = '' hint = '' hintmode = None + input_id = problemtree.get('id') if problemid in self.correct_map: - pid = problemtree.get('id') + pid = input_id status = self.correct_map.get_correctness(pid) msg = self.correct_map.get_msg(pid) hint = self.correct_map.get_hint(pid) @@ -496,21 +529,23 @@ class LoncapaProblem(object): value = self.student_answers[problemid] # do the rendering - state = {'value': value, 'status': status, - 'id': problemtree.get('id'), + 'id': input_id, 'feedback': {'message': msg, 'hint': hint, 'hintmode': hintmode, }} input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag) - the_input = input_type_cls(self.system, problemtree, state) - return the_input.get_html() + # save the input type so that we can make ajax calls on it if we need to + self.inputs[input_id] = input_type_cls(self.system, problemtree, state) + return self.inputs[input_id].get_html() # let each Response render itself if problemtree in self.responders: - return self.responders[problemtree].render_html(self._extract_html) + overall_msg = self.correct_map.get_overall_message() + return self.responders[problemtree].render_html(self._extract_html, + response_msg=overall_msg) # let each custom renderer render itself: if problemtree.tag in customrender.registry.registered_tags(): diff --git a/common/lib/capa/capa/checker.py b/common/lib/capa/capa/checker.py index f583a5ea7d..15358aac9e 100755 --- a/common/lib/capa/capa/checker.py +++ b/common/lib/capa/capa/checker.py @@ -12,8 +12,8 @@ from path import path from cStringIO import StringIO from collections import defaultdict -from calc import UndefinedVariable -from capa_problem import LoncapaProblem +from .calc import UndefinedVariable +from .capa_problem import LoncapaProblem from mako.lookup import TemplateLookup logging.basicConfig(format="%(levelname)s 
%(message)s") diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/capa/capa/chem/tests.py index 571526f915..f422fcf0d1 100644 --- a/common/lib/capa/capa/chem/tests.py +++ b/common/lib/capa/capa/chem/tests.py @@ -2,7 +2,7 @@ import codecs from fractions import Fraction import unittest -from chemcalc import (compare_chemical_expression, divide_chemical_expression, +from .chemcalc import (compare_chemical_expression, divide_chemical_expression, render_to_html, chemical_equations_equal) import miller @@ -277,7 +277,6 @@ class Test_Render_Equations(unittest.TestCase): def test_render9(self): s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-" - #import ipdb; ipdb.set_trace() out = render_to_html(s) correct = u'5[Ni(NH3)4]2++52SO42-' log(out + ' ------- ' + correct, 'html') diff --git a/common/lib/capa/capa/correctmap.py b/common/lib/capa/capa/correctmap.py index a78b10d07a..b726f765d8 100644 --- a/common/lib/capa/capa/correctmap.py +++ b/common/lib/capa/capa/correctmap.py @@ -27,6 +27,7 @@ class CorrectMap(object): self.cmap = dict() self.items = self.cmap.items self.keys = self.cmap.keys + self.overall_message = "" self.set(*args, **kwargs) def __getitem__(self, *args, **kwargs): @@ -46,7 +47,7 @@ class CorrectMap(object): queuestate=None, **kwargs): if answer_id is not None: - self.cmap[answer_id] = {'correctness': correctness, + self.cmap[str(answer_id)] = {'correctness': correctness, 'npoints': npoints, 'msg': msg, 'hint': hint, @@ -94,7 +95,7 @@ class CorrectMap(object): def is_correct(self, answer_id): if answer_id in self.cmap: - return self.cmap[answer_id]['correctness'] == 'correct' + return self.cmap[answer_id]['correctness'] in ['correct', 'partially-correct'] return None def is_queued(self, answer_id): @@ -104,9 +105,13 @@ class CorrectMap(object): return self.is_queued(answer_id) and self.cmap[answer_id]['queuestate']['key'] == test_key def get_queuetime_str(self, answer_id): - return self.cmap[answer_id]['queuestate']['time'] + if self.cmap[answer_id]['queuestate']: 
+ return self.cmap[answer_id]['queuestate']['time'] + else: + return None def get_npoints(self, answer_id): + """Return the number of points for an answer, used for partial credit.""" npoints = self.get_property(answer_id, 'npoints') if npoints is not None: return npoints @@ -153,3 +158,15 @@ class CorrectMap(object): if not isinstance(other_cmap, CorrectMap): raise Exception('CorrectMap.update called with invalid argument %s' % other_cmap) self.cmap.update(other_cmap.get_dict()) + self.set_overall_message(other_cmap.get_overall_message()) + + + def set_overall_message(self, message_str): + """ Set a message that applies to the question as a whole, + rather than to individual inputs. """ + self.overall_message = str(message_str) if message_str else "" + + def get_overall_message(self): + """ Retrieve a message that applies to the question as a whole. + If no message is available, returns the empty string """ + return self.overall_message diff --git a/common/lib/capa/capa/customrender.py b/common/lib/capa/capa/customrender.py index a925a5970d..60d3ce578b 100644 --- a/common/lib/capa/capa/customrender.py +++ b/common/lib/capa/capa/customrender.py @@ -6,7 +6,7 @@ These tags do not have state, so they just get passed the system (for access to and the xml element. """ -from registry import TagRegistry +from .registry import TagRegistry import logging import re @@ -15,9 +15,9 @@ import json from lxml import etree import xml.sax.saxutils as saxutils -from registry import TagRegistry +from .registry import TagRegistry -log = logging.getLogger('mitx.' 
+ __name__) +log = logging.getLogger(__name__) registry = TagRegistry() diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 951104501a..c2babfa479 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -45,10 +45,12 @@ import re import shlex # for splitting quoted strings import sys import os +import pyparsing -from registry import TagRegistry +from .registry import TagRegistry +from capa.chem import chemcalc -log = logging.getLogger('mitx.' + __name__) +log = logging.getLogger(__name__) ######################################################################### @@ -215,6 +217,18 @@ class InputTypeBase(object): """ pass + def handle_ajax(self, dispatch, get): + """ + InputTypes that need to handle specialized AJAX should override this. + + Input: + dispatch: a string that can be used to determine how to handle the data passed in + get: a dictionary containing the data that was sent with the ajax call + + Output: + a dictionary object that can be serialized into JSON. This will be sent back to the Javascript. 
+ """ + pass def _get_render_context(self): """ @@ -352,6 +366,12 @@ class ChoiceGroup(InputTypeBase): self.choices = self.extract_choices(self.xml) + @classmethod + def get_attributes(cls): + return [Attribute("show_correctness", "always"), + Attribute("submitted_message", "Answer received.")] + + def _extra_context(self): return {'input_type': self.html_input_type, 'choices': self.choices, @@ -740,6 +760,45 @@ class ChemicalEquationInput(InputTypeBase): """ return {'previewer': '/static/js/capa/chemical_equation_preview.js', } + def handle_ajax(self, dispatch, get): + ''' + Since we only have chemcalc preview this input, check to see if it + matches the corresponding dispatch and send it through if it does + ''' + if dispatch == 'preview_chemcalc': + return self.preview_chemcalc(get) + return {} + + def preview_chemcalc(self, get): + """ + Render an html preview of a chemical formula or equation. get should + contain a key 'formula' and value 'some formula string'. + + Returns a json dictionary: + { + 'preview' : 'the-preview-html' or '' + 'error' : 'the-error' or '' + } + """ + + result = {'preview': '', + 'error': ''} + formula = get['formula'] + if formula is None: + result['error'] = "No formula specified." 
+ return result + + try: + result['preview'] = chemcalc.render_to_html(formula) + except pyparsing.ParseException as p: + result['error'] = "Couldn't parse formula: {0}".format(p) + except Exception: + # this is unexpected, so log + log.warning("Error while previewing chemical formula", exc_info=True) + result['error'] = "Error while rendering preview" + + return result + registry.register(ChemicalEquationInput) #----------------------------------------------------------------------------- @@ -798,6 +857,10 @@ class DragAndDropInput(InputTypeBase): if tag_type == 'draggable' and not self.no_labels: dic['label'] = dic['label'] or dic['id'] + if tag_type == 'draggable': + dic['target_fields'] = [parse(target, 'target') for target in + tag.iterchildren('target')] + return dic # add labels to images?: @@ -909,33 +972,142 @@ registry.register(DesignProtein2dInput) class EditAGeneInput(InputTypeBase): """ An input type for editing a gene. Integrates with the genex java applet. - + Example: - + """ - + template = "editageneinput.html" tags = ['editageneinput'] - + @classmethod def get_attributes(cls): """ - Note: width, hight, and dna_sequencee are required. - """ + Note: width, height, and dna_sequencee are required. + """ return [Attribute('width'), Attribute('height'), - Attribute('dna_sequence') + Attribute('dna_sequence'), + Attribute('genex_problem_number') ] - + def _extra_context(self): """ """ context = { 'applet_loader': '/static/js/capa/edit-a-gene.js', } - + return context registry.register(EditAGeneInput) +#--------------------------------------------------------------------- + +class AnnotationInput(InputTypeBase): + """ + Input type for annotations: students can enter some notes or other text + (currently ungraded), and then choose from a set of tags/optoins, which are graded. 
+ + Example: + + + Annotation Exercise + + They are the ones who, at the public assembly, had put savage derangement [ate] into my thinking + [phrenes] |89 on that day when I myself deprived Achilles of his honorific portion [geras] + + Agamemnon says that ate or 'derangement' was the cause of his actions: why could Zeus say the same thing? + Type a commentary below: + Select one tag: + + + + + + + + # TODO: allow ordering to be randomized + """ + + template = "annotationinput.html" + tags = ['annotationinput'] + + def setup(self): + xml = self.xml + + self.debug = False # set to True to display extra debug info with input + self.return_to_annotation = True # return only works in conjunction with annotatable xmodule + + self.title = xml.findtext('./title', 'Annotation Exercise') + self.text = xml.findtext('./text') + self.comment = xml.findtext('./comment') + self.comment_prompt = xml.findtext('./comment_prompt', 'Type a commentary below:') + self.tag_prompt = xml.findtext('./tag_prompt', 'Select one tag:') + self.options = self._find_options() + + # Need to provide a value that JSON can parse if there is no + # student-supplied value yet. + if self.value == '': + self.value = 'null' + + self._validate_options() + + def _find_options(self): + ''' Returns an array of dicts where each dict represents an option. ''' + elements = self.xml.findall('./options/option') + return [{ + 'id': index, + 'description': option.text, + 'choice': option.get('choice') + } for (index, option) in enumerate(elements) ] + + def _validate_options(self): + ''' Raises a ValueError if the choice attribute is missing or invalid. ''' + valid_choices = ('correct', 'partially-correct', 'incorrect') + for option in self.options: + choice = option['choice'] + if choice is None: + raise ValueError('Missing required choice attribute.') + elif choice not in valid_choices: + raise ValueError('Invalid choice attribute: {0}. 
Must be one of: {1}'.format(choice, ', '.join(valid_choices))) + + def _unpack(self, json_value): + ''' Unpacks the json input state into a dict. ''' + d = json.loads(json_value) + if type(d) != dict: + d = {} + + comment_value = d.get('comment', '') + if not isinstance(comment_value, basestring): + comment_value = '' + + options_value = d.get('options', []) + if not isinstance(options_value, list): + options_value = [] + + return { + 'options_value': options_value, + 'has_options_value': len(options_value) > 0, # for convenience + 'comment_value': comment_value, + } + + def _extra_context(self): + extra_context = { + 'title': self.title, + 'text': self.text, + 'comment': self.comment, + 'comment_prompt': self.comment_prompt, + 'tag_prompt': self.tag_prompt, + 'options': self.options, + 'return_to_annotation': self.return_to_annotation, + 'debug': self.debug + } + + extra_context.update(self._unpack(self.value)) + + return extra_context + +registry.register(AnnotationInput) + diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index a1a4e6b65e..6bf98999d8 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -28,15 +28,15 @@ from collections import namedtuple from shapely.geometry import Point, MultiPoint # specific library imports -from calc import evaluator, UndefinedVariable -from correctmap import CorrectMap +from .calc import evaluator, UndefinedVariable +from .correctmap import CorrectMap from datetime import datetime -from util import * +from .util import * from lxml import etree from lxml.html.soupparser import fromstring as fromstring_bs # uses Beautiful Soup!!! FIXME? import xqueue_interface -log = logging.getLogger('mitx.' 
+ __name__) +log = logging.getLogger(__name__) #----------------------------------------------------------------------------- @@ -174,13 +174,14 @@ class LoncapaResponse(object): ''' return sum(self.maxpoints.values()) - def render_html(self, renderer): + def render_html(self, renderer, response_msg=''): ''' Return XHTML Element tree representation of this Response. Arguments: - renderer : procedure which produces HTML given an ElementTree + - response_msg: a message displayed at the end of the Response ''' # render ourself as a + our content tree = etree.Element('span') @@ -195,6 +196,11 @@ class LoncapaResponse(object): if item_xhtml is not None: tree.append(item_xhtml) tree.tail = self.xml.tail + + # Add a
              for the message at the end of the response + if response_msg: + tree.append(self._render_response_msg_html(response_msg)) + return tree def evaluate_answers(self, student_answers, old_cmap): @@ -225,16 +231,14 @@ class LoncapaResponse(object): # hint specified by function? hintfn = hintgroup.get('hintfn') if hintfn: - ''' - Hint is determined by a function defined in the ''' - snippets = [{'snippet': """ + snippets = [{'snippet': r"""
              Suppose that \(I(t)\) rises from \(0\) to \(I_S\) at a time \(t_0 \neq 0\) @@ -882,7 +909,8 @@ def sympy_check2(): allowed_inputfields = ['textline', 'textbox', 'crystallography', 'chemicalequationinput', 'vsepr_input', 'drag_and_drop_input', 'editamoleculeinput', - 'designprotein2dinput', 'editageneinput'] + 'designprotein2dinput', 'editageneinput', + 'annotationinput'] def setup_response(self): xml = self.xml @@ -965,6 +993,7 @@ def sympy_check2(): # not expecting 'unknown's correct = ['unknown'] * len(idset) messages = [''] * len(idset) + overall_message = "" # put these in the context of the check function evaluator # note that this doesn't help the "cfn" version - only the exec version @@ -996,6 +1025,10 @@ def sympy_check2(): # the list of messages to be filled in by the check function 'messages': messages, + # a message that applies to the entire response + # instead of a particular input + 'overall_message': overall_message, + # any options to be passed to the cfn 'options': self.xml.get('options'), 'testdat': 'hello world', @@ -1010,6 +1043,7 @@ def sympy_check2(): exec self.code in self.context['global_context'], self.context correct = self.context['correct'] messages = self.context['messages'] + overall_message = self.context['overall_message'] except Exception as err: print "oops in customresponse (code) error %s" % err print "context = ", self.context @@ -1044,34 +1078,100 @@ def sympy_check2(): log.error(traceback.format_exc()) raise Exception("oops in customresponse (cfn) error %s" % err) log.debug("[courseware.capa.responsetypes.customresponse.get_score] ret = %s" % ret) + if type(ret) == dict: - correct = ['correct'] * len(idset) if ret['ok'] else ['incorrect'] * len(idset) - msg = ret['msg'] - if 1: - # try to clean up message html - msg = '' + msg + '' - msg = msg.replace('<', '<') - #msg = msg.replace('<','<') - msg = etree.tostring(fromstring_bs(msg, convertEntities=None), - pretty_print=True) - #msg = 
etree.tostring(fromstring_bs(msg),pretty_print=True) - msg = msg.replace(' ', '') - #msg = re.sub('(.*)','\\1',msg,flags=re.M|re.DOTALL) # python 2.7 - msg = re.sub('(?ms)(.*)', '\\1', msg) + # One kind of dictionary the check function can return has the + # form {'ok': BOOLEAN, 'msg': STRING} + # If there are multiple inputs, they all get marked + # to the same correct/incorrect value + if 'ok' in ret: + correct = ['correct'] * len(idset) if ret['ok'] else ['incorrect'] * len(idset) + msg = ret.get('msg', None) + msg = self.clean_message_html(msg) - messages[0] = msg + # If there is only one input, apply the message to that input + # Otherwise, apply the message to the whole problem + if len(idset) > 1: + overall_message = msg + else: + messages[0] = msg + + + # Another kind of dictionary the check function can return has + # the form: + # {'overall_message': STRING, + # 'input_list': [{ 'ok': BOOLEAN, 'msg': STRING }, ...] } + # + # This allows the function to return an 'overall message' + # that applies to the entire problem, as well as correct/incorrect + # status and messages for individual inputs + elif 'input_list' in ret: + overall_message = ret.get('overall_message', '') + input_list = ret['input_list'] + + correct = [] + messages = [] + for input_dict in input_list: + correct.append('correct' if input_dict['ok'] else 'incorrect') + msg = self.clean_message_html(input_dict['msg']) if 'msg' in input_dict else None + messages.append(msg) + + # Otherwise, we do not recognize the dictionary + # Raise an exception + else: + log.error(traceback.format_exc()) + raise Exception("CustomResponse: check function returned an invalid dict") + + # The check function can return a boolean value, + # indicating whether all inputs should be marked + # correct or incorrect else: correct = ['correct'] * len(idset) if ret else ['incorrect'] * len(idset) # build map giving "correct"ness of the answer(s) correct_map = CorrectMap() + + overall_message = 
self.clean_message_html(overall_message) + correct_map.set_overall_message(overall_message) + for k in range(len(idset)): npoints = self.maxpoints[idset[k]] if correct[k] == 'correct' else 0 correct_map.set(idset[k], correct[k], msg=messages[k], npoints=npoints) return correct_map + def clean_message_html(self, msg): + + # If *msg* is an empty string, then the code below + # will return "". To avoid this, we first check + # that *msg* is a non-empty string. + if msg: + + # When we parse *msg* using etree, there needs to be a root + # element, so we wrap the *msg* text in tags + msg = '' + msg + '' + + # Replace < characters + msg = msg.replace('<', '<') + + # Use etree to prettify the HTML + msg = etree.tostring(fromstring_bs(msg, convertEntities=None), + pretty_print=True) + + msg = msg.replace(' ', '') + + # Remove the tags we introduced earlier, so we're + # left with just the prettified message markup + msg = re.sub('(?ms)(.*)', '\\1', msg) + + # Strip leading and trailing whitespace + return msg.strip() + + # If we start with an empty string, then return an empty string + else: + return "" + def get_answers(self): ''' Give correct answer expected for this response. @@ -1095,7 +1195,7 @@ class SymbolicResponse(CustomResponse): """ Symbolic math response checking, using symmath library. """ - snippets = [{'snippet': ''' + snippets = [{'snippet': r''' Compute \[ \exp\left(-i \frac{\theta}{2} \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) \] and give the resulting \(2\times 2\) matrix:
              @@ -1842,6 +1942,117 @@ class ImageResponse(LoncapaResponse): dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements])) #----------------------------------------------------------------------------- +class AnnotationResponse(LoncapaResponse): + ''' + Checking of annotation responses. + + The response contains both a comment (student commentary) and an option (student tag). + Only the tag is currently graded. Answers may be incorrect, partially correct, or correct. + ''' + response_tag = 'annotationresponse' + allowed_inputfields = ['annotationinput'] + max_inputfields = 1 + default_scoring = {'incorrect': 0, 'partially-correct': 1, 'correct': 2 } + def setup_response(self): + xml = self.xml + self.scoring_map = self._get_scoring_map() + self.answer_map = self._get_answer_map() + self.maxpoints = self._get_max_points() + + def get_score(self, student_answers): + ''' Returns a CorrectMap for the student answer, which may include + partially correct answers.''' + student_answer = student_answers[self.answer_id] + student_option = self._get_submitted_option_id(student_answer) + + scoring = self.scoring_map[self.answer_id] + is_valid = student_option is not None and student_option in scoring.keys() + + (correctness, points) = ('incorrect', None) + if is_valid: + correctness = scoring[student_option]['correctness'] + points = scoring[student_option]['points'] + + return CorrectMap(self.answer_id, correctness=correctness, npoints=points) + + def get_answers(self): + return self.answer_map + + def _get_scoring_map(self): + ''' Returns a dict of option->scoring for each input. 
''' + scoring = self.default_scoring + choices = dict([(choice,choice) for choice in scoring]) + scoring_map = {} + + for inputfield in self.inputfields: + option_scoring = dict([(option['id'], { + 'correctness': choices.get(option['choice']), + 'points': scoring.get(option['choice']) + }) for option in self._find_options(inputfield) ]) + + scoring_map[inputfield.get('id')] = option_scoring + + return scoring_map + + def _get_answer_map(self): + ''' Returns a dict of answers for each input.''' + answer_map = {} + for inputfield in self.inputfields: + correct_option = self._find_option_with_choice(inputfield, 'correct') + if correct_option is not None: + answer_map[inputfield.get('id')] = correct_option.get('description') + return answer_map + + def _get_max_points(self): + ''' Returns a dict of the max points for each input: input id -> maxpoints. ''' + scoring = self.default_scoring + correct_points = scoring.get('correct') + return dict([(inputfield.get('id'), correct_points) for inputfield in self.inputfields]) + + def _find_options(self, inputfield): + ''' Returns an array of dicts where each dict represents an option. ''' + elements = inputfield.findall('./options/option') + return [{ + 'id': index, + 'description': option.text, + 'choice': option.get('choice') + } for (index, option) in enumerate(elements) ] + + def _find_option_with_choice(self, inputfield, choice): + ''' Returns the option with the given choice value, otherwise None. 
''' + for option in self._find_options(inputfield): + if option['choice'] == choice: + return option + + def _unpack(self, json_value): + ''' Unpacks a student response value submitted as JSON.''' + d = json.loads(json_value) + if type(d) != dict: + d = {} + + comment_value = d.get('comment', '') + if not isinstance(d, basestring): + comment_value = '' + + options_value = d.get('options', []) + if not isinstance(options_value, list): + options_value = [] + + return { + 'options_value': options_value, + 'comment_value': comment_value + } + + def _get_submitted_option_id(self, student_answer): + ''' Return the single option that was selected, otherwise None.''' + submitted = self._unpack(student_answer) + option_ids = submitted['options_value'] + if len(option_ids) == 1: + return option_ids[0] + return None + +#----------------------------------------------------------------------------- + # TEMPORARY: List of all response subclasses # FIXME: To be replaced by auto-registration @@ -1858,4 +2069,5 @@ __all__ = [CodeResponse, ChoiceResponse, MultipleChoiceResponse, TrueFalseResponse, - JavascriptResponse] + JavascriptResponse, + AnnotationResponse] diff --git a/common/lib/capa/capa/templates/annotationinput.html b/common/lib/capa/capa/templates/annotationinput.html new file mode 100644 index 0000000000..e0172bb13b --- /dev/null +++ b/common/lib/capa/capa/templates/annotationinput.html @@ -0,0 +1,70 @@ +
              +
              + +
              + ${title} + + % if return_to_annotation: + Return to Annotation
              + % endif +
              +
              + +
              ${text}
              +
              ${comment}
              + +
              ${comment_prompt}
              + + +
              ${tag_prompt}
              +
                + % for option in options: +
              • + % if has_options_value: + % if all([c == 'correct' for c in option['choice'], status]): + + % elif all([c == 'partially-correct' for c in option['choice'], status]): + + % elif all([c == 'incorrect' for c in option['choice'], status]): + + % endif + % endif + + + ${option['description']} + +
              • + % endfor +
              + + % if debug: +
              + Rendered with value:
              +
              ${value|h}
              + Current input value:
              + +
              + % else: + + % endif + + % if status == 'unsubmitted': + + % elif status == 'incomplete': + + % elif status == 'incorrect' and not has_options_value: + + % endif + +

              +
              + + +% if msg: +${msg|n} +% endif + diff --git a/common/lib/capa/capa/templates/chemicalequationinput.html b/common/lib/capa/capa/templates/chemicalequationinput.html index dd177dc920..17c84114e5 100644 --- a/common/lib/capa/capa/templates/chemicalequationinput.html +++ b/common/lib/capa/capa/templates/chemicalequationinput.html @@ -11,7 +11,7 @@
              % endif - -
              - % if status == 'unsubmitted': - - % elif status == 'correct': - - % elif status == 'incorrect': - - % elif status == 'incomplete': - +
              + % if input_type == 'checkbox' or not value: + % if status == 'unsubmitted' or show_correctness == 'never': + + % elif status == 'correct': + + % elif status == 'incorrect': + + % elif status == 'incomplete': + + % endif % endif -
              +
              -
              - % for choice_id, choice_description in choices: - - % endfor - -
              +
              + % for choice_id, choice_description in choices: + + % endfor + +
              + + % if show_correctness == "never" and (value or status not in ['unsubmitted']): +
              ${submitted_message}
              + %endif diff --git a/common/lib/capa/capa/templates/editageneinput.html b/common/lib/capa/capa/templates/editageneinput.html index 8dd4fa89d1..3465c62593 100644 --- a/common/lib/capa/capa/templates/editageneinput.html +++ b/common/lib/capa/capa/templates/editageneinput.html @@ -1,4 +1,5 @@ -
              +
              +
              % if status == 'unsubmitted': @@ -8,16 +9,12 @@ % elif status == 'incorrect':
              % elif status == 'incomplete': -
              +
              % endif - - - - - - Applet failed to run. No Java plug-in was found. - - + +
              + +

              @@ -37,3 +34,4 @@

              % endif
              + diff --git a/common/lib/capa/capa/tests/response_xml_factory.py b/common/lib/capa/capa/tests/response_xml_factory.py new file mode 100644 index 0000000000..aa401b70cd --- /dev/null +++ b/common/lib/capa/capa/tests/response_xml_factory.py @@ -0,0 +1,707 @@ +from lxml import etree +from abc import ABCMeta, abstractmethod + + +class ResponseXMLFactory(object): + """ Abstract base class for capa response XML factories. + Subclasses override create_response_element and + create_input_element to produce XML of particular response types""" + + __metaclass__ = ABCMeta + + @abstractmethod + def create_response_element(self, **kwargs): + """ Subclasses override to return an etree element + representing the capa response XML + (e.g. ). + + The tree should NOT contain any input elements + (such as ) as these will be added later.""" + return None + + @abstractmethod + def create_input_element(self, **kwargs): + """ Subclasses override this to return an etree element + representing the capa input XML (such as )""" + return None + + def build_xml(self, **kwargs): + """ Construct an XML string for a capa response + based on **kwargs. + + **kwargs is a dictionary that will be passed + to create_response_element() and create_input_element(). + See the subclasses below for other keyword arguments + you can specify. + + For all response types, **kwargs can contain: + + *question_text*: The text of the question to display, + wrapped in

              tags. + + *explanation_text*: The detailed explanation that will + be shown if the user answers incorrectly. + + *script*: The embedded Python script (a string) + + *num_responses*: The number of responses to create [DEFAULT: 1] + + *num_inputs*: The number of input elements + to create [DEFAULT: 1] + + Returns a string representation of the XML tree. + """ + + # Retrieve keyward arguments + question_text = kwargs.get('question_text', '') + explanation_text = kwargs.get('explanation_text', '') + script = kwargs.get('script', None) + num_responses = kwargs.get('num_responses', 1) + num_inputs = kwargs.get('num_inputs', 1) + + # The root is + root = etree.Element("problem") + + # Add a script if there is one + if script: + script_element = etree.SubElement(root, "script") + script_element.set("type", "loncapa/python") + script_element.text = str(script) + + # The problem has a child

              with question text + question = etree.SubElement(root, "p") + question.text = question_text + + # Add the response(s) + for i in range(0, int(num_responses)): + response_element = self.create_response_element(**kwargs) + root.append(response_element) + + # Add input elements + for j in range(0, int(num_inputs)): + input_element = self.create_input_element(**kwargs) + if not (None == input_element): + response_element.append(input_element) + + # The problem has an explanation of the solution + if explanation_text: + explanation = etree.SubElement(root, "solution") + explanation_div = etree.SubElement(explanation, "div") + explanation_div.set("class", "detailed-solution") + explanation_div.text = explanation_text + + return etree.tostring(root) + + @staticmethod + def textline_input_xml(**kwargs): + """ Create a XML element + + Uses **kwargs: + + *math_display*: If True, then includes a MathJax display of user input + + *size*: An integer representing the width of the text line + """ + math_display = kwargs.get('math_display', False) + size = kwargs.get('size', None) + + input_element = etree.Element('textline') + + if math_display: + input_element.set('math', '1') + + if size: + input_element.set('size', str(size)) + + return input_element + + @staticmethod + def choicegroup_input_xml(**kwargs): + """ Create a XML element + + Uses **kwargs: + + *choice_type*: Can be "checkbox", "radio", or "multiple" + + *choices*: List of True/False values indicating whether + a particular choice is correct or not. + Users must choose *all* correct options in order + to be marked correct. + DEFAULT: [True] + + *choice_names": List of strings identifying the choices. 
+ If specified, you must ensure that + len(choice_names) == len(choices) + """ + # Names of group elements + group_element_names = {'checkbox': 'checkboxgroup', + 'radio': 'radiogroup', + 'multiple': 'choicegroup'} + + # Retrieve **kwargs + choices = kwargs.get('choices', [True]) + choice_type = kwargs.get('choice_type', 'multiple') + choice_names = kwargs.get('choice_names', [None] * len(choices)) + + # Create the , , or element + assert(choice_type in group_element_names) + group_element = etree.Element(group_element_names[choice_type]) + + # Create the elements + for (correct_val, name) in zip(choices, choice_names): + choice_element = etree.SubElement(group_element, "choice") + choice_element.set("correct", "true" if correct_val else "false") + + # Add a name identifying the choice, if one exists + # For simplicity, we use the same string as both the + # name attribute and the text of the element + if name: + choice_element.text = str(name) + choice_element.set("name", str(name)) + + return group_element + + +class NumericalResponseXMLFactory(ResponseXMLFactory): + """ Factory for producing XML trees """ + + def create_response_element(self, **kwargs): + """ Create a XML element. + Uses **kwarg keys: + + *answer*: The correct answer (e.g. "5") + + *tolerance*: The tolerance within which a response + is considered correct. Can be a decimal (e.g. "0.01") + or percentage (e.g. 
"2%") + """ + + answer = kwargs.get('answer', None) + tolerance = kwargs.get('tolerance', None) + + response_element = etree.Element('numericalresponse') + + if answer: + response_element.set('answer', str(answer)) + + if tolerance: + responseparam_element = etree.SubElement(response_element, 'responseparam') + responseparam_element.set('type', 'tolerance') + responseparam_element.set('default', str(tolerance)) + + return response_element + + def create_input_element(self, **kwargs): + return ResponseXMLFactory.textline_input_xml(**kwargs) + + +class CustomResponseXMLFactory(ResponseXMLFactory): + """ Factory for producing XML trees """ + + def create_response_element(self, **kwargs): + """ Create a XML element. + + Uses **kwargs: + + *cfn*: the Python code to run. Can be inline code, + or the name of a function defined in earlier - - -

              Hints can be provided to students, based on the last response given, as well as the history of responses given. Here is an example of a hint produced by a Formula Response problem.

              - -

              -What is the equation of the line which passess through ($x1,$y1) and -($x2,$y2)?

              - -

              The correct answer is $answer. A common error is to invert the equation for the slope. Enter -$wrongans to see a hint.

              - - - - - - y = - - - - - You have inverted the slope in the question. - - - - - diff --git a/common/lib/capa/capa/tests/test_files/imageresponse.xml b/common/lib/capa/capa/tests/test_files/imageresponse.xml deleted file mode 100644 index 41c9f01218..0000000000 --- a/common/lib/capa/capa/tests/test_files/imageresponse.xml +++ /dev/null @@ -1,40 +0,0 @@ - -

              -Two skiers are on frictionless black diamond ski slopes. -Hello

              - - - -Click on the image where the top skier will stop momentarily if the top skier starts from rest. - -Click on the image where the lower skier will stop momentarily if the lower skier starts from rest. - -Click on either of the two positions as discussed previously. - -Click on either of the two positions as discussed previously. - -Click on either of the two positions as discussed previously. - -

              Use conservation of energy.

              -
              -
              - - - - - - - -Click on either of the two positions as discussed previously. - -Click on either of the two positions as discussed previously. - - -Click on either of the two positions as discussed previously. - -

              Use conservation of energy.

              -
              -
              - - -
              diff --git a/common/lib/capa/capa/tests/test_files/javascriptresponse.xml b/common/lib/capa/capa/tests/test_files/javascriptresponse.xml deleted file mode 100644 index 439866e62c..0000000000 --- a/common/lib/capa/capa/tests/test_files/javascriptresponse.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js b/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js deleted file mode 100644 index 6670c6a09a..0000000000 --- a/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js +++ /dev/null @@ -1,50 +0,0 @@ -// Generated by CoffeeScript 1.3.3 -(function() { - var MinimaxProblemDisplay, root, - __hasProp = {}.hasOwnProperty, - __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }; - - MinimaxProblemDisplay = (function(_super) { - - __extends(MinimaxProblemDisplay, _super); - - function MinimaxProblemDisplay(state, submission, evaluation, container, submissionField, parameters) { - this.state = state; - this.submission = submission; - this.evaluation = evaluation; - this.container = container; - this.submissionField = submissionField; - this.parameters = parameters != null ? 
parameters : {}; - MinimaxProblemDisplay.__super__.constructor.call(this, this.state, this.submission, this.evaluation, this.container, this.submissionField, this.parameters); - } - - MinimaxProblemDisplay.prototype.render = function() {}; - - MinimaxProblemDisplay.prototype.createSubmission = function() { - var id, value, _ref, _results; - this.newSubmission = {}; - if (this.submission != null) { - _ref = this.submission; - _results = []; - for (id in _ref) { - value = _ref[id]; - _results.push(this.newSubmission[id] = value); - } - return _results; - } - }; - - MinimaxProblemDisplay.prototype.getCurrentSubmission = function() { - return this.newSubmission; - }; - - return MinimaxProblemDisplay; - - })(XProblemDisplay); - - root = typeof exports !== "undefined" && exports !== null ? exports : this; - - root.TestProblemDisplay = TestProblemDisplay; - -}).call(this); -; diff --git a/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js b/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js deleted file mode 100644 index 6670c6a09a..0000000000 --- a/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js +++ /dev/null @@ -1,50 +0,0 @@ -// Generated by CoffeeScript 1.3.3 -(function() { - var MinimaxProblemDisplay, root, - __hasProp = {}.hasOwnProperty, - __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }; - - MinimaxProblemDisplay = (function(_super) { - - __extends(MinimaxProblemDisplay, _super); - - function MinimaxProblemDisplay(state, submission, evaluation, container, submissionField, parameters) { - this.state = state; - this.submission = submission; - this.evaluation = evaluation; - this.container = container; - this.submissionField = submissionField; - this.parameters = 
parameters != null ? parameters : {}; - MinimaxProblemDisplay.__super__.constructor.call(this, this.state, this.submission, this.evaluation, this.container, this.submissionField, this.parameters); - } - - MinimaxProblemDisplay.prototype.render = function() {}; - - MinimaxProblemDisplay.prototype.createSubmission = function() { - var id, value, _ref, _results; - this.newSubmission = {}; - if (this.submission != null) { - _ref = this.submission; - _results = []; - for (id in _ref) { - value = _ref[id]; - _results.push(this.newSubmission[id] = value); - } - return _results; - } - }; - - MinimaxProblemDisplay.prototype.getCurrentSubmission = function() { - return this.newSubmission; - }; - - return MinimaxProblemDisplay; - - })(XProblemDisplay); - - root = typeof exports !== "undefined" && exports !== null ? exports : this; - - root.TestProblemDisplay = TestProblemDisplay; - -}).call(this); -; diff --git a/common/lib/capa/capa/tests/test_files/multi_bare.xml b/common/lib/capa/capa/tests/test_files/multi_bare.xml deleted file mode 100644 index 20bc8f853d..0000000000 --- a/common/lib/capa/capa/tests/test_files/multi_bare.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - This is foil One. - - - This is foil Two. - - - This is foil Three. - - - This is foil Four. - - - This is foil Five. - - - - diff --git a/common/lib/capa/capa/tests/test_files/multichoice.xml b/common/lib/capa/capa/tests/test_files/multichoice.xml deleted file mode 100644 index 60bf02ec59..0000000000 --- a/common/lib/capa/capa/tests/test_files/multichoice.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - This is foil One. - - - This is foil Two. - - - This is foil Three. - - - This is foil Four. - - - This is foil Five. - - - - diff --git a/common/lib/capa/capa/tests/test_files/optionresponse.xml b/common/lib/capa/capa/tests/test_files/optionresponse.xml deleted file mode 100644 index 99a17e8fac..0000000000 --- a/common/lib/capa/capa/tests/test_files/optionresponse.xml +++ /dev/null @@ -1,63 +0,0 @@ - - -

              -Why do bicycles benefit from having larger wheels when going up a bump as shown in the picture?
              -Assume that for both bicycles:
              -1.) The tires have equal air pressure.
              -2.) The bicycles never leave the contact with the bump.
              -3.) The bicycles have the same mass. The bicycle tires (regardless of size) have the same mass.
              -

              -
              - -
                -
              • - -

                The bicycles with larger wheels have more time to go over the bump. This decreases the magnitude of the force needed to lift the bicycle.

                -
                - - -
              • -
              • - -

                The bicycles with larger wheels always have a smaller vertical displacement regardless of speed.

                -
                - - -
              • -
              • - -

                The bicycles with larger wheels experience a force backward with less magnitude for the same amount of time.

                -
                - - -
              • -
              • - -

                The bicycles with larger wheels experience a force backward with less magnitude for a greater amount of time.

                -
                - - -
              • -
              • - -

                The bicycles with larger wheels have more kinetic energy turned into gravitational potential energy.

                -
                - - -
              • -
              • - -

                The bicycles with larger wheels have more rotational kinetic energy, so the horizontal velocity of the biker changes less.

                -
                - - -
              • -
              - - -
              -
              -
              -
              -
              -
              diff --git a/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml deleted file mode 100644 index 86efdf0f18..0000000000 --- a/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml +++ /dev/null @@ -1,25 +0,0 @@ - -

              Example: String Response Problem

              -
              -
              - - Which US state has Lansing as its capital? - - - - - - - - - The state capital of Wisconsin is Madison. - - - The state capital of Minnesota is St. Paul. - - - The state you are looking for is also known as the 'Great Lakes State' - - - -
              diff --git a/common/lib/capa/capa/tests/test_files/symbolicresponse.xml b/common/lib/capa/capa/tests/test_files/symbolicresponse.xml deleted file mode 100644 index 4dc2bc9d7b..0000000000 --- a/common/lib/capa/capa/tests/test_files/symbolicresponse.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -

              Example: Symbolic Math Response Problem

              - -

              -A symbolic math response problem presents one or more symbolic math -input fields for input. Correctness of input is evaluated based on -the symbolic properties of the expression entered. The student enters -text, but sees a proper symbolic rendition of the entered formula, in -real time, next to the input box. -

              - -

              This is a correct answer which may be entered below:

              -

              cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]

              - - - Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax] - and give the resulting \(2 \times 2\) matrix.
              - Your input should be typed in as a list of lists, eg [[1,2],[3,4]].
              - [mathjax]U=[/mathjax] - - -
              -
              - -
              -
              diff --git a/common/lib/capa/capa/tests/test_files/truefalse.xml b/common/lib/capa/capa/tests/test_files/truefalse.xml deleted file mode 100644 index 60018f7a2d..0000000000 --- a/common/lib/capa/capa/tests/test_files/truefalse.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - This is foil One. - - - This is foil Two. - - - This is foil Three. - - - This is foil Four. - - - This is foil Five. - - - - diff --git a/common/lib/capa/capa/tests/test_html_render.py b/common/lib/capa/capa/tests/test_html_render.py new file mode 100644 index 0000000000..e99308587e --- /dev/null +++ b/common/lib/capa/capa/tests/test_html_render.py @@ -0,0 +1,233 @@ +import unittest +from lxml import etree +import os +import textwrap +import json + +import mock + +from capa.capa_problem import LoncapaProblem +from .response_xml_factory import StringResponseXMLFactory, CustomResponseXMLFactory +from . import test_system + +class CapaHtmlRenderTest(unittest.TestCase): + + def test_blank_problem(self): + """ + It's important that blank problems don't break, since that's + what you start with in studio. 
+ """ + xml_str = " " + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + # expect that we made it here without blowing up + + def test_include_html(self): + # Create a test file to include + self._create_test_file('test_include.xml', + 'Test include') + + # Generate some XML with an + xml_str = textwrap.dedent(""" + + + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + # Expect that the include file was embedded in the problem + test_element = rendered_html.find("test") + self.assertEqual(test_element.tag, "test") + self.assertEqual(test_element.text, "Test include") + + + + + def test_process_outtext(self): + # Generate some XML with and + xml_str = textwrap.dedent(""" + + Test text + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + # Expect that the and + # were converted to tags + span_element = rendered_html.find('span') + self.assertEqual(span_element.text, 'Test text') + + def test_render_script(self): + # Generate some XML with a + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + # Expect that the script element has been removed from the rendered HTML + script_element = rendered_html.find('script') + self.assertEqual(None, script_element) + + def test_render_javascript(self): + # Generate some XML with a + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + + # expect the javascript is still present in the rendered html + self.assertTrue("" in etree.tostring(rendered_html)) + + + def 
test_render_response_xml(self): + # Generate some XML for a string response + kwargs = {'question_text': "Test question", + 'explanation_text': "Test explanation", + 'answer': 'Test answer', + 'hints': [('test prompt', 'test_hint', 'test hint text')]} + xml_str = StringResponseXMLFactory().build_xml(**kwargs) + + # Mock out the template renderer + test_system.render_template = mock.Mock() + test_system.render_template.return_value = "
              Input Template Render
              " + + # Create the problem and render the HTML + problem = LoncapaProblem(xml_str, '1', system=test_system) + rendered_html = etree.XML(problem.get_html()) + + # Expect problem has been turned into a
              + self.assertEqual(rendered_html.tag, "div") + + # Expect question text is in a

              child + question_element = rendered_html.find("p") + self.assertEqual(question_element.text, "Test question") + + # Expect that the response has been turned into a + response_element = rendered_html.find("span") + self.assertEqual(response_element.tag, "span") + + # Expect that the response + # that contains a

              for the textline + textline_element = response_element.find("div") + self.assertEqual(textline_element.text, 'Input Template Render') + + # Expect a child
              for the solution + # with the rendered template + solution_element = rendered_html.find("div") + self.assertEqual(solution_element.text, 'Input Template Render') + + # Expect that the template renderer was called with the correct + # arguments, once for the textline input and once for + # the solution + expected_textline_context = {'status': 'unsubmitted', + 'value': '', + 'preprocessor': None, + 'msg': '', + 'inline': False, + 'hidden': False, + 'do_math': False, + 'id': '1_2_1', + 'size': None} + + expected_solution_context = {'id': '1_solution_1'} + + expected_calls = [mock.call('textline.html', expected_textline_context), + mock.call('solutionspan.html', expected_solution_context), + mock.call('textline.html', expected_textline_context), + mock.call('solutionspan.html', expected_solution_context)] + + self.assertEqual(test_system.render_template.call_args_list, + expected_calls) + + + def test_render_response_with_overall_msg(self): + # CustomResponse script that sets an overall_message + script=textwrap.dedent(""" + def check_func(*args): + msg = '

              Test message 1

              Test message 2

              ' + return {'overall_message': msg, + 'input_list': [ {'ok': True, 'msg': '' } ] } + """) + + # Generate some XML for a CustomResponse + kwargs = {'script':script, 'cfn': 'check_func'} + xml_str = CustomResponseXMLFactory().build_xml(**kwargs) + + # Create the problem and render the html + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Grade the problem + correctmap = problem.grade_answers({'1_2_1': 'test'}) + + # Render the html + rendered_html = etree.XML(problem.get_html()) + + + # Expect that there is a
              within the response
              + # with css class response_message + msg_div_element = rendered_html.find(".//div[@class='response_message']") + self.assertEqual(msg_div_element.tag, "div") + self.assertEqual(msg_div_element.get('class'), "response_message") + + # Expect that the
              contains our message (as part of the XML tree) + msg_p_elements = msg_div_element.findall('p') + self.assertEqual(msg_p_elements[0].tag, "p") + self.assertEqual(msg_p_elements[0].text, "Test message 1") + + self.assertEqual(msg_p_elements[1].tag, "p") + self.assertEqual(msg_p_elements[1].text, "Test message 2") + + + def test_substitute_python_vars(self): + # Generate some XML with Python variables defined in a script + # and used later as attributes + xml_str = textwrap.dedent(""" + + + + + """) + + # Create the problem and render the HTML + problem = LoncapaProblem(xml_str, '1', system=test_system) + rendered_html = etree.XML(problem.get_html()) + + # Expect that the variable $test has been replaced with its value + span_element = rendered_html.find('span') + self.assertEqual(span_element.get('attr'), "TEST") + + def _create_test_file(self, path, content_str): + test_fp = test_system.filestore.open(path, "w") + test_fp.write(content_str) + test_fp.close() + + self.addCleanup(lambda: os.remove(test_fp.name)) diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 4a5ea5c429..360fd9f2f6 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -102,6 +102,8 @@ class ChoiceGroupTest(unittest.TestCase): 'choices': [('foil1', 'This is foil One.'), ('foil2', 'This is foil Two.'), ('foil3', 'This is foil Three.'), ], + 'show_correctness': 'always', + 'submitted_message': 'Answer received.', 'name_array_suffix': expected_suffix, # what is this for?? } @@ -482,27 +484,43 @@ class ChemicalEquationTest(unittest.TestCase): ''' Check that chemical equation inputs work. 
''' - - def test_rendering(self): - size = "42" - xml_str = """""".format(size=size) + def setUp(self): + self.size = "42" + xml_str = """""".format(size=self.size) element = etree.fromstring(xml_str) state = {'value': 'H2OYeah', } - the_input = lookup_tag('chemicalequationinput')(test_system, element, state) + self.the_input = lookup_tag('chemicalequationinput')(test_system, element, state) - context = the_input._get_render_context() + + def test_rendering(self): + ''' Verify that the render context matches the expected render context''' + context = self.the_input._get_render_context() expected = {'id': 'prob_1_2', 'value': 'H2OYeah', 'status': 'unanswered', 'msg': '', - 'size': size, + 'size': self.size, 'previewer': '/static/js/capa/chemical_equation_preview.js', } self.assertEqual(context, expected) + + def test_chemcalc_ajax_sucess(self): + ''' Verify that using the correct dispatch and valid data produces a valid response''' + + data = {'formula': "H"} + response = self.the_input.handle_ajax("preview_chemcalc", data) + + self.assertTrue('preview' in response) + self.assertNotEqual(response['preview'], '') + self.assertEqual(response['error'], "") + + + + class DragAndDropTest(unittest.TestCase): ''' @@ -539,14 +557,14 @@ class DragAndDropTest(unittest.TestCase): "target_outline": "false", "base_image": "/static/images/about_1.png", "draggables": [ -{"can_reuse": "", "label": "Label 1", "id": "1", "icon": ""}, -{"can_reuse": "", "label": "cc", "id": "name_with_icon", "icon": "/static/images/cc.jpg", }, -{"can_reuse": "", "label": "arrow-left", "id": "with_icon", "icon": "/static/images/arrow-left.png", "can_reuse": ""}, -{"can_reuse": "", "label": "Label2", "id": "5", "icon": "", "can_reuse": ""}, -{"can_reuse": "", "label": "Mute", "id": "2", "icon": "/static/images/mute.png", "can_reuse": ""}, -{"can_reuse": "", "label": "spinner", "id": "name_label_icon3", "icon": "/static/images/spinner.gif", "can_reuse": ""}, -{"can_reuse": "", "label": "Star", "id": 
"name4", "icon": "/static/images/volume.png", "can_reuse": ""}, -{"can_reuse": "", "label": "Label3", "id": "7", "icon": "", "can_reuse": ""}], +{"can_reuse": "", "label": "Label 1", "id": "1", "icon": "", "target_fields": []}, +{"can_reuse": "", "label": "cc", "id": "name_with_icon", "icon": "/static/images/cc.jpg", "target_fields": []}, +{"can_reuse": "", "label": "arrow-left", "id": "with_icon", "icon": "/static/images/arrow-left.png", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Label2", "id": "5", "icon": "", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Mute", "id": "2", "icon": "/static/images/mute.png", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "spinner", "id": "name_label_icon3", "icon": "/static/images/spinner.gif", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Star", "id": "name4", "icon": "/static/images/volume.png", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Label3", "id": "7", "icon": "", "can_reuse": "", "target_fields": []}], "one_per_target": "True", "targets": [ {"y": "90", "x": "210", "id": "t1", "w": "90", "h": "90"}, @@ -570,3 +588,65 @@ class DragAndDropTest(unittest.TestCase): context.pop('drag_and_drop_json') expected.pop('drag_and_drop_json') self.assertEqual(context, expected) + + +class AnnotationInputTest(unittest.TestCase): + ''' + Make sure option inputs work + ''' + def test_rendering(self): + xml_str = ''' + + foo + bar + my comment + type a commentary + select a tag + + + + + + +''' + element = etree.fromstring(xml_str) + + value = {"comment": "blah blah", "options": [1]} + json_value = json.dumps(value) + state = { + 'value': json_value, + 'id': 'annotation_input', + 'status': 'answered' + } + + tag = 'annotationinput' + + the_input = lookup_tag(tag)(test_system, element, state) + + context = the_input._get_render_context() + + expected = { + 'id': 'annotation_input', + 'value': value, + 'status': 'answered', + 
'msg': '', + 'title': 'foo', + 'text': 'bar', + 'comment': 'my comment', + 'comment_prompt': 'type a commentary', + 'tag_prompt': 'select a tag', + 'options': [ + {'id': 0, 'description': 'x', 'choice': 'correct'}, + {'id': 1, 'description': 'y', 'choice': 'incorrect'}, + {'id': 2, 'description': 'z', 'choice': 'partially-correct'} + ], + 'value': json_value, + 'options_value': value['options'], + 'has_options_value': len(value['options']) > 0, + 'comment_value': value['comment'], + 'debug': False, + 'return_to_annotation': True + } + + self.maxDiff = None + self.assertDictEqual(context, expected) diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py index 18da338b91..e024909d75 100644 --- a/common/lib/capa/capa/tests/test_responsetypes.py +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -8,6 +8,7 @@ import json from nose.plugins.skip import SkipTest import os import unittest +import textwrap from . import test_system @@ -16,93 +17,151 @@ from capa.correctmap import CorrectMap from capa.util import convert_files_to_filenames from capa.xqueue_interface import dateformat +class ResponseTest(unittest.TestCase): + """ Base class for tests of capa responses.""" -class MultiChoiceTest(unittest.TestCase): - def test_MC_grade(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_foil3'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_foil2'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + xml_factory_class = None - def test_MC_bare_grades(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) - 
correct_answers = {'1_2_1': 'choice_2'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_1'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + def setUp(self): + if self.xml_factory_class: + self.xml_factory = self.xml_factory_class() - def test_TF_grade(self): - truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" - test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': ['choice_foil1']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + def build_problem(self, **kwargs): + xml = self.xml_factory.build_xml(**kwargs) + return lcp.LoncapaProblem(xml, '1', system=test_system) + + def assert_grade(self, problem, submission, expected_correctness): + input_dict = {'1_2_1': submission} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness) + + def assert_multiple_grade(self, problem, correct_answers, incorrect_answers): + for input_str in correct_answers: + result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') + self.assertEqual(result, 'correct', + msg="%s should be marked correct" % 
str(input_str)) + + for input_str in incorrect_answers: + result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') + self.assertEqual(result, 'incorrect', + msg="%s should be marked incorrect" % str(input_str)) + +class MultiChoiceResponseTest(ResponseTest): + from response_xml_factory import MultipleChoiceResponseXMLFactory + xml_factory_class = MultipleChoiceResponseXMLFactory + + def test_multiple_choice_grade(self): + problem = self.build_problem(choices=[False, True, False]) + + # Ensure that we get the expected grades + self.assert_grade(problem, 'choice_0', 'incorrect') + self.assert_grade(problem, 'choice_1', 'correct') + self.assert_grade(problem, 'choice_2', 'incorrect') + + def test_named_multiple_choice_grade(self): + problem = self.build_problem(choices=[False, True, False], + choice_names=["foil_1", "foil_2", "foil_3"]) + + # Ensure that we get the expected grades + self.assert_grade(problem, 'choice_foil_1', 'incorrect') + self.assert_grade(problem, 'choice_foil_2', 'correct') + self.assert_grade(problem, 'choice_foil_3', 'incorrect') -class ImageResponseTest(unittest.TestCase): - def test_ir_grade(self): - imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" - test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system) - # testing regions only - correct_answers = { - #regions - '1_2_1': '(490,11)-(556,98)', - '1_2_2': '(242,202)-(296,276)', - '1_2_3': '(490,11)-(556,98);(242,202)-(296,276)', - '1_2_4': '(490,11)-(556,98);(242,202)-(296,276)', - '1_2_5': '(490,11)-(556,98);(242,202)-(296,276)', - #testing regions and rectanges - '1_3_1': 'rectangle="(490,11)-(556,98)" \ - regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_2': 'rectangle="(490,11)-(556,98)" \ - regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_3': 'regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_4': 
'regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_5': 'regions="[[[10,10], [20,10], [20, 30]]]"', - '1_3_6': 'regions="[[10,10], [30,30], [15, 15]]"', - '1_3_7': 'regions="[[10,10], [30,30], [10, 30], [30, 10]]"', - } - test_answers = { - '1_2_1': '[500,20]', - '1_2_2': '[250,300]', - '1_2_3': '[500,20]', - '1_2_4': '[250,250]', - '1_2_5': '[10,10]', +class TrueFalseResponseTest(ResponseTest): + from response_xml_factory import TrueFalseResponseXMLFactory + xml_factory_class = TrueFalseResponseXMLFactory - '1_3_1': '[500,20]', - '1_3_2': '[15,15]', - '1_3_3': '[500,20]', - '1_3_4': '[115,115]', - '1_3_5': '[15,15]', - '1_3_6': '[20,20]', - '1_3_7': '[20,15]', - } + def test_true_false_grade(self): + problem = self.build_problem(choices=[False, True, True]) - # regions - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_3'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_4'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_5'), 'incorrect') + # Check the results + # Mark correct if and only if ALL (and only) correct choices selected + self.assert_grade(problem, 'choice_0', 'incorrect') + self.assert_grade(problem, 'choice_1', 'incorrect') + self.assert_grade(problem, 'choice_2', 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2'], 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect') + self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct') - # regions and rectangles - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'correct') - 
self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_2'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_3'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_4'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_5'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_6'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_7'), 'correct') + # Invalid choices should be marked incorrect (we have no choice 3) + self.assert_grade(problem, 'choice_3', 'incorrect') + self.assert_grade(problem, 'not_a_choice', 'incorrect') + + def test_named_true_false_grade(self): + problem = self.build_problem(choices=[False, True, True], + choice_names=['foil_1','foil_2','foil_3']) + + # Check the results + # Mark correct if and only if ALL (and only) correct chocies selected + self.assert_grade(problem, 'choice_foil_1', 'incorrect') + self.assert_grade(problem, 'choice_foil_2', 'incorrect') + self.assert_grade(problem, 'choice_foil_3', 'incorrect') + self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2', 'choice_foil_3'], 'incorrect') + self.assert_grade(problem, ['choice_foil_1', 'choice_foil_3'], 'incorrect') + self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2'], 'incorrect') + self.assert_grade(problem, ['choice_foil_2', 'choice_foil_3'], 'correct') + + # Invalid choices should be marked incorrect + self.assert_grade(problem, 'choice_foil_4', 'incorrect') + self.assert_grade(problem, 'not_a_choice', 'incorrect') + +class ImageResponseTest(ResponseTest): + from response_xml_factory import ImageResponseXMLFactory + xml_factory_class = ImageResponseXMLFactory + + def test_rectangle_grade(self): + # Define a rectangle with corners (10,10) and (20,20) + problem = self.build_problem(rectangle="(10,10)-(20,20)") + + # Anything inside the rectangle 
(and along the borders) is correct + # Everything else is incorrect + correct_inputs = ["[12,19]", "[10,10]", "[20,20]", + "[10,15]", "[20,15]", "[15,10]", "[15,20]"] + incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_multiple_rectangles_grade(self): + # Define two rectangles + rectangle_str = "(10,10)-(20,20);(100,100)-(200,200)" + + # Expect that only points inside the rectangles are marked correct + problem = self.build_problem(rectangle=rectangle_str) + correct_inputs = ["[12,19]", "[120, 130]"] + incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]", + "[50,55]", "[300, 14]", "[120, 400]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_region_grade(self): + # Define a triangular region with corners (0,0), (5,10), and (0, 10) + region_str = "[ [1,1], [5,10], [0,10] ]" + + # Expect that only points inside the triangle are marked correct + problem = self.build_problem(regions=region_str) + correct_inputs = ["[2,4]", "[1,3]"] + incorrect_inputs = ["[0,0]", "[3,5]", "[5,15]", "[30, 12]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_multiple_regions_grade(self): + # Define multiple regions that the user can select + region_str="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]" + + # Expect that only points inside the regions are marked correct + problem = self.build_problem(regions=region_str) + correct_inputs = ["[15,12]", "[110,112]"] + incorrect_inputs = ["[0,0]", "[600,300]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_region_and_rectangle_grade(self): + rectangle_str = "(100,100)-(200,200)" + region_str="[[10,10], [20,10], [20, 30]]" + + # Expect that only points inside the rectangle or region are marked correct + problem = self.build_problem(regions=region_str, rectangle=rectangle_str) + correct_inputs = ["[13,12]", 
"[110,112]"] + incorrect_inputs = ["[0,0]", "[600,300]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) class SymbolicResponseTest(unittest.TestCase): @@ -195,60 +254,165 @@ class SymbolicResponseTest(unittest.TestCase): self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') -class OptionResponseTest(unittest.TestCase): - ''' - Run this with +class OptionResponseTest(ResponseTest): + from response_xml_factory import OptionResponseXMLFactory + xml_factory_class = OptionResponseXMLFactory - python manage.py test courseware.OptionResponseTest - ''' - def test_or_grade(self): - optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml" - test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'True', - '1_2_2': 'False'} - test_answers = {'1_2_1': 'True', - '1_2_2': 'True', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + def test_grade(self): + problem = self.build_problem(options=["first", "second", "third"], + correct_option="second") + + # Assert that we get the expected grades + self.assert_grade(problem, "first", "incorrect") + self.assert_grade(problem, "second", "correct") + self.assert_grade(problem, "third", "incorrect") + + # Options not in the list should be marked incorrect + self.assert_grade(problem, "invalid_option", "incorrect") -class FormulaResponseWithHintTest(unittest.TestCase): - ''' - Test Formula response problem with a hint - This problem also uses calc. 
- ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': '2.5*x-5.0'} - test_answers = {'1_2_1': '0.4*x-5.0'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) +class FormulaResponseTest(ResponseTest): + from response_xml_factory import FormulaResponseXMLFactory + xml_factory_class = FormulaResponseXMLFactory + + def test_grade(self): + # Sample variables x and y in the range [-10, 10] + sample_dict = {'x': (-10, 10), 'y': (-10, 10)} + + # The expected solution is numerically equivalent to x+2y + problem = self.build_problem(sample_dict=sample_dict, + num_samples=10, + tolerance=0.01, + answer="x+2*y") + + # Expect an equivalent formula to be marked correct + # 2x - x + y + y = x + 2y + input_formula = "2*x - x + y + y" + self.assert_grade(problem, input_formula, "correct") + + # Expect an incorrect formula to be marked incorrect + # x + y != x + 2y + input_formula = "x + y" + self.assert_grade(problem, input_formula, "incorrect") + + def test_hint(self): + # Sample variables x and y in the range [-10, 10] + sample_dict = {'x': (-10, 10), 'y': (-10,10) } + + # Give a hint if the user leaves off the coefficient + # or leaves out x + hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'), + ('2*y', 'missing_x', 'Try including the variable x')] -class StringResponseWithHintTest(unittest.TestCase): - ''' - Test String response problem with a hint - ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - 
correct_answers = {'1_2_1': 'Michigan'} - test_answers = {'1_2_1': 'Minnesota'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('St. Paul' in cmap.get_hint('1_2_1')) + # The expected solution is numerically equivalent to x+2y + problem = self.build_problem(sample_dict=sample_dict, + num_samples=10, + tolerance=0.01, + answer="x+2*y", + hints=hints) + + # Expect to receive a hint if we add an extra y + input_dict = {'1_2_1': "x + 2*y + y"} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + 'Check the coefficient of y') + + # Expect to receive a hint if we leave out x + input_dict = {'1_2_1': "2*y"} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + 'Try including the variable x') -class CodeResponseTest(unittest.TestCase): - ''' - Test CodeResponse - TODO: Add tests for external grader messages - ''' + def test_script(self): + # Calculate the answer using a script + script = "calculated_ans = 'x+x'" + + # Sample x in the range [-10,10] + sample_dict = {'x': (-10, 10)} + + # The expected solution is numerically equivalent to 2*x + problem = self.build_problem(sample_dict=sample_dict, + num_samples=10, + tolerance=0.01, + answer="$calculated_ans", + script=script) + + # Expect that the inputs are graded correctly + self.assert_grade(problem, '2*x', 'correct') + self.assert_grade(problem, '3*x', 'incorrect') + + +class StringResponseTest(ResponseTest): + from response_xml_factory import StringResponseXMLFactory + xml_factory_class = StringResponseXMLFactory + + + def test_case_sensitive(self): + problem = self.build_problem(answer="Second", case_sensitive=True) + + # Exact string should be correct + self.assert_grade(problem, "Second", "correct") + + # Other strings and the lowercase version 
of the string are incorrect + self.assert_grade(problem, "Other String", "incorrect") + self.assert_grade(problem, "second", "incorrect") + + def test_case_insensitive(self): + problem = self.build_problem(answer="Second", case_sensitive=False) + + # Both versions of the string should be allowed, regardless + # of capitalization + self.assert_grade(problem, "Second", "correct") + self.assert_grade(problem, "second", "correct") + + # Other strings are not allowed + self.assert_grade(problem, "Other String", "incorrect") + + def test_hints(self): + hints = [("wisconsin", "wisc", "The state capital of Wisconsin is Madison"), + ("minnesota", "minn", "The state capital of Minnesota is St. Paul")] + + problem = self.build_problem(answer="Michigan", + case_sensitive=False, + hints=hints) + + # We should get a hint for Wisconsin + input_dict = {'1_2_1': 'Wisconsin'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + "The state capital of Wisconsin is Madison") + + # We should get a hint for Minnesota + input_dict = {'1_2_1': 'Minnesota'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + "The state capital of Minnesota is St. 
Paul") + + # We should NOT get a hint for Michigan (the correct answer) + input_dict = {'1_2_1': 'Michigan'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), "") + + # We should NOT get a hint for any other string + input_dict = {'1_2_1': 'California'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), "") + +class CodeResponseTest(ResponseTest): + from response_xml_factory import CodeResponseXMLFactory + xml_factory_class = CodeResponseXMLFactory + + def setUp(self): + super(CodeResponseTest, self).setUp() + + grader_payload = json.dumps({"grader": "ps04/grade_square.py"}) + self.problem = self.build_problem(initial_display="def square(x):", + answer_display="answer", + grader_payload=grader_payload, + num_responses=2) + @staticmethod def make_queuestate(key, time): timestr = datetime.strftime(time, dateformat) @@ -258,171 +422,533 @@ class CodeResponseTest(unittest.TestCase): """ Simple test of whether LoncapaProblem knows when it's been queued """ - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) - answer_ids = sorted(test_lcp.get_question_answers()) + answer_ids = sorted(self.problem.get_question_answers()) - # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) + # CodeResponse requires internal CorrectMap state. 
Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + self.problem.correct_map.update(cmap) - self.assertEquals(test_lcp.is_queued(), False) + self.assertEquals(self.problem.is_queued(), False) - # Now we queue the LCP - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) - cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - test_lcp.correct_map.update(cmap) + # Now we queue the LCP + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) + cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + self.problem.correct_map.update(cmap) - self.assertEquals(test_lcp.is_queued(), True) + self.assertEquals(self.problem.is_queued(), True) def test_update_score(self): ''' Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + answer_ids = sorted(self.problem.get_question_answers()) - answer_ids = sorted(test_lcp.get_question_answers()) + # CodeResponse requires internal CorrectMap state. Build it now in the queued state + old_cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now()) + old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state - old_cmap = CorrectMap() + # Message format common to external graders + grader_msg = 'MESSAGE' # Must be valid XML + correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg}) + incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg}) + + xserver_msgs = {'correct': correct_score_msg, + 'incorrect': incorrect_score_msg, } + + # Incorrect queuekey, state should not be updated + for correctness in ['correct', 'incorrect']: + self.problem.correct_map = CorrectMap() + self.problem.correct_map.update(old_cmap) # Deep copy + + self.problem.update_score(xserver_msgs[correctness], queuekey=0) + self.assertEquals(self.problem.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison + + for answer_id in answer_ids: + self.assertTrue(self.problem.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered + + # Correct queuekey, state should be updated + for correctness in ['correct', 'incorrect']: for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now()) - old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + self.problem.correct_map = CorrectMap() + self.problem.correct_map.update(old_cmap) - # Message format common to external graders - grader_msg = 'MESSAGE' # Must be valid XML - correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg}) - incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg}) + new_cmap = CorrectMap() + new_cmap.update(old_cmap) + npoints = 1 if correctness == 'correct' else 0 + new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) - xserver_msgs = {'correct': correct_score_msg, - 'incorrect': incorrect_score_msg, } + self.problem.update_score(xserver_msgs[correctness], queuekey=1000 + i) + 
self.assertEquals(self.problem.correct_map.get_dict(), new_cmap.get_dict()) - # Incorrect queuekey, state should not be updated - for correctness in ['correct', 'incorrect']: - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) # Deep copy - - test_lcp.update_score(xserver_msgs[correctness], queuekey=0) - self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison - - for answer_id in answer_ids: - self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered - - # Correct queuekey, state should be updated - for correctness in ['correct', 'incorrect']: - for i, answer_id in enumerate(answer_ids): - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) - - new_cmap = CorrectMap() - new_cmap.update(old_cmap) - npoints = 1 if correctness == 'correct' else 0 - new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) - - test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) - self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) - - for j, test_id in enumerate(answer_ids): - if j == i: - self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered - else: - self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered + for j, test_id in enumerate(answer_ids): + if j == i: + self.assertFalse(self.problem.correct_map.is_queued(test_id)) # Should be dequeued, message delivered + else: + self.assertTrue(self.problem.correct_map.is_queued(test_id)) # Should be queued, message undelivered def test_recentmost_queuetime(self): ''' Test whether the LoncapaProblem knows about the time of queue requests ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', 
system=test_system) + answer_ids = sorted(self.problem.get_question_answers()) - answer_ids = sorted(test_lcp.get_question_answers()) + # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + self.problem.correct_map.update(cmap) - # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) + self.assertEquals(self.problem.get_recentmost_queuetime(), None) - self.assertEquals(test_lcp.get_recentmost_queuetime(), None) + # CodeResponse requires internal CorrectMap state. Build it now in the queued state + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + latest_timestamp = datetime.now() + queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp) + cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) + self.problem.correct_map.update(cmap) - # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - latest_timestamp = datetime.now() - queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp) - cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) - test_lcp.correct_map.update(cmap) + # Queue state only tracks up to second + latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) - # Queue state only tracks up to second - latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) + self.assertEquals(self.problem.get_recentmost_queuetime(), latest_timestamp) - self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) + def test_convert_files_to_filenames(self): + ''' + Test whether file objects are converted to filenames without altering other structures + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/filename_convert_test.txt") + with open(problem_file) as fp: + answers_with_file = {'1_2_1': 'String-based answer', + '1_3_1': ['answer1', 'answer2', 'answer3'], + '1_4_1': [fp, fp]} + answers_converted = convert_files_to_filenames(answers_with_file) + self.assertEquals(answers_converted['1_2_1'], 'String-based answer') + self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) + self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) - def test_convert_files_to_filenames(self): - ''' - Test whether file objects are converted to filenames without altering other structures - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as fp: - answers_with_file = {'1_2_1': 'String-based answer', - '1_3_1': ['answer1', 'answer2', 'answer3'], - '1_4_1': [fp, fp]} - answers_converted = convert_files_to_filenames(answers_with_file) - self.assertEquals(answers_converted['1_2_1'], 'String-based answer') - 
self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) - self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) +class ChoiceResponseTest(ResponseTest): + from response_xml_factory import ChoiceResponseXMLFactory + xml_factory_class = ChoiceResponseXMLFactory + + def test_radio_group_grade(self): + problem = self.build_problem(choice_type='radio', + choices=[False, True, False]) + + # Check that we get the expected results + self.assert_grade(problem, 'choice_0', 'incorrect') + self.assert_grade(problem, 'choice_1', 'correct') + self.assert_grade(problem, 'choice_2', 'incorrect') + + # No choice 3 exists --> mark incorrect + self.assert_grade(problem, 'choice_3', 'incorrect') -class ChoiceResponseTest(unittest.TestCase): + def test_checkbox_group_grade(self): + problem = self.build_problem(choice_type='checkbox', + choices=[False, True, True]) - def test_cr_rb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + # Check that we get the expected results + # (correct if and only if BOTH correct choices chosen) + self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct') + self.assert_grade(problem, 'choice_1', 'incorrect') + self.assert_grade(problem, 'choice_2', 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect') - def test_cr_cb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" - test_lcp = 
lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3'], - '1_4_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - '1_4_1': ['choice_2', 'choice_3'], - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') + # No choice 3 exists --> mark incorrect + self.assert_grade(problem, 'choice_3', 'incorrect') -class JavascriptResponseTest(unittest.TestCase): +class JavascriptResponseTest(ResponseTest): + from response_xml_factory import JavascriptResponseXMLFactory + xml_factory_class = JavascriptResponseXMLFactory - def test_jr_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" + def test_grade(self): + # Compile coffee files into javascript used by the response coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" os.system("coffee -c %s" % (coffee_file_path)) - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': json.dumps({0: 4})} - incorrect_answers = {'1_2_1': json.dumps({0: 5})} - self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + problem = self.build_problem(generator_src="test_problem_generator.js", + grader_src="test_problem_grader.js", + display_class="TestProblemDisplay", + display_src="test_problem_display.js", + param_dict={'value': '4'}) + + # Test that we get graded correctly + self.assert_grade(problem, json.dumps({0:4}), "correct") + self.assert_grade(problem, json.dumps({0:5}), "incorrect") + +class 
NumericalResponseTest(ResponseTest): + from response_xml_factory import NumericalResponseXMLFactory + xml_factory_class = NumericalResponseXMLFactory + + def test_grade_exact(self): + problem = self.build_problem(question_text="What is 2 + 2?", + explanation="The answer is 4", + answer=4) + correct_responses = ["4", "4.0", "4.00"] + incorrect_responses = ["", "3.9", "4.1", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + + def test_grade_decimal_tolerance(self): + problem = self.build_problem(question_text="What is 2 + 2 approximately?", + explanation="The answer is 4", + answer=4, + tolerance=0.1) + correct_responses = ["4.0", "4.00", "4.09", "3.91"] + incorrect_responses = ["", "4.11", "3.89", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_grade_percent_tolerance(self): + problem = self.build_problem(question_text="What is 2 + 2 approximately?", + explanation="The answer is 4", + answer=4, + tolerance="10%") + correct_responses = ["4.0", "4.3", "3.7", "4.30", "3.70"] + incorrect_responses = ["", "4.5", "3.5", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_grade_with_script(self): + script_text = "computed_response = math.sqrt(4)" + problem = self.build_problem(question_text="What is sqrt(4)?", + explanation="The answer is 2", + answer="$computed_response", + script=script_text) + correct_responses = ["2", "2.0"] + incorrect_responses = ["", "2.01", "1.99", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_grade_with_script_and_tolerance(self): + script_text = "computed_response = math.sqrt(4)" + problem = self.build_problem(question_text="What is sqrt(4)?", + explanation="The answer is 2", + answer="$computed_response", + tolerance="0.1", + script=script_text) + correct_responses = ["2", "2.0", "2.05", "1.95"] + incorrect_responses = ["", "2.11", "1.89", "0"] + 
self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_exponential_answer(self): + problem = self.build_problem(question_text="What 5 * 10?", + explanation="The answer is 50", + answer="5e+1") + correct_responses = ["50", "50.0", "5e1", "5e+1", "50e0", "500e-1"] + incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + + +class CustomResponseTest(ResponseTest): + from response_xml_factory import CustomResponseXMLFactory + xml_factory_class = CustomResponseXMLFactory + + def test_inline_code(self): + + # For inline code, we directly modify global context variables + # 'answers' is a list of answers provided to us + # 'correct' is a list we fill in with True/False + # 'expect' is given to us (if provided in the XML) + inline_script = """correct[0] = 'correct' if (answers['1_2_1'] == expect) else 'incorrect'""" + problem = self.build_problem(answer=inline_script, expect="42") + + # Check results + self.assert_grade(problem, '42', 'correct') + self.assert_grade(problem, '0', 'incorrect') + + def test_inline_message(self): + + # Inline code can update the global messages list + # to pass messages to the CorrectMap for a particular input + # The code can also set the global overall_message (str) + # to pass a message that applies to the whole response + inline_script = textwrap.dedent(""" + messages[0] = "Test Message" + overall_message = "Overall message" + """) + problem = self.build_problem(answer=inline_script) + + input_dict = {'1_2_1': '0'} + correctmap = problem.grade_answers(input_dict) + + # Check that the message for the particular input was received + input_msg = correctmap.get_msg('1_2_1') + self.assertEqual(input_msg, "Test Message") + + # Check that the overall message (for the whole response) was received + overall_msg = correctmap.get_overall_message() + self.assertEqual(overall_msg, "Overall message") + + + def 
test_function_code_single_input(self): + + # For function code, we pass in these arguments: + # + # 'expect' is the expect attribute of the + # + # 'answer_given' is the answer the student gave (if there is just one input) + # or an ordered list of answers (if there are multiple inputs) + # + # + # The function should return a dict of the form + # { 'ok': BOOL, 'msg': STRING } + # + script = textwrap.dedent(""" + def check_func(expect, answer_given): + return {'ok': answer_given == expect, 'msg': 'Message text'} + """) + + problem = self.build_problem(script=script, cfn="check_func", expect="42") + + # Correct answer + input_dict = {'1_2_1': '42'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + msg = correct_map.get_msg('1_2_1') + + self.assertEqual(correctness, 'correct') + self.assertEqual(msg, "Message text") + + # Incorrect answer + input_dict = {'1_2_1': '0'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + msg = correct_map.get_msg('1_2_1') + + self.assertEqual(correctness, 'incorrect') + self.assertEqual(msg, "Message text") + + def test_function_code_multiple_input_no_msg(self): + + # Check functions also have the option of returning + # a single boolean value + # If true, mark all the inputs correct + # If false, mark all the inputs incorrect + script = textwrap.dedent(""" + def check_func(expect, answer_given): + return (answer_given[0] == expect and + answer_given[1] == expect) + """) + + problem = self.build_problem(script=script, cfn="check_func", + expect="42", num_inputs=2) + + # Correct answer -- expect both inputs marked correct + input_dict = {'1_2_1': '42', '1_2_2': '42'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + self.assertEqual(correctness, 'correct') + + correctness = correct_map.get_correctness('1_2_2') + self.assertEqual(correctness, 'correct') + + # One answer 
incorrect -- expect both inputs marked incorrect + input_dict = {'1_2_1': '0', '1_2_2': '42'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + self.assertEqual(correctness, 'incorrect') + + correctness = correct_map.get_correctness('1_2_2') + self.assertEqual(correctness, 'incorrect') + + + def test_function_code_multiple_inputs(self): + + # If the has multiple inputs associated with it, + # the check function can return a dict of the form: + # + # {'overall_message': STRING, + # 'input_list': [{'ok': BOOL, 'msg': STRING}, ...] } + # + # 'overall_message' is displayed at the end of the response + # + # 'input_list' contains dictionaries representing the correctness + # and message for each input. + script = textwrap.dedent(""" + def check_func(expect, answer_given): + check1 = (int(answer_given[0]) == 1) + check2 = (int(answer_given[1]) == 2) + check3 = (int(answer_given[2]) == 3) + return {'overall_message': 'Overall message', + 'input_list': [ + {'ok': check1, 'msg': 'Feedback 1'}, + {'ok': check2, 'msg': 'Feedback 2'}, + {'ok': check3, 'msg': 'Feedback 3'} ] } + """) + + problem = self.build_problem(script=script, + cfn="check_func", num_inputs=3) + + # Grade the inputs (one input incorrect) + input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3' } + correct_map = problem.grade_answers(input_dict) + + # Expect that we receive the overall message (for the whole response) + self.assertEqual(correct_map.get_overall_message(), "Overall message") + + # Expect that the inputs were graded individually + self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect') + self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct') + self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct') + + # Expect that we received messages for each individual input + self.assertEqual(correct_map.get_msg('1_2_1'), 'Feedback 1') + self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2') + 
self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3') + + + def test_multiple_inputs_return_one_status(self): + # When given multiple inputs, the 'answer_given' argument + # to the check_func() is a list of inputs + # + # The sample script below marks the problem as correct + # if and only if it receives answer_given=[1,2,3] + # (or string values ['1','2','3']) + # + # Since we return a dict describing the status of one input, + # we expect that the same 'ok' value is applied to each + # of the inputs. + script = textwrap.dedent(""" + def check_func(expect, answer_given): + check1 = (int(answer_given[0]) == 1) + check2 = (int(answer_given[1]) == 2) + check3 = (int(answer_given[2]) == 3) + return {'ok': (check1 and check2 and check3), + 'msg': 'Message text'} + """) + + problem = self.build_problem(script=script, + cfn="check_func", num_inputs=3) + + # Grade the inputs (one input incorrect) + input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3' } + correct_map = problem.grade_answers(input_dict) + + # Everything marked incorrect + self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect') + self.assertEqual(correct_map.get_correctness('1_2_2'), 'incorrect') + self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect') + + # Grade the inputs (everything correct) + input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3' } + correct_map = problem.grade_answers(input_dict) + + # Everything marked incorrect + self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct') + self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct') + self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct') + + # Message is interpreted as an "overall message" + self.assertEqual(correct_map.get_overall_message(), 'Message text') + + def test_script_exception(self): + + # Construct a script that will raise an exception + script = textwrap.dedent(""" + def check_func(expect, answer_given): + raise Exception("Test") + """) + + problem = 
self.build_problem(script=script, cfn="check_func") + + # Expect that an exception gets raised when we check the answer + with self.assertRaises(Exception): + problem.grade_answers({'1_2_1': '42'}) + + def test_invalid_dict_exception(self): + + # Construct a script that passes back an invalid dict format + script = textwrap.dedent(""" + def check_func(expect, answer_given): + return {'invalid': 'test'} + """) + + problem = self.build_problem(script=script, cfn="check_func") + + # Expect that an exception gets raised when we check the answer + with self.assertRaises(Exception): + problem.grade_answers({'1_2_1': '42'}) + + +class SchematicResponseTest(ResponseTest): + from response_xml_factory import SchematicResponseXMLFactory + xml_factory_class = SchematicResponseXMLFactory + + def test_grade(self): + + # Most of the schematic-specific work is handled elsewhere + # (in client-side JavaScript) + # The is responsible only for executing the + # Python code in with *submission* (list) + # in the global context. 
+ + # To test that the context is set up correctly, + # we create a script that sets *correct* to true + # if and only if we find the *submission* (list) + script="correct = ['correct' if 'test' in submission[0] else 'incorrect']" + problem = self.build_problem(answer=script) + + # The actual dictionary would contain schematic information + # sent from the JavaScript simulation + submission_dict = {'test': 'test'} + input_dict = { '1_2_1': json.dumps(submission_dict) } + correct_map = problem.grade_answers(input_dict) + + # Expect that the problem is graded as true + # (That is, our script verifies that the context + # is what we expect) + self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct') + +class AnnotationResponseTest(ResponseTest): + from response_xml_factory import AnnotationResponseXMLFactory + xml_factory_class = AnnotationResponseXMLFactory + + def test_grade(self): + (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect') + + answer_id = '1_2_1' + options = (('x', correct),('y', partially),('z', incorrect)) + make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids })} + + tests = [ + {'correctness': correct, 'points': 2,'answers': make_answer([0]) }, + {'correctness': partially, 'points': 1, 'answers': make_answer([1]) }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([2]) }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([0,1,2]) }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([]) }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer('') }, + {'correctness': incorrect, 'points': 0, 'answers': make_answer(None) }, + {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null' } }, + ] + + for (index, test) in enumerate(tests): + expected_correctness = test['correctness'] + expected_points = test['points'] + answers = test['answers'] + + problem = self.build_problem(options=options) + correct_map = 
problem.grade_answers(answers) + actual_correctness = correct_map.get_correctness(answer_id) + actual_points = correct_map.get_npoints(answer_id) + + self.assertEqual(expected_correctness, actual_correctness, + msg="%s should be marked %s" % (answer_id, expected_correctness)) + self.assertEqual(expected_points, actual_points, + msg="%s should have %d points" % (answer_id, expected_points)) diff --git a/common/lib/capa/capa/util.py b/common/lib/capa/capa/util.py index a0f25c4947..9f3e8bd3a0 100644 --- a/common/lib/capa/capa/util.py +++ b/common/lib/capa/capa/util.py @@ -1,4 +1,4 @@ -from calc import evaluator, UndefinedVariable +from .calc import evaluator, UndefinedVariable #----------------------------------------------------------------------------- # diff --git a/common/lib/capa/capa/verifiers/draganddrop.py b/common/lib/capa/capa/verifiers/draganddrop.py index eb91208923..cdfa163f33 100644 --- a/common/lib/capa/capa/verifiers/draganddrop.py +++ b/common/lib/capa/capa/verifiers/draganddrop.py @@ -27,6 +27,49 @@ values are (x,y) coordinates of centers of dragged images. import json +def flat_user_answer(user_answer): + """ + Convert nested `user_answer` to flat format. 
+ + {'up': {'first': {'p': 'p_l'}}} + + to + + {'up': 'p_l[p][first]'} + """ + + def parse_user_answer(answer): + key = answer.keys()[0] + value = answer.values()[0] + if isinstance(value, dict): + + # Make complex value: + # Example: + # Create like 'p_l[p][first]' from {'first': {'p': 'p_l'} + complex_value_list = [] + v_value = value + while isinstance(v_value, dict): + v_key = v_value.keys()[0] + v_value = v_value.values()[0] + complex_value_list.append(v_key) + + complex_value = '{0}'.format(v_value) + for i in reversed(complex_value_list): + complex_value = '{0}[{1}]'.format(complex_value, i) + + res = {key: complex_value} + return res + else: + return answer + + result = [] + for answer in user_answer: + parse_answer = parse_user_answer(answer) + result.append(parse_answer) + + return result + + class PositionsCompare(list): """ Class for comparing positions. @@ -111,42 +154,41 @@ class DragAndDrop(object): Returns: bool. ''' for draggable in self.excess_draggables: - if not self.excess_draggables[draggable]: + if self.excess_draggables[draggable]: return False # user answer has more draggables than correct answer # Number of draggables in user_groups may be differ that in # correct_groups, that is incorrect, except special case with 'number' - for groupname, draggable_ids in self.correct_groups.items(): - + for index, draggable_ids in enumerate(self.correct_groups): # 'number' rule special case # for reusable draggables we may get in self.user_groups # {'1': [u'2', u'2', u'2'], '0': [u'1', u'1'], '2': [u'3']} # if '+number' is in rule - do not remove duplicates and strip # '+number' from rule - current_rule = self.correct_positions[groupname].keys()[0] + current_rule = self.correct_positions[index].keys()[0] if 'number' in current_rule: - rule_values = self.correct_positions[groupname][current_rule] + rule_values = self.correct_positions[index][current_rule] # clean rule, do not do clean duplicate items - self.correct_positions[groupname].pop(current_rule, 
None) + self.correct_positions[index].pop(current_rule, None) parsed_rule = current_rule.replace('+', '').replace('number', '') - self.correct_positions[groupname][parsed_rule] = rule_values + self.correct_positions[index][parsed_rule] = rule_values else: # remove dublicates - self.user_groups[groupname] = list(set(self.user_groups[groupname])) + self.user_groups[index] = list(set(self.user_groups[index])) - if sorted(draggable_ids) != sorted(self.user_groups[groupname]): + if sorted(draggable_ids) != sorted(self.user_groups[index]): return False # Check that in every group, for rule of that group, user positions of # every element are equal with correct positions - for groupname in self.correct_groups: + for index, _ in enumerate(self.correct_groups): rules_executed = 0 for rule in ('exact', 'anyof', 'unordered_equal'): # every group has only one rule - if self.correct_positions[groupname].get(rule, None): + if self.correct_positions[index].get(rule, None): rules_executed += 1 if not self.compare_positions( - self.correct_positions[groupname][rule], - self.user_positions[groupname]['user'], flag=rule): + self.correct_positions[index][rule], + self.user_positions[index]['user'], flag=rule): return False if not rules_executed: # no correct rules for current group # probably xml content mistake - wrong rules names @@ -248,7 +290,7 @@ class DragAndDrop(object): correct_answer = {'name4': 't1', 'name_with_icon': 't1', '5': 't2', - '7':'t2'} + '7': 't2'} It is draggable_name: dragable_position mapping. @@ -284,48 +326,56 @@ class DragAndDrop(object): Args: user_answer: json - correct_answer: dict or list + correct_answer: dict or list """ - self.correct_groups = dict() # correct groups from xml - self.correct_positions = dict() # correct positions for comparing - self.user_groups = dict() # will be populated from user answer - self.user_positions = dict() # will be populated from user answer + self.correct_groups = [] # Correct groups from xml. 
+ self.correct_positions = [] # Correct positions for comparing. + self.user_groups = [] # Will be populated from user answer. + self.user_positions = [] # Will be populated from user answer. - # convert from dict answer format to list format + # Convert from dict answer format to list format. if isinstance(correct_answer, dict): tmp = [] for key, value in correct_answer.items(): - tmp_dict = {'draggables': [], 'targets': [], 'rule': 'exact'} - tmp_dict['draggables'].append(key) - tmp_dict['targets'].append(value) - tmp.append(tmp_dict) + tmp.append({ + 'draggables': [key], + 'targets': [value], + 'rule': 'exact'}) correct_answer = tmp + # Convert string `user_answer` to object. user_answer = json.loads(user_answer) - # check if we have draggables that are not in correct answer: - self.excess_draggables = {} + # This dictionary will hold a key for each draggable the user placed on + # the image. The value is True if that draggable is not mentioned in any + # correct_answer entries. If the draggable is mentioned in at least one + # correct_answer entry, the value is False. + # default to consider every user answer excess until proven otherwise. + self.excess_draggables = dict((users_draggable.keys()[0],True) + for users_draggable in user_answer) - # create identical data structures from user answer and correct answer - for i in xrange(0, len(correct_answer)): - groupname = str(i) - self.correct_groups[groupname] = correct_answer[i]['draggables'] - self.correct_positions[groupname] = {correct_answer[i]['rule']: - correct_answer[i]['targets']} - self.user_groups[groupname] = [] - self.user_positions[groupname] = {'user': []} - for draggable_dict in user_answer['draggables']: - # draggable_dict is 1-to-1 {draggable_name: position} + # Convert nested `user_answer` to flat format. + user_answer = flat_user_answer(user_answer) + + # Create identical data structures from user answer and correct answer. 
+ for answer in correct_answer: + user_groups_data = [] + user_positions_data = [] + for draggable_dict in user_answer: + # Draggable_dict is 1-to-1 {draggable_name: position}. draggable_name = draggable_dict.keys()[0] - if draggable_name in self.correct_groups[groupname]: - self.user_groups[groupname].append(draggable_name) - self.user_positions[groupname]['user'].append( + if draggable_name in answer['draggables']: + user_groups_data.append(draggable_name) + user_positions_data.append( draggable_dict[draggable_name]) - self.excess_draggables[draggable_name] = True - else: - self.excess_draggables[draggable_name] = \ - self.excess_draggables.get(draggable_name, False) + # proved that this is not excess + self.excess_draggables[draggable_name] = False + + self.correct_groups.append(answer['draggables']) + self.correct_positions.append({answer['rule']: answer['targets']}) + self.user_groups.append(user_groups_data) + self.user_positions.append({'user': user_positions_data}) def grade(user_input, correct_answer): diff --git a/common/lib/capa/capa/verifiers/tests_draganddrop.py b/common/lib/capa/capa/verifiers/tests_draganddrop.py index 9b1b15ce0c..75a194cc6d 100644 --- a/common/lib/capa/capa/verifiers/tests_draganddrop.py +++ b/common/lib/capa/capa/verifiers/tests_draganddrop.py @@ -1,7 +1,8 @@ import unittest import draganddrop -from draganddrop import PositionsCompare +from .draganddrop import PositionsCompare +import json class Test_PositionsCompare(unittest.TestCase): @@ -40,78 +41,314 @@ class Test_PositionsCompare(unittest.TestCase): class Test_DragAndDrop_Grade(unittest.TestCase): - def test_targets_true(self): - user_input = '{"draggables": [{"1": "t1"}, \ - {"name_with_icon": "t2"}]}' - correct_answer = {'1': 't1', 'name_with_icon': 't2'} + def test_targets_are_draggable_1(self): + user_input = json.dumps([ + {'p': 'p_l'}, + {'up': {'first': {'p': 'p_l'}}} + ]) + + correct_answer = [ + { + 'draggables': ['p'], + 'targets': [ + 'p_l', 'p_r' + ], + 'rule': 
'anyof' + }, + { + 'draggables': ['up'], + 'targets': [ + 'p_l[p][first]' + ], + 'rule': 'anyof' + } + ] self.assertTrue(draganddrop.grade(user_input, correct_answer)) + def test_targets_are_draggable_2(self): + user_input = json.dumps([ + {'p': 'p_l'}, + {'p': 'p_r'}, + {'s': 's_l'}, + {'s': 's_r'}, + {'up': {'1': {'p': 'p_l'}}}, + {'up': {'3': {'p': 'p_l'}}}, + {'up': {'1': {'p': 'p_r'}}}, + {'up': {'3': {'p': 'p_r'}}}, + {'up_and_down': {'1': {'s': 's_l'}}}, + {'up_and_down': {'1': {'s': 's_r'}}} + ]) + + correct_answer = [ + { + 'draggables': ['p'], + 'targets': ['p_l', 'p_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['s'], + 'targets': ['s_l', 's_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': [ + 's_l[s][1]', 's_r[s][1]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': [ + 'p_l[p][1]', 'p_l[p][3]', 'p_r[p][1]', 'p_r[p][3]' + ], + 'rule': 'unordered_equal' + } + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_are_draggable_2_manual_parsing(self): + user_input = json.dumps([ + {'up': 'p_l[p][1]'}, + {'p': 'p_l'}, + {'up': 'p_l[p][3]'}, + {'up': 'p_r[p][1]'}, + {'p': 'p_r'}, + {'up': 'p_r[p][3]'}, + {'up_and_down': 's_l[s][1]'}, + {'s': 's_l'}, + {'up_and_down': 's_r[s][1]'}, + {'s': 's_r'} + ]) + + correct_answer = [ + { + 'draggables': ['p'], + 'targets': ['p_l', 'p_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['s'], + 'targets': ['s_l', 's_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': [ + 's_l[s][1]', 's_r[s][1]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': [ + 'p_l[p][1]', 'p_l[p][3]', 'p_r[p][1]', 'p_r[p][3]' + ], + 'rule': 'unordered_equal' + } + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_are_draggable_3_nested(self): + user_input = json.dumps([ + {'molecule': 'left_side_tagret'}, + {'molecule': 
'right_side_tagret'}, + {'p': {'p_target': {'molecule': 'left_side_tagret'}}}, + {'p': {'p_target': {'molecule': 'right_side_tagret'}}}, + {'s': {'s_target': {'molecule': 'left_side_tagret'}}}, + {'s': {'s_target': {'molecule': 'right_side_tagret'}}}, + {'up': {'1': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}}, + {'up': {'3': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}}, + {'up': {'1': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}}, + {'up': {'3': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}}, + {'up_and_down': {'1': {'s': {'s_target': {'molecule': 'left_side_tagret'}}}}}, + {'up_and_down': {'1': {'s': {'s_target': {'molecule': 'right_side_tagret'}}}}} + ]) + + correct_answer = [ + { + 'draggables': ['molecule'], + 'targets': ['left_side_tagret', 'right_side_tagret'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['p'], + 'targets': [ + 'left_side_tagret[molecule][p_target]', + 'right_side_tagret[molecule][p_target]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['s'], + 'targets': [ + 'left_side_tagret[molecule][s_target]', + 'right_side_tagret[molecule][s_target]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': [ + 'left_side_tagret[molecule][s_target][s][1]', + 'right_side_tagret[molecule][s_target][s][1]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': [ + 'left_side_tagret[molecule][p_target][p][1]', + 'left_side_tagret[molecule][p_target][p][3]', + 'right_side_tagret[molecule][p_target][p][1]', + 'right_side_tagret[molecule][p_target][p][3]' + ], + 'rule': 'unordered_equal' + } + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_are_draggable_4_real_example(self): + user_input = json.dumps([ + {'single_draggable': 's_l'}, + {'single_draggable': 's_r'}, + {'single_draggable': 'p_sigma'}, + {'single_draggable': 'p_sigma*'}, + {'single_draggable': 's_sigma'}, + {'single_draggable': 's_sigma*'}, 
+ {'double_draggable': 'p_pi*'}, + {'double_draggable': 'p_pi'}, + {'triple_draggable': 'p_l'}, + {'triple_draggable': 'p_r'}, + {'up': {'1': {'triple_draggable': 'p_l'}}}, + {'up': {'2': {'triple_draggable': 'p_l'}}}, + {'up': {'2': {'triple_draggable': 'p_r'}}}, + {'up': {'3': {'triple_draggable': 'p_r'}}}, + {'up_and_down': {'1': {'single_draggable': 's_l'}}}, + {'up_and_down': {'1': {'single_draggable': 's_r'}}}, + {'up_and_down': {'1': {'single_draggable': 's_sigma'}}}, + {'up_and_down': {'1': {'single_draggable': 's_sigma*'}}}, + {'up_and_down': {'1': {'double_draggable': 'p_pi'}}}, + {'up_and_down': {'2': {'double_draggable': 'p_pi'}}} + ]) + + # 10 targets: + # s_l, s_r, p_l, p_r, s_sigma, s_sigma*, p_pi, p_sigma, p_pi*, p_sigma* + # + # 3 draggable objects, which have targets (internal target ids - 1, 2, 3): + # single_draggable, double_draggable, triple_draggable + # + # 2 draggable objects: + # up, up_and_down + correct_answer = [ + { + 'draggables': ['triple_draggable'], + 'targets': ['p_l', 'p_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['double_draggable'], + 'targets': ['p_pi', 'p_pi*'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['single_draggable'], + 'targets': ['s_l', 's_r', 's_sigma', 's_sigma*', 'p_sigma', 'p_sigma*'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': ['p_l[triple_draggable][1]', 'p_l[triple_draggable][2]', + 'p_r[triple_draggable][2]', 'p_r[triple_draggable][3]'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': ['s_l[single_draggable][1]', 's_r[single_draggable][1]', + 's_sigma[single_draggable][1]', 's_sigma*[single_draggable][1]', + 'p_pi[double_draggable][1]', 'p_pi[double_draggable][2]'], + 'rule': 'unordered_equal' + }, + + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_true(self): + user_input = '[{"1": "t1"}, \ + {"name_with_icon": "t2"}]' + correct_answer = {'1': 't1', 'name_with_icon': 't2'} + 
self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_expect_no_actions_wrong(self): + user_input = '[{"1": "t1"}, \ + {"name_with_icon": "t2"}]' + correct_answer = [] + self.assertFalse(draganddrop.grade(user_input, correct_answer)) + + def test_expect_no_actions_right(self): + user_input = '[]' + correct_answer = [] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_false(self): - user_input = '{"draggables": [{"1": "t1"}, \ - {"name_with_icon": "t2"}]}' - correct_answer = {'1': 't3', 'name_with_icon': 't2'} + user_input = '[{"1": "t1"}, \ + {"name_with_icon": "t2"}]' + correct_answer = {'1': 't3', 'name_with_icon': 't2'} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_multiple_images_per_target_true(self): - user_input = '{\ - "draggables": [{"1": "t1"}, {"name_with_icon": "t2"}, \ - {"2": "t1"}]}' - correct_answer = {'1': 't1', 'name_with_icon': 't2', + user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \ + {"2": "t1"}]' + correct_answer = {'1': 't1', 'name_with_icon': 't2', '2': 't1'} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_multiple_images_per_target_false(self): - user_input = '{\ - "draggables": [{"1": "t1"}, {"name_with_icon": "t2"}, \ - {"2": "t1"}]}' - correct_answer = {'1': 't2', 'name_with_icon': 't2', + user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \ + {"2": "t1"}]' + correct_answer = {'1': 't2', 'name_with_icon': 't2', '2': 't1'} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_targets_and_positions(self): - user_input = '{"draggables": [{"1": [10,10]}, \ - {"name_with_icon": [[10,10],4]}]}' + user_input = '[{"1": [10,10]}, \ + {"name_with_icon": [[10,10],4]}]' correct_answer = {'1': [10, 10], 'name_with_icon': [[10, 10], 4]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_position_and_targets(self): - user_input = '{"draggables": [{"1": "t1"}, {"name_with_icon": "t2"}]}' + 
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]' correct_answer = {'1': 't1', 'name_with_icon': 't2'} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_positions_exact(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [10, 10], 'name_with_icon': [20, 20]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_positions_false(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [25, 25], 'name_with_icon': [20, 20]} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_positions_true_in_radius(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [14, 14], 'name_with_icon': [20, 20]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_positions_true_in_manual_radius(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [[40, 10], 30], 'name_with_icon': [20, 20]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_positions_false_in_manual_radius(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_correct_answer_not_has_key_from_user_answer(self): - user_input = '{"draggables": [{"1": "t1"}, \ - {"name_with_icon": "t2"}]}' + user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]' correct_answer = {'3': 't3', 'name_with_icon': 
't2'} self.assertFalse(draganddrop.grade(user_input, correct_answer)) @@ -119,20 +356,20 @@ class Test_DragAndDrop_Grade(unittest.TestCase): """Draggables can be places anywhere on base image. Place grass in the middle of the image and ant in the right upper corner.""" - user_input = '{"draggables": \ - [{"ant":[610.5,57.449951171875]},{"grass":[322.5,199.449951171875]}]}' + user_input = '[{"ant":[610.5,57.449951171875]},\ + {"grass":[322.5,199.449951171875]}]' correct_answer = {'grass': [[300, 200], 200], 'ant': [[500, 0], 200]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_lcao_correct(self): """Describe carbon molecule in LCAO-MO""" - user_input = '{"draggables":[{"1":"s_left"}, \ + user_input = '[{"1":"s_left"}, \ {"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \ {"8":"p_left_2"},{"10":"p_right_1"},{"9":"p_right_2"}, \ {"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \ {"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \ - {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]}' + {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]' correct_answer = [{ 'draggables': ['1', '2', '3', '4', '5', '6'], @@ -166,12 +403,12 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_lcao_extra_element_incorrect(self): """Describe carbon molecule in LCAO-MO""" - user_input = '{"draggables":[{"1":"s_left"}, \ + user_input = '[{"1":"s_left"}, \ {"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \ {"8":"p_left_2"},{"17":"p_left_3"},{"10":"p_right_1"},{"9":"p_right_2"}, \ {"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \ {"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \ - {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]}' + {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]' correct_answer = [{ 'draggables': ['1', '2', '3', '4', '5', '6'], @@ -205,9 +442,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_draggable_no_mupliples(self): """Test 
reusable draggables (no mupltiple draggables per target)""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ {"2":"target2"},{"1":"target3"},{"2":"target4"},{"2":"target5"}, \ - {"3":"target6"}]}' + {"3":"target6"}]' correct_answer = [ { 'draggables': ['1'], @@ -228,9 +465,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_draggable_with_mupliples(self): """Test reusable draggables with mupltiple draggables per target""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ {"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \ - {"3":"target6"}]}' + {"3":"target6"}]' correct_answer = [ { 'draggables': ['1'], @@ -251,10 +488,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_many_draggable_with_mupliples(self): """Test reusable draggables with mupltiple draggables per target""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ {"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \ {"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \ - {"5": "target5"}, {"6": "target2"}]}' + {"5": "target5"}, {"6": "target2"}]' correct_answer = [ { 'draggables': ['1', '4'], @@ -280,12 +517,12 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_many_draggable_with_mupliples_wrong(self): """Test reusable draggables with mupltiple draggables per target""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ {"2":"target2"},{"1":"target1"}, \ {"2":"target3"}, \ {"2":"target4"}, \ {"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \ - {"5": "target5"}, {"6": "target2"}]}' + {"5": "target5"}, {"6": "target2"}]' correct_answer = [ { 'draggables': ['1', '4'], @@ -311,10 +548,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_false(self): """Test reusable draggables (no mupltiple draggables per target)""" - user_input = 
'{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \ {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target1"}]}' + {"a":"target1"}]' correct_answer = [ { 'draggables': ['a'], @@ -335,10 +572,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_(self): """Test reusable draggables (no mupltiple draggables per target)""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \ {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target10"}]}' + {"a":"target10"}]' correct_answer = [ { 'draggables': ['a'], @@ -359,10 +596,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_multiple(self): """Test reusable draggables (mupltiple draggables per target)""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"b":"target5"}, \ {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target1"}]}' + {"a":"target1"}]' correct_answer = [ { 'draggables': ['a', 'a', 'a'], @@ -383,10 +620,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_multiple_false(self): """Test reusable draggables (mupltiple draggables per target)""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \ {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target1"}]}' + {"a":"target1"}]' correct_answer = [ { 'draggables': ['a', 'a', 'a'], @@ -407,10 +644,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_reused(self): """Test a b c in 10 labels reused""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = 
'[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"b":"target5"}, \ {"c":"target6"}, {"b":"target8"},{"c":"target9"}, \ - {"a":"target10"}]}' + {"a":"target10"}]' correct_answer = [ { 'draggables': ['a', 'a'], @@ -431,10 +668,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_reused_false(self): """Test a b c in 10 labels reused false""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"b":"target5"}, {"a":"target8"},\ {"c":"target6"}, {"b":"target8"},{"c":"target9"}, \ - {"a":"target10"}]}' + {"a":"target10"}]' correct_answer = [ { 'draggables': ['a', 'a'], @@ -455,9 +692,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_mixed_reuse_and_not_reuse(self): """Test reusable draggables """ - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"}, {"a":"target4"},\ - {"a":"target5"}]}' + {"a":"target5"}]' correct_answer = [ { 'draggables': ['a', 'b'], @@ -473,8 +710,8 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_mixed_reuse_and_not_reuse_number(self): """Test reusable draggables with number """ - user_input = '{"draggables":[{"a":"target1"}, \ - {"b":"target2"},{"c":"target3"}, {"a":"target4"}]}' + user_input = '[{"a":"target1"}, \ + {"b":"target2"},{"c":"target3"}, {"a":"target4"}]' correct_answer = [ { 'draggables': ['a', 'a', 'b'], @@ -490,8 +727,8 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_mixed_reuse_and_not_reuse_number_false(self): """Test reusable draggables with numbers, but wrong""" - user_input = '{"draggables":[{"a":"target1"}, \ - {"b":"target2"},{"c":"target3"}, {"a":"target4"}, {"a":"target10"}]}' + user_input = '[{"a":"target1"}, \ + {"b":"target2"},{"c":"target3"}, {"a":"target4"}, {"a":"target10"}]' correct_answer = [ { 'draggables': ['a', 'a', 'b'], @@ -506,9 +743,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): 
self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_alternative_correct_answer(self): - user_input = '{"draggables":[{"name_with_icon":"t1"},\ + user_input = '[{"name_with_icon":"t1"},\ {"name_with_icon":"t1"},{"name_with_icon":"t1"},{"name4":"t1"}, \ - {"name4":"t1"}]}' + {"name4":"t1"}]' correct_answer = [ {'draggables': ['name4'], 'targets': ['t1', 't1'], 'rule': 'exact'}, {'draggables': ['name_with_icon'], 'targets': ['t1', 't1', 't1'], @@ -521,14 +758,13 @@ class Test_DragAndDrop_Populate(unittest.TestCase): def test_1(self): correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]} - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' dnd = draganddrop.DragAndDrop(correct_answer, user_input) - correct_groups = {'1': ['name_with_icon'], '0': ['1']} - correct_positions = {'1': {'exact': [[20, 20]]}, '0': {'exact': [[[40, 10], 29]]}} - user_groups = {'1': [u'name_with_icon'], '0': [u'1']} - user_positions = {'1': {'user': [[20, 20]]}, '0': {'user': [[10, 10]]}} + correct_groups = [['1'], ['name_with_icon']] + correct_positions = [{'exact': [[[40, 10], 29]]}, {'exact': [[20, 20]]}] + user_groups = [['1'], ['name_with_icon']] + user_positions = [{'user': [[10, 10]]}, {'user': [[20, 20]]}] self.assertEqual(correct_groups, dnd.correct_groups) self.assertEqual(correct_positions, dnd.correct_positions) @@ -539,49 +775,49 @@ class Test_DragAndDrop_Populate(unittest.TestCase): class Test_DraAndDrop_Compare_Positions(unittest.TestCase): def test_1(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]], user=[[2, 3], [1, 1]], flag='anyof')) def test_2a(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') 
self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]], user=[[2, 3], [1, 1]], flag='exact')) def test_2b(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=[[1, 1], [2, 3]], user=[[2, 13], [1, 1]], flag='exact')) def test_3(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=["a", "b"], user=["a", "b", "c"], flag='anyof')) def test_4(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"], user=["a", "b"], flag='anyof')) def test_5(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=["a", "b", "c"], user=["a", "c", "b"], flag='exact')) def test_6(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"], user=["a", "c", "b"], flag='anyof')) def test_7(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=["a", "b", "b"], user=["a", "c", "b"], flag='anyof')) diff --git a/common/lib/capa/capa/xqueue_interface.py b/common/lib/capa/capa/xqueue_interface.py index 8dbe2c84aa..5cf2488af0 100644 --- a/common/lib/capa/capa/xqueue_interface.py +++ b/common/lib/capa/capa/xqueue_interface.py @@ -7,7 +7,7 @@ import logging import requests -log = logging.getLogger('mitx.' 
+ __name__) +log = logging.getLogger(__name__) dateformat = '%Y%m%d%H%M%S' diff --git a/common/lib/capa/setup.py b/common/lib/capa/setup.py index 15b3015930..d9c813f55c 100644 --- a/common/lib/capa/setup.py +++ b/common/lib/capa/setup.py @@ -4,5 +4,5 @@ setup( name="capa", version="0.1", packages=find_packages(exclude=["tests"]), - install_requires=['distribute', 'pyparsing'], + install_requires=['distribute==0.6.30', 'pyparsing==1.5.6'], ) diff --git a/common/lib/tempdir.py b/common/lib/tempdir.py new file mode 100644 index 0000000000..0acd92ba33 --- /dev/null +++ b/common/lib/tempdir.py @@ -0,0 +1,17 @@ +"""Make temporary directories nicely.""" + +import atexit +import os.path +import shutil +import tempfile + +def mkdtemp_clean(suffix="", prefix="tmp", dir=None): + """Just like mkdtemp, but the directory will be deleted when the process ends.""" + the_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir) + atexit.register(cleanup_tempdir, the_dir) + return the_dir + +def cleanup_tempdir(the_dir): + """Called on process exit to remove a temp directory.""" + if os.path.exists(the_dir): + shutil.rmtree(the_dir) diff --git a/common/lib/xmodule/setup.py b/common/lib/xmodule/setup.py index ec369420cd..85d42690b9 100644 --- a/common/lib/xmodule/setup.py +++ b/common/lib/xmodule/setup.py @@ -28,6 +28,7 @@ setup( "image = xmodule.backcompat_module:TranslateCustomTagDescriptor", "error = xmodule.error_module:ErrorDescriptor", "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor", + "poll_question = xmodule.poll_module:PollDescriptor", "problem = xmodule.capa_module:CapaDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor", "randomize = xmodule.randomize_module:RandomizeDescriptor", @@ -45,7 +46,9 @@ setup( "static_tab = xmodule.html_module:StaticTabDescriptor", "custom_tag_template = xmodule.raw_module:RawDescriptor", "about = xmodule.html_module:AboutDescriptor", + "wrapper = xmodule.wrapper_module:WrapperDescriptor", 
"graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor", + "annotatable = xmodule.annotatable_module:AnnotatableDescriptor", "foldit = xmodule.foldit_module:FolditDescriptor", ] } diff --git a/common/lib/xmodule/xmodule/abtest_module.py b/common/lib/xmodule/xmodule/abtest_module.py index 537d864127..0e1c66df8e 100644 --- a/common/lib/xmodule/xmodule/abtest_module.py +++ b/common/lib/xmodule/xmodule/abtest_module.py @@ -1,4 +1,3 @@ -import json import random import logging from lxml import etree @@ -7,6 +6,7 @@ from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor from xmodule.xml_module import XmlDescriptor from xmodule.exceptions import InvalidDefinitionError +from xblock.core import String, Scope, Object, BlockScope DEFAULT = "_DEFAULT_GROUP" @@ -31,29 +31,42 @@ def group_from_value(groups, v): return g -class ABTestModule(XModule): +class ABTestFields(object): + group_portions = Object(help="What proportions of students should go in each group", default={DEFAULT: 1}, scope=Scope.content) + group_assignments = Object(help="What group this user belongs to", scope=Scope.student_preferences, default={}) + group_content = Object(help="What content to display to each group", scope=Scope.content, default={DEFAULT: []}) + experiment = String(help="Experiment that this A/B test belongs to", scope=Scope.content) + has_children = True + + +class ABTestModule(ABTestFields, XModule): """ Implements an A/B test with an aribtrary number of competing groups """ - def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, instance_state, shared_state, **kwargs) - - if shared_state is None: + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + if self.group is None: self.group = group_from_value( - self.definition['data']['group_portions'].items(), + self.group_portions.items(), 
random.uniform(0, 1) ) - else: - shared_state = json.loads(shared_state) - self.group = shared_state['group'] - def get_shared_state(self): - return json.dumps({'group': self.group}) + @property + def group(self): + return self.group_assignments.get(self.experiment) + + @group.setter + def group(self, value): + self.group_assignments[self.experiment] = value + + @group.deleter + def group(self): + del self.group_assignments[self.experiment] def get_child_descriptors(self): - active_locations = set(self.definition['data']['group_content'][self.group]) + active_locations = set(self.group_content[self.group]) return [desc for desc in self.descriptor.get_children() if desc.location.url() in active_locations] def displayable_items(self): @@ -64,43 +77,11 @@ class ABTestModule(XModule): # TODO (cpennington): Use Groups should be a first class object, rather than being # managed by ABTests -class ABTestDescriptor(RawDescriptor, XmlDescriptor): +class ABTestDescriptor(ABTestFields, RawDescriptor, XmlDescriptor): module_class = ABTestModule template_dir_name = "abtest" - def __init__(self, system, definition=None, **kwargs): - """ - definition is a dictionary with the following layout: - {'data': { - 'experiment': 'the name of the experiment', - 'group_portions': { - 'group_a': 0.1, - 'group_b': 0.2 - }, - 'group_contents': { - 'group_a': [ - 'url://for/content/module/1', - 'url://for/content/module/2', - ], - 'group_b': [ - 'url://for/content/module/3', - ], - DEFAULT: [ - 'url://for/default/content/1' - ] - } - }, - 'children': [ - 'url://for/content/module/1', - 'url://for/content/module/2', - 'url://for/content/module/3', - 'url://for/default/content/1', - ]} - """ - kwargs['shared_state_key'] = definition['data']['experiment'] - RawDescriptor.__init__(self, system, definition, **kwargs) - @classmethod def definition_from_xml(cls, xml_object, system): """ @@ -118,19 +99,16 @@ class ABTestDescriptor(RawDescriptor, XmlDescriptor): "ABTests must specify an experiment. 
Not found in:\n{xml}" .format(xml=etree.tostring(xml_object, pretty_print=True))) - definition = { - 'data': { - 'experiment': experiment, - 'group_portions': {}, - 'group_content': {DEFAULT: []}, - }, - 'children': []} + group_portions = {} + group_content = {} + children = [] + for group in xml_object: if group.tag == 'default': name = DEFAULT else: name = group.get('name') - definition['data']['group_portions'][name] = float(group.get('portion', 0)) + group_portions[name] = float(group.get('portion', 0)) child_content_urls = [] for child in group: @@ -140,29 +118,33 @@ class ABTestDescriptor(RawDescriptor, XmlDescriptor): log.exception("Unable to load child when parsing ABTest. Continuing...") continue - definition['data']['group_content'][name] = child_content_urls - definition['children'].extend(child_content_urls) + group_content[name] = child_content_urls + children.extend(child_content_urls) default_portion = 1 - sum( - portion for (name, portion) in definition['data']['group_portions'].items()) + portion for (name, portion) in group_portions.items() + ) if default_portion < 0: raise InvalidDefinitionError("ABTest portions must add up to less than or equal to 1") - definition['data']['group_portions'][DEFAULT] = default_portion - definition['children'].sort() + group_portions[DEFAULT] = default_portion + children.sort() - return definition + return { + 'group_portions': group_portions, + 'group_content': group_content, + }, children def definition_to_xml(self, resource_fs): xml_object = etree.Element('abtest') - xml_object.set('experiment', self.definition['data']['experiment']) - for name, group in self.definition['data']['group_content'].items(): + xml_object.set('experiment', self.experiment) + for name, group in self.group_content.items(): if name == DEFAULT: group_elem = etree.SubElement(xml_object, 'default') else: group_elem = etree.SubElement(xml_object, 'group', attrib={ - 'portion': str(self.definition['data']['group_portions'][name]), + 
'portion': str(self.group_portions[name]), 'name': name, }) @@ -172,6 +154,5 @@ class ABTestDescriptor(RawDescriptor, XmlDescriptor): return xml_object - def has_dynamic_children(self): return True diff --git a/common/lib/xmodule/xmodule/annotatable_module.py b/common/lib/xmodule/xmodule/annotatable_module.py new file mode 100644 index 0000000000..db2aa13cb7 --- /dev/null +++ b/common/lib/xmodule/xmodule/annotatable_module.py @@ -0,0 +1,135 @@ +import logging + +from lxml import etree +from pkg_resources import resource_string, resource_listdir + +from xmodule.x_module import XModule +from xmodule.raw_module import RawDescriptor +from xmodule.contentstore.content import StaticContent +from xblock.core import Scope, String + +log = logging.getLogger(__name__) + + +class AnnotatableFields(object): + data = String(help="XML data for the annotation", scope=Scope.content) + + +class AnnotatableModule(AnnotatableFields, XModule): + js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'), + resource_string(__name__, 'js/src/collapsible.coffee'), + resource_string(__name__, 'js/src/html/display.coffee'), + resource_string(__name__, 'js/src/annotatable/display.coffee')], + 'js': [] + } + js_module_name = "Annotatable" + css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]} + icon_class = 'annotatable' + + + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + + xmltree = etree.fromstring(self.data) + + self.instructions = self._extract_instructions(xmltree) + self.content = etree.tostring(xmltree, encoding='unicode') + self.element_id = self.location.html_id() + self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green'] + + def _get_annotation_class_attr(self, index, el): + """ Returns a dict with the CSS class attribute to set on the annotation + and an XML key to delete from the element. 
+ """ + + attr = {} + cls = ['annotatable-span', 'highlight'] + highlight_key = 'highlight' + color = el.get(highlight_key) + + if color is not None: + if color in self.highlight_colors: + cls.append('highlight-'+color) + attr['_delete'] = highlight_key + attr['value'] = ' '.join(cls) + + return { 'class' : attr } + + def _get_annotation_data_attr(self, index, el): + """ Returns a dict in which the keys are the HTML data attributes + to set on the annotation element. Each data attribute has a + corresponding 'value' and (optional) '_delete' key to specify + an XML attribute to delete. + """ + + data_attrs = {} + attrs_map = { + 'body': 'data-comment-body', + 'title': 'data-comment-title', + 'problem': 'data-problem-id' + } + + for xml_key in attrs_map.keys(): + if xml_key in el.attrib: + value = el.get(xml_key, '') + html_key = attrs_map[xml_key] + data_attrs[html_key] = { 'value': value, '_delete': xml_key } + + return data_attrs + + def _render_annotation(self, index, el): + """ Renders an annotation element for HTML output. """ + attr = {} + attr.update(self._get_annotation_class_attr(index, el)) + attr.update(self._get_annotation_data_attr(index, el)) + + el.tag = 'span' + + for key in attr.keys(): + el.set(key, attr[key]['value']) + if '_delete' in attr[key] and attr[key]['_delete'] is not None: + delete_key = attr[key]['_delete'] + del el.attrib[delete_key] + + + def _render_content(self): + """ Renders annotatable content with annotation spans and returns HTML. """ + xmltree = etree.fromstring(self.content) + xmltree.tag = 'div' + if 'display_name' in xmltree.attrib: + del xmltree.attrib['display_name'] + + index = 0 + for el in xmltree.findall('.//annotation'): + self._render_annotation(index, el) + index += 1 + + return etree.tostring(xmltree, encoding='unicode') + + def _extract_instructions(self, xmltree): + """ Removes from the xmltree and returns them as a string, otherwise None. 
""" + instructions = xmltree.find('instructions') + if instructions is not None: + instructions.tag = 'div' + xmltree.remove(instructions) + return etree.tostring(instructions, encoding='unicode') + return None + + def get_html(self): + """ Renders parameters to template. """ + context = { + 'display_name': self.display_name_with_default, + 'element_id': self.element_id, + 'instructions_html': self.instructions, + 'content_html': self._render_content() + } + + return self.system.render_template('annotatable.html', context) + + +class AnnotatableDescriptor(AnnotatableFields, RawDescriptor): + module_class = AnnotatableModule + stores_state = True + template_dir_name = "annotatable" + mako_template = "widgets/raw-edit.html" + diff --git a/common/lib/xmodule/xmodule/backcompat_module.py b/common/lib/xmodule/xmodule/backcompat_module.py index 40ffd46d1c..9e7b132e9e 100644 --- a/common/lib/xmodule/xmodule/backcompat_module.py +++ b/common/lib/xmodule/xmodule/backcompat_module.py @@ -1,7 +1,7 @@ """ These modules exist to translate old format XML into newer, semantic forms """ -from x_module import XModuleDescriptor +from .x_module import XModuleDescriptor from lxml import etree from functools import wraps import logging diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index 4635cc6871..e66b1d3495 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -6,25 +6,45 @@ import hashlib import json import logging import traceback -import re import sys -from datetime import timedelta from lxml import etree from pkg_resources import resource_string from capa.capa_problem import LoncapaProblem from capa.responsetypes import StudentInputError from capa.util import convert_files_to_filenames -from progress import Progress +from .progress import Progress from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor from xmodule.exceptions import NotFoundError 
+from xblock.core import Integer, Scope, BlockScope, ModelType, String, Boolean, Object, Float +from .fields import Timedelta log = logging.getLogger("mitx.courseware") -#----------------------------------------------------------------------------- -TIMEDELTA_REGEX = re.compile(r'^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) second(?:s)?)?$') + +class StringyInteger(Integer): + """ + A model type that converts from strings to integers when reading from json + """ + def from_json(self, value): + try: + return int(value) + except: + return None + + +class StringyFloat(Float): + """ + A model type that converts from string to floats when reading from json + """ + def from_json(self, value): + try: + return float(value) + except: + return None + # Generated this many different variants of problems with rerandomize=per_student NUM_RANDOMIZATION_BINS = 20 @@ -45,41 +65,15 @@ def randomization_bin(seed, problem_id): return int(h.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS -def only_one(lst, default="", process=lambda x: x): - """ - If lst is empty, returns default +class Randomization(String): + def from_json(self, value): + if value in ("", "true"): + return "always" + elif value == "false": + return "per_student" + return value - If lst has a single element, applies process to that element and returns it. - - Otherwise, raises an exception. 
- """ - if len(lst) == 0: - return default - elif len(lst) == 1: - return process(lst[0]) - else: - raise Exception('Malformed XML: expected at most one element in list.') - - -def parse_timedelta(time_str): - """ - time_str: A string with the following components: - day[s] (optional) - hour[s] (optional) - minute[s] (optional) - second[s] (optional) - - Returns a datetime.timedelta parsed from the string - """ - parts = TIMEDELTA_REGEX.match(time_str) - if not parts: - return - parts = parts.groupdict() - time_params = {} - for (name, param) in parts.iteritems(): - if param: - time_params[name] = int(param) - return timedelta(**time_params) + to_json = from_json class ComplexEncoder(json.JSONEncoder): @@ -89,13 +83,32 @@ class ComplexEncoder(json.JSONEncoder): return json.JSONEncoder.default(self, obj) -class CapaModule(XModule): +class CapaFields(object): + attempts = StringyInteger(help="Number of attempts taken by the student on this problem", default=0, scope=Scope.student_state) + max_attempts = StringyInteger(help="Maximum number of attempts that a student is allowed", scope=Scope.settings) + due = String(help="Date that this problem is due by", scope=Scope.settings) + graceperiod = Timedelta(help="Amount of time after the due date that submissions will be accepted", scope=Scope.settings) + showanswer = String(help="When to show the problem answer to the student", scope=Scope.settings, default="closed") + force_save_button = Boolean(help="Whether to force the save button to appear on the page", scope=Scope.settings, default=False) + rerandomize = Randomization(help="When to rerandomize the problem", default="always", scope=Scope.settings) + data = String(help="XML data for the problem", scope=Scope.content) + correct_map = Object(help="Dictionary with the correctness of current student answers", scope=Scope.student_state, default={}) + student_answers = Object(help="Dictionary with the current student responses", scope=Scope.student_state) + done = 
Boolean(help="Whether the student has answered the problem", scope=Scope.student_state) + display_name = String(help="Display name for this module", scope=Scope.settings) + seed = StringyInteger(help="Random seed for this student", scope=Scope.student_state) + weight = StringyFloat(help="How much to weight this problem by", scope=Scope.settings) + markdown = String(help="Markdown source of this module", scope=Scope.settings) + + +class CapaModule(CapaFields, XModule): ''' An XModule implementing LonCapa format problems, implemented by way of capa.capa_problem.LoncapaProblem ''' icon_class = 'problem' + js = {'coffee': [resource_string(__name__, 'js/src/capa/display.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/javascript_loader.coffee'), @@ -107,61 +120,25 @@ class CapaModule(XModule): js_module_name = "Problem" css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]} - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, instance_state, - shared_state, **kwargs) + def __init__(self, system, location, descriptor, model_data): + XModule.__init__(self, system, location, descriptor, model_data) - self.attempts = 0 - self.max_attempts = None - - dom2 = etree.fromstring(definition['data']) - - display_due_date_string = self.metadata.get('due', None) - if display_due_date_string is not None: - self.display_due_date = dateutil.parser.parse(display_due_date_string) - #log.debug("Parsed " + display_due_date_string + - # " to " + str(self.display_due_date)) + if self.due: + due_date = dateutil.parser.parse(self.due) else: - self.display_due_date = None + due_date = None - grace_period_string = self.metadata.get('graceperiod', None) - if grace_period_string is not None and self.display_due_date: - self.grace_period = parse_timedelta(grace_period_string) - self.close_date = 
self.display_due_date + self.grace_period - #log.debug("Then parsed " + grace_period_string + - # " to closing date" + str(self.close_date)) + if self.graceperiod is not None and due_date: + self.close_date = due_date + self.graceperiod else: - self.grace_period = None - self.close_date = self.display_due_date + self.close_date = due_date - max_attempts = self.metadata.get('attempts', None) - if max_attempts: - self.max_attempts = int(max_attempts) - else: - self.max_attempts = None - - self.show_answer = self.metadata.get('showanswer', 'closed') - - self.force_save_button = self.metadata.get('force_save_button', 'false') - - if self.show_answer == "": - self.show_answer = "closed" - - if instance_state is not None: - instance_state = json.loads(instance_state) - if instance_state is not None and 'attempts' in instance_state: - self.attempts = instance_state['attempts'] - - self.name = only_one(dom2.xpath('/problem/@name')) - - if self.rerandomize == 'never': - self.seed = 1 - elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'): - # see comment on randomization_bin - self.seed = randomization_bin(system.seed, self.location.url) - else: - self.seed = None + if self.seed is None: + if self.rerandomize == 'never': + self.seed = 1 + elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'): + # see comment on randomization_bin + self.seed = randomization_bin(system.seed, self.location.url) # Need the problem location in openendedresponse to send out. 
Adding # it to the system here seems like the least clunky way to get it @@ -171,8 +148,7 @@ class CapaModule(XModule): try: # TODO (vshnayder): move as much as possible of this work and error # checking to descriptor load time - self.lcp = LoncapaProblem(self.definition['data'], self.location.html_id(), - instance_state, seed=self.seed, system=self.system) + self.lcp = self.new_lcp(self.get_state_for_lcp()) except Exception as err: msg = 'cannot create LoncapaProblem {loc}: {err}'.format( loc=self.location.url(), err=err) @@ -189,35 +165,38 @@ class CapaModule(XModule): problem_text = ('' 'Problem %s has an error:%s' % (self.location.url(), msg)) - self.lcp = LoncapaProblem( - problem_text, self.location.html_id(), - instance_state, seed=self.seed, system=self.system) + self.lcp = self.new_lcp(self.get_state_for_lcp(), text=problem_text) else: # add extra info and raise raise Exception(msg), None, sys.exc_info()[2] - @property - def rerandomize(self): - """ - Property accessor that returns self.metadata['rerandomize'] in a - canonical form - """ - rerandomize = self.metadata.get('rerandomize', 'always') - if rerandomize in ("", "always", "true"): - return "always" - elif rerandomize in ("false", "per_student"): - return "per_student" - elif rerandomize == "never": - return "never" - elif rerandomize == "onreset": - return "onreset" - else: - raise Exception("Invalid rerandomize attribute " + rerandomize) + self.set_state_from_lcp() - def get_instance_state(self): - state = self.lcp.get_state() - state['attempts'] = self.attempts - return json.dumps(state) + def new_lcp(self, state, text=None): + if text is None: + text = self.data + + return LoncapaProblem( + problem_text=text, + id=self.location.html_id(), + state=state, + system=self.system, + ) + + def get_state_for_lcp(self): + return { + 'done': self.done, + 'correct_map': self.correct_map, + 'student_answers': self.student_answers, + 'seed': self.seed, + } + + def set_state_from_lcp(self): + lcp_state = 
self.lcp.get_state() + self.done = lcp_state['done'] + self.correct_map = lcp_state['correct_map'] + self.student_answers = lcp_state['student_answers'] + self.seed = lcp_state['seed'] def get_score(self): return self.lcp.get_score() @@ -234,7 +213,7 @@ class CapaModule(XModule): if total > 0: try: return Progress(score, total) - except Exception as err: + except Exception: log.exception("Got bad progress") return None return None @@ -247,117 +226,191 @@ class CapaModule(XModule): 'progress': Progress.to_js_status_str(self.get_progress()) }) + def check_button_name(self): + """ + Determine the name for the "check" button. + Usually it is just "Check", but if this is the student's + final attempt, change the name to "Final Check" + """ + if self.max_attempts is not None: + final_check = (self.attempts >= self.max_attempts - 1) + else: + final_check = False + + return "Final Check" if final_check else "Check" + + def should_show_check_button(self): + """ + Return True/False to indicate whether to show the "Check" button. + """ + submitted_without_reset = (self.is_completed() and self.rerandomize == "always") + + # If the problem is closed (past due / too many attempts) + # then we do NOT show the "check" button + # Also, do not show the "check" button if we're waiting + # for the user to reset a randomized problem + if self.closed() or submitted_without_reset: + return False + else: + return True + + def should_show_reset_button(self): + """ + Return True/False to indicate whether to show the "Reset" button. + """ + is_survey_question = (self.max_attempts == 0) + + if self.rerandomize in ["always", "onreset"]: + + # If the problem is closed (and not a survey question with max_attempts==0), + # then do NOT show the reset button. + # If the problem hasn't been submitted yet, then do NOT show + # the reset button. 
+ if (self.closed() and not is_survey_question) or not self.is_completed(): + return False + else: + return True + # Only randomized problems need a "reset" button + else: + return False + + def should_show_save_button(self): + """ + Return True/False to indicate whether to show the "Save" button. + """ + + # If the user has forced the save button to display, + # then show it as long as the problem is not closed + # (past due / too many attempts) + if self.force_save_button == "true": + return not self.closed() + else: + is_survey_question = (self.max_attempts == 0) + needs_reset = self.is_completed() and self.rerandomize == "always" + + # If the student has unlimited attempts, and their answers + # are not randomized, then we do not need a save button + # because they can use the "Check" button without consequences. + # + # The consequences we want to avoid are: + # * Using up an attempt (if max_attempts is set) + # * Changing the current problem, and no longer being + # able to view it (if rerandomize is "always") + # + # In those cases. the if statement below is false, + # and the save button can still be displayed. + # + if self.max_attempts is None and self.rerandomize != "always": + return False + + # If the problem is closed (and not a survey question with max_attempts==0), + # then do NOT show the save button + # If we're waiting for the user to reset a randomized problem + # then do NOT show the save button + elif (self.closed() and not is_survey_question) or needs_reset: + return False + else: + return True + + def handle_problem_html_error(self, err): + """ + Change our problem to a dummy problem containing + a warning message to display to users. + + Returns the HTML to show to users + + *err* is the Exception encountered while rendering the problem HTML. + """ + log.exception(err) + + # TODO (vshnayder): another switch on DEBUG. 
+ if self.system.DEBUG: + msg = ( + '[courseware.capa.capa_module] ' + 'Failed to generate HTML for problem %s' % + (self.location.url())) + msg += '

              Error:

              %s

              ' % str(err).replace('<', '<') + msg += '

              %s

              ' % traceback.format_exc().replace('<', '<') + html = msg + + # We're in non-debug mode, and possibly even in production. We want + # to avoid bricking of problem as much as possible + else: + # We're in non-debug mode, and possibly even in production. We want + # to avoid bricking of problem as much as possible + + # Presumably, student submission has corrupted LoncapaProblem HTML. + # First, pull down all student answers + student_answers = self.lcp.student_answers + answer_ids = student_answers.keys() + + # Some inputtypes, such as dynamath, have additional "hidden" state that + # is not exposed to the student. Keep those hidden + # TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id + hidden_state_keywords = ['dynamath'] + for answer_id in answer_ids: + for hidden_state_keyword in hidden_state_keywords: + if answer_id.find(hidden_state_keyword) >= 0: + student_answers.pop(answer_id) + + # Next, generate a fresh LoncapaProblem + self.lcp = self.new_lcp(None) + self.set_state_from_lcp() + + # Prepend a scary warning to the student + warning = '
              '\ + '

              Warning: The problem has been reset to its initial state!

              '\ + 'The problem\'s state was corrupted by an invalid submission. ' \ + 'The submission consisted of:'\ + '
                ' + for student_answer in student_answers.values(): + if student_answer != '': + warning += '
              • ' + cgi.escape(student_answer) + '
              • ' + warning += '
              '\ + 'If this error persists, please contact the course staff.'\ + '
              ' + + html = warning + try: + html += self.lcp.get_html() + except Exception: # Couldn't do it. Give up + log.exception("Unable to generate html from LoncapaProblem") + raise + + return html + + def get_problem_html(self, encapsulate=True): '''Return html for the problem. Adds check, reset, save buttons as necessary based on the problem config and state.''' try: html = self.lcp.get_html() + + # If we cannot construct the problem HTML, + # then generate an error message instead. except Exception, err: - log.exception(err) + html = self.handle_problem_html_error(err) - # TODO (vshnayder): another switch on DEBUG. - if self.system.DEBUG: - msg = ( - '[courseware.capa.capa_module] ' - 'Failed to generate HTML for problem %s' % - (self.location.url())) - msg += '

              Error:

              %s

              ' % str(err).replace('<', '<') - msg += '

              %s

              ' % traceback.format_exc().replace('<', '<') - html = msg - else: - # We're in non-debug mode, and possibly even in production. We want - # to avoid bricking of problem as much as possible - # Presumably, student submission has corrupted LoncapaProblem HTML. - # First, pull down all student answers - student_answers = self.lcp.student_answers - answer_ids = student_answers.keys() - - # Some inputtypes, such as dynamath, have additional "hidden" state that - # is not exposed to the student. Keep those hidden - # TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id - hidden_state_keywords = ['dynamath'] - for answer_id in answer_ids: - for hidden_state_keyword in hidden_state_keywords: - if answer_id.find(hidden_state_keyword) >= 0: - student_answers.pop(answer_id) - - # Next, generate a fresh LoncapaProblem - self.lcp = LoncapaProblem(self.definition['data'], self.location.html_id(), - state=None, # Tabula rasa - seed=self.seed, system=self.system) - - # Prepend a scary warning to the student - warning = '
              '\ - '

              Warning: The problem has been reset to its initial state!

              '\ - 'The problem\'s state was corrupted by an invalid submission. ' \ - 'The submission consisted of:'\ - '
                ' - for student_answer in student_answers.values(): - if student_answer != '': - warning += '
              • ' + cgi.escape(student_answer) + '
              • ' - warning += '
              '\ - 'If this error persists, please contact the course staff.'\ - '
              ' - - html = warning - try: - html += self.lcp.get_html() - except Exception, err: # Couldn't do it. Give up - log.exception(err) - raise - - content = {'name': self.display_name, - 'html': html, - 'weight': self.descriptor.weight, - } - - # We using strings as truthy values, because the terminology of the - # check button is context-specific. - - # Put a "Check" button if unlimited attempts or still some left - if self.max_attempts is None or self.attempts < self.max_attempts - 1: - check_button = "Check" + # The convention is to pass the name of the check button + # if we want to show a check button, and False otherwise + # This works because non-empty strings evaluate to True + if self.should_show_check_button(): + check_button = self.check_button_name() else: - # Will be final check so let user know that - check_button = "Final Check" - - reset_button = True - save_button = True - - # If we're after deadline, or user has exhausted attempts, - # question is read-only. - if self.closed(): check_button = False - reset_button = False - save_button = False - # User submitted a problem, and hasn't reset. We don't want - # more submissions. - if self.lcp.done and self.rerandomize == "always": - check_button = False - save_button = False - - # Only show the reset button if pressing it will show different values - if self.rerandomize not in ["always", "onreset"]: - reset_button = False - - # User hasn't submitted an answer yet -- we don't want resets - if not self.lcp.done: - reset_button = False - - # We may not need a "save" button if infinite number of attempts and - # non-randomized. The problem author can force it. It's a bit weird for - # randomization to control this; should perhaps be cleaned up. 
- if (self.force_save_button == "false") and (self.max_attempts is None and self.rerandomize != "always"): - save_button = False + content = {'name': self.display_name_with_default, + 'html': html, + 'weight': self.weight, + } context = {'problem': content, 'id': self.id, 'check_button': check_button, - 'reset_button': reset_button, - 'save_button': save_button, + 'reset_button': self.should_show_reset_button(), + 'save_button': self.should_show_save_button(), 'answer_available': self.answer_available(), 'ajax_url': self.system.ajax_url, 'attempts_used': self.attempts, @@ -390,6 +443,7 @@ class CapaModule(XModule): 'problem_save': self.save_problem, 'problem_show': self.get_answer, 'score_update': self.update_score, + 'input_ajax': self.lcp.handle_input_ajax } if dispatch not in handlers: @@ -413,7 +467,7 @@ class CapaModule(XModule): def closed(self): ''' Is the student still allowed to submit answers? ''' - if self.attempts == self.max_attempts: + if self.max_attempts is not None and self.attempts >= self.max_attempts: return True if self.is_past_due(): return True @@ -429,29 +483,37 @@ class CapaModule(XModule): # used by conditional module return self.attempts > 0 + def is_correct(self): + """True if full points""" + d = self.get_score() + return d['score'] == d['total'] + def answer_available(self): ''' Is the user allowed to see an answer? ''' - if self.show_answer == '': + if self.showanswer == '': return False - elif self.show_answer == "never": + elif self.showanswer == "never": return False elif self.system.user_is_staff: # This is after the 'never' check because admins can see the answer # unless the problem explicitly prevents it return True - elif self.show_answer == 'attempted': + elif self.showanswer == 'attempted': return self.attempts > 0 - elif self.show_answer == 'answered': + elif self.showanswer == 'answered': # NOTE: this is slightly different from 'attempted' -- resetting the problems # makes lcp.done False, but leaves attempts unchanged. 
return self.lcp.done - elif self.show_answer == 'closed': + elif self.showanswer == 'closed': return self.closed() - elif self.show_answer == 'past_due': + elif self.showanswer == 'finished': + return self.closed() or self.is_correct() + + elif self.showanswer == 'past_due': return self.is_past_due() - elif self.show_answer == 'always': + elif self.showanswer == 'always': return True return False @@ -470,6 +532,8 @@ class CapaModule(XModule): queuekey = get['queuekey'] score_msg = get['xqueue_body'] self.lcp.update_score(score_msg, queuekey) + self.set_state_from_lcp() + self.publish_grade() return dict() # No AJAX return is needed @@ -481,13 +545,14 @@ class CapaModule(XModule): ''' event_info = dict() event_info['problem_id'] = self.location.url() - self.system.track_function('show_answer', event_info) + self.system.track_function('showanswer', event_info) if not self.answer_available(): raise NotFoundError('Answer is not available') else: answers = self.lcp.get_question_answers() + self.set_state_from_lcp() - # answers (eg ) may have embedded images + # answers (eg ) may have embedded images # but be careful, some problems are using non-string answer dicts new_answers = dict() for answer_id in answers: @@ -513,30 +578,80 @@ class CapaModule(XModule): @staticmethod def make_dict_of_responses(get): '''Make dictionary of student responses (aka "answers") - get is POST dictionary. + get is POST dictionary (Djano QueryDict). + + The *get* dict has keys of the form 'x_y', which are mapped + to key 'y' in the returned dict. For example, + 'input_1_2_3' would be mapped to '1_2_3' in the returned dict. + + Some inputs always expect a list in the returned dict + (e.g. checkbox inputs). The convention is that + keys in the *get* dict that end with '[]' will always + have list values in the returned dict. + For example, if the *get* dict contains {'input_1[]': 'test' } + then the output dict would contain {'1': ['test'] } + (the value is a list). 
+ + Raises an exception if: + + A key in the *get* dictionary does not contain >= 1 underscores + (e.g. "input" is invalid; "input_1" is valid) + + Two keys end up with the same name in the returned dict. + (e.g. 'input_1' and 'input_1[]', which both get mapped + to 'input_1' in the returned dict) ''' answers = dict() + for key in get: # e.g. input_resistor_1 ==> resistor_1 _, _, name = key.partition('_') - # This allows for answers which require more than one value for - # the same form input (e.g. checkbox inputs). The convention is that - # if the name ends with '[]' (which looks like an array), then the - # answer will be an array. - if not name.endswith('[]'): - answers[name] = get[key] + # If key has no underscores, then partition + # will return (key, '', '') + # We detect this and raise an error + if not name: + raise ValueError("%s must contain at least one underscore" % str(key)) + else: - name = name[:-2] - answers[name] = get.getlist(key) + # This allows for answers which require more than one value for + # the same form input (e.g. checkbox inputs). The convention is that + # if the name ends with '[]' (which looks like an array), then the + # answer will be an array. + is_list_key = name.endswith('[]') + name = name[:-2] if is_list_key else name + + if is_list_key: + val = get.getlist(key) + else: + val = get[key] + + # If the name already exists, then we don't want + # to override it. 
Raise an error instead + if name in answers: + raise ValueError("Key %s already exists in answers dict" % str(name)) + else: + answers[name] = val return answers + def publish_grade(self): + """ + Publishes the student's current grade to the system as an event + """ + score = self.lcp.get_score() + self.system.publish({ + 'event_name': 'grade', + 'value': score['score'], + 'max_value': score['total'], + }) + + def check_problem(self, get): ''' Checks whether answers to a problem are correct, and returns a map of correct/incorrect answers: - {'success' : bool, + {'success' : 'correct' | 'incorrect' | AJAX alert msg string, 'contents' : html} ''' event_info = dict() @@ -545,7 +660,6 @@ class CapaModule(XModule): answers = self.make_dict_of_responses(get) event_info['answers'] = convert_files_to_filenames(answers) - # Too late. Cannot submit if self.closed(): event_info['failure'] = 'closed' @@ -553,7 +667,7 @@ class CapaModule(XModule): raise NotFoundError('Problem is closed') # Problem submitted. 
Student should reset before checking again - if self.lcp.done and self.rerandomize == "always": + if self.done and self.rerandomize == "always": event_info['failure'] = 'unreset' self.system.track_function('save_problem_check_fail', event_info) raise NotFoundError('Problem must be reset before it can be checked again') @@ -565,12 +679,11 @@ class CapaModule(XModule): waittime_between_requests = self.system.xqueue['waittime'] if (current_time - prev_submit_time).total_seconds() < waittime_between_requests: msg = 'You must wait at least %d seconds between submissions' % waittime_between_requests - return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback + return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback try: - old_state = self.lcp.get_state() - lcp_id = self.lcp.problem_id correct_map = self.lcp.grade_answers(answers) + self.set_state_from_lcp() except StudentInputError as inst: log.exception("StudentInputError in capa_module:problem_check") return {'success': inst.message} @@ -579,12 +692,14 @@ class CapaModule(XModule): msg = "Error checking problem: " + str(err) msg += '\nTraceback:\n' + traceback.format_exc() return {'success': msg} - log.exception("Error in capa_module problem checking") - raise Exception("error in capa_module") + raise self.attempts = self.attempts + 1 self.lcp.done = True + self.set_state_from_lcp() + self.publish_grade() + # success = correct if ALL questions in this problem are correct success = 'correct' for answer_id in correct_map: @@ -595,11 +710,11 @@ class CapaModule(XModule): # 'success' will always be incorrect event_info['correct_map'] = correct_map.get_dict() event_info['success'] = success - event_info['attempts'] = self.attempts + event_info['attempts'] = self.attempts self.system.track_function('save_problem_check', event_info) - if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback - 
self.system.psychometrics_handler(self.get_instance_state()) + if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback + self.system.psychometrics_handler(self.get_instance_state()) # render problem into HTML html = self.get_problem_html(encapsulate=False) @@ -622,31 +737,41 @@ class CapaModule(XModule): event_info['answers'] = answers # Too late. Cannot submit - if self.closed(): + if self.closed() and not self.max_attempts ==0: event_info['failure'] = 'closed' self.system.track_function('save_problem_fail', event_info) return {'success': False, - 'error': "Problem is closed"} + 'msg': "Problem is closed"} # Problem submitted. Student should reset before saving # again. - if self.lcp.done and self.rerandomize == "always": + if self.done and self.rerandomize == "always": event_info['failure'] = 'done' self.system.track_function('save_problem_fail', event_info) return {'success': False, - 'error': "Problem needs to be reset prior to save."} + 'msg': "Problem needs to be reset prior to save"} self.lcp.student_answers = answers - # TODO: should this be save_problem_fail? Looks like success to me... - self.system.track_function('save_problem_fail', event_info) - return {'success': True} + self.set_state_from_lcp() + + self.system.track_function('save_problem_success', event_info) + msg = "Your answers have been saved" + if not self.max_attempts ==0: + msg += " but not graded. Hit 'Check' to grade them." + return {'success': True, + 'msg': msg} def reset_problem(self, get): ''' Changes problem state to unfinished -- removes student answers, and causes problem to rerender itself. - Returns problem html as { 'html' : html-string }. + Returns a dictionary of the form: + {'success': True/False, + 'html': Problem HTML string } + + If an error occurs, the dictionary will also have an + 'error' key containing an error message. 
''' event_info = dict() event_info['old_state'] = self.lcp.get_state() @@ -658,29 +783,33 @@ class CapaModule(XModule): return {'success': False, 'error': "Problem is closed"} - if not self.lcp.done: + if not self.done: event_info['failure'] = 'not_done' self.system.track_function('reset_problem_fail', event_info) return {'success': False, 'error': "Refresh the page and make an attempt before resetting."} - self.lcp.do_reset() if self.rerandomize in ["always", "onreset"]: # reset random number generator seed (note the self.lcp.get_state() # in next line) - self.lcp.seed = None + seed = None + else: + seed = self.lcp.seed - self.lcp = LoncapaProblem(self.definition['data'], - self.location.html_id(), self.lcp.get_state(), - system=self.system) + # Generate a new problem with either the previous seed or a new seed + self.lcp = self.new_lcp({'seed': seed}) + + # Pull in the new problem seed + self.set_state_from_lcp() event_info['new_state'] = self.lcp.get_state() self.system.track_function('reset_problem', event_info) - return {'html': self.get_problem_html(encapsulate=False)} + return {'success': True, + 'html': self.get_problem_html(encapsulate=False)} -class CapaDescriptor(RawDescriptor): +class CapaDescriptor(CapaFields, RawDescriptor): """ Module implementing problems in the LON-CAPA format, as implemented by capa.capa_problem @@ -701,20 +830,27 @@ class CapaDescriptor(RawDescriptor): # actually use type and points? metadata_attributes = RawDescriptor.metadata_attributes + ('type', 'points') + # The capa format specifies that what we call max_attempts in the code + # is the attribute `attempts`. 
This will do that conversion + metadata_translations = dict(RawDescriptor.metadata_translations) + metadata_translations['attempts'] = 'max_attempts' + def get_context(self): _context = RawDescriptor.get_context(self) - _context.update({'markdown': self.metadata.get('markdown', ''), - 'enable_markdown' : 'markdown' in self.metadata}) + _context.update({'markdown': self.markdown, + 'enable_markdown': self.markdown is not None}) return _context @property def editable_metadata_fields(self): - """Remove any metadata from the editable fields which have their own editor or shouldn't be edited by user.""" - subset = [field for field in super(CapaDescriptor,self).editable_metadata_fields - if field not in ['markdown', 'empty']] + """Remove metadata from the editable fields since it has its own editor""" + subset = super(CapaDescriptor, self).editable_metadata_fields + if 'markdown' in subset: + del subset['markdown'] + if 'empty' in subset: + del subset['empty'] return subset - # VS[compat] # TODO (cpennington): Delete this method once all fall 2012 course are being # edited in the cms @@ -724,12 +860,3 @@ class CapaDescriptor(RawDescriptor): 'problems/' + path[8:], path[8:], ] - - def __init__(self, *args, **kwargs): - super(CapaDescriptor, self).__init__(*args, **kwargs) - - weight_string = self.metadata.get('weight', None) - if weight_string: - self.weight = float(weight_string) - else: - self.weight = None diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index 2da15a4086..48fbfcced1 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -1,37 +1,77 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree -from lxml.html import rewrite_links -from path import path -import os -import sys from pkg_resources import resource_string -from 
.capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children +from xmodule.raw_module import RawDescriptor from .x_module import XModule -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location -from combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor +from xblock.core import Integer, Scope, BlockScope, ModelType, String, Boolean, Object, Float, List +from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor +from collections import namedtuple log = logging.getLogger("mitx.courseware") +V1_SETTINGS_ATTRIBUTES = ["display_name", "attempts", "is_graded", "accept_file_upload", + "skip_spelling_checks", "due", "graceperiod", "max_score"] -VERSION_TUPLES = ( - ('1', CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module), -) +V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state", + "student_attempts", "ready_to_reset"] + +V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES + +VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes']) +VERSION_TUPLES = { + 1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES, + V1_STUDENT_ATTRIBUTES), +} DEFAULT_VERSION = 1 -DEFAULT_VERSION = str(DEFAULT_VERSION) -class CombinedOpenEndedModule(XModule): + +class VersionInteger(Integer): + """ + A model type that converts from strings to integers when reading from json. + Also does error checking to see if version is correct or not. 
+ """ + + def from_json(self, value): + try: + value = int(value) + if value not in VERSION_TUPLES: + version_error_string = "Could not find version {0}, using version {1} instead" + log.error(version_error_string.format(value, DEFAULT_VERSION)) + value = DEFAULT_VERSION + except: + value = DEFAULT_VERSION + return value + + +class CombinedOpenEndedFields(object): + display_name = String(help="Display name for this module", default="Open Ended Grading", scope=Scope.settings) + current_task_number = Integer(help="Current task that the student is on.", default=0, scope=Scope.student_state) + task_states = List(help="List of state dictionaries of each task within this module.", scope=Scope.student_state) + state = String(help="Which step within the current task that the student is on.", default="initial", + scope=Scope.student_state) + student_attempts = Integer(help="Number of attempts taken by the student on this problem", default=0, + scope=Scope.student_state) + ready_to_reset = Boolean(help="If the problem is ready to be reset or not.", default=False, + scope=Scope.student_state) + attempts = Integer(help="Maximum number of attempts that a student is allowed.", default=1, scope=Scope.settings) + is_graded = Boolean(help="Whether or not the problem is graded.", default=False, scope=Scope.settings) + accept_file_upload = Boolean(help="Whether or not the problem accepts file uploads.", default=False, + scope=Scope.settings) + skip_spelling_checks = Boolean(help="Whether or not to skip initial spelling checks.", default=True, + scope=Scope.settings) + due = String(help="Date that this problem is due by", default=None, scope=Scope.settings) + graceperiod = String(help="Amount of time after the due date that submissions will be accepted", default=None, + scope=Scope.settings) + max_score = Integer(help="Maximum score for the problem.", default=1, scope=Scope.settings) + version = VersionInteger(help="Current version number", default=DEFAULT_VERSION, 
scope=Scope.settings) + data = String(help="XML data for the problem", scope=Scope.content) + + +class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule): """ This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). It transitions between problems, and support arbitrary ordering. @@ -62,6 +102,8 @@ class CombinedOpenEndedModule(XModule): INTERMEDIATE_DONE = 'intermediate_done' DONE = 'done' + icon_class = 'problem' + js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/javascript_loader.coffee'), @@ -70,11 +112,8 @@ class CombinedOpenEndedModule(XModule): css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - + def __init__(self, system, location, descriptor, model_data): + XModule.__init__(self, system, location, descriptor, model_data) """ Definition file should have one or many task blocks, a rubric block, and a prompt block: @@ -113,45 +152,37 @@ class CombinedOpenEndedModule(XModule): self.system = system self.system.set('location', location) - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} + if self.task_states is None: + self.task_states = [] - self.version = self.metadata.get('version', DEFAULT_VERSION) - if not isinstance(self.version, basestring): - try: - self.version = str(self.version) - except: - log.error("Version {0} is not correct. 
Going with version {1}".format(self.version, DEFAULT_VERSION)) - self.version = DEFAULT_VERSION + version_tuple = VERSION_TUPLES[self.version] - versions = [i[0] for i in VERSION_TUPLES] - descriptors = [i[1] for i in VERSION_TUPLES] - modules = [i[2] for i in VERSION_TUPLES] + self.student_attributes = version_tuple.student_attributes + self.settings_attributes = version_tuple.settings_attributes - try: - version_index = versions.index(self.version) - except: - log.error("Version {0} is not correct. Going with version {1}".format(self.version, DEFAULT_VERSION)) - self.version = DEFAULT_VERSION - version_index = versions.index(self.version) + attributes = self.student_attributes + self.settings_attributes static_data = { - 'rewrite_content_links' : self.rewrite_content_links, + 'rewrite_content_links': self.rewrite_content_links, } - - self.child_descriptor = descriptors[version_index](self.system) - self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['xml_string']), self.system) - self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor, - instance_state = json.dumps(instance_state), metadata = self.metadata, static_data= static_data) + instance_state = {k: getattr(self, k) for k in attributes} + self.child_descriptor = version_tuple.descriptor(self.system) + self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system) + self.child_module = version_tuple.module(self.system, location, self.child_definition, self.child_descriptor, + instance_state=instance_state, static_data=static_data, + attributes=attributes) + self.save_instance_data() def get_html(self): - return self.child_module.get_html() + self.save_instance_data() + return_value = self.child_module.get_html() + return return_value def handle_ajax(self, dispatch, get): - return self.child_module.handle_ajax(dispatch, get) + self.save_instance_data() + 
return_value = self.child_module.handle_ajax(dispatch, get) + self.save_instance_data() + return return_value def get_instance_state(self): return self.child_module.get_instance_state() @@ -159,8 +190,8 @@ class CombinedOpenEndedModule(XModule): def get_score(self): return self.child_module.get_score() - def max_score(self): - return self.child_module.max_score() + #def max_score(self): + # return self.child_module.max_score() def get_progress(self): return self.child_module.get_progress() @@ -169,16 +200,18 @@ class CombinedOpenEndedModule(XModule): def due_date(self): return self.child_module.due_date - @property - def display_name(self): - return self.child_module.display_name + def save_instance_data(self): + for attribute in self.student_attributes: + child_attr = getattr(self.child_module, attribute) + if child_attr != getattr(self, attribute): + setattr(self, attribute, getattr(self.child_module, attribute)) -class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor): +class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor): """ Module for adding combined open ended questions """ - mako_template = "widgets/html-edit.html" + mako_template = "widgets/raw-edit.html" module_class = CombinedOpenEndedModule filename_extension = "xml" @@ -186,35 +219,3 @@ class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "combinedopenended" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" - - @classmethod - def definition_from_xml(cls, xml_object, system): - """ - Pull out the individual tasks, the rubric, and the prompt, and parse - - Returns: - { - 'rubric': 'some-html', - 'prompt': 'some-html', - 'task_xml': dictionary of xml strings, - } - """ - - return {'xml_string' : etree.tostring(xml_object), 'metadata' : xml_object.attrib} - - - def definition_to_xml(self, resource_fs): - '''Return an xml element representing this 
definition.''' - elt = etree.Element('combinedopenended') - - def add_child(k): - child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) - child_node = etree.fromstring(child_str) - elt.append(child_node) - - for child in ['task']: - add_child(child) - - return elt \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/conditional_module.py b/common/lib/xmodule/xmodule/conditional_module.py index 787d355c4a..a9375cae78 100644 --- a/common/lib/xmodule/xmodule/conditional_module.py +++ b/common/lib/xmodule/xmodule/conditional_module.py @@ -1,126 +1,147 @@ +"""Conditional module is the xmodule, which you can use for disabling +some xmodules by conditions. +""" + import json import logging +from lxml import etree +from pkg_resources import resource_string from xmodule.x_module import XModule from xmodule.modulestore import Location from xmodule.seq_module import SequenceDescriptor +from xblock.core import String, Scope, List +from xmodule.modulestore.exceptions import ItemNotFoundError -from pkg_resources import resource_string log = logging.getLogger('mitx.' + __name__) -class ConditionalModule(XModule): - ''' +class ConditionalFields(object): + show_tag_list = List(help="Poll answers", scope=Scope.content) + + +class ConditionalModule(ConditionalFields, XModule): + """ Blocks child module from showing unless certain conditions are met. 
Example: - + + - - + tag attributes: + sources - location id of required modules, separated by ';' - ''' + completed - map to `is_completed` module method + attempted - map to `is_attempted` module method + poll_answer - map to `poll_answer` module attribute + voted - map to `voted` module attribute - js = {'coffee': [resource_string(__name__, 'js/src/conditional/display.coffee'), + tag attributes: + sources - location id of modules, separated by ';' + """ + + js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'), + resource_string(__name__, 'js/src/conditional/display.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), - resource_string(__name__, 'js/src/javascript_loader.coffee'), + ]} js_module_name = "Conditional" css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]} + # Map + # key: + # value: + conditions_map = { + 'poll_answer': 'poll_answer', # poll_question attr + 'completed': 'is_completed', # capa_problem attr + 'attempted': 'is_attempted', # capa_problem attr + 'voted': 'voted' # poll_question attr + } - def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, **kwargs): - """ - In addition to the normal XModule init, provide: - - self.condition = string describing condition required - - """ - XModule.__init__(self, system, location, definition, descriptor, instance_state, shared_state, **kwargs) - self.contents = None - self.condition = self.metadata.get('condition', '') - self._get_required_modules() - children = self.get_display_items() - if children: - self.icon_class = children[0].get_icon_class() - #log.debug('conditional module required=%s' % self.required_modules_list) - - def _get_required_modules(self): - self.required_modules = [] - for descriptor in self.descriptor.get_required_module_descriptors(): - module = self.system.get_module(descriptor) - self.required_modules.append(module) - #log.debug('required_modules=%s' % (self.required_modules)) + 
def _get_condition(self): + # Get first valid condition. + for xml_attr, attr_name in self.conditions_map.iteritems(): + xml_value = self.descriptor.xml_attributes.get(xml_attr) + if xml_value: + return xml_value, attr_name + raise Exception('Error in conditional module: unknown condition "%s"' + % xml_attr) def is_condition_satisfied(self): - self._get_required_modules() + self.required_modules = [self.system.get_module(descriptor) for + descriptor in self.descriptor.get_required_module_descriptors()] - if self.condition == 'require_completed': - # all required modules must be completed, as determined by - # the modules .is_completed() method - for module in self.required_modules: - #log.debug('in is_condition_satisfied; student_answers=%s' % module.lcp.student_answers) - #log.debug('in is_condition_satisfied; instance_state=%s' % module.instance_state) - if not hasattr(module, 'is_completed'): - raise Exception('Error in conditional module: required module %s has no .is_completed() method' % module) - if not module.is_completed(): - log.debug('conditional module: %s not completed' % module) - return False - else: - log.debug('conditional module: %s IS completed' % module) - return True - elif self.condition == 'require_attempted': - # all required modules must be attempted, as determined by - # the modules .is_attempted() method - for module in self.required_modules: - if not hasattr(module, 'is_attempted'): - raise Exception('Error in conditional module: required module %s has no .is_attempted() method' % module) - if not module.is_attempted(): - log.debug('conditional module: %s not attempted' % module) - return False - else: - log.debug('conditional module: %s IS attempted' % module) - return True - else: - raise Exception('Error in conditional module: unknown condition "%s"' % self.condition) + xml_value, attr_name = self._get_condition() - return True + if xml_value and self.required_modules: + for module in self.required_modules: + if not hasattr(module, 
attr_name): + raise Exception('Error in conditional module: \ + required module {module} has no {module_attr}'.format( + module=module, module_attr=attr_name)) + + attr = getattr(module, attr_name) + if callable(attr): + attr = attr() + + if xml_value != str(attr): + break + else: + return True + return False def get_html(self): - self.is_condition_satisfied() + # Calculate html ids of dependencies + self.required_html_ids = [descriptor.location.html_id() for + descriptor in self.descriptor.get_required_module_descriptors()] + return self.system.render_template('conditional_ajax.html', { 'element_id': self.location.html_id(), 'id': self.id, 'ajax_url': self.system.ajax_url, + 'depends': ';'.join(self.required_html_ids) }) def handle_ajax(self, dispatch, post): - ''' - This is called by courseware.module_render, to handle an AJAX call. - ''' - #log.debug('conditional_module handle_ajax: dispatch=%s' % dispatch) - + """This is called by courseware.moduleodule_render, to handle + an AJAX call. 
+ """ if not self.is_condition_satisfied(): - context = {'module': self} - html = self.system.render_template('conditional_module.html', context) - return json.dumps({'html': html}) + message = self.descriptor.xml_attributes.get('message') + context = {'module': self, + 'message': message} + html = self.system.render_template('conditional_module.html', + context) + return json.dumps({'html': [html], 'message': bool(message)}) - if self.contents is None: - self.contents = [child.get_html() for child in self.get_display_items()] - - # for now, just deal with one child - html = self.contents[0] + html = [child.get_html() for child in self.get_display_items()] return json.dumps({'html': html}) + def get_icon_class(self): + new_class = 'other' + if self.is_condition_satisfied(): + # HACK: This shouldn't be hard-coded to two types + # OBSOLETE: This obsoletes 'type' + class_priority = ['video', 'problem'] + + child_classes = [self.system.get_module(child_descriptor).get_icon_class() + for child_descriptor in self.descriptor.get_children()] + for c in class_priority: + if c in child_classes: + new_class = c + return new_class + + +class ConditionalDescriptor(ConditionalFields, SequenceDescriptor): + """Descriptor for conditional xmodule.""" + _tag_name = 'conditional' -class ConditionalDescriptor(SequenceDescriptor): module_class = ConditionalModule filename_extension = "xml" @@ -128,26 +149,68 @@ class ConditionalDescriptor(SequenceDescriptor): stores_state = True has_score = False - def __init__(self, *args, **kwargs): - super(ConditionalDescriptor, self).__init__(*args, **kwargs) - required_module_list = [tuple(x.split('/', 1)) for x in self.metadata.get('required', '').split('&')] - self.required_module_locations = [] - for rm in required_module_list: - try: - (tag, name) = rm - except Exception as err: - msg = "Specification of required module in conditional is broken: %s" % self.metadata.get('required') - log.warning(msg) - self.system.error_tracker(msg) - continue 
- loc = self.location.dict() - loc['category'] = tag - loc['name'] = name - self.required_module_locations.append(Location(loc)) - log.debug('ConditionalDescriptor required_module_locations=%s' % self.required_module_locations) + @staticmethod + def parse_sources(xml_element, system, return_descriptor=False): + """Parse xml_element 'sources' attr and: + if return_descriptor=True - return list of descriptors + if return_descriptor=False - return list of locations + """ + result = [] + sources = xml_element.get('sources') + if sources: + locations = [location.strip() for location in sources.split(';')] + for location in locations: + if Location.is_valid(location): # Check valid location url. + try: + if return_descriptor: + descriptor = system.load_item(location) + result.append(descriptor) + else: + result.append(location) + except ItemNotFoundError: + msg = "Invalid module by location." + log.exception(msg) + system.error_tracker(msg) + return result def get_required_module_descriptors(self): - """Returns a list of XModuleDescritpor instances upon which this module depends, but are - not children of this module""" - return [self.system.load_item(loc) for loc in self.required_module_locations] + """Returns a list of XModuleDescritpor instances upon + which this module depends. + """ + return ConditionalDescriptor.parse_sources( + self.xml_attributes, self.system, True) + + @classmethod + def definition_from_xml(cls, xml_object, system): + children = [] + show_tag_list = [] + for child in xml_object: + if child.tag == 'show': + location = ConditionalDescriptor.parse_sources( + child, system) + children.extend(location) + show_tag_list.extend(location) + else: + try: + descriptor = system.process_xml(etree.tostring(child)) + module_url = descriptor.location.url() + children.append(module_url) + except: + msg = "Unable to load child when parsing Conditional." 
+ log.exception(msg) + system.error_tracker(msg) + return {'show_tag_list': show_tag_list}, children + + def definition_to_xml(self, resource_fs): + xml_object = etree.Element(self._tag_name) + for child in self.get_children(): + location = str(child.location) + if location in self.show_tag_list: + show_str = '<{tag_name} sources="{sources}" />'.format( + tag_name='show', sources=location) + xml_object.append(etree.fromstring(show_str)) + else: + xml_object.append( + etree.fromstring(child.export_to_xml(resource_fs))) + return xml_object diff --git a/common/lib/xmodule/xmodule/contentstore/content.py b/common/lib/xmodule/xmodule/contentstore/content.py index be33401bc8..9dc4b1367b 100644 --- a/common/lib/xmodule/xmodule/contentstore/content.py +++ b/common/lib/xmodule/xmodule/contentstore/content.py @@ -35,7 +35,8 @@ class StaticContent(object): @staticmethod def compute_location(org, course, name, revision=None, is_thumbnail=False): name = name.replace('/', '_') - return Location([XASSET_LOCATION_TAG, org, course, 'asset' if not is_thumbnail else 'thumbnail', Location.clean(name), revision]) + return Location([XASSET_LOCATION_TAG, org, course, 'asset' if not is_thumbnail else 'thumbnail', + Location.clean_keeping_underscores(name), revision]) def get_id(self): return StaticContent.get_id_from_location(self.location) diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py index 2c69c449ba..7c47e0887a 100644 --- a/common/lib/xmodule/xmodule/course_module.py +++ b/common/lib/xmodule/xmodule/course_module.py @@ -9,7 +9,7 @@ from datetime import datetime from xmodule.modulestore import Location from xmodule.seq_module import SequenceDescriptor, SequenceModule -from xmodule.timeparse import parse_time, stringify_time +from xmodule.timeparse import parse_time from xmodule.util.decorators import lazyproperty from xmodule.graders import grader_from_conf from datetime import datetime @@ -19,107 +19,212 @@ import requests import 
time import copy +from xblock.core import Scope, ModelType, List, String, Object, Boolean +from .fields import Date + log = logging.getLogger(__name__) +class StringOrDate(Date): + def from_json(self, value): + """ + Parse an optional metadata key containing a time: if present, complain + if it doesn't parse. + Return None if not present or invalid. + """ + if value is None: + return None + + try: + return time.strptime(value, self.time_format) + except ValueError: + return value + + def to_json(self, value): + """ + Convert a time struct to a string + """ + if value is None: + return None + + try: + return time.strftime(self.time_format, value) + except (ValueError, TypeError): + return value + + + edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False, remove_comments=True, remove_blank_text=True) _cached_toc = {} +class Textbook(object): + def __init__(self, title, book_url): + self.title = title + self.book_url = book_url + self.start_page = int(self.table_of_contents[0].attrib['page']) -class CourseDescriptor(SequenceDescriptor): - module_class = SequenceModule + # The last page should be the last element in the table of contents, + # but it may be nested. So recurse all the way down the last element + last_el = self.table_of_contents[-1] + while last_el.getchildren(): + last_el = last_el[-1] - template_dir_name = 'course' + self.end_page = int(last_el.attrib['page']) - class Textbook: - def __init__(self, title, book_url): - self.title = title - self.book_url = book_url - self.table_of_contents = self._get_toc_from_s3() - self.start_page = int(self.table_of_contents[0].attrib['page']) + @lazyproperty + def table_of_contents(self): + """ + Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url - # The last page should be the last element in the table of contents, - # but it may be nested. 
So recurse all the way down the last element - last_el = self.table_of_contents[-1] - while last_el.getchildren(): - last_el = last_el[-1] + Returns XML tree representation of the table of contents + """ + toc_url = self.book_url + 'toc.xml' - self.end_page = int(last_el.attrib['page']) + # cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores) + # course modules have a very short lifespan and are constantly being created and torn down. + # Since this module in the __init__() method does a synchronous call to AWS to get the TOC + # this is causing a big performance problem. So let's be a bit smarter about this and cache + # each fetch and store in-mem for 10 minutes. + # NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and + # rewrite to use the traditional Django in-memory cache. + try: + # see if we already fetched this + if toc_url in _cached_toc: + (table_of_contents, timestamp) = _cached_toc[toc_url] + age = datetime.now() - timestamp + # expire every 10 minutes + if age.seconds < 600: + return table_of_contents + except Exception as err: + pass - @property - def table_of_contents(self): - return self.table_of_contents + # Get the table of contents from S3 + log.info("Retrieving textbook table of contents from %s" % toc_url) + try: + r = requests.get(toc_url) + except Exception as err: + msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url) + log.error(msg) + raise Exception(msg) - def _get_toc_from_s3(self): - """ - Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url + # TOC is XML. 
Parse it + try: + table_of_contents = etree.fromstring(r.text) + except Exception as err: + msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url) + log.error(msg) + raise Exception(msg) - Returns XML tree representation of the table of contents - """ - toc_url = self.book_url + 'toc.xml' + return table_of_contents - # cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores) - # course modules have a very short lifespan and are constantly being created and torn down. - # Since this module in the __init__() method does a synchronous call to AWS to get the TOC - # this is causing a big performance problem. So let's be a bit smarter about this and cache - # each fetch and store in-mem for 10 minutes. - # NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and - # rewrite to use the traditional Django in-memory cache. + +class TextbookList(List): + def from_json(self, values): + textbooks = [] + for title, book_url in values: try: - # see if we already fetched this - if toc_url in _cached_toc: - (table_of_contents, timestamp) = _cached_toc[toc_url] - age = datetime.now() - timestamp - # expire every 10 minutes - if age.seconds < 600: - return table_of_contents - except Exception as err: - pass - - # Get the table of contents from S3 - log.info("Retrieving textbook table of contents from %s" % toc_url) - try: - r = requests.get(toc_url) - except Exception as err: - msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url) - log.error(msg) - raise Exception(msg) - - # TOC is XML. 
Parse it - try: - table_of_contents = etree.fromstring(r.text) - _cached_toc[toc_url] = (table_of_contents, datetime.now()) - except Exception as err: - msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url) - log.error(msg) - raise Exception(msg) - - return table_of_contents - - def __init__(self, system, definition=None, **kwargs): - super(CourseDescriptor, self).__init__(system, definition, **kwargs) - self.textbooks = [] - for title, book_url in self.definition['data']['textbooks']: - try: - self.textbooks.append(self.Textbook(title, book_url)) + textbooks.append(Textbook(title, book_url)) except: # If we can't get to S3 (e.g. on a train with no internet), don't break # the rest of the courseware. log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url)) continue - self.wiki_slug = self.definition['data']['wiki_slug'] or self.location.course + return textbooks + + def to_json(self, values): + json_data = [] + for val in values: + if isinstance(val, Textbook): + json_data.append((val.title, val.book_url)) + elif isinstance(val, tuple): + json_data.append(val) + else: + continue + return json_data + + +class CourseFields(object): + textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course", scope=Scope.content) + wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content) + enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings) + enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings) + start = Date(help="Start time when this module is visible", scope=Scope.settings) + end = Date(help="Date that this class ends", scope=Scope.settings) + advertised_start = StringOrDate(help="Date that this course is advertised to start", scope=Scope.settings) + grading_policy = Object(help="Grading policy definition for this class", scope=Scope.content) + show_calculator = 
Boolean(help="Whether to show the calculator in this course", default=False, scope=Scope.settings) + display_name = String(help="Display name for this module", scope=Scope.settings) + tabs = List(help="List of tabs to enable in this course", scope=Scope.settings) + end_of_course_survey_url = String(help="Url for the end-of-course survey", scope=Scope.settings) + discussion_blackouts = List(help="List of pairs of start/end dates for discussion blackouts", scope=Scope.settings) + discussion_topics = Object( + help="Map of topics names to ids", + scope=Scope.settings, + computed_default=lambda c: {'General': {'id': c.location.html_id()}}, + ) + testcenter_info = Object(help="Dictionary of Test Center info", scope=Scope.settings) + announcement = Date(help="Date this course is announced", scope=Scope.settings) + cohort_config = Object(help="Dictionary defining cohort configuration", scope=Scope.settings) + is_new = Boolean(help="Whether this course should be flagged as new", scope=Scope.settings) + no_grade = Boolean(help="True if this course isn't graded", default=False, scope=Scope.settings) + disable_progress_graph = Boolean(help="True if this course shouldn't display the progress graph", default=False, scope=Scope.settings) + pdf_textbooks = List(help="List of dictionaries containing pdf_textbook configuration", scope=Scope.settings) + html_textbooks = List(help="List of dictionaries containing html_textbook configuration", scope=Scope.settings) + remote_gradebook = Object(scope=Scope.settings) + allow_anonymous = Boolean(scope=Scope.settings, default=True) + allow_anonymous_to_peers = Boolean(scope=Scope.settings, default=False) + advanced_modules = List(help="Beta modules used in your course", scope=Scope.settings) + has_children = True + + info_sidebar_name = String(scope=Scope.settings, default='Course Handouts') + + # An extra property is used rather than the wiki_slug/number because + # there are courses that change the number for different runs. 
This allows + # courses to share the same css_class across runs even if they have + # different numbers. + # + # TODO get rid of this as soon as possible or potentially build in a robust + # way to add in course-specific styling. There needs to be a discussion + # about the right way to do this, but arjun will address this ASAP. Also + # note that the courseware template needs to change when this is removed. + css_class = String(help="DO NOT USE THIS", scope=Scope.settings) + + # TODO: This is a quick kludge to allow CS50 (and other courses) to + # specify their own discussion forums as external links by specifying a + # "discussion_link" in their policy JSON file. This should later get + # folded in with Syllabus, Course Info, and additional Custom tabs in a + # more sensible framework later. + discussion_link = String(help="DO NOT USE THIS", scope=Scope.settings) + + # TODO: same as above, intended to let internal CS50 hide the progress tab + # until we get grade integration set up. + # Explicit comparison to True because we always want to return a bool. + hide_progress_tab = Boolean(help="DO NOT USE THIS", scope=Scope.settings) + + +class CourseDescriptor(CourseFields, SequenceDescriptor): + module_class = SequenceModule + + template_dir_name = 'course' + + + def __init__(self, *args, **kwargs): + super(CourseDescriptor, self).__init__(*args, **kwargs) + + if self.wiki_slug is None: + self.wiki_slug = self.location.course msg = None if self.start is None: msg = "Course loaded without a valid start date. id = %s" % self.id # hack it -- start in 1970 - self.metadata['start'] = stringify_time(time.gmtime(0)) + self.start = time.gmtime(0) log.critical(msg) - system.error_tracker(msg) + self.system.error_tracker(msg) # NOTE: relies on the modulestore to call set_grading_policy() right after # init. 
(Modulestore is in charge of figuring out where to load the policy from) @@ -127,10 +232,11 @@ class CourseDescriptor(SequenceDescriptor): # NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically # disable the syllabus content for courses that do not provide a syllabus self.syllabus_present = self.system.resources_fs.exists(path('syllabus')) - self.set_grading_policy(self.definition['data'].get('grading_policy', None)) + self._grading_policy = {} + self.set_grading_policy(self.grading_policy) self.test_center_exams = [] - test_center_info = self.metadata.get('testcenter_info') + test_center_info = self.testcenter_info if test_center_info is not None: for exam_name in test_center_info: try: @@ -143,11 +249,11 @@ class CourseDescriptor(SequenceDescriptor): log.error(msg) continue - def defaut_grading_policy(self): + def default_grading_policy(self): """ Return a dict which is a copy of the default grading policy """ - default = {"GRADER": [ + return {"GRADER": [ { "type": "Homework", "min_count": 12, @@ -179,7 +285,6 @@ class CourseDescriptor(SequenceDescriptor): "GRADE_CUTOFFS": { "Pass": 0.5 }} - return copy.deepcopy(default) def set_grading_policy(self, course_policy): """ @@ -190,17 +295,15 @@ class CourseDescriptor(SequenceDescriptor): course_policy = {} # Load the global settings as a dictionary - grading_policy = self.defaut_grading_policy() + grading_policy = self.default_grading_policy() # Override any global settings with the course settings grading_policy.update(course_policy) # Here is where we should parse any configurations, so that we can fail early - grading_policy['RAW_GRADER'] = grading_policy['GRADER'] # used for cms access - grading_policy['GRADER'] = grader_from_conf(grading_policy['GRADER']) - self._grading_policy = grading_policy - - + # Use setters so that side effecting to .definitions works + self.raw_grader = grading_policy['GRADER'] # used for cms access + self.grade_cutoffs = grading_policy['GRADE_CUTOFFS'] 
@classmethod def read_grading_policy(cls, paths, system): @@ -223,7 +326,6 @@ class CourseDescriptor(SequenceDescriptor): return policy_str - @classmethod def from_xml(cls, xml_data, system, org=None, course=None): instance = super(CourseDescriptor, cls).from_xml(xml_data, system, org, course) @@ -247,18 +349,17 @@ class CourseDescriptor(SequenceDescriptor): policy = json.loads(cls.read_grading_policy(paths, system)) except ValueError: system.error_tracker("Unable to decode grading policy as json") - policy = None + policy = {} # cdodge: import the grading policy information that is on disk and put into the # descriptor 'definition' bucket as a dictionary so that it is persisted in the DB - instance.definition['data']['grading_policy'] = policy + instance.grading_policy = policy # now set the current instance. set_grading_policy() will apply some inheritance rules instance.set_grading_policy(policy) return instance - @classmethod def definition_from_xml(cls, xml_object, system): textbooks = [] @@ -273,12 +374,12 @@ class CourseDescriptor(SequenceDescriptor): wiki_slug = wiki_tag.attrib.get("slug", default=None) xml_object.remove(wiki_tag) - definition = super(CourseDescriptor, cls).definition_from_xml(xml_object, system) + definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system) - definition.setdefault('data', {})['textbooks'] = textbooks - definition['data']['wiki_slug'] = wiki_slug + definition['textbooks'] = textbooks + definition['wiki_slug'] = wiki_slug - return definition + return definition, children def has_ended(self): """ @@ -293,33 +394,9 @@ class CourseDescriptor(SequenceDescriptor): def has_started(self): return time.gmtime() > self.start - @property - def end(self): - return self._try_parse_time("end") - @end.setter - def end(self, value): - if isinstance(value, time.struct_time): - self.metadata['end'] = stringify_time(value) - @property - def enrollment_start(self): - return self._try_parse_time("enrollment_start") 
- - @enrollment_start.setter - def enrollment_start(self, value): - if isinstance(value, time.struct_time): - self.metadata['enrollment_start'] = stringify_time(value) - @property - def enrollment_end(self): - return self._try_parse_time("enrollment_end") - - @enrollment_end.setter - def enrollment_end(self, value): - if isinstance(value, time.struct_time): - self.metadata['enrollment_end'] = stringify_time(value) - @property def grader(self): - return self._grading_policy['GRADER'] + return grader_from_conf(self.raw_grader) @property def raw_grader(self): @@ -329,7 +406,7 @@ class CourseDescriptor(SequenceDescriptor): def raw_grader(self, value): # NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf self._grading_policy['RAW_GRADER'] = value - self.definition['data'].setdefault('grading_policy', {})['GRADER'] = value + self.grading_policy['GRADER'] = value @property def grade_cutoffs(self): @@ -338,45 +415,58 @@ class CourseDescriptor(SequenceDescriptor): @grade_cutoffs.setter def grade_cutoffs(self, value): self._grading_policy['GRADE_CUTOFFS'] = value - self.definition['data'].setdefault('grading_policy', {})['GRADE_CUTOFFS'] = value + + # XBlock fields don't update after mutation + policy = self.grading_policy + policy['GRADE_CUTOFFS'] = value + self.grading_policy = policy @property def lowest_passing_grade(self): return min(self._grading_policy['GRADE_CUTOFFS'].values()) - @property - def tabs(self): - """ - Return the tabs config, as a python object, or None if not specified. - """ - return self.metadata.get('tabs') - - @tabs.setter - def tabs(self, value): - self.metadata['tabs'] = value - - @property - def show_calculator(self): - return self.metadata.get("show_calculator", None) == "Yes" - @property def is_cohorted(self): """ Return whether the course is cohorted. 
""" - config = self.metadata.get("cohort_config") + config = self.cohort_config if config is None: return False return bool(config.get("cohorted")) + @property + def auto_cohort(self): + """ + Return whether the course is auto-cohorted. + """ + if not self.is_cohorted: + return False + + return bool(self.cohort_config.get( + "auto_cohort", False)) + + @property + def auto_cohort_groups(self): + """ + Return the list of groups to put students into. Returns [] if not + specified. Returns specified list even if is_cohorted and/or auto_cohort are + false. + """ + if self.cohort_config is None: + return [] + else: + return self.cohort_config.get("auto_cohort_groups", []) + + @property def top_level_discussion_topic_ids(self): """ Return list of topic ids defined in course policy. """ - topics = self.metadata.get("discussion_topics", {}) + topics = self.discussion_topics return [d["id"] for d in topics.values()] @@ -387,7 +477,7 @@ class CourseDescriptor(SequenceDescriptor): the empty set. Note that all inline discussions are automatically cohorted based on the course's is_cohorted setting. """ - config = self.metadata.get("cohort_config") + config = self.cohort_config if config is None: return set() @@ -396,13 +486,13 @@ class CourseDescriptor(SequenceDescriptor): @property - def is_new(self): + def is_newish(self): """ - Returns if the course has been flagged as new in the metadata. If + Returns if the course has been flagged as new. If there is no flag, return a heuristic value considering the announcement and the start dates. """ - flag = self.metadata.get('is_new', None) + flag = self.is_new if flag is None: # Use a heuristic if the course has not been flagged announcement, start, now = self._sorting_dates() @@ -422,8 +512,8 @@ class CourseDescriptor(SequenceDescriptor): @property def sorting_score(self): """ - Returns a number that can be used to sort the courses according - the how "new"" they are. 
The "newness"" score is computed using a + Returns a tuple that can be used to sort the courses according + the how "new" they are. The "newness" score is computed using a heuristic that takes into account the announcement and (advertized) start dates of the course if available. @@ -448,12 +538,13 @@ class CourseDescriptor(SequenceDescriptor): def to_datetime(timestamp): return datetime(*timestamp[:6]) - def get_date(field): - timetuple = self._try_parse_time(field) - return to_datetime(timetuple) if timetuple else None - - announcement = get_date('announcement') - start = get_date('advertised_start') or to_datetime(self.start) + announcement = self.announcement + if announcement is not None: + announcement = to_datetime(announcement) + if self.advertised_start is None or isinstance(self.advertised_start, basestring): + start = to_datetime(self.start) + else: + start = to_datetime(self.advertised_start) now = to_datetime(time.gmtime()) return announcement, start, now @@ -478,7 +569,7 @@ class CourseDescriptor(SequenceDescriptor): all_descriptors - This contains a list of all xmodules that can effect grading a student. This is used to efficiently fetch - all the xmodule state for a StudentModuleCache without walking + all the xmodule state for a ModelDataCache without walking the descriptor tree again. @@ -496,14 +587,14 @@ class CourseDescriptor(SequenceDescriptor): for c in self.get_children(): sections = [] for s in c.get_children(): - if s.metadata.get('graded', False): + if s.lms.graded: xmoduledescriptors = list(yield_descriptor_descendents(s)) xmoduledescriptors.append(s) # The xmoduledescriptors included here are only the ones that have scores. 
section_description = {'section_descriptor': s, 'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)} - section_format = s.metadata.get('format', "") + section_format = s.lms.format if s.lms.format is not None else '' graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description] all_descriptors.extend(xmoduledescriptors) @@ -544,58 +635,23 @@ class CourseDescriptor(SequenceDescriptor): @property def start_date_text(self): - parsed_advertised_start = self._try_parse_time('advertised_start') - - # If the advertised start isn't a real date string, we assume it's free - # form text... - if parsed_advertised_start is None and \ - ('advertised_start' in self.metadata): - return self.metadata['advertised_start'] - - displayed_start = parsed_advertised_start or self.start - - # If we have neither an advertised start or a real start, just return TBD - if not displayed_start: - return "TBD" - - return time.strftime("%b %d, %Y", displayed_start) + if isinstance(self.advertised_start, basestring): + return self.advertised_start + elif self.advertised_start is None and self.start is None: + return 'TBD' + else: + return time.strftime("%b %d, %Y", self.advertised_start or self.start) @property def end_date_text(self): return time.strftime("%b %d, %Y", self.end) - # An extra property is used rather than the wiki_slug/number because - # there are courses that change the number for different runs. This allows - # courses to share the same css_class across runs even if they have - # different numbers. - # - # TODO get rid of this as soon as possible or potentially build in a robust - # way to add in course-specific styling. There needs to be a discussion - # about the right way to do this, but arjun will address this ASAP. Also - # note that the courseware template needs to change when this is removed. 
- @property - def css_class(self): - return self.metadata.get('css_class', '') - - @property - def info_sidebar_name(self): - return self.metadata.get('info_sidebar_name', 'Course Handouts') - - @property - def discussion_link(self): - """TODO: This is a quick kludge to allow CS50 (and other courses) to - specify their own discussion forums as external links by specifying a - "discussion_link" in their policy JSON file. This should later get - folded in with Syllabus, Course Info, and additional Custom tabs in a - more sensible framework later.""" - return self.metadata.get('discussion_link', None) - @property def forum_posts_allowed(self): try: blackout_periods = [(parse_time(start), parse_time(end)) for start, end - in self.metadata.get('discussion_blackouts', [])] + in self.discussion_blackouts] now = time.gmtime() for start, end in blackout_periods: if start <= now <= end: @@ -605,23 +661,6 @@ class CourseDescriptor(SequenceDescriptor): return True - @property - def hide_progress_tab(self): - """TODO: same as above, intended to let internal CS50 hide the progress tab - until we get grade integration set up.""" - # Explicit comparison to True because we always want to return a bool. - return self.metadata.get('hide_progress_tab') == True - - @property - def end_of_course_survey_url(self): - """ - Pull from policy. Once we have our own survey module set up, can change this to point to an automatically - created survey for each class. - - Returns None if no url specified. 
- """ - return self.metadata.get('end_of_course_survey_url') - class TestCenterExam(object): def __init__(self, course_id, exam_name, exam_info): self.course_id = course_id @@ -707,10 +746,6 @@ class CourseDescriptor(SequenceDescriptor): def get_test_center_exam(self, exam_series_code): exams = [exam for exam in self.test_center_exams if exam.exam_series_code == exam_series_code] return exams[0] if len(exams) == 1 else None - - @property - def title(self): - return self.display_name @property def number(self): diff --git a/common/lib/xmodule/xmodule/css/annotatable/display.scss b/common/lib/xmodule/xmodule/css/annotatable/display.scss new file mode 100644 index 0000000000..308b379ec1 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/annotatable/display.scss @@ -0,0 +1,169 @@ +$border-color: #C8C8C8; +$body-font-size: em(14); + +.annotatable-header { + margin-bottom: .5em; + .annotatable-title { + font-size: em(22); + text-transform: uppercase; + padding: 2px 4px; + } +} + +.annotatable-section { + position: relative; + padding: .5em 1em; + border: 1px solid $border-color; + border-radius: .5em; + margin-bottom: .5em; + + &.shaded { background-color: #EDEDED; } + + .annotatable-section-title { + font-weight: bold; + a { font-weight: normal; } + } + .annotatable-section-body { + border-top: 1px solid $border-color; + margin-top: .5em; + padding-top: .5em; + @include clearfix; + } + + ul.instructions-template { + list-style: disc; + margin-left: 4em; + b { font-weight: bold; } + i { font-style: italic; } + code { + display: inline; + white-space: pre; + font-family: Courier New, monospace; + } + } +} + +.annotatable-toggle { + position: absolute; + right: 0; + margin: 2px 1em 2px 0; + &.expanded:after { content: " \2191" } + &.collapsed:after { content: " \2193" } +} + +.annotatable-span { + display: inline; + cursor: pointer; + + @each $highlight in ( + (yellow rgba(255,255,10,0.3) rgba(255,255,10,0.9)), + (red rgba(178,19,16,0.3) rgba(178,19,16,0.9)), + (orange 
rgba(255,165,0,0.3) rgba(255,165,0,0.9)), + (green rgba(25,255,132,0.3) rgba(25,255,132,0.9)), + (blue rgba(35,163,255,0.3) rgba(35,163,255,0.9)), + (purple rgba(115,9,178,0.3) rgba(115,9,178,0.9))) { + + $marker: nth($highlight,1); + $color: nth($highlight,2); + $selected_color: nth($highlight,3); + + @if $marker == yellow { + &.highlight { + background-color: $color; + &.selected { background-color: $selected_color; } + } + } + &.highlight-#{$marker} { + background-color: $color; + &.selected { background-color: $selected_color; } + } + } + + &.hide { + cursor: none; + background-color: inherit; + .annotatable-icon { + display: none; + } + } + + .annotatable-comment { + display: none; + } +} + +.ui-tooltip.qtip.ui-tooltip { + font-size: $body-font-size; + border: 1px solid #333; + border-radius: 1em; + background-color: rgba(0,0,0,.85); + color: #fff; + -webkit-font-smoothing: antialiased; + + .ui-tooltip-titlebar { + font-size: em(16); + color: inherit; + background-color: transparent; + padding: 5px 10px; + border: none; + .ui-tooltip-title { + padding: 5px 0px; + border-bottom: 2px solid #333; + font-weight: bold; + } + .ui-tooltip-icon { + right: 10px; + background: #333; + } + .ui-state-hover { + color: inherit; + border: 1px solid #ccc; + } + } + .ui-tooltip-content { + color: inherit; + font-size: em(14); + text-align: left; + font-weight: 400; + padding: 0 10px 10px 10px; + background-color: transparent; + } + p { + color: inherit; + line-height: normal; + } +} + +.ui-tooltip.qtip.ui-tooltip-annotatable { + max-width: 375px; + .ui-tooltip-content { + padding: 0 10px; + .annotatable-comment { + display: block; + margin: 0px 0px 10px 0; + max-height: 225px; + overflow: auto; + } + .annotatable-reply { + display: block; + border-top: 2px solid #333; + padding: 5px 0; + margin: 0; + text-align: center; + } + } + &:after { + content: ''; + display: inline-block; + position: absolute; + bottom: -20px; + left: 50%; + height: 0; + width: 0; + margin-left: -5px; + 
border: 10px solid transparent; + border-top-color: rgba(0, 0, 0, .85); + } +} + + diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index d40bdb556e..ab23bc1b48 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -40,8 +40,16 @@ section.problem { @include clearfix; label.choicegroup_correct{ - text:after{ + &:after{ content: url('../images/correct-icon.png'); + margin-left:15px + } + } + + label.choicegroup_incorrect{ + &:after{ + content: url('../images/incorrect-icon.png'); + margin-left:15px; } } @@ -52,6 +60,7 @@ section.problem { .indicator_container { float: left; width: 25px; + height: 1px; margin-right: 15px; } @@ -69,7 +78,7 @@ section.problem { } text { - display: block; + display: inline; margin-left: 25px; } } @@ -227,6 +236,15 @@ section.problem { background: url('../images/correct-icon.png') center center no-repeat; height: 20px; position: relative; + top: 3px; + width: 25px; + } + + &.partially-correct { + @include inline-block(); + background: url('../images/partially-correct-icon.png') center center no-repeat; + height: 20px; + position: relative; top: 6px; width: 25px; } @@ -237,7 +255,7 @@ section.problem { height: 20px; width: 20px; position: relative; - top: 6px; + top: 3px; } } @@ -802,4 +820,91 @@ section.problem { display: none; } } + + .annotation-input { + $yellow: rgba(255,255,10,0.3); + + border: 1px solid #ccc; + border-radius: 1em; + margin: 0 0 1em 0; + + .annotation-header { + font-weight: bold; + border-bottom: 1px solid #ccc; + padding: .5em 1em; + } + .annotation-body { padding: .5em 1em; } + a.annotation-return { + float: right; + font: inherit; + font-weight: normal; + } + a.annotation-return:after { content: " \2191" } + + .block, ul.tags { + margin: .5em 0; + padding: 0; + } + .block-highlight { + padding: .5em; + color: #333; + font-style: normal; + background-color: $yellow; + border: 1px solid 
darken($yellow, 10%); + } + .block-comment { font-style: italic; } + + ul.tags { + display: block; + list-style-type: none; + margin-left: 1em; + li { + display: block; + margin: 1em 0 0 0; + position: relative; + .tag { + display: inline-block; + cursor: pointer; + border: 1px solid rgb(102,102,102); + margin-left: 40px; + &.selected { + background-color: $yellow; + } + } + .tag-status { + position: absolute; + left: 0; + } + .tag-status, .tag { padding: .25em .5em; } + } + } + textarea.comment { + $num-lines-to-show: 5; + $line-height: 1.4em; + $padding: .2em; + width: 100%; + padding: $padding (2 * $padding); + line-height: $line-height; + height: ($num-lines-to-show * $line-height) + (2*$padding) - (($line-height - 1)/2); + } + .answer-annotation { display: block; margin: 0; } + + /* for debugging the input value field. enable the debug flag on the inputtype */ + .debug-value { + color: #fff; + padding: 1em; + margin: 1em 0; + background-color: #999; + border: 1px solid #000; + input[type="text"] { width: 100%; } + pre { background-color: #CCC; color: #000; } + &:before { + display: block; + content: "debug input value"; + text-transform: uppercase; + font-weight: bold; + font-size: 1.5em; + } + } + } } diff --git a/common/lib/xmodule/xmodule/css/foldit/leaderboard.scss b/common/lib/xmodule/xmodule/css/foldit/leaderboard.scss new file mode 100644 index 0000000000..5342c985c2 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/foldit/leaderboard.scss @@ -0,0 +1,20 @@ +$leaderboard: #F4F4F4; + +section.foldit { + div.folditchallenge { + table { + border: 1px solid lighten($leaderboard, 10%); + border-collapse: collapse; + margin-top: 20px; + } + th { + background: $leaderboard; + color: darken($leaderboard, 25%); + } + td { + background: lighten($leaderboard, 3%); + border-bottom: 1px solid #fff; + padding: 8px; + } + } +} diff --git a/common/lib/xmodule/xmodule/css/html/display.scss b/common/lib/xmodule/xmodule/css/html/display.scss index 956923c6d0..93138ac5a9 
100644 --- a/common/lib/xmodule/xmodule/css/html/display.scss +++ b/common/lib/xmodule/xmodule/css/html/display.scss @@ -49,10 +49,18 @@ p { em, i { font-style: italic; + + span { + font-style: italic; + } } strong, b { font-weight: bold; + + span { + font-weight: bold; + } } p + p, ul + p, ol + p { diff --git a/common/lib/xmodule/xmodule/css/poll/display.scss b/common/lib/xmodule/xmodule/css/poll/display.scss new file mode 100644 index 0000000000..cfc03bcf91 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/poll/display.scss @@ -0,0 +1,221 @@ +section.poll_question { + @media print { + display: block; + width: auto; + padding: 0; + + canvas, img { + page-break-inside: avoid; + } + } + + .inline { + display: inline; + } + + h3 { + margin-top: 0; + margin-bottom: 15px; + color: #fe57a1; + font-size: 1.9em; + + &.problem-header { + section.staff { + margin-top: 30px; + font-size: 80%; + } + } + + @media print { + display: block; + width: auto; + border-right: 0; + } + } + + p { + text-align: justify; + font-weight: bold; + } + + .poll_answer { + margin-bottom: 20px; + + &.short { + clear: both; + } + + .question { + height: auto; + clear: both; + min-height: 30px; + + &.short { + clear: none; + width: 30%; + display: inline; + float: left; + } + + .button { + -webkit-appearance: none; + -webkit-background-clip: padding-box; + -webkit-border-image: none; + -webkit-box-align: center; + -webkit-box-shadow: rgb(255, 255, 255) 0px 1px 0px 0px inset; + -webkit-font-smoothing: antialiased; + -webkit-rtl-ordering: logical; + -webkit-user-select: text; + -webkit-writing-mode: horizontal-tb; + background-clip: padding-box; + background-color: rgb(238, 238, 238); + background-image: -webkit-linear-gradient(top, rgb(238, 238, 238), rgb(210, 210, 210)); + border-bottom-color: rgb(202, 202, 202); + border-bottom-left-radius: 3px; + border-bottom-right-radius: 3px; + border-bottom-style: solid; + border-bottom-width: 1px; + border-left-color: rgb(202, 202, 202); + 
border-left-style: solid; + border-left-width: 1px; + border-right-color: rgb(202, 202, 202); + border-right-style: solid; + border-right-width: 1px; + border-top-color: rgb(202, 202, 202); + border-top-left-radius: 3px; + border-top-right-radius: 3px; + border-top-style: solid; + border-top-width: 1px; + box-shadow: rgb(255, 255, 255) 0px 1px 0px 0px inset; + box-sizing: border-box; + color: rgb(51, 51, 51); + cursor: pointer; + + /* display: inline-block; */ + display: inline; + float: left; + + font-family: 'Open Sans', Verdana, Geneva, sans-serif; + font-size: 13px; + font-style: normal; + font-variant: normal; + font-weight: bold; + + letter-spacing: normal; + line-height: 25.59375px; + margin-bottom: 15px; + margin: 0px; + padding: 0px; + text-align: center; + text-decoration: none; + text-indent: 0px; + text-shadow: rgb(248, 248, 248) 0px 1px 0px; + text-transform: none; + vertical-align: top; + white-space: pre-line; + + width: 25px; + height: 25px; + + word-spacing: 0px; + writing-mode: lr-tb; + } + .button.answered { + -webkit-box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + background-color: rgb(29, 157, 217); + background-image: -webkit-linear-gradient(top, rgb(29, 157, 217), rgb(14, 124, 176)); + border-bottom-color: rgb(13, 114, 162); + border-left-color: rgb(13, 114, 162); + border-right-color: rgb(13, 114, 162); + border-top-color: rgb(13, 114, 162); + box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + color: rgb(255, 255, 255); + text-shadow: rgb(7, 103, 148) 0px 1px 0px; + } + + .text { + display: inline; + float: left; + width: 80%; + text-align: left; + min-height: 30px; + margin-left: 20px; + height: auto; + margin-bottom: 20px; + cursor: pointer; + + &.short { + width: 100px; + } + } + } + + .stats { + min-height: 40px; + margin-top: 20px; + clear: both; + + &.short { + margin-top: 0; + clear: none; + display: inline; + float: right; + width: 70%; + } + + .bar { + width: 75%; + height: 20px; + border: 1px solid black; + display: 
inline; + float: left; + margin-right: 10px; + + &.short { + width: 65%; + height: 20px; + margin-top: 3px; + } + + .percent { + background-color: gray; + width: 0px; + height: 20px; + + &.short { } + } + } + + .number { + width: 80px; + display: inline; + float: right; + height: 28px; + text-align: right; + + &.short { + width: 120px; + height: auto; + } + } + } + } + + .poll_answer.answered { + -webkit-box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + background-color: rgb(29, 157, 217); + background-image: -webkit-linear-gradient(top, rgb(29, 157, 217), rgb(14, 124, 176)); + border-bottom-color: rgb(13, 114, 162); + border-left-color: rgb(13, 114, 162); + border-right-color: rgb(13, 114, 162); + border-top-color: rgb(13, 114, 162); + box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + color: rgb(255, 255, 255); + text-shadow: rgb(7, 103, 148) 0px 1px 0px; + } + + .button.reset-button { + clear: both; + float: right; + } +} diff --git a/cms/djangoapps/__init__.py b/common/lib/xmodule/xmodule/css/wrapper/display.scss similarity index 100% rename from cms/djangoapps/__init__.py rename to common/lib/xmodule/xmodule/css/wrapper/display.scss diff --git a/common/lib/xmodule/xmodule/discussion_module.py b/common/lib/xmodule/xmodule/discussion_module.py index 6ddfcbe6c0..7725a88e77 100644 --- a/common/lib/xmodule/xmodule/discussion_module.py +++ b/common/lib/xmodule/xmodule/discussion_module.py @@ -3,35 +3,38 @@ from pkg_resources import resource_string, resource_listdir from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor - -import json +from xblock.core import String, Scope -class DiscussionModule(XModule): +class DiscussionFields(object): + discussion_id = String(scope=Scope.settings) + discussion_category = String(scope=Scope.settings) + discussion_target = String(scope=Scope.settings) + sort_key = String(scope=Scope.settings) + + +class DiscussionModule(DiscussionFields, XModule): js = {'coffee': [resource_string(__name__, 
'js/src/time.coffee'), resource_string(__name__, 'js/src/discussion/display.coffee')] } js_module_name = "InlineDiscussion" + + def get_html(self): context = { 'discussion_id': self.discussion_id, } return self.system.render_template('discussion/_discussion_module.html', context) - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - if isinstance(instance_state, str): - instance_state = json.loads(instance_state) - xml_data = etree.fromstring(definition['data']) - self.discussion_id = xml_data.attrib['id'] - self.title = xml_data.attrib['for'] - self.discussion_category = xml_data.attrib['discussion_category'] - - -class DiscussionDescriptor(RawDescriptor): +class DiscussionDescriptor(DiscussionFields, RawDescriptor): module_class = DiscussionModule template_dir_name = "discussion" + + # The discussion XML format uses `id` and `for` attributes, + # but these would overload other module attributes, so we prefix them + # for actual use in the code + metadata_translations = dict(RawDescriptor.metadata_translations) + metadata_translations['id'] = 'discussion_id' + metadata_translations['for'] = 'discussion_target' diff --git a/common/lib/xmodule/xmodule/editing_module.py b/common/lib/xmodule/xmodule/editing_module.py index e025179b63..b93727a96b 100644 --- a/common/lib/xmodule/xmodule/editing_module.py +++ b/common/lib/xmodule/xmodule/editing_module.py @@ -1,11 +1,16 @@ from pkg_resources import resource_string from xmodule.mako_module import MakoModuleDescriptor +from xblock.core import Scope, String import logging log = logging.getLogger(__name__) -class EditingDescriptor(MakoModuleDescriptor): +class EditingFields(object): + data = String(scope=Scope.content, default='') + + +class EditingDescriptor(EditingFields, MakoModuleDescriptor): """ Module that provides a raw editing view of its data and 
children. It does not perform any validation on its definition---just passes it along to the browser. @@ -20,7 +25,7 @@ class EditingDescriptor(MakoModuleDescriptor): def get_context(self): _context = MakoModuleDescriptor.get_context(self) # Add our specific template information (the raw data body) - _context.update({'data': self.definition.get('data', '')}) + _context.update({'data': self.data}) return _context diff --git a/common/lib/xmodule/xmodule/error_module.py b/common/lib/xmodule/xmodule/error_module.py index 2df47e05e6..d2135302da 100644 --- a/common/lib/xmodule/xmodule/error_module.py +++ b/common/lib/xmodule/xmodule/error_module.py @@ -8,6 +8,7 @@ from xmodule.x_module import XModule from xmodule.editing_module import JSONEditingDescriptor from xmodule.errortracker import exc_info_to_str from xmodule.modulestore import Location +from xblock.core import String, Scope log = logging.getLogger(__name__) @@ -20,7 +21,14 @@ log = logging.getLogger(__name__) # decides whether to create a staff or not-staff module. -class ErrorModule(XModule): +class ErrorFields(object): + contents = String(scope=Scope.content) + error_msg = String(scope=Scope.content) + display_name = String(scope=Scope.settings) + + +class ErrorModule(ErrorFields, XModule): + def get_html(self): '''Show an error to staff. TODO (vshnayder): proper style, divs, etc. @@ -28,12 +36,12 @@ class ErrorModule(XModule): # staff get to see all the details return self.system.render_template('module-error.html', { 'staff_access': True, - 'data': self.definition['data']['contents'], - 'error': self.definition['data']['error_msg'], + 'data': self.contents, + 'error': self.error_msg, }) -class NonStaffErrorModule(XModule): +class NonStaffErrorModule(ErrorFields, XModule): def get_html(self): '''Show an error to a student. TODO (vshnayder): proper style, divs, etc. 
@@ -46,7 +54,7 @@ class NonStaffErrorModule(XModule): }) -class ErrorDescriptor(JSONEditingDescriptor): +class ErrorDescriptor(ErrorFields, JSONEditingDescriptor): """ Module that provides a raw editing view of broken xml. """ @@ -66,26 +74,22 @@ class ErrorDescriptor(JSONEditingDescriptor): name=hashlib.sha1(contents).hexdigest() ) - definition = { - 'data': { - 'error_msg': str(error_msg), - 'contents': contents, - } - } - # real metadata stays in the content, but add a display name - metadata = {'display_name': 'Error: ' + location.name} + model_data = { + 'error_msg': str(error_msg), + 'contents': contents, + 'display_name': 'Error: ' + location.name + } return ErrorDescriptor( system, - definition, - location=location, - metadata=metadata + location, + model_data, ) def get_context(self): return { 'module': self, - 'data': self.definition['data']['contents'], + 'data': self.contents, } @classmethod @@ -101,10 +105,7 @@ class ErrorDescriptor(JSONEditingDescriptor): def from_descriptor(cls, descriptor, error_msg='Error not available'): return cls._construct( descriptor.system, - json.dumps({ - 'definition': descriptor.definition, - 'metadata': descriptor.metadata, - }, indent=4), + descriptor._model_data, error_msg, location=descriptor.location, ) @@ -148,14 +149,14 @@ class ErrorDescriptor(JSONEditingDescriptor): files, etc. That would just get re-wrapped on import. ''' try: - xml = etree.fromstring(self.definition['data']['contents']) + xml = etree.fromstring(self.contents) return etree.tostring(xml, encoding='unicode') except etree.XMLSyntaxError: # still not valid. 
root = etree.Element('error') - root.text = self.definition['data']['contents'] + root.text = self.contents err_node = etree.SubElement(root, 'error_msg') - err_node.text = self.definition['data']['error_msg'] + err_node.text = self.error_msg return etree.tostring(root, encoding='unicode') diff --git a/common/lib/xmodule/xmodule/fields.py b/common/lib/xmodule/xmodule/fields.py new file mode 100644 index 0000000000..fb80752e56 --- /dev/null +++ b/common/lib/xmodule/xmodule/fields.py @@ -0,0 +1,69 @@ +import time +import logging +import re + +from datetime import timedelta +from xblock.core import ModelType + +log = logging.getLogger(__name__) + + +class Date(ModelType): + time_format = "%Y-%m-%dT%H:%M" + + def from_json(self, value): + """ + Parse an optional metadata key containing a time: if present, complain + if it doesn't parse. + Return None if not present or invalid. + """ + if value is None: + return None + + try: + return time.strptime(value, self.time_format) + except ValueError as e: + msg = "Field {0} has bad value '{1}': '{2}'".format( + self._name, value, e) + log.warning(msg) + return None + + def to_json(self, value): + """ + Convert a time struct to a string + """ + if value is None: + return None + + return time.strftime(self.time_format, value) + + +TIMEDELTA_REGEX = re.compile(r'^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) 
second(?:s)?)?$') +class Timedelta(ModelType): + def from_json(self, time_str): + """ + time_str: A string with the following components: + day[s] (optional) + hour[s] (optional) + minute[s] (optional) + second[s] (optional) + + Returns a datetime.timedelta parsed from the string + """ + parts = TIMEDELTA_REGEX.match(time_str) + if not parts: + return + parts = parts.groupdict() + time_params = {} + for (name, param) in parts.iteritems(): + if param: + time_params[name] = int(param) + return timedelta(**time_params) + + def to_json(self, value): + values = [] + for attr in ('days', 'hours', 'minutes', 'seconds'): + cur_value = getattr(value, attr, 0) + if cur_value > 0: + values.append("%d %s" % (cur_value, attr)) + return ' '.join(values) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py index ea16fee7f1..884f9e2df2 100644 --- a/common/lib/xmodule/xmodule/foldit_module.py +++ b/common/lib/xmodule/xmodule/foldit_module.py @@ -7,31 +7,46 @@ from pkg_resources import resource_string from xmodule.editing_module import EditingDescriptor from xmodule.x_module import XModule from xmodule.xml_module import XmlDescriptor +from xblock.core import Scope, Integer, String log = logging.getLogger(__name__) -class FolditModule(XModule): - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - # ooh look--I'm lazy, so hardcoding the 7.00x required level. 
- # If we need it generalized, can pull from the xml later - self.required_level = 4 - self.required_sublevel = 5 +class FolditFields(object): + # default to what Spring_7012x uses + required_level = Integer(default=4, scope=Scope.settings) + required_sublevel = Integer(default=5, scope=Scope.settings) + due = String(help="Date that this problem is due by", scope=Scope.settings, default='') + + show_basic_score = String(scope=Scope.settings, default='false') + show_leaderboard = String(scope=Scope.settings, default='false') + + +class FolditModule(FolditFields, XModule): + + css = {'scss': [resource_string(__name__, 'css/foldit/leaderboard.scss')]} + + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + """ + + Example: + + """ def parse_due_date(): """ Pull out the date, or None """ - s = self.metadata.get("due") + s = self.due if s: return parser.parse(s) else: return None - self.due_str = self.metadata.get("due", "None") - self.due = parse_due_date() + self.due_time = parse_due_date() def is_complete(self): """ @@ -46,7 +61,7 @@ class FolditModule(XModule): self.system.anonymous_student_id, self.required_level, self.required_sublevel, - self.due) + self.due_time) return complete def completed_puzzles(self): @@ -66,6 +81,17 @@ class FolditModule(XModule): PuzzleComplete.completed_puzzles(self.system.anonymous_student_id), key=lambda d: (d['set'], d['subset'])) + def puzzle_leaders(self, n=10): + """ + Returns a list of n pairs (user, score) corresponding to the top + scores; the pairs are in descending order of score. 
+ """ + from foldit.models import Score + + leaders = [(e['username'], e['score']) for e in Score.get_tops_n(10)] + leaders.sort(key=lambda x: -x[1]) + + return leaders def get_html(self): """ @@ -75,15 +101,48 @@ class FolditModule(XModule): self.required_level, self.required_sublevel) + showbasic = (self.show_basic_score.lower() == "true") + showleader = (self.show_leaderboard.lower() == "true") + context = { - 'due': self.due_str, + 'due': self.due, 'success': self.is_complete(), 'goal_level': goal_level, 'completed': self.completed_puzzles(), + 'top_scores': self.puzzle_leaders(), + 'show_basic': showbasic, + 'show_leader': showleader, + 'folditbasic': self.get_basicpuzzles_html(), + 'folditchallenge': self.get_challenge_html() } return self.system.render_template('foldit.html', context) + def get_basicpuzzles_html(self): + """ + Render html for the basic puzzle section. + """ + goal_level = '{0}-{1}'.format( + self.required_level, + self.required_sublevel) + + context = { + 'due': self.due, + 'success': self.is_complete(), + 'goal_level': goal_level, + 'completed': self.completed_puzzles(), + } + return self.system.render_template('folditbasic.html', context) + + def get_challenge_html(self): + """ + Render html for challenge (i.e., the leaderboard) + """ + + context = { + 'top_scores': self.puzzle_leaders()} + + return self.system.render_template('folditchallenge.html', context) def get_score(self): """ @@ -97,9 +156,10 @@ class FolditModule(XModule): return 1 -class FolditDescriptor(XmlDescriptor, EditingDescriptor): + +class FolditDescriptor(FolditFields, XmlDescriptor, EditingDescriptor): """ - Module for adding open ended response questions to courses + Module for adding Foldit problems to courses """ mako_template = "widgets/html-edit.html" module_class = FolditModule @@ -118,7 +178,8 @@ class FolditDescriptor(XmlDescriptor, EditingDescriptor): @classmethod def definition_from_xml(cls, xml_object, system): - """ - For now, don't need anything from the 
xml - """ - return {} + return ({}, []) + + def definition_to_xml(self): + xml_object = etree.Element('foldit') + return xml_object diff --git a/common/lib/xmodule/xmodule/gst_module.py b/common/lib/xmodule/xmodule/gst_module.py index ef1be96c84..00e8cf1f10 100644 --- a/common/lib/xmodule/xmodule/gst_module.py +++ b/common/lib/xmodule/xmodule/gst_module.py @@ -14,12 +14,18 @@ from xmodule.xml_module import XmlDescriptor from xmodule.x_module import XModule from xmodule.stringify import stringify_children from pkg_resources import resource_string +from xblock.core import String, Scope log = logging.getLogger(__name__) -class GraphicalSliderToolModule(XModule): +class GraphicalSliderToolFields(object): + render = String(scope=Scope.content) + configuration = String(scope=Scope.content) + + +class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule): ''' Graphical-Slider-Tool Module ''' @@ -43,15 +49,6 @@ class GraphicalSliderToolModule(XModule): } js_module_name = "GraphicalSliderTool" - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, **kwargs): - """ - For XML file format please look at documentation. TODO - receive - information where to store XML documentation. - """ - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - def get_html(self): """ Renders parameters to template. 
""" @@ -60,14 +57,14 @@ class GraphicalSliderToolModule(XModule): self.html_class = self.location.category self.configuration_json = self.build_configuration_json() params = { - 'gst_html': self.substitute_controls(self.definition['render']), + 'gst_html': self.substitute_controls(self.render), 'element_id': self.html_id, 'element_class': self.html_class, 'configuration_json': self.configuration_json } - self.content = self.system.render_template( + content = self.system.render_template( 'graphical_slider_tool.html', params) - return self.content + return content def substitute_controls(self, html_string): """ Substitutes control elements (slider, textbox and plot) in @@ -139,10 +136,10 @@ class GraphicalSliderToolModule(XModule): # added for interface compatibility with xmltodict.parse # class added for javascript's part purposes return json.dumps(xmltodict.parse('' + self.definition['configuration'] + '')) + '">' + self.configuration + '')) -class GraphicalSliderToolDescriptor(MakoModuleDescriptor, XmlDescriptor): +class GraphicalSliderToolDescriptor(GraphicalSliderToolFields, MakoModuleDescriptor, XmlDescriptor): module_class = GraphicalSliderToolModule template_dir_name = 'graphical_slider_tool' @@ -177,14 +174,14 @@ class GraphicalSliderToolDescriptor(MakoModuleDescriptor, XmlDescriptor): return { 'render': parse('render'), 'configuration': parse('configuration') - } + }, [] def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' xml_object = etree.Element('graphical_slider_tool') def add_child(k): - child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_str = '<{tag}>{body}'.format(tag=k, body=getattr(self, k)) child_node = etree.fromstring(child_str) xml_object.append(child_node) diff --git a/common/lib/xmodule/xmodule/html_module.py b/common/lib/xmodule/xmodule/html_module.py index 456ea3cf10..e9cec32e3e 100644 --- a/common/lib/xmodule/xmodule/html_module.py +++ 
b/common/lib/xmodule/xmodule/html_module.py @@ -7,10 +7,9 @@ from lxml import etree from path import path from pkg_resources import resource_string -from xmodule.contentstore.content import XASSET_SRCREF_PREFIX, StaticContent +from xblock.core import Scope, String from xmodule.editing_module import EditingDescriptor from xmodule.html_checker import check_html -from xmodule.modulestore import Location from xmodule.stringify import stringify_children from xmodule.x_module import XModule from xmodule.xml_module import XmlDescriptor, name_to_pathname @@ -18,7 +17,11 @@ from xmodule.xml_module import XmlDescriptor, name_to_pathname log = logging.getLogger("mitx.courseware") -class HtmlModule(XModule): +class HtmlFields(object): + data = String(help="Html contents to display for this module", scope=Scope.content) + + +class HtmlModule(HtmlFields, XModule): js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/html/display.coffee') @@ -28,17 +31,10 @@ class HtmlModule(XModule): css = {'scss': [resource_string(__name__, 'css/html/display.scss')]} def get_html(self): - return self.html - - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - self.html = self.definition['data'] + return self.data - -class HtmlDescriptor(XmlDescriptor, EditingDescriptor): +class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor): """ Module for putting raw html in a course """ @@ -91,7 +87,7 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): if filename is None: definition_xml = copy.deepcopy(xml_object) cls.clean_metadata_from_xml(definition_xml) - return {'data': stringify_children(definition_xml)} + return {'data': stringify_children(definition_xml)}, [] else: # html is special. 
cls.filename_extension is 'xml', but # if 'filename' is in the definition, that means to load @@ -105,8 +101,6 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): filepath = "{base}/{name}.html".format(base=base, name=filename) #log.debug("looking for html file for {0} at {1}".format(location, filepath)) - - # VS[compat] # TODO (cpennington): If the file doesn't exist at the right path, # give the class a chance to fix it up. The file will be written out @@ -135,7 +129,7 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): # for Fall 2012 LMS migration: keep filename (and unmangled filename) definition['filename'] = [filepath, filename] - return definition + return definition, [] except (ResourceNotFoundError) as err: msg = 'Unable to load file contents at path {0}: {1} '.format( @@ -151,19 +145,18 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): string to filename.html. ''' try: - return etree.fromstring(self.definition['data']) + return etree.fromstring(self.data) except etree.XMLSyntaxError: pass # Not proper format. 
Write html to file, return an empty tag pathname = name_to_pathname(self.url_name) - pathdir = path(pathname).dirname() filepath = u'{category}/{pathname}.html'.format(category=self.category, pathname=pathname) resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True) with resource_fs.open(filepath, 'w') as file: - file.write(self.definition['data'].encode('utf-8')) + file.write(self.data.encode('utf-8')) # write out the relative name relname = path(pathname).basename() @@ -175,8 +168,11 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): @property def editable_metadata_fields(self): """Remove any metadata from the editable fields which have their own editor or shouldn't be edited by user.""" - subset = [field for field in super(HtmlDescriptor,self).editable_metadata_fields - if field not in ['empty']] + subset = super(HtmlDescriptor, self).editable_metadata_fields + + if 'empty' in subset: + del subset['empty'] + return subset diff --git a/common/lib/xmodule/xmodule/js/fixtures/annotatable.html b/common/lib/xmodule/xmodule/js/fixtures/annotatable.html new file mode 100644 index 0000000000..61020d95e8 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/fixtures/annotatable.html @@ -0,0 +1,35 @@ +
              +
              +
              +
              First Annotation Exercise
              +
              +
              +
              + Instructions + Collapse Instructions +
              +
              +

              The main goal of this exercise is to start practicing the art of slow reading.

              +
              +
              +
              +
              + Guided Discussion + Hide Annotations +
              +
              +
              + |87 No, those who are really responsible are Zeus and Fate [Moira] and the Fury [Erinys] who roams in the mist.
              + |88 They are the ones who
              + |100 He [= Zeus], making a formal declaration [eukhesthai], spoke up at a meeting of all the gods and said:
              + |101 “hear me, all gods and all goddesses,
              + |113 but he swore a great oath. + And right then and there
              +
              +
              +
              + +
              Return to Annotation
              +
              Return to Annotation
              +
              Return to Annotation
              + diff --git a/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee new file mode 100644 index 0000000000..3adb028f97 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee @@ -0,0 +1,9 @@ +describe 'Annotatable', -> + beforeEach -> + loadFixtures 'annotatable.html' + describe 'constructor', -> + el = $('.xmodule_display.xmodule_AnnotatableModule') + beforeEach -> + @annotatable = new Annotatable(el) + it 'works', -> + expect(1).toBe(1) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee b/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee index 9b8062d60d..5161e658e7 100644 --- a/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee +++ b/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee @@ -119,13 +119,13 @@ describe 'MarkdownEditingDescriptor', ->

              The answer is correct if it is within a specified numerical tolerance of the expected answer.

              Enter the numerical value of Pi:

              - +

              Enter the approximate value of 502*9:

              - + @@ -147,6 +147,20 @@ describe 'MarkdownEditingDescriptor', ->
              + """) + it 'will convert 0 as a numerical response (instead of string response)', -> + data = MarkdownEditingDescriptor.markdownToXml(""" + Enter 0 with a tolerance: + = 0 +- .02 + """) + expect(data).toEqual(""" +

              Enter 0 with a tolerance:

              + + + + + +
              """) it 'converts multiple choice to xml', -> data = MarkdownEditingDescriptor.markdownToXml("""A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets. diff --git a/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee new file mode 100644 index 0000000000..2ad49ae6d7 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee @@ -0,0 +1,197 @@ +class @Annotatable + _debug: false + + # selectors for the annotatable xmodule + toggleAnnotationsSelector: '.annotatable-toggle-annotations' + toggleInstructionsSelector: '.annotatable-toggle-instructions' + instructionsSelector: '.annotatable-instructions' + sectionSelector: '.annotatable-section' + spanSelector: '.annotatable-span' + replySelector: '.annotatable-reply' + + # these selectors are for responding to events from the annotation capa problem type + problemXModuleSelector: '.xmodule_CapaModule' + problemSelector: 'section.problem' + problemInputSelector: 'section.problem .annotation-input' + problemReturnSelector: 'section.problem .annotation-return' + + constructor: (el) -> + console.log 'loaded Annotatable' if @_debug + @el = el + @$el = $(el) + @init() + + $: (selector) -> + $(selector, @el) + + init: () -> + @initEvents() + @initTips() + + initEvents: () -> + # Initialize toggle handlers for the instructions and annotations sections + [@annotationsHidden, @instructionsHidden] = [false, false] + @$(@toggleAnnotationsSelector).bind 'click', @onClickToggleAnnotations + @$(@toggleInstructionsSelector).bind 'click', @onClickToggleInstructions + + # Initialize handler for 'reply to annotation' events that scroll to + # the associated problem. The reply buttons are part of the tooltip + # content. 
It's important that the tooltips be configured to render + # as descendants of the annotation module and *not* the document.body. + @$el.delegate @replySelector, 'click', @onClickReply + + # Initialize handler for 'return to annotation' events triggered from problems. + # 1) There are annotationinput capa problems rendered on the page + # 2) Each one has an embedded return link (see annotation capa problem template). + # Since the capa problem injects HTML content via AJAX, the best we can do is + # is let the click events bubble up to the body and handle them there. + $('body').delegate @problemReturnSelector, 'click', @onClickReturn + + initTips: () -> + # tooltips are used to display annotations for highlighted text spans + @$(@spanSelector).each (index, el) => + $(el).qtip(@getSpanTipOptions el) + + getSpanTipOptions: (el) -> + content: + title: + text: @makeTipTitle(el) + text: @makeTipContent(el) + position: + my: 'bottom center' # of tooltip + at: 'top center' # of target + target: $(el) # where the tooltip was triggered (i.e. 
the annotation span) + container: @$el + adjust: + y: -5 + show: + event: 'click mouseenter' + solo: true + hide: + event: 'click mouseleave' + delay: 500, + fixed: true # don't hide the tooltip if it is moused over + style: + classes: 'ui-tooltip-annotatable' + events: + show: @onShowTip + + onClickToggleAnnotations: (e) => @toggleAnnotations() + + onClickToggleInstructions: (e) => @toggleInstructions() + + onClickReply: (e) => @replyTo(e.currentTarget) + + onClickReturn: (e) => @returnFrom(e.currentTarget) + + onShowTip: (event, api) => + event.preventDefault() if @annotationsHidden + + getSpanForProblemReturn: (el) -> + problem_id = $(@problemReturnSelector).index(el) + @$(@spanSelector).filter("[data-problem-id='#{problem_id}']") + + getProblem: (el) -> + problem_id = @getProblemId(el) + $(@problemSelector).has(@problemInputSelector).eq(problem_id) + + getProblemId: (el) -> + $(el).data('problem-id') + + toggleAnnotations: () -> + hide = (@annotationsHidden = not @annotationsHidden) + @toggleAnnotationButtonText hide + @toggleSpans hide + @toggleTips hide + + toggleTips: (hide) -> + visible = @findVisibleTips() + @hideTips visible + + toggleAnnotationButtonText: (hide) -> + buttonText = (if hide then 'Show' else 'Hide')+' Annotations' + @$(@toggleAnnotationsSelector).text(buttonText) + + toggleInstructions: () -> + hide = (@instructionsHidden = not @instructionsHidden) + @toggleInstructionsButton hide + @toggleInstructionsText hide + + toggleInstructionsButton: (hide) -> + txt = (if hide then 'Expand' else 'Collapse')+' Instructions' + cls = (if hide then ['expanded', 'collapsed'] else ['collapsed','expanded']) + @$(@toggleInstructionsSelector).text(txt).removeClass(cls[0]).addClass(cls[1]) + + toggleInstructionsText: (hide) -> + slideMethod = (if hide then 'slideUp' else 'slideDown') + @$(@instructionsSelector)[slideMethod]() + + toggleSpans: (hide) -> + @$(@spanSelector).toggleClass 'hide', hide, 250 + + replyTo: (buttonEl) -> + offset = -20 + el = 
@getProblem buttonEl + if el.length > 0 + @scrollTo(el, @afterScrollToProblem, offset) + else + console.log('problem not found. event: ', e) if @_debug + + returnFrom: (buttonEl) -> + offset = -200 + el = @getSpanForProblemReturn buttonEl + if el.length > 0 + @scrollTo(el, @afterScrollToSpan, offset) + else + console.log('span not found. event:', e) if @_debug + + scrollTo: (el, after, offset = -20) -> + $('html,body').scrollTo(el, { + duration: 500 + onAfter: @_once => after?.call this, el + offset: offset + }) if $(el).length > 0 + + afterScrollToProblem: (problem_el) -> + problem_el.effect 'highlight', {}, 500 + + afterScrollToSpan: (span_el) -> + span_el.addClass 'selected', 400, 'swing', -> + span_el.removeClass 'selected', 400, 'swing' + + makeTipContent: (el) -> + (api) => + text = $(el).data('comment-body') + comment = @createComment(text) + problem_id = @getProblemId(el) + reply = @createReplyLink(problem_id) + $(comment).add(reply) + + makeTipTitle: (el) -> + (api) => + title = $(el).data('comment-title') + (if title then title else 'Commentary') + + createComment: (text) -> + $("
              #{text}
              ") + + createReplyLink: (problem_id) -> + $("Reply to Annotation") + + findVisibleTips: () -> + visible = [] + @$(@spanSelector).each (index, el) -> + api = $(el).qtip('api') + tip = $(api?.elements.tooltip) + if tip.is(':visible') + visible.push el + visible + + hideTips: (elements) -> + $(elements).qtip('hide') + + _once: (fn) -> + done = false + return => + fn.call this unless done + done = true diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee index 57ff85298c..158c2b98d0 100644 --- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee @@ -76,6 +76,24 @@ class @Problem # TODO: Some logic to dynamically adjust polling rate based on queuelen window.queuePollerID = window.setTimeout(@poll, 1000) + + # Use this if you want to make an ajax call on the input type object + # static method so you don't have to instantiate a Problem in order to use it + # Input: + # url: the AJAX url of the problem + # input_id: the input_id of the input you would like to make the call on + # NOTE: the id is the ${id} part of "input_${id}" during rendering + # If this function is passed the entire prefixed id, the backend may have trouble + # finding the correct input + # dispatch: string that indicates how this data should be handled by the inputtype + # callback: the function that will be called once the AJAX call has been completed. + # It will be passed a response object + @inputAjax: (url, input_id, dispatch, data, callback) -> + data['dispatch'] = dispatch + data['input_id'] = input_id + $.postWithPrefix "#{url}/input_ajax", data, callback + + render: (content) -> if content @el.html(content) @@ -262,9 +280,8 @@ class @Problem save: => Logger.log 'problem_save', @answers $.postWithPrefix "#{@url}/problem_save", @answers, (response) => - if response.success - saveMessage = "Your answers have been saved but not graded. 
Hit 'Check' to grade them." - @gentle_alert saveMessage + saveMessage = response.msg + @gentle_alert saveMessage @updateProgress response refreshMath: (event, element) => diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee index fd0391450b..c749d65b45 100644 --- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee @@ -1,6 +1,61 @@ class @Rubric constructor: () -> + @initialize: (location) -> + $('.rubric').data("location", location) + $('input[class="score-selection"]').change @tracking_callback + # set up the hotkeys + $(window).unbind('keydown', @keypress_callback) + $(window).keydown @keypress_callback + # display the 'current' carat + @categories = $('.rubric-category') + @category = $(@categories.first()) + @category.prepend('> ') + @category_index = 0 + + + @keypress_callback: (event) => + # don't try to do this when user is typing in a text input + if $(event.target).is('input, textarea') + return + # for when we select via top row + if event.which >= 48 and event.which <= 57 + selected = event.which - 48 + # for when we select via numpad + else if event.which >= 96 and event.which <= 105 + selected = event.which - 96 + # we don't want to do anything since we haven't pressed a number + else + return + + # if we actually have a current category (not past the end) + if(@category_index <= @categories.length) + # find the valid selections for this category + inputs = $("input[name='score-selection-#{@category_index}']") + max_score = inputs.length - 1 + + if selected > max_score or selected < 0 + return + inputs.filter("input[value=#{selected}]").click() + + # move to the next category + old_category_text = @category.html().substring(5) + @category.html(old_category_text) + @category_index++ + @category = $(@categories[@category_index]) + @category.prepend('> ') + + 
@tracking_callback: (event) -> + target_selection = $(event.target).val() + # chop off the beginning of the name so that we can get the number of the category + category = $(event.target).data("category") + location = $('.rubric').data('location') + # probably want the original problem location as well + + data = {location: location, selection: target_selection, category: category} + Logger.log 'rubric_select', data + + # finds the scores for each rubric category @get_score_list: () => # find the number of categories: @@ -34,6 +89,7 @@ class @CombinedOpenEnded constructor: (element) -> @element=element @reinitialize(element) + $(window).keydown @keydown_handler reinitialize: (element) -> @wrapper=$(element).find('section.xmodule_CombinedOpenEndedModule') @@ -45,6 +101,9 @@ class @CombinedOpenEnded @task_count = @el.data('task-count') @task_number = @el.data('task-number') @accept_file_upload = @el.data('accept-file-upload') + @location = @el.data('location') + # set up handlers for click tracking + Rubric.initialize(@location) @allow_reset = @el.data('allow_reset') @reset_button = @$('.reset-button') @@ -89,6 +148,8 @@ class @CombinedOpenEnded @can_upload_files = false @open_ended_child= @$('.open-ended-child') + @out_of_sync_message = 'The problem state got out of sync. Try reloading the page.' 
+ if @task_number>1 @prompt_hide() else if @task_number==1 and @child_state!='initial' @@ -116,6 +177,9 @@ class @CombinedOpenEnded @submit_evaluation_button = $('.submit-evaluation-button') @submit_evaluation_button.click @message_post Collapsible.setCollapsibles(@results_container) + # make sure we still have click tracking + $('.evaluation-response a').click @log_feedback_click + $('input[name="evaluation-score"]').change @log_feedback_selection show_results: (event) => status_item = $(event.target).parent() @@ -153,7 +217,6 @@ class @CombinedOpenEnded @legend_container= $('.legend-container') message_post: (event)=> - Logger.log 'message_post', @answers external_grader_message=$(event.target).parent().parent().parent() evaluation_scoring = $(event.target).parent() @@ -182,6 +245,7 @@ class @CombinedOpenEnded $('section.evaluation').slideToggle() @message_wrapper.html(response.message_html) + $.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings) @@ -283,6 +347,7 @@ class @CombinedOpenEnded if response.success @rubric_wrapper.html(response.rubric_html) @rubric_wrapper.show() + Rubric.initialize(@location) @answer_area.html(response.student_response) @child_state = 'assessing' @find_assessment_elements() @@ -293,7 +358,12 @@ class @CombinedOpenEnded $.ajaxWithPrefix("#{@ajax_url}/save_answer",settings) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) + + keydown_handler: (e) => + # only do anything when the key pressed is the 'enter' key + if e.which == 13 && @child_state == 'assessing' && Rubric.check_complete() + @save_assessment(e) save_assessment: (event) => event.preventDefault() @@ -315,7 +385,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. 
Try reloading the page.') + @errors_area.html(@out_of_sync_message) save_hint: (event) => event.preventDefault() @@ -330,7 +400,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) skip_post_assessment: => if @child_state == 'post_assessment' @@ -342,7 +412,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) reset: (event) => event.preventDefault() @@ -362,7 +432,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) next_problem: => if @child_state == 'done' @@ -385,7 +455,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. 
Try reloading the page.') + @errors_area.html(@out_of_sync_message) gentle_alert: (msg) => if @el.find('.open-ended-alert').length @@ -404,7 +474,7 @@ class @CombinedOpenEnded $.postWithPrefix "#{@ajax_url}/check_for_score", (response) => if response.state == "done" or response.state=="post_assessment" delete window.queuePollerID - location.reload() + @reload() else window.queuePollerID = window.setTimeout(@poll, 10000) @@ -438,7 +508,9 @@ class @CombinedOpenEnded @prompt_container.toggleClass('open') if @question_header.text() == "(Hide)" new_text = "(Show)" + Logger.log 'oe_hide_question', {location: @location} else + Logger.log 'oe_show_question', {location: @location} new_text = "(Hide)" @question_header.text(new_text) @@ -454,4 +526,16 @@ class @CombinedOpenEnded @prompt_container.toggleClass('open') @question_header.text("(Show)") + log_feedback_click: (event) -> + link_text = $(event.target).html() + if link_text == 'See full feedback' + Logger.log 'oe_show_full_feedback', {} + else if link_text == 'Respond to Feedback' + Logger.log 'oe_show_respond_to_feedback', {} + else + generated_event_type = link_text.toLowerCase().replace(" ","_") + Logger.log "oe_" + generated_event_type, {} + log_feedback_selection: (event) -> + target_selection = $(event.target).val() + Logger.log 'oe_feedback_response_selected', {value: target_selection} diff --git a/common/lib/xmodule/xmodule/js/src/conditional/display.coffee b/common/lib/xmodule/xmodule/js/src/conditional/display.coffee index 33dcb29079..857424c1dc 100644 --- a/common/lib/xmodule/xmodule/js/src/conditional/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/conditional/display.coffee @@ -1,26 +1,35 @@ class @Conditional - constructor: (element) -> + constructor: (element, callerElId) -> @el = $(element).find('.conditional-wrapper') - @id = @el.data('problem-id') - @element_id = @el.attr('id') + + @callerElId = callerElId + + if callerElId isnt undefined + dependencies = @el.data('depends') + if (typeof 
dependencies is 'string') and (dependencies.length > 0) and (dependencies.indexOf(callerElId) is -1) + return + @url = @el.data('url') - @render() + @render(element) - $: (selector) -> - $(selector, @el) - - updateProgress: (response) => - if response.progress_changed - @el.attr progress: response.progress_status - @el.trigger('progressChanged') - - render: (content) -> - if content - @el.html(content) - XModule.loadModules(@el) - else + render: (element) -> $.postWithPrefix "#{@url}/conditional_get", (response) => - @el.html(response.html) - XModule.loadModules(@el) + @el.html '' + @el.append(i) for i in response.html + parentEl = $(element).parent() + parentId = parentEl.attr 'id' + + if response.message is false + if parentId.indexOf('vert') is 0 + parentEl.hide() + else + $(element).hide() + else + if parentId.indexOf('vert') is 0 + parentEl.show() + else + $(element).show() + + XModule.loadModules @el diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee index 5770238649..4bdb4bdf05 100644 --- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee +++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee @@ -175,6 +175,7 @@ class @PeerGradingProblem @prompt_container = $('.prompt-container') @rubric_container = $('.rubric-container') @flag_student_container = $('.flag-student-container') + @answer_unknown_container = $('.answer-unknown-container') @calibration_panel = $('.calibration-panel') @grading_panel = $('.grading-panel') @content_panel = $('.content-panel') @@ -208,6 +209,10 @@ class @PeerGradingProblem @interstitial_page_button = $('.interstitial-page-button') @calibration_interstitial_page_button = $('.calibration-interstitial-page-button') @flag_student_checkbox = $('.flag-checkbox') + @answer_unknown_checkbox = $('.answer-unknown-checkbox') + + $(window).keydown @keydown_handler + 
@collapse_question() Collapsible.setCollapsibles(@content_panel) @@ -249,9 +254,6 @@ class @PeerGradingProblem fetch_submission_essay: () => @backend.post('get_next_submission', {location: @location}, @render_submission) - gentle_alert: (msg) => - @grading_message.fadeIn() - @grading_message.html("

              " + msg + "

              ") construct_data: () -> data = @@ -262,6 +264,7 @@ class @PeerGradingProblem submission_key: @submission_key_input.val() feedback: @feedback_area.val() submission_flagged: @flag_student_checkbox.is(':checked') + answer_unknown: @answer_unknown_checkbox.is(':checked') return data @@ -334,6 +337,14 @@ class @PeerGradingProblem @show_submit_button() @grade = Rubric.get_total_score() + keydown_handler: (event) => + if event.which == 13 && @submit_button.is(':visible') + if @calibration + @submit_calibration_essay() + else + @submit_grade() + + ########## @@ -360,6 +371,8 @@ class @PeerGradingProblem @calibration_panel.find('.grading-text').hide() @grading_panel.find('.grading-text').hide() @flag_student_container.hide() + @answer_unknown_container.hide() + @feedback_area.val("") @submit_button.unbind('click') @@ -388,6 +401,7 @@ class @PeerGradingProblem @calibration_panel.find('.grading-text').show() @grading_panel.find('.grading-text').show() @flag_student_container.show() + @answer_unknown_container.show() @feedback_area.val("") @submit_button.unbind('click') @@ -420,6 +434,7 @@ class @PeerGradingProblem @submit_button.hide() @action_button.hide() @calibration_feedback_panel.hide() + Rubric.initialize(@location) render_calibration_feedback: (response) => @@ -466,11 +481,17 @@ class @PeerGradingProblem # And now hook up an event handler again $("input[class='score-selection']").change @graded_callback + gentle_alert: (msg) => + @grading_message.fadeIn() + @grading_message.html("

              " + msg + "

              ") + collapse_question: () => @prompt_container.slideToggle() @prompt_container.toggleClass('open') if @question_header.text() == "(Hide)" + Logger.log 'peer_grading_hide_question', {location: @location} new_text = "(Show)" else + Logger.log 'peer_grading_show_question', {location: @location} new_text = "(Hide)" @question_header.text(new_text) diff --git a/common/lib/xmodule/xmodule/js/src/poll/logme.js b/common/lib/xmodule/xmodule/js/src/poll/logme.js new file mode 100644 index 0000000000..c045757044 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/poll/logme.js @@ -0,0 +1,54 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('logme', [], function () { + var debugMode; + + // debugMode can be one of the following: + // + // true - All messages passed to logme will be written to the internal + // browser console. + // false - Suppress all output to the internal browser console. + // + // Obviously, if anywhere there is a direct console.log() call, we can't do + // anything about it. That's why use logme() - it will allow to turn off + // the output of debug information with a single change to a variable. + debugMode = true; + + return logme; + + /* + * function: logme + * + * A helper function that provides logging facilities. We don't want + * to call console.log() directly, because sometimes it is not supported + * by the browser. Also when everything is routed through this function. + * the logging output can be easily turned off. + * + * logme() supports multiple parameters. Each parameter will be passed to + * console.log() function separately. 
+ * + */ + function logme() { + var i; + + if ( + (typeof debugMode === 'undefined') || + (debugMode !== true) || + (typeof window.console === 'undefined') + ) { + return; + } + + for (i = 0; i < arguments.length; i++) { + window.console.log(arguments[i]); + } + } // End-of: function logme +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/poll/poll.js b/common/lib/xmodule/xmodule/js/src/poll/poll.js new file mode 100644 index 0000000000..a2ccbc7c03 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/poll/poll.js @@ -0,0 +1,5 @@ +window.Poll = function (el) { + RequireJS.require(['PollMain'], function (PollMain) { + new PollMain(el); + }); +}; diff --git a/common/lib/xmodule/xmodule/js/src/poll/poll_main.js b/common/lib/xmodule/xmodule/js/src/poll/poll_main.js new file mode 100644 index 0000000000..74f2a488d7 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/poll/poll_main.js @@ -0,0 +1,323 @@ +(function (requirejs, require, define) { +define('PollMain', ['logme'], function (logme) { + +PollMain.prototype = { + +'showAnswerGraph': function (poll_answers, total) { + var _this, totalValue; + + totalValue = parseFloat(total); + if (isFinite(totalValue) === false) { + return; + } + + _this = this; + + $.each(poll_answers, function (index, value) { + var numValue, percentValue; + + numValue = parseFloat(value); + if (isFinite(numValue) === false) { + return; + } + + percentValue = (numValue / totalValue) * 100.0; + + _this.answersObj[index].statsEl.show(); + _this.answersObj[index].numberEl.html('' + value + ' (' + percentValue.toFixed(1) + '%)'); + _this.answersObj[index].percentEl.css({ + 
'width': '' + percentValue.toFixed(1) + '%' + }); + }); +}, + +'submitAnswer': function (answer, answerObj) { + var _this; + + // Make sure that the user can answer a question only once. + if (this.questionAnswered === true) { + return; + } + this.questionAnswered = true; + + _this = this; + + console.log('submit answer'); + + answerObj.buttonEl.addClass('answered'); + + // Send the data to the server as an AJAX request. Attach a callback that will + // be fired on server's response. + $.postWithPrefix( + _this.ajax_url + '/' + answer, {}, + function (response) { + console.log('success! response = '); + console.log(response); + + _this.showAnswerGraph(response.poll_answers, response.total); + + if (_this.canReset === true) { + _this.resetButton.show(); + } + + // Initialize Conditional constructors. + if (_this.wrapperSectionEl !== null) { + $(_this.wrapperSectionEl).find('.xmodule_ConditionalModule').each(function (index, value) { + new window.Conditional(value, _this.id.replace(/^poll_/, '')); + }); + } + } + ); + +}, // End-of: 'submitAnswer': function (answer, answerEl) { + + +'submitReset': function () { + var _this; + + _this = this; + + console.log('submit reset'); + + // Send the data to the server as an AJAX request. Attach a callback that will + // be fired on server's response. + $.postWithPrefix( + this.ajax_url + '/' + 'reset_poll', + {}, + function (response) { + console.log('success! response = '); + console.log(response); + + if ( + (response.hasOwnProperty('status') !== true) || + (typeof response.status !== 'string') || + (response.status.toLowerCase() !== 'success')) { + return; + } + + _this.questionAnswered = false; + _this.questionEl.find('.button.answered').removeClass('answered'); + _this.questionEl.find('.stats').hide(); + _this.resetButton.hide(); + + // Initialize Conditional constructors. We will specify the third parameter as 'true' + // notifying the constructor that this is a reset operation. 
+ if (_this.wrapperSectionEl !== null) { + $(_this.wrapperSectionEl).find('.xmodule_ConditionalModule').each(function (index, value) { + new window.Conditional(value, _this.id.replace(/^poll_/, '')); + }); + } + } + ); +}, // End-of: 'submitAnswer': function (answer, answerEl) { + +'postInit': function () { + var _this; + + // Access this object inside inner functions. + _this = this; + + if ( + (this.jsonConfig.poll_answer.length > 0) && + (this.jsonConfig.answers.hasOwnProperty(this.jsonConfig.poll_answer) === false) + ) { + this.questionEl.append( + '

              Error!

              ' + + '

              XML data format changed. List of answers was modified, but poll data was not updated.

              ' + ); + + return; + } + + // Get the DOM id of the question. + this.id = this.questionEl.attr('id'); + + // Get the URL to which we will post the users answer to the question. + this.ajax_url = this.questionEl.data('ajax-url'); + + this.questionHtmlMarkup = $('
              ').html(this.jsonConfig.question).text(); + this.questionEl.append(this.questionHtmlMarkup); + + // When the user selects and answer, we will set this flag to true. + this.questionAnswered = false; + + this.answersObj = {}; + this.shortVersion = true; + + $.each(this.jsonConfig.answers, function (index, value) { + if (value.length >= 18) { + _this.shortVersion = false; + } + }); + + $.each(this.jsonConfig.answers, function (index, value) { + var answer; + + answer = {}; + + _this.answersObj[index] = answer; + + answer.el = $('
              '); + + answer.questionEl = $('
              '); + answer.buttonEl = $('
              '); + answer.textEl = $('
              '); + answer.questionEl.append(answer.buttonEl); + answer.questionEl.append(answer.textEl); + + answer.el.append(answer.questionEl); + + answer.statsEl = $('
              '); + answer.barEl = $('
              '); + answer.percentEl = $('
              '); + answer.barEl.append(answer.percentEl); + answer.numberEl = $('
              '); + answer.statsEl.append(answer.barEl); + answer.statsEl.append(answer.numberEl); + + answer.statsEl.hide(); + + answer.el.append(answer.statsEl); + + answer.textEl.html(value); + + if (_this.shortVersion === true) { + $.each(answer, function (index, value) { + if (value instanceof jQuery) { + value.addClass('short'); + } + }); + } + + answer.el.appendTo(_this.questionEl); + + answer.textEl.on('click', function () { + _this.submitAnswer(index, answer); + }); + + answer.buttonEl.on('click', function () { + _this.submitAnswer(index, answer); + }); + + if (index === _this.jsonConfig.poll_answer) { + answer.buttonEl.addClass('answered'); + _this.questionAnswered = true; + } + }); + + console.log(this.jsonConfig.reset); + + if ((typeof this.jsonConfig.reset === 'string') && (this.jsonConfig.reset.toLowerCase() === 'true')) { + this.canReset = true; + + this.resetButton = $('
              Change your vote
              '); + + if (this.questionAnswered === false) { + this.resetButton.hide(); + } + + this.resetButton.appendTo(this.questionEl); + + this.resetButton.on('click', function () { + _this.submitReset(); + }); + } else { + this.canReset = false; + } + + // If it turns out that the user already answered the question, show the answers graph. + if (this.questionAnswered === true) { + this.showAnswerGraph(this.jsonConfig.poll_answers, this.jsonConfig.total); + } +} // End-of: 'postInit': function () { +}; // End-of: PollMain.prototype = { + +return PollMain; + +function PollMain(el) { + var _this; + + this.questionEl = $(el).find('.poll_question'); + if (this.questionEl.length !== 1) { + // We require one question DOM element. + logme('ERROR: PollMain constructor requires one question DOM element.'); + + return; + } + + // Just a safety precussion. If we run this code more than once, multiple 'click' callback handlers will be + // attached to the same DOM elements. We don't want this to happen. + if (this.questionEl.attr('poll_main_processed') === 'true') { + logme( + 'ERROR: PolMain JS constructor was called on a DOM element that has already been processed once.' + ); + + return; + } + + // This element was not processed earlier. + // Make sure that next time we will not process this element a second time. + this.questionEl.attr('poll_main_processed', 'true'); + + // Access this object inside inner functions. + _this = this; + + // DOM element which contains the current poll along with any conditionals. By default we assume that such + // element is not present. We will try to find it. 
+ this.wrapperSectionEl = null; + + (function (tempEl, c1) { + while (tempEl.tagName.toLowerCase() !== 'body') { + tempEl = $(tempEl).parent()[0]; + c1 += 1; + + if ( + (tempEl.tagName.toLowerCase() === 'section') && + ($(tempEl).hasClass('xmodule_WrapperModule') === true) + ) { + _this.wrapperSectionEl = tempEl; + + break; + } else if (c1 > 50) { + // In case something breaks, and we enter an endless loop, a sane + // limit for loop iterations. + + break; + } + } + }($(el)[0], 0)); + + try { + this.jsonConfig = JSON.parse(this.questionEl.children('.poll_question_div').html()); + + $.postWithPrefix( + '' + this.questionEl.data('ajax-url') + '/' + 'get_state', {}, + function (response) { + _this.jsonConfig.poll_answer = response.poll_answer; + _this.jsonConfig.total = response.total; + + $.each(response.poll_answers, function (index, value) { + _this.jsonConfig.poll_answers[index] = value; + }); + + _this.questionEl.children('.poll_question_div').html(JSON.stringify(_this.jsonConfig)); + + _this.postInit(); + } + ); + + return; + } catch (err) { + logme( + 'ERROR: Invalid JSON config for poll ID "' + this.id + '".', + 'Error messsage: "' + err.message + '".' 
+ ); + + return; + } +} // End-of: function PollMain(el) { + +}); // End-of: define('PollMain', ['logme'], function (logme) { + +// End-of: (function (requirejs, require, define) { +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); diff --git a/common/lib/xmodule/xmodule/js/src/problem/edit.coffee b/common/lib/xmodule/xmodule/js/src/problem/edit.coffee index 2bfe483a7f..b723f230e9 100644 --- a/common/lib/xmodule/xmodule/js/src/problem/edit.coffee +++ b/common/lib/xmodule/xmodule/js/src/problem/edit.coffee @@ -231,13 +231,14 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor // replace string and numerical xml = xml.replace(/^\=\s*(.*?$)/gm, function(match, p) { var string; - var params = /(.*?)\+\-\s*(.*?$)/.exec(p); - if(parseFloat(p)) { + var floatValue = parseFloat(p); + if(!isNaN(floatValue)) { + var params = /(.*?)\+\-\s*(.*?$)/.exec(p); if(params) { - string = '\n'; + string = '\n'; string += ' \n'; } else { - string = '\n'; + string = '\n'; } string += ' \n'; string += '\n\n'; diff --git a/common/lib/xmodule/xmodule/js/src/sequence/display.coffee b/common/lib/xmodule/xmodule/js/src/sequence/display.coffee index 793e7f4f3c..0e4c9788ba 100644 --- a/common/lib/xmodule/xmodule/js/src/sequence/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/sequence/display.coffee @@ -56,7 +56,7 @@ class @Sequence element.removeClass('progress-none') .removeClass('progress-some') .removeClass('progress-done') - + switch progress when 'none' then element.addClass('progress-none') when 'in_progress' then element.addClass('progress-some') @@ -65,6 +65,11 @@ class @Sequence toggleArrows: => @$('.sequence-nav-buttons a').unbind('click') + if @contents.length == 0 + @$('.sequence-nav-buttons .prev a').addClass('disabled') + @$('.sequence-nav-buttons .next a').addClass('disabled') + return + if @position == 1 @$('.sequence-nav-buttons .prev a').addClass('disabled') else @@ -105,8 +110,8 @@ class @Sequence if (1 <= new_position) and (new_position <= 
@num_contents) Logger.log "seq_goto", old: @position, new: new_position, id: @id - - # On Sequence chage, destroy any existing polling thread + + # On Sequence chage, destroy any existing polling thread # for queued submissions, see ../capa/display.coffee if window.queuePollerID window.clearTimeout(window.queuePollerID) diff --git a/common/lib/xmodule/xmodule/js/src/video/display.coffee b/common/lib/xmodule/xmodule/js/src/video/display.coffee index 1876330340..aadafbc8d0 100644 --- a/common/lib/xmodule/xmodule/js/src/video/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/video/display.coffee @@ -4,7 +4,6 @@ class @Video @id = @el.attr('id').replace(/video_/, '') @start = @el.data('start') @end = @el.data('end') - @caption_data_dir = @el.data('caption-data-dir') @caption_asset_path = @el.data('caption-asset-path') @show_captions = @el.data('show-captions') == "true" window.player = null diff --git a/common/lib/xmodule/xmodule/js/src/wrapper/edit.coffee b/common/lib/xmodule/xmodule/js/src/wrapper/edit.coffee new file mode 100644 index 0000000000..a13c5a8bc7 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/wrapper/edit.coffee @@ -0,0 +1,10 @@ +class @WrapperDescriptor extends XModule.Descriptor + constructor: (@element) -> + console.log 'WrapperDescriptor' + @$items = $(@element).find(".vert-mod") + @$items.sortable( + update: (event, ui) => @update() + ) + + save: -> + children: $('.vert-mod li', @element).map((idx, el) -> $(el).data('id')).toArray() diff --git a/common/lib/xmodule/xmodule/mako_module.py b/common/lib/xmodule/xmodule/mako_module.py index dab5d5e85b..84db6ad779 100644 --- a/common/lib/xmodule/xmodule/mako_module.py +++ b/common/lib/xmodule/xmodule/mako_module.py @@ -1,5 +1,5 @@ -from x_module import XModuleDescriptor, DescriptorSystem -import logging +from .x_module import XModuleDescriptor, DescriptorSystem +from .modulestore.inheritance import own_metadata class MakoDescriptorSystem(DescriptorSystem): @@ -21,21 +21,21 @@ class 
MakoModuleDescriptor(XModuleDescriptor): the descriptor as the `module` parameter to that template """ - def __init__(self, system, definition=None, **kwargs): + def __init__(self, system, location, model_data): if getattr(system, 'render_template', None) is None: raise TypeError('{system} must have a render_template function' ' in order to use a MakoDescriptor'.format( system=system)) - super(MakoModuleDescriptor, self).__init__(system, definition, **kwargs) + super(MakoModuleDescriptor, self).__init__(system, location, model_data) def get_context(self): """ Return the context to render the mako template with """ - return {'module': self, - 'metadata': self.metadata, - 'editable_metadata_fields': self.editable_metadata_fields - } + return { + 'module': self, + 'editable_metadata_fields': self.editable_metadata_fields, + } def get_html(self): return self.system.render_template( @@ -44,5 +44,10 @@ class MakoModuleDescriptor(XModuleDescriptor): # cdodge: encapsulate a means to expose "editable" metadata fields (i.e. not internal system metadata) @property def editable_metadata_fields(self): - subset = [name for name in self.metadata.keys() if name not in self.system_metadata_fields] - return subset + fields = {} + for field, value in own_metadata(self).items(): + if field in self.system_metadata_fields: + continue + + fields[field] = value + return fields diff --git a/common/lib/xmodule/xmodule/modulestore/__init__.py b/common/lib/xmodule/xmodule/modulestore/__init__.py index a9df6c3504..022e016a58 100644 --- a/common/lib/xmodule/xmodule/modulestore/__init__.py +++ b/common/lib/xmodule/xmodule/modulestore/__init__.py @@ -23,6 +23,15 @@ URL_RE = re.compile(""" (@(?P[^/]+))? """, re.VERBOSE) +MISSING_SLASH_URL_RE = re.compile(""" + (?P[^:]+):/ + (?P[^/]+)/ + (?P[^/]+)/ + (?P[^/]+)/ + (?P[^@]+) + (@(?P[^/]+))? 
+ """, re.VERBOSE) + # TODO (cpennington): We should decide whether we want to expand the # list of valid characters in a location INVALID_CHARS = re.compile(r"[^\w.-]") @@ -62,6 +71,17 @@ class Location(_LocationBase): """ return Location._clean(value, INVALID_CHARS) + + @staticmethod + def clean_keeping_underscores(value): + """ + Return value, replacing INVALID_CHARS, but not collapsing multiple '_' chars. + This for cleaning asset names, as the YouTube ID's may have underscores in them, and we need the + transcript asset name to match. In the future we may want to change the behavior of _clean. + """ + return INVALID_CHARS.sub('_', value) + + @staticmethod def clean_for_url_name(value): """ @@ -164,12 +184,16 @@ class Location(_LocationBase): if isinstance(location, basestring): match = URL_RE.match(location) if match is None: - log.debug('location is instance of %s but no URL match' % basestring) - raise InvalidLocationError(location) - else: - groups = match.groupdict() - check_dict(groups) - return _LocationBase.__new__(_cls, **groups) + # cdodge: + # check for a dropped slash near the i4x:// element of the location string. This can happen with some + # redirects (e.g. edx.org -> www.edx.org which I think happens in Nginx) + match = MISSING_SLASH_URL_RE.match(location) + if match is None: + log.debug('location is instance of %s but no URL match' % basestring) + raise InvalidLocationError(location) + groups = match.groupdict() + check_dict(groups) + return _LocationBase.__new__(_cls, **groups) elif isinstance(location, (list, tuple)): if len(location) not in (5, 6): log.debug('location has wrong length') @@ -399,6 +423,7 @@ class ModuleStoreBase(ModuleStore): Set up the error-tracking logic. 
''' self._location_errors = {} # location -> ErrorLog + self.metadata_inheritance_cache = None def _get_errorlog(self, location): """ diff --git a/common/lib/xmodule/xmodule/modulestore/django.py b/common/lib/xmodule/xmodule/modulestore/django.py index 0b86c2fea4..b0a65273c7 100644 --- a/common/lib/xmodule/xmodule/modulestore/django.py +++ b/common/lib/xmodule/xmodule/modulestore/django.py @@ -33,11 +33,12 @@ def modulestore(name='default'): class_ = load_function(settings.MODULESTORE[name]['ENGINE']) options = {} + options.update(settings.MODULESTORE[name]['OPTIONS']) for key in FUNCTION_KEYS: if key in options: options[key] = load_function(options[key]) - + _MODULESTORES[name] = class_( **options ) diff --git a/common/lib/xmodule/xmodule/modulestore/draft.py b/common/lib/xmodule/xmodule/modulestore/draft.py index 81f4da2780..71922c08df 100644 --- a/common/lib/xmodule/xmodule/modulestore/draft.py +++ b/common/lib/xmodule/xmodule/modulestore/draft.py @@ -15,11 +15,11 @@ def as_draft(location): def wrap_draft(item): """ - Sets `item.metadata['is_draft']` to `True` if the item is a - draft, and false otherwise. Sets the item's location to the + Sets `item.cms.is_draft` to `True` if the item is a + draft, and `False` otherwise. 
Sets the item's location to the non-draft location in either case """ - item.metadata['is_draft'] = item.location.revision == DRAFT + item.cms.is_draft = item.location.revision == DRAFT item.location = item.location._replace(revision=None) return item @@ -118,7 +118,7 @@ class DraftModuleStore(ModuleStoreBase): """ draft_loc = as_draft(location) draft_item = self.get_item(location) - if not draft_item.metadata['is_draft']: + if not draft_item.cms.is_draft: self.clone_item(location, draft_loc) return super(DraftModuleStore, self).update_item(draft_loc, data) @@ -133,7 +133,7 @@ class DraftModuleStore(ModuleStoreBase): """ draft_loc = as_draft(location) draft_item = self.get_item(location) - if not draft_item.metadata['is_draft']: + if not draft_item.cms.is_draft: self.clone_item(location, draft_loc) return super(DraftModuleStore, self).update_children(draft_loc, children) @@ -149,7 +149,7 @@ class DraftModuleStore(ModuleStoreBase): draft_loc = as_draft(location) draft_item = self.get_item(location) - if not draft_item.metadata['is_draft']: + if not draft_item.cms.is_draft: self.clone_item(location, draft_loc) if 'is_draft' in metadata: @@ -179,13 +179,11 @@ class DraftModuleStore(ModuleStoreBase): Save a current draft to the underlying modulestore """ draft = self.get_item(location) - metadata = {} - metadata.update(draft.metadata) - metadata['published_date'] = tuple(datetime.utcnow().timetuple()) - metadata['published_by'] = published_by_id - super(DraftModuleStore, self).update_item(location, draft.definition.get('data', {})) - super(DraftModuleStore, self).update_children(location, draft.definition.get('children', [])) - super(DraftModuleStore, self).update_metadata(location, metadata) + draft.cms.published_date = datetime.utcnow() + draft.cms.published_by = published_by_id + super(DraftModuleStore, self).update_item(location, draft._model_data._kvs._data) + super(DraftModuleStore, self).update_children(location, draft._model_data._kvs._children) + 
super(DraftModuleStore, self).update_metadata(location, draft._model_data._kvs._metadata) self.delete_item(location) def unpublish(self, location): diff --git a/common/lib/xmodule/xmodule/modulestore/inheritance.py b/common/lib/xmodule/xmodule/modulestore/inheritance.py new file mode 100644 index 0000000000..d819abe367 --- /dev/null +++ b/common/lib/xmodule/xmodule/modulestore/inheritance.py @@ -0,0 +1,67 @@ +from xblock.core import Scope + +# A list of metadata that this module can inherit from its parent module +INHERITABLE_METADATA = ( + 'graded', 'start', 'due', 'graceperiod', 'showanswer', 'rerandomize', + # TODO (ichuang): used for Fall 2012 xqa server access + 'xqa_key', + # How many days early to show a course element to beta testers (float) + # intended to be set per-course, but can be overridden in for specific + # elements. Can be a float. + 'days_early_for_beta' +) + +def compute_inherited_metadata(descriptor): + """Given a descriptor, traverse all of its descendants and do metadata + inheritance. Should be called on a CourseDescriptor after importing a + course. + + NOTE: This means that there is no such thing as lazy loading at the + moment--this accesses all the children.""" + for child in descriptor.get_children(): + inherit_metadata(child, descriptor._model_data) + compute_inherited_metadata(child) + + +def inherit_metadata(descriptor, model_data): + """ + Updates this module with metadata inherited from a containing module. 
+ Only metadata specified in self.inheritable_metadata will + be inherited + """ + if not hasattr(descriptor, '_inherited_metadata'): + setattr(descriptor, '_inherited_metadata', {}) + + # Set all inheritable metadata from kwargs that are + # in self.inheritable_metadata and aren't already set in metadata + for attr in INHERITABLE_METADATA: + if attr not in descriptor._model_data and attr in model_data: + descriptor._inherited_metadata[attr] = model_data[attr] + descriptor._model_data[attr] = model_data[attr] + + +def own_metadata(module): + """ + Return a dictionary that contains only non-inherited field keys, + mapped to their values + """ + inherited_metadata = getattr(module, '_inherited_metadata', {}) + metadata = {} + for field in module.fields + module.lms.fields: + # Only save metadata that wasn't inherited + if field.scope != Scope.settings: + continue + + if field.name in inherited_metadata and module._model_data.get(field.name) == inherited_metadata.get(field.name): + continue + + if field.name not in module._model_data: + continue + + try: + metadata[field.name] = module._model_data[field.name] + except KeyError: + # Ignore any missing keys in _model_data + pass + + return metadata diff --git a/common/lib/xmodule/xmodule/modulestore/mongo.py b/common/lib/xmodule/xmodule/modulestore/mongo.py index f4db62ac31..aceebbf15f 100644 --- a/common/lib/xmodule/xmodule/modulestore/mongo.py +++ b/common/lib/xmodule/xmodule/modulestore/mongo.py @@ -1,35 +1,113 @@ import pymongo import sys import logging +import copy from bson.son import SON +from collections import namedtuple from fs.osfs import OSFS from itertools import repeat from path import path +from datetime import datetime, timedelta from importlib import import_module from xmodule.errortracker import null_error_tracker, exc_info_to_str -from xmodule.x_module import XModuleDescriptor from xmodule.mako_module import MakoDescriptorSystem +from xmodule.x_module import XModuleDescriptor from xmodule.error_module 
import ErrorDescriptor +from xblock.runtime import DbModel, KeyValueStore, InvalidScopeError +from xblock.core import Scope from . import ModuleStoreBase, Location from .draft import DraftModuleStore from .exceptions import (ItemNotFoundError, DuplicateItemError) +from .inheritance import own_metadata, INHERITABLE_METADATA, inherit_metadata + +log = logging.getLogger(__name__) # TODO (cpennington): This code currently operates under the assumption that # there is only one revision for each item. Once we start versioning inside the CMS, # that assumption will have to change +class MongoKeyValueStore(KeyValueStore): + """ + A KeyValueStore that maps keyed data access to one of the 3 data areas + known to the MongoModuleStore (data, children, and metadata) + """ + def __init__(self, data, children, metadata): + self._data = data + self._children = children + self._metadata = metadata + + def get(self, key): + if key.scope == Scope.children: + return self._children + elif key.scope == Scope.parent: + return None + elif key.scope == Scope.settings: + return self._metadata[key.field_name] + elif key.scope == Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + return self._data + else: + return self._data[key.field_name] + else: + raise InvalidScopeError(key.scope) + + def set(self, key, value): + if key.scope == Scope.children: + self._children = value + elif key.scope == Scope.settings: + self._metadata[key.field_name] = value + elif key.scope == Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + self._data = value + else: + self._data[key.field_name] = value + else: + raise InvalidScopeError(key.scope) + + def delete(self, key): + if key.scope == Scope.children: + self._children = [] + elif key.scope == Scope.settings: + if key.field_name in self._metadata: + del self._metadata[key.field_name] + elif key.scope == Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + 
self._data = None + else: + del self._data[key.field_name] + else: + raise InvalidScopeError(key.scope) + + def has(self, key): + if key.scope in (Scope.children, Scope.parent): + return True + elif key.scope == Scope.settings: + return key.field_name in self._metadata + elif key.scope == Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + return True + else: + return key.field_name in self._data + else: + return False + +MongoUsage = namedtuple('MongoUsage', 'id, def_id') + + class CachingDescriptorSystem(MakoDescriptorSystem): """ A system that has a cache of module json that it will use to load modules from, with a backup of calling to the underlying modulestore for more data + TODO (cdodge) when the 'split module store' work has been completed we can remove all + references to metadata_inheritance_tree """ def __init__(self, modulestore, module_data, default_class, resources_fs, - error_tracker, render_template): + error_tracker, render_template, metadata_inheritance_tree = None): """ modulestore: the module store that can be used to retrieve additional modules @@ -54,19 +132,45 @@ class CachingDescriptorSystem(MakoDescriptorSystem): # cdodge: other Systems have a course_id attribute defined. To keep things consistent, let's # define an attribute here as well, even though it's None self.course_id = None + self.metadata_inheritance_tree = metadata_inheritance_tree def load_item(self, location): location = Location(location) json_data = self.module_data.get(location) if json_data is None: - return self.modulestore.get_item(location) + module = self.modulestore.get_item(location) + if module is not None: + # update our own cache after going to the DB to get cache miss + self.module_data.update(module.system.module_data) + return module else: - # TODO (vshnayder): metadata inheritance is somewhat broken because mongo, doesn't - # always load an entire course. 
We're punting on this until after launch, and then - # will build a proper course policy framework. + # load the module and apply the inherited metadata try: - return XModuleDescriptor.load_from_json(json_data, self, self.default_class) + class_ = XModuleDescriptor.load_class( + json_data['location']['category'], + self.default_class + ) + definition = json_data.get('definition', {}) + metadata = json_data.get('metadata', {}) + for old_name, new_name in class_.metadata_translations.items(): + if old_name in metadata: + metadata[new_name] = metadata[old_name] + del metadata[old_name] + + kvs = MongoKeyValueStore( + definition.get('data', {}), + definition.get('children', []), + metadata, + ) + + model_data = DbModel(kvs, class_, None, MongoUsage(self.course_id, location)) + module = class_(self, location, model_data) + if self.metadata_inheritance_tree is not None: + metadata_to_inherit = self.metadata_inheritance_tree.get('parent_metadata', {}).get(location.url(), {}) + inherit_metadata(module, metadata_to_inherit) + return module except: + log.warning("Failed to load descriptor", exc_info=True) return ErrorDescriptor.from_json( json_data, self, @@ -143,6 +247,94 @@ class MongoModuleStore(ModuleStoreBase): self.error_tracker = error_tracker self.render_template = render_template + def get_metadata_inheritance_tree(self, location): + ''' + TODO (cdodge) This method can be deleted when the 'split module store' work has been completed + ''' + + # get all collections in the course, this query should not return any leaf nodes + # note this is a bit ugly as when we add new categories of containers, we have to add it here + query = { + '_id.org': location.org, + '_id.course': location.course, + '_id.category': {'$in': [ 'course', 'chapter', 'sequential', 'vertical']} + } + # we just want the Location, children, and metadata + record_filter = {'_id': 1, 'definition.children': 1, 'metadata': 1} + + # call out to the DB + resultset = self.collection.find(query, 
record_filter) + + results_by_url = {} + root = None + + # now go through the results and order them by the location url + for result in resultset: + location = Location(result['_id']) + results_by_url[location.url()] = result + if location.category == 'course': + root = location.url() + + # now traverse the tree and compute down the inherited metadata + metadata_to_inherit = {} + def _compute_inherited_metadata(url): + my_metadata = {} + # check for presence of metadata key. Note that a given module may not yet be fully formed. + # example: update_item -> update_children -> update_metadata sequence on new item create + # if we get called here without update_metadata called first then 'metadata' hasn't been set + # as we're not fully transactional at the DB layer. Same comment applies to below key name + # check + my_metadata = results_by_url[url].get('metadata', {}) + for key in my_metadata.keys(): + if key not in INHERITABLE_METADATA: + del my_metadata[key] + results_by_url[url]['metadata'] = my_metadata + + # go through all the children and recurse, but only if we have + # in the result set. 
Remember results will not contain leaf nodes + for child in results_by_url[url].get('definition',{}).get('children',[]): + if child in results_by_url: + new_child_metadata = copy.deepcopy(my_metadata) + new_child_metadata.update(results_by_url[child].get('metadata', {})) + results_by_url[child]['metadata'] = new_child_metadata + metadata_to_inherit[child] = new_child_metadata + _compute_inherited_metadata(child) + else: + # this is likely a leaf node, so let's record what metadata we need to inherit + metadata_to_inherit[child] = my_metadata + + + if root is not None: + _compute_inherited_metadata(root) + + return {'parent_metadata': metadata_to_inherit, + 'timestamp' : datetime.now()} + + def get_cached_metadata_inheritance_tree(self, location, force_refresh=False): + ''' + TODO (cdodge) This method can be deleted when the 'split module store' work has been completed + ''' + key_name = '{0}/{1}'.format(location.org, location.course) + + tree = None + if self.metadata_inheritance_cache is not None: + tree = self.metadata_inheritance_cache.get(key_name) + else: + # This is to help guard against an accident prod runtime without a cache + logging.warning('Running MongoModuleStore without metadata_inheritance_cache. 
This should not happen in production!') + + if tree is None or force_refresh: + tree = self.get_metadata_inheritance_tree(location) + if self.metadata_inheritance_cache is not None: + self.metadata_inheritance_cache.set(key_name, tree) + + return tree + + def clear_cached_metadata_inheritance_tree(self, location): + key_name = '{0}/{1}'.format(location.org, location.course) + if self.metadata_inheritance_cache is not None: + self.metadata_inheritance_cache.delete(key_name) + def _clean_item_data(self, item): """ Renames the '_id' field in item to 'location' @@ -188,7 +380,7 @@ class MongoModuleStore(ModuleStoreBase): """ Load an XModuleDescriptor from item, using the children stored in data_cache """ - data_dir = item.get('metadata', {}).get('data_dir', item['location']['course']) + data_dir = getattr(item, 'data_dir', item['location']['course']) root = self.fs_root / data_dir if not root.isdir(): @@ -196,6 +388,10 @@ class MongoModuleStore(ModuleStoreBase): resource_fs = OSFS(root) + metadata_inheritance_tree = self.get_cached_metadata_inheritance_tree(Location(item['location'])) + + # TODO (cdodge): When the 'split module store' work has been completed, we should remove + # the 'metadata_inheritance_tree' parameter system = CachingDescriptorSystem( self, data_cache, @@ -203,6 +399,7 @@ class MongoModuleStore(ModuleStoreBase): resource_fs, self.error_tracker, self.render_template, + metadata_inheritance_tree = metadata_inheritance_tree ) return system.load_item(item['location']) @@ -261,11 +458,11 @@ class MongoModuleStore(ModuleStoreBase): descendents of the queried modules for more efficient results later in the request. The depth is counted in the number of calls to get_children() to cache. None indicates to cache all descendents. 
- """ location = Location.ensure_fully_specified(location) item = self._find_one(location) - return self._load_items([item], depth)[0] + module = self._load_items([item], depth)[0] + return module def get_instance(self, course_id, location, depth=0): """ @@ -285,7 +482,8 @@ class MongoModuleStore(ModuleStoreBase): sort=[('revision', pymongo.ASCENDING)], ) - return self._load_items(list(items), depth) + modules = self._load_items(list(items), depth) + return modules def clone_item(self, source, location): """ @@ -304,16 +502,22 @@ class MongoModuleStore(ModuleStoreBase): if location.category == 'static_tab': course = self.get_course_for_item(item.location) existing_tabs = course.tabs or [] - existing_tabs.append({'type': 'static_tab', 'name': item.metadata.get('display_name'), 'url_slug': item.location.name}) + existing_tabs.append({ + 'type': 'static_tab', + 'name': item.display_name, + 'url_slug': item.location.name + }) course.tabs = existing_tabs - self.update_metadata(course.location, course.metadata) + self.update_metadata(course.location, course._model_data._kvs._metadata) return item except pymongo.errors.DuplicateKeyError: raise DuplicateItemError(location) + # recompute (and update) the metadata inheritance tree which is cached + self.get_cached_metadata_inheritance_tree(Location(location), force_refresh = True) - def get_course_for_item(self, location): + def get_course_for_item(self, location, depth=0): ''' VS[compat] cdodge: for a given Xmodule, return the course that it belongs to @@ -327,15 +531,15 @@ class MongoModuleStore(ModuleStoreBase): # know the 'name' parameter in this context, so we have # to assume there's only one item in this query even though we are not specifying a name course_search_location = ['i4x', location.org, location.course, 'course', None] - courses = self.get_items(course_search_location) + courses = self.get_items(course_search_location, depth=depth) # make sure we found exactly one match on this above course search found_cnt 
= len(courses) if found_cnt == 0: - raise BaseException('Could not find course at {0}'.format(course_search_location)) + raise Exception('Could not find course at {0}'.format(course_search_location)) if found_cnt > 1: - raise BaseException('Found more than one course at {0}. There should only be one!!! Dump = {1}'.format(course_search_location, courses)) + raise Exception('Found more than one course at {0}. There should only be one!!! Dump = {1}'.format(course_search_location, courses)) return courses[0] @@ -377,6 +581,8 @@ class MongoModuleStore(ModuleStoreBase): """ self._update_single_item(location, {'definition.children': children}) + # recompute (and update) the metadata inheritance tree which is cached + self.get_cached_metadata_inheritance_tree(Location(location), force_refresh = True) def update_metadata(self, location, metadata): """ @@ -398,10 +604,11 @@ class MongoModuleStore(ModuleStoreBase): tab['name'] = metadata.get('display_name') break course.tabs = existing_tabs - self.update_metadata(course.location, course.metadata) + self.update_metadata(course.location, own_metadata(course)) self._update_single_item(location, {'metadata': metadata}) - + # recompute (and update) the metadata inheritance tree which is cached + self.get_cached_metadata_inheritance_tree(loc, force_refresh = True) def delete_item(self, location): """ @@ -417,9 +624,11 @@ class MongoModuleStore(ModuleStoreBase): course = self.get_course_for_item(item.location) existing_tabs = course.tabs or [] course.tabs = [tab for tab in existing_tabs if tab.get('url_slug') != location.name] - self.update_metadata(course.location, course.metadata) + self.update_metadata(course.location, own_metadata(course)) self.collection.remove({'_id': Location(location).dict()}) + # recompute (and update) the metadata inheritance tree which is cached + self.get_cached_metadata_inheritance_tree(Location(location), force_refresh = True) def get_parent_locations(self, location, course_id): diff --git 
a/common/lib/xmodule/xmodule/modulestore/store_utilities.py b/common/lib/xmodule/xmodule/modulestore/store_utilities.py index 192b012bef..cb3cd375a7 100644 --- a/common/lib/xmodule/xmodule/modulestore/store_utilities.py +++ b/common/lib/xmodule/xmodule/modulestore/store_utilities.py @@ -5,128 +5,134 @@ from xmodule.modulestore.mongo import MongoModuleStore def clone_course(modulestore, contentstore, source_location, dest_location, delete_original=False): - # first check to see if the modulestore is Mongo backed - if not isinstance(modulestore, MongoModuleStore): - raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") - - # check to see if the dest_location exists as an empty course - # we need an empty course because the app layers manage the permissions and users - if not modulestore.has_item(dest_location): - raise Exception("An empty course at {0} must have already been created. Aborting...".format(dest_location)) - - # verify that the dest_location really is an empty course, which means only one - dest_modules = modulestore.get_items([dest_location.tag, dest_location.org, dest_location.course, None, None, None]) - - if len(dest_modules) != 1: - raise Exception("Course at destination {0} is not an empty course. You can only clone into an empty course. Aborting...".format(dest_location)) - - # check to see if the source course is actually there - if not modulestore.has_item(source_location): - raise Exception("Cannot find a course at {0}. 
Aborting".format(source_location)) - - # Get all modules under this namespace which is (tag, org, course) tuple - - modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) - - for module in modules: - original_loc = Location(module.location) - - if original_loc.category != 'course': - module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, - course=dest_location.course) - else: - # on the course module we also have to update the module name - module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, - course=dest_location.course, name=dest_location.name) - - print "Cloning module {0} to {1}....".format(original_loc, module.location) - - if 'data' in module.definition: - modulestore.update_item(module.location, module.definition['data']) - - # repoint children - if 'children' in module.definition: - new_children = [] - for child_loc_url in module.definition['children']: - child_loc = Location(child_loc_url) - child_loc = child_loc._replace(tag=dest_location.tag, org=dest_location.org, - course=dest_location.course) - new_children = new_children + [child_loc.url()] - - modulestore.update_children(module.location, new_children) - - # save metadata - modulestore.update_metadata(module.location, module.metadata) - - # now iterate through all of the assets and clone them - # first the thumbnails - thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) - for thumb in thumbs: - thumb_loc = Location(thumb["_id"]) - content = contentstore.find(thumb_loc) - content.location = content.location._replace(org=dest_location.org, - course=dest_location.course) - - print "Cloning thumbnail {0} to {1}".format(thumb_loc, content.location) - - contentstore.save(content) - - # now iterate through all of the assets, also updating the thumbnail pointer - - assets = contentstore.get_all_content_for_course(source_location) - for asset in assets: 
- asset_loc = Location(asset["_id"]) - content = contentstore.find(asset_loc) - content.location = content.location._replace(org=dest_location.org, - course=dest_location.course) - - # be sure to update the pointer to the thumbnail - if content.thumbnail_location is not None: - content.thumbnail_location = content.thumbnail_location._replace(org=dest_location.org, - course=dest_location.course) - - print "Cloning asset {0} to {1}".format(asset_loc, content.location) - - contentstore.save(content) - - return True - - -def delete_course(modulestore, contentstore, source_location): # first check to see if the modulestore is Mongo backed - if not isinstance(modulestore, MongoModuleStore): - raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") + if not isinstance(modulestore, MongoModuleStore): + raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") - # check to see if the source course is actually there - if not modulestore.has_item(source_location): - raise Exception("Cannot find a course at {0}. Aborting".format(source_location)) + # check to see if the dest_location exists as an empty course + # we need an empty course because the app layers manage the permissions and users + if not modulestore.has_item(dest_location): + raise Exception("An empty course at {0} must have already been created. 
Aborting...".format(dest_location)) - # first delete all of the thumbnails - thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) - for thumb in thumbs: - thumb_loc = Location(thumb["_id"]) - id = StaticContent.get_id_from_location(thumb_loc) - print "Deleting {0}...".format(id) - contentstore.delete(id) + # verify that the dest_location really is an empty course, which means only one + dest_modules = modulestore.get_items([dest_location.tag, dest_location.org, dest_location.course, None, None, None]) - # then delete all of the assets - assets = contentstore.get_all_content_for_course(source_location) - for asset in assets: - asset_loc = Location(asset["_id"]) - id = StaticContent.get_id_from_location(asset_loc) - print "Deleting {0}...".format(id) - contentstore.delete(id) + if len(dest_modules) != 1: + raise Exception("Course at destination {0} is not an empty course. You can only clone into an empty course. Aborting...".format(dest_location)) - # then delete all course modules - modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) + # check to see if the source course is actually there + if not modulestore.has_item(source_location): + raise Exception("Cannot find a course at {0}. 
Aborting".format(source_location)) - for module in modules: - if module.category != 'course': # save deleting the course module for last - print "Deleting {0}...".format(module.location) - modulestore.delete_item(module.location) + # Get all modules under this namespace which is (tag, org, course) tuple - # finally delete the top-level course module itself - print "Deleting {0}...".format(source_location) - modulestore.delete_item(source_location) + modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) - return True + for module in modules: + original_loc = Location(module.location) + + if original_loc.category != 'course': + module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, + course=dest_location.course) + else: + # on the course module we also have to update the module name + module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, + course=dest_location.course, name=dest_location.name) + + print "Cloning module {0} to {1}....".format(original_loc, module.location) + + modulestore.update_item(module.location, module._model_data._kvs._data) + + # repoint children + if module.has_children: + new_children = [] + for child_loc_url in module.children: + child_loc = Location(child_loc_url) + child_loc = child_loc._replace( + tag=dest_location.tag, + org=dest_location.org, + course=dest_location.course + ) + new_children.append(child_loc.url()) + + modulestore.update_children(module.location, new_children) + + # save metadata + modulestore.update_metadata(module.location, module._model_data._kvs._metadata) + + # now iterate through all of the assets and clone them + # first the thumbnails + thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) + for thumb in thumbs: + thumb_loc = Location(thumb["_id"]) + content = contentstore.find(thumb_loc) + content.location = content.location._replace(org=dest_location.org, + 
course=dest_location.course) + + print "Cloning thumbnail {0} to {1}".format(thumb_loc, content.location) + + contentstore.save(content) + + # now iterate through all of the assets, also updating the thumbnail pointer + + assets = contentstore.get_all_content_for_course(source_location) + for asset in assets: + asset_loc = Location(asset["_id"]) + content = contentstore.find(asset_loc) + content.location = content.location._replace(org=dest_location.org, + course=dest_location.course) + + # be sure to update the pointer to the thumbnail + if content.thumbnail_location is not None: + content.thumbnail_location = content.thumbnail_location._replace(org=dest_location.org, + course=dest_location.course) + + print "Cloning asset {0} to {1}".format(asset_loc, content.location) + + contentstore.save(content) + + return True + + +def delete_course(modulestore, contentstore, source_location, commit = False): + # first check to see if the modulestore is Mongo backed + if not isinstance(modulestore, MongoModuleStore): + raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") + + # check to see if the source course is actually there + if not modulestore.has_item(source_location): + raise Exception("Cannot find a course at {0}. 
Aborting".format(source_location)) + + # first delete all of the thumbnails + thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) + for thumb in thumbs: + thumb_loc = Location(thumb["_id"]) + id = StaticContent.get_id_from_location(thumb_loc) + print "Deleting {0}...".format(id) + if commit: + contentstore.delete(id) + + # then delete all of the assets + assets = contentstore.get_all_content_for_course(source_location) + for asset in assets: + asset_loc = Location(asset["_id"]) + id = StaticContent.get_id_from_location(asset_loc) + print "Deleting {0}...".format(id) + if commit: + contentstore.delete(id) + + # then delete all course modules + modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) + + for module in modules: + if module.category != 'course': # save deleting the course module for last + print "Deleting {0}...".format(module.location) + if commit: + modulestore.delete_item(module.location) + + # finally delete the top-level course module itself + print "Deleting {0}...".format(source_location) + if commit: + modulestore.delete_item(source_location) + + return True diff --git a/common/lib/xmodule/xmodule/modulestore/tests/factories.py b/common/lib/xmodule/xmodule/modulestore/tests/factories.py index 1259da2690..b842ffe9dd 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/factories.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/factories.py @@ -4,6 +4,7 @@ from uuid import uuid4 from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore from xmodule.timeparse import stringify_time +from xmodule.modulestore.inheritance import own_metadata def XMODULE_COURSE_CREATION(class_to_create, **kwargs): @@ -40,19 +41,18 @@ class XModuleCourseFactory(Factory): # This metadata code was copied from cms/djangoapps/contentstore/views.py if display_name is not None: - new_course.metadata['display_name'] = display_name + 
new_course.display_name = display_name - new_course.metadata['data_dir'] = uuid4().hex - new_course.metadata['start'] = stringify_time(gmtime()) + new_course.start = gmtime() new_course.tabs = [{"type": "courseware"}, - {"type": "course_info", "name": "Course Info"}, - {"type": "discussion", "name": "Discussion"}, - {"type": "wiki", "name": "Wiki"}, - {"type": "progress", "name": "Progress"}] + {"type": "course_info", "name": "Course Info"}, + {"type": "discussion", "name": "Discussion"}, + {"type": "wiki", "name": "Wiki"}, + {"type": "progress", "name": "Progress"}] # Update the data in the mongo datastore - store.update_metadata(new_course.location.url(), new_course.own_metadata) + store.update_metadata(new_course.location.url(), own_metadata(new_course)) return new_course @@ -99,17 +99,14 @@ class XModuleItemFactory(Factory): new_item = store.clone_item(template, dest_location) - # TODO: This needs to be deleted when we have proper storage for static content - new_item.metadata['data_dir'] = parent.metadata['data_dir'] - # replace the display name with an optional parameter passed in from the caller if display_name is not None: - new_item.metadata['display_name'] = display_name + new_item.display_name = display_name - store.update_metadata(new_item.location.url(), new_item.own_metadata) + store.update_metadata(new_item.location.url(), own_metadata(new_item)) if new_item.location.category not in DETACHED_CATEGORIES: - store.update_children(parent_location, parent.definition.get('children', []) + [new_item.location.url()]) + store.update_children(parent_location, parent.children + [new_item.location.url()]) return new_item diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_location.py b/common/lib/xmodule/xmodule/modulestore/tests/test_location.py index 0772951884..f0f0e8bf48 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_location.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_location.py @@ -119,11 +119,11 @@ def 
test_equality(): # All the cleaning functions should do the same thing with these general_pairs = [('', ''), - (' ', '_'), - ('abc,', 'abc_'), - ('ab fg!@//\\aj', 'ab_fg_aj'), - (u"ab\xA9", "ab_"), # no unicode allowed for now - ] + (' ', '_'), + ('abc,', 'abc_'), + ('ab fg!@//\\aj', 'ab_fg_aj'), + (u"ab\xA9", "ab_"), # no unicode allowed for now + ] def test_clean(): @@ -131,7 +131,7 @@ def test_clean(): ('a:b', 'a_b'), # no colons in non-name components ('a-b', 'a-b'), # dashes ok ('a.b', 'a.b'), # dot ok - ] + ] for input, output in pairs: assert_equals(Location.clean(input), output) @@ -141,17 +141,17 @@ def test_clean_for_url_name(): ('a:b', 'a:b'), # colons ok in names ('a-b', 'a-b'), # dashes ok in names ('a.b', 'a.b'), # dot ok in names - ] + ] for input, output in pairs: assert_equals(Location.clean_for_url_name(input), output) def test_clean_for_html(): pairs = general_pairs + [ - ("a:b", "a_b"), # no colons for html use - ("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.) - ('a.b', 'a_b'), # no dots. - ] + ("a:b", "a_b"), # no colons for html use + ("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.) + ('a.b', 'a_b'), # no dots. 
+ ] for input, output in pairs: assert_equals(Location.clean_for_html(input), output) diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py index 94ea622907..469eedac05 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py @@ -12,7 +12,7 @@ def check_path_to_location(modulestore): ("edX/toy/2012_Fall", "Overview", "Welcome", None)), ("i4x://edX/toy/chapter/Overview", ("edX/toy/2012_Fall", "Overview", None, None)), - ) + ) course_id = "edX/toy/2012_Fall" for location, expected in should_work: @@ -20,6 +20,6 @@ def check_path_to_location(modulestore): not_found = ( "i4x://edX/toy/video/WelcomeX", "i4x://edX/toy/course/NotHome" - ) + ) for location in not_found: assert_raises(ItemNotFoundError, path_to_location, modulestore, course_id, location) diff --git a/common/lib/xmodule/xmodule/modulestore/xml.py b/common/lib/xmodule/xmodule/modulestore/xml.py index 1bd27189e9..677f8b7d6a 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml.py +++ b/common/lib/xmodule/xmodule/modulestore/xml.py @@ -23,13 +23,14 @@ from xmodule.html_module import HtmlDescriptor from . import ModuleStoreBase, Location from .exceptions import ItemNotFoundError +from .inheritance import compute_inherited_metadata edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False, remove_comments=True, remove_blank_text=True) etree.set_default_parser(edx_xml_parser) -log = logging.getLogger('mitx.' + __name__) +log = logging.getLogger(__name__) # VS[compat] @@ -73,7 +74,8 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): # VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check) # tags that really need unique names--they store (or should store) state. 
- need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter', 'videosequence', 'timelimit') + need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter', + 'videosequence', 'poll_question', 'timelimit') attr = xml_data.attrib tag = xml_data.tag @@ -161,7 +163,6 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): etree.tostring(xml_data, encoding='unicode'), self, self.org, self.course, xmlstore.default_class) except Exception as err: - print err, self.load_error_modules if not self.load_error_modules: raise @@ -174,7 +175,7 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): # Normally, we don't want lots of exception traces in our logs from common # content problems. But if you're debugging the xml loading code itself, # uncomment the next line. - # log.exception(msg) + log.exception(msg) self.error_tracker(msg) err_msg = msg + "\n" + exc_info_to_str(sys.exc_info()) @@ -186,12 +187,13 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): err_msg ) - descriptor.metadata['data_dir'] = course_dir + setattr(descriptor, 'data_dir', course_dir) xmlstore.modules[course_id][descriptor.location] = descriptor - for child in descriptor.get_children(): - parent_tracker.add_parent(child.location, descriptor.location) + if hasattr(descriptor, 'children'): + for child in descriptor.get_children(): + parent_tracker.add_parent(child.location, descriptor.location) return descriptor render_template = lambda: '' @@ -318,8 +320,6 @@ class XMLModuleStore(ModuleStoreBase): # Didn't load course. Instead, save the errors elsewhere. 
self.errored_courses[course_dir] = errorlog - - def __unicode__(self): ''' String representation - for debugging @@ -345,8 +345,6 @@ class XMLModuleStore(ModuleStoreBase): log.warning(msg + " " + str(err)) return {} - - def load_course(self, course_dir, tracker): """ Load a course into this module store @@ -430,7 +428,7 @@ class XMLModuleStore(ModuleStoreBase): # breaks metadata inheritance via get_children(). Instead # (actually, in addition to, for now), we do a final inheritance pass # after we have the course descriptor. - XModuleDescriptor.compute_inherited_metadata(course_descriptor) + compute_inherited_metadata(course_descriptor) # now import all pieces of course_info which is expected to be stored # in /info or /info/ @@ -449,7 +447,6 @@ class XMLModuleStore(ModuleStoreBase): def load_extra_content(self, system, course_descriptor, category, base_dir, course_dir, url_name): - self._load_extra_content(system, course_descriptor, category, base_dir, course_dir) # then look in a override folder based on the course run @@ -460,26 +457,29 @@ class XMLModuleStore(ModuleStoreBase): def _load_extra_content(self, system, course_descriptor, category, path, course_dir): for filepath in glob.glob(path / '*'): - if not os.path.isdir(filepath): - with open(filepath) as f: - try: - html = f.read().decode('utf-8') - # tabs are referenced in policy.json through a 'slug' which is just the filename without the .html suffix - slug = os.path.splitext(os.path.basename(filepath))[0] - loc = Location('i4x', course_descriptor.location.org, course_descriptor.location.course, category, slug) - module = HtmlDescriptor(system, definition={'data': html}, **{'location': loc}) - # VS[compat]: - # Hack because we need to pull in the 'display_name' for static tabs (because we need to edit them) - # from the course policy - if category == "static_tab": - for tab in course_descriptor.tabs or []: - if tab.get('url_slug') == slug: - module.metadata['display_name'] = tab['name'] - 
module.metadata['data_dir'] = course_dir - self.modules[course_descriptor.id][module.location] = module - except Exception, e: - logging.exception("Failed to load {0}. Skipping... Exception: {1}".format(filepath, str(e))) - system.error_tracker("ERROR: " + str(e)) + if not os.path.isfile(filepath): + continue + + with open(filepath) as f: + try: + html = f.read().decode('utf-8') + # tabs are referenced in policy.json through a 'slug' which is just the filename without the .html suffix + slug = os.path.splitext(os.path.basename(filepath))[0] + loc = Location('i4x', course_descriptor.location.org, course_descriptor.location.course, category, slug) + module = HtmlDescriptor(system, loc, {'data': html}) + # VS[compat]: + # Hack because we need to pull in the 'display_name' for static tabs (because we need to edit them) + # from the course policy + if category == "static_tab": + for tab in course_descriptor.tabs or []: + if tab.get('url_slug') == slug: + module.display_name = tab['name'] + module.data_dir = course_dir + self.modules[course_descriptor.id][module.location] = module + except Exception, e: + logging.exception("Failed to load {0}. Skipping... 
Exception: {1}".format(filepath, str(e))) + system.error_tracker("ERROR: " + str(e)) + def get_instance(self, course_id, location, depth=0): """ diff --git a/common/lib/xmodule/xmodule/modulestore/xml_exporter.py b/common/lib/xmodule/xmodule/modulestore/xml_exporter.py index 55844116c6..0724211ed3 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml_exporter.py +++ b/common/lib/xmodule/xmodule/modulestore/xml_exporter.py @@ -1,53 +1,52 @@ import logging from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore +from xmodule.modulestore.inheritance import own_metadata from fs.osfs import OSFS from json import dumps def export_to_xml(modulestore, contentstore, course_location, root_dir, course_dir): - course = modulestore.get_item(course_location) + course = modulestore.get_item(course_location) - fs = OSFS(root_dir) - export_fs = fs.makeopendir(course_dir) + fs = OSFS(root_dir) + export_fs = fs.makeopendir(course_dir) - xml = course.export_to_xml(export_fs) - with export_fs.open('course.xml', 'w') as course_xml: - course_xml.write(xml) + xml = course.export_to_xml(export_fs) + with export_fs.open('course.xml', 'w') as course_xml: + course_xml.write(xml) - # export the static assets - contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/') + # export the static assets + contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/') - # export the static tabs - export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html') + # export the static tabs + export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html') - # export the custom tags - export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags') + # export the custom tags + export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags') - # export the course updates - 
export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html') + # export the course updates + export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html') - # export the grading policy - policies_dir = export_fs.makeopendir('policies') - course_run_policy_dir = policies_dir.makeopendir(course.location.name) - if 'grading_policy' in course.definition['data']: + # export the grading policy + policies_dir = export_fs.makeopendir('policies') + course_run_policy_dir = policies_dir.makeopendir(course.location.name) with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy: - grading_policy.write(dumps(course.definition['data']['grading_policy'])) + grading_policy.write(dumps(course.grading_policy)) - # export all of the course metadata in policy.json - with course_run_policy_dir.open('policy.json', 'w') as course_policy: - policy = {} - policy = {'course/' + course.location.name: course.metadata} - course_policy.write(dumps(policy)) + # export all of the course metadata in policy.json + with course_run_policy_dir.open('policy.json', 'w') as course_policy: + policy = {'course/' + course.location.name: own_metadata(course)} + course_policy.write(dumps(policy)) def export_extra_content(export_fs, modulestore, course_location, category_type, dirname, file_suffix=''): - query_loc = Location('i4x', course_location.org, course_location.course, category_type, None) - items = modulestore.get_items(query_loc) + query_loc = Location('i4x', course_location.org, course_location.course, category_type, None) + items = modulestore.get_items(query_loc) - if len(items) > 0: - item_dir = export_fs.makeopendir(dirname) - for item in items: - with item_dir.open(item.location.name + file_suffix, 'w') as item_file: - item_file.write(item.definition['data'].encode('utf8')) + if len(items) > 0: + item_dir = export_fs.makeopendir(dirname) + for item in items: + with item_dir.open(item.location.name + file_suffix, 
'w') as item_file: + item_file.write(item.data.encode('utf8')) diff --git a/common/lib/xmodule/xmodule/modulestore/xml_importer.py b/common/lib/xmodule/xmodule/modulestore/xml_importer.py index 0b77900ae9..fa232596f2 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml_importer.py +++ b/common/lib/xmodule/xmodule/modulestore/xml_importer.py @@ -8,6 +8,7 @@ from .xml import XMLModuleStore from .exceptions import DuplicateItemError from xmodule.modulestore import Location from xmodule.contentstore.content import StaticContent, XASSET_SRCREF_PREFIX +from .inheritance import own_metadata log = logging.getLogger(__name__) @@ -20,6 +21,8 @@ def import_static_content(modules, course_loc, course_data_path, static_content_ # now import all static assets static_dir = course_data_path / subpath + verbose = True + for dirname, dirnames, filenames in os.walk(static_dir): for filename in filenames: @@ -95,6 +98,79 @@ def verify_content_links(module, base_dir, static_content_store, link, remap_dic return link +def import_module_from_xml(modulestore, static_content_store, course_data_path, module, target_location_namespace=None, verbose=False): + # remap module to the new namespace + if target_location_namespace is not None: + # This looks a bit wonky as we need to also change the 'name' of the imported course to be what + # the caller passed in + if module.location.category != 'course': + module.location = module.location._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course) + else: + module.location = module.location._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course, name=target_location_namespace.name) + + # then remap children pointers since they too will be re-namespaced + if module.has_children: + children_locs = module.children + new_locs = [] + for child in children_locs: + child_loc = Location(child) + new_child_loc = 
child_loc._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course) + + new_locs.append(new_child_loc.url()) + + module.children = new_locs + + if hasattr(module, 'data'): + # cdodge: now go through any link references to '/static/' and make sure we've imported + # it as a StaticContent asset + try: + remap_dict = {} + + # use the rewrite_links as a utility means to enumerate through all links + # in the module data. We use that to load that reference into our asset store + # IMPORTANT: There appears to be a bug in lxml.rewrite_link which makes us not be able to + # do the rewrites natively in that code. + # For example, what I'm seeing is -> + # Note the dropped element closing tag. This causes the LMS to fail when rendering modules - that's + # no good, so we have to do this kludge + if isinstance(module.data, str) or isinstance(module.data, unicode): # some module 'data' fields are non strings which blows up the link traversal code + lxml_rewrite_links(module.data, lambda link: verify_content_links(module, course_data_path, + static_content_store, link, remap_dict)) + + for key in remap_dict.keys(): + module.data = module.data.replace(key, remap_dict[key]) + + except Exception: + logging.exception("failed to rewrite links on {0}. Continuing...".format(module.location)) + + modulestore.update_item(module.location, module.data) + + if module.has_children: + modulestore.update_children(module.location, module.children) + + modulestore.update_metadata(module.location, own_metadata(module)) + + +def import_course_from_xml(modulestore, static_content_store, course_data_path, module, target_location_namespace=None, verbose=False): + # cdodge: more hacks (what else). Seems like we have a problem when importing a course (like 6.002) which + # does not have any tabs defined in the policy file. 
The import goes fine and then displays fine in LMS, + # but if someone tries to add a new tab in the CMS, then the LMS barfs because it expects that - + # if there is *any* tabs - then there at least needs to be some predefined ones + if module.tabs is None or len(module.tabs) == 0: + module.tabs = [{"type": "courseware"}, + {"type": "course_info", "name": "Course Info"}, + {"type": "discussion", "name": "Discussion"}, + {"type": "wiki", "name": "Wiki"}] # note, add 'progress' when we can support it on Edge + + # a bit of a hack, but typically the "course image" which is shown on marketing pages is hard coded to /images/course_image.jpg + # so let's make sure we import in case there are no other references to it in the modules + verify_content_links(module, course_data_path, static_content_store, '/static/images/course_image.jpg') + import_module_from_xml(modulestore, static_content_store, course_data_path, module, target_location_namespace, verbose=verbose) + + def import_from_xml(store, data_dir, course_dirs=None, default_class='xmodule.raw_module.RawDescriptor', load_error_modules=True, static_content_store=None, target_location_namespace=None, verbose=False): @@ -135,7 +211,7 @@ def import_from_xml(store, data_dir, course_dirs=None, # course module is committed first into the store for module in module_store.modules[course_id].itervalues(): if module.category == 'course': - course_data_path = path(data_dir) / module.metadata['data_dir'] + course_data_path = path(data_dir) / module.data_dir course_location = module.location module = remap_namespace(module, target_location_namespace) @@ -151,10 +227,10 @@ def import_from_xml(store, data_dir, course_dirs=None, {"type": "wiki", "name": "Wiki"}] # note, add 'progress' when we can support it on Edge - store.update_item(module.location, module.definition['data']) - if 'children' in module.definition: - store.update_children(module.location, module.definition['children']) - store.update_metadata(module.location, 
dict(module.own_metadata)) + if hasattr(module, 'data'): + store.update_item(module.location, module.data) + store.update_children(module.location, module.children) + store.update_metadata(module.location, dict(own_metadata(module))) # a bit of a hack, but typically the "course image" which is shown on marketing pages is hard coded to /images/course_image.jpg # so let's make sure we import in case there are no other references to it in the modules @@ -186,8 +262,8 @@ def import_from_xml(store, data_dir, course_dirs=None, if verbose: log.debug('importing module location {0}'.format(module.location)) - if 'data' in module.definition: - module_data = module.definition['data'] + if hasattr(module, 'data'): + module_data = module.data # cdodge: now go through any link references to '/static/' and make sure we've imported # it as a StaticContent asset @@ -213,16 +289,15 @@ def import_from_xml(store, data_dir, course_dirs=None, store.update_item(module.location, module_data) - if 'children' in module.definition: - store.update_children(module.location, module.definition['children']) + if hasattr(module, 'children') and module.children != []: + store.update_children(module.location, module.children) # NOTE: It's important to use own_metadata here to avoid writing # inherited metadata everywhere. 
- store.update_metadata(module.location, dict(module.own_metadata)) + store.update_metadata(module.location, dict(own_metadata(module))) return module_store, course_items - def remap_namespace(module, target_location_namespace): if target_location_namespace is None: return module @@ -237,21 +312,21 @@ def remap_namespace(module, target_location_namespace): course=target_location_namespace.course, name=target_location_namespace.name) # then remap children pointers since they too will be re-namespaced - children_locs = module.definition.get('children') - if children_locs is not None: - new_locs = [] - for child in children_locs: - child_loc = Location(child) - new_child_loc = child_loc._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, - course=target_location_namespace.course) + if hasattr(module,'children'): + children_locs = module.children + if children_locs is not None and children_locs != []: + new_locs = [] + for child in children_locs: + child_loc = Location(child) + new_child_loc = child_loc._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course) - new_locs.append(new_child_loc.url()) + new_locs.append(new_child_loc.url()) - module.definition['children'] = new_locs + module.children = new_locs return module - def validate_category_hierarchy(module_store, course_id, parent_category, expected_child_category): err_cnt = 0 @@ -262,7 +337,7 @@ def validate_category_hierarchy(module_store, course_id, parent_category, expect parents.append(module) for parent in parents: - for child_loc in [Location(child) for child in parent.definition.get('children', [])]: + for child_loc in [Location(child) for child in parent.children]: if child_loc.category != expected_child_category: err_cnt += 1 print 'ERROR: child {0} of parent {1} was expected to be category of {2} but was {3}'.format( @@ -274,7 +349,7 @@ def validate_category_hierarchy(module_store, course_id, parent_category, 
expect def validate_data_source_path_existence(path, is_err=True, extra_msg=None): _cnt = 0 if not os.path.exists(path): - print ("{0}: Expected folder at {1}. {2}".format('ERROR' if is_err == True else 'WARNING', path, extra_msg if + print ("{0}: Expected folder at {1}. {2}".format('ERROR' if is_err == True else 'WARNING', path, extra_msg if extra_msg is not None else '')) _cnt = 1 return _cnt diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py new file mode 100644 index 0000000000..9aa77fde52 --- /dev/null +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py @@ -0,0 +1 @@ +__author__ = 'vik' diff --git a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py similarity index 74% rename from common/lib/xmodule/xmodule/combined_open_ended_modulev1.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index ce5d55d7b7..98a54601de 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -1,40 +1,23 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree from lxml.html import rewrite_links -from path import path -import os -import sys -import re - -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .x_module import XModule -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location +from xmodule.timeinfo import TimeInfo +from xmodule.capa_module import ComplexEncoder +from xmodule.editing_module import 
EditingDescriptor +from xmodule.progress import Progress +from xmodule.stringify import stringify_children +from xmodule.xml_module import XmlDescriptor import self_assessment_module import open_ended_module -from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST -from .stringify import stringify_children -import dateutil -import dateutil.parser -import datetime -from timeparse import parse_timedelta +from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST log = logging.getLogger("mitx.courseware") # Set the default number of max attempts. Should be 1 for production # Set higher for debugging/testing # attempts specified in xml definition overrides this. -MAX_ATTEMPTS = 10000 +MAX_ATTEMPTS = 1 # Set maximum available number of points. # Overriden by max_score specified in xml. @@ -55,9 +38,14 @@ ACCEPT_FILE_UPLOAD = False TRUE_DICT = ["True", True, "TRUE", "true"] HUMAN_TASK_TYPE = { - 'selfassessment' : "Self Assessment", - 'openended' : "edX Assessment", - } + 'selfassessment': "Self Assessment", + 'openended': "edX Assessment", +} + +#Default value that controls whether or not to skip basic spelling checks in the controller +#Metadata overrides this +SKIP_BASIC_CHECKS = False + class CombinedOpenEndedV1Module(): """ @@ -73,7 +61,7 @@ class CombinedOpenEndedV1Module(): 'save_assessment' -- Saves the student assessment (or external grader assessment) 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) ajax actions implemented by combined open ended module are: - 'reset' -- resets the whole combined open ended module and returns to the first child module + 'reset' -- resets the whole combined open ended module and returns to the first child moduleresource_string 'next_problem' -- moves to the next child module 'get_results' -- gets results from a given child module @@ -90,16 +78,11 @@ 
class CombinedOpenEndedV1Module(): INTERMEDIATE_DONE = 'intermediate_done' DONE = 'done' - js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'), - resource_string(__name__, 'js/src/collapsible.coffee'), - resource_string(__name__, 'js/src/javascript_loader.coffee'), - ]} - js_module_name = "CombinedOpenEnded" - - css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} + #Where the templates live for this problem + TEMPLATE_DIR = "combinedopenended" def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, metadata = None, static_data = None, **kwargs): + instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs): """ Definition file should have one or many task blocks, a rubric block, and a prompt block: @@ -136,16 +119,9 @@ class CombinedOpenEndedV1Module(): """ - self.metadata = metadata - self.display_name = metadata.get('display_name', "Open Ended") - self.rewrite_content_links = static_data.get('rewrite_content_links',"") - - - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} + self.instance_state = instance_state + self.display_name = instance_state.get('display_name', "Open Ended") + self.rewrite_content_links = static_data.get('rewrite_content_links', "") #We need to set the location here so the child modules can use it system.set('location', location) @@ -158,39 +134,28 @@ class CombinedOpenEndedV1Module(): #Overall state of the combined open ended module self.state = instance_state.get('state', self.INITIAL) - self.attempts = instance_state.get('attempts', 0) + self.student_attempts = instance_state.get('student_attempts', 0) #Allow reset is true if student has failed the criteria to move to the next child task - self.allow_reset = instance_state.get('ready_to_reset', False) - self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS)) - 
self.is_scored = self.metadata.get('is_graded', IS_SCORED) in TRUE_DICT - self.accept_file_upload = self.metadata.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT + self.ready_to_reset = instance_state.get('ready_to_reset', False) + self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS) + self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT + self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT + self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT - display_due_date_string = self.metadata.get('due', None) - if display_due_date_string is not None: - try: - self.display_due_date = dateutil.parser.parse(display_due_date_string) - except ValueError: - log.error("Could not parse due date {0} for location {1}".format(display_due_date_string, location)) - raise - else: - self.display_due_date = None + display_due_date_string = self.instance_state.get('due', None) - grace_period_string = self.metadata.get('graceperiod', None) - if grace_period_string is not None and self.display_due_date: - try: - self.grace_period = parse_timedelta(grace_period_string) - self.close_date = self.display_due_date + self.grace_period - except: - log.error("Error parsing the grace period {0} for location {1}".format(grace_period_string, location)) - raise - else: - self.grace_period = None - self.close_date = self.display_due_date + grace_period_string = self.instance_state.get('graceperiod', None) + try: + self.timeinfo = TimeInfo(display_due_date_string, grace_period_string) + except: + log.error("Error parsing due date information in location {0}".format(location)) + raise + self.display_due_date = self.timeinfo.display_due_date # Used for progress / grading. Currently get credit just for # completion (doesn't matter if you self-assessed correct/incorrect). 
- self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) + self._max_score = self.instance_state.get('max_score', MAX_SCORE) self.rubric_renderer = CombinedOpenEndedRubric(system, True) rubric_string = stringify_children(definition['rubric']) @@ -199,13 +164,15 @@ class CombinedOpenEndedV1Module(): #Static data is passed to the child modules to render self.static_data = { 'max_score': self._max_score, - 'max_attempts': self.max_attempts, + 'max_attempts': self.attempts, 'prompt': definition['prompt'], 'rubric': definition['rubric'], 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, - 'close_date' : self.close_date, - } + 'close_date': self.timeinfo.close_date, + 's3_interface': self.system.s3_interface, + 'skip_basic_checks': self.skip_basic_checks, + } self.task_xml = definition['task_xml'] self.location = location @@ -231,10 +198,10 @@ class CombinedOpenEndedV1Module(): last_response = last_response_data['response'] loaded_task_state = json.loads(current_task_state) - if loaded_task_state['state'] == self.INITIAL: - loaded_task_state['state'] = self.ASSESSING - loaded_task_state['created'] = True - loaded_task_state['history'].append({'answer': last_response}) + if loaded_task_state['child_state'] == self.INITIAL: + loaded_task_state['child_state'] = self.ASSESSING + loaded_task_state['child_created'] = True + loaded_task_state['child_history'].append({'answer': last_response}) current_task_state = json.dumps(loaded_task_state) return current_task_state @@ -248,15 +215,15 @@ class CombinedOpenEndedV1Module(): child_modules = { 'openended': open_ended_module.OpenEndedModule, 'selfassessment': self_assessment_module.SelfAssessmentModule, - } + } child_descriptors = { 'openended': open_ended_module.OpenEndedDescriptor, 'selfassessment': self_assessment_module.SelfAssessmentDescriptor, - } + } children = { 'modules': child_modules, 'descriptors': child_descriptors, - } + } return children def setup_next_task(self, 
reset=False): @@ -273,8 +240,8 @@ class CombinedOpenEndedV1Module(): self.current_task_xml = self.task_xml[self.current_task_number] if self.current_task_number > 0: - self.allow_reset = self.check_allow_reset() - if self.allow_reset: + self.ready_to_reset = self.check_allow_reset() + if self.ready_to_reset: self.current_task_number = self.current_task_number - 1 current_task_type = self.get_tag_name(self.current_task_xml) @@ -292,31 +259,34 @@ class CombinedOpenEndedV1Module(): self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system) if current_task_state is None and self.current_task_number == 0: self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, self.static_data) + self.current_task_parsed_xml, self.current_task_descriptor, + self.static_data) self.task_states.append(self.current_task.get_instance_state()) self.state = self.ASSESSING elif current_task_state is None and self.current_task_number > 0: last_response_data = self.get_last_response(self.current_task_number - 1) last_response = last_response_data['response'] current_task_state = json.dumps({ - 'state': self.ASSESSING, + 'child_state': self.ASSESSING, 'version': self.STATE_VERSION, 'max_score': self._max_score, - 'attempts': 0, - 'created': True, - 'history': [{'answer': last_response}], - }) + 'child_attempts': 0, + 'child_created': True, + 'child_history': [{'answer': last_response}], + }) self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, - instance_state=current_task_state) + self.current_task_parsed_xml, self.current_task_descriptor, + self.static_data, + instance_state=current_task_state) self.task_states.append(self.current_task.get_instance_state()) self.state = self.ASSESSING else: if self.current_task_number > 0 and not reset: current_task_state = 
self.overwrite_state(current_task_state) self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, - instance_state=current_task_state) + self.current_task_parsed_xml, self.current_task_descriptor, + self.static_data, + instance_state=current_task_state) return True @@ -327,17 +297,17 @@ class CombinedOpenEndedV1Module(): Input: None Output: the allow_reset attribute of the current module. """ - if not self.allow_reset: + if not self.ready_to_reset: if self.current_task_number > 0: last_response_data = self.get_last_response(self.current_task_number - 1) current_response_data = self.get_current_attributes(self.current_task_number) - if(current_response_data['min_score_to_attempt'] > last_response_data['score'] - or current_response_data['max_score_to_attempt'] < last_response_data['score']): + if (current_response_data['min_score_to_attempt'] > last_response_data['score'] + or current_response_data['max_score_to_attempt'] < last_response_data['score']): self.state = self.DONE - self.allow_reset = True + self.ready_to_reset = True - return self.allow_reset + return self.ready_to_reset def get_context(self): """ @@ -351,15 +321,16 @@ class CombinedOpenEndedV1Module(): context = { 'items': [{'content': task_html}], 'ajax_url': self.system.ajax_url, - 'allow_reset': self.allow_reset, + 'allow_reset': self.ready_to_reset, 'state': self.state, 'task_count': len(self.task_xml), 'task_number': self.current_task_number + 1, 'status': self.get_status(False), 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, - 'legend_list' : LEGEND_LIST, - } + 'location': self.location, + 'legend_list': LEGEND_LIST, + } return context @@ -370,7 +341,7 @@ class CombinedOpenEndedV1Module(): Output: rendered html """ context = self.get_context() - html = self.system.render_template('combined_open_ended.html', context) + html = 
self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_nonsystem(self): @@ -381,7 +352,7 @@ class CombinedOpenEndedV1Module(): Output: HTML rendered directly via Mako """ context = self.get_context() - html = self.system.render_template('combined_open_ended.html', context) + html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_base(self): @@ -428,7 +399,7 @@ class CombinedOpenEndedV1Module(): task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system) task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, - self.static_data, instance_state=task_state) + self.static_data, instance_state=task_state) last_response = task.latest_answer() last_score = task.latest_score() last_post_assessment = task.latest_post_assessment(self.system) @@ -446,21 +417,21 @@ class CombinedOpenEndedV1Module(): else: last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment) last_post_assessment = last_post_evaluation - rubric_data = task._parse_score_msg(task.history[-1].get('post_assessment', ""), self.system) + rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', ""), self.system) rubric_scores = rubric_data['rubric_scores'] grader_types = rubric_data['grader_types'] feedback_items = rubric_data['feedback_items'] - feedback_dicts = rubric_data['feedback_dicts'] + feedback_dicts = rubric_data['feedback_dicts'] grader_ids = rubric_data['grader_ids'] - submission_ids = rubric_data['submission_ids'] - elif task_type== "selfassessment": + submission_ids = rubric_data['submission_ids'] + elif task_type == "selfassessment": rubric_scores = last_post_assessment grader_types = ['SA'] feedback_items = [''] last_post_assessment = "" last_correctness = task.is_last_response_correct() max_score = task.max_score() - state = task.state + 
state = task.child_state if task_type in HUMAN_TASK_TYPE: human_task_name = HUMAN_TASK_TYPE[task_type] else: @@ -470,7 +441,7 @@ class CombinedOpenEndedV1Module(): human_state = task.HUMAN_NAMES[state] else: human_state = state - if len(grader_types)>0: + if len(grader_types) > 0: grader_type = grader_types[0] else: grader_type = "IN" @@ -492,15 +463,15 @@ class CombinedOpenEndedV1Module(): 'correct': last_correctness, 'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt, - 'rubric_scores' : rubric_scores, - 'grader_types' : grader_types, - 'feedback_items' : feedback_items, - 'grader_type' : grader_type, - 'human_grader_type' : human_grader_name, - 'feedback_dicts' : feedback_dicts, - 'grader_ids' : grader_ids, - 'submission_ids' : submission_ids, - } + 'rubric_scores': rubric_scores, + 'grader_types': grader_types, + 'feedback_items': feedback_items, + 'grader_type': grader_type, + 'human_grader_type': human_grader_name, + 'feedback_dicts': feedback_dicts, + 'grader_ids': grader_ids, + 'submission_ids': submission_ids, + } return last_response_dict def update_task_states(self): @@ -510,10 +481,10 @@ class CombinedOpenEndedV1Module(): Output: boolean indicating whether or not the task state changed. """ changed = False - if not self.allow_reset: + if not self.ready_to_reset: self.task_states[self.current_task_number] = self.current_task.get_instance_state() current_task_state = json.loads(self.task_states[self.current_task_number]) - if current_task_state['state'] == self.DONE: + if current_task_state['child_state'] == self.DONE: self.current_task_number += 1 if self.current_task_number >= (len(self.task_xml)): self.state = self.DONE @@ -543,22 +514,29 @@ class CombinedOpenEndedV1Module(): Output: Dictionary to be rendered via ajax that contains the result html. 
""" all_responses = [] - loop_up_to_task = self.current_task_number+1 - for i in xrange(0,loop_up_to_task): + loop_up_to_task = self.current_task_number + 1 + for i in xrange(0, loop_up_to_task): all_responses.append(self.get_last_response(i)) - rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['rubric_scores'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()] - grader_types = [all_responses[i]['grader_types'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['grader_types'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()] - feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['feedback_items'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()] - rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores, - grader_types, feedback_items) + rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if + len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][ + 0] in HUMAN_GRADER_TYPE.keys()] + grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if + len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][ + 0] in HUMAN_GRADER_TYPE.keys()] + feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if + len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][ + 0] in HUMAN_GRADER_TYPE.keys()] + rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), + rubric_scores, + grader_types, feedback_items) response_dict = all_responses[-1] context = { 'results': rubric_html, - 'task_name' : 'Scored Rubric', - 'class_name' : 'combined-rubric-container' + 'task_name': 'Scored Rubric', + 'class_name': 
'combined-rubric-container' } - html = self.system.render_template('combined_open_ended_results.html', context) + html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context) return {'html': html, 'success': True} def get_legend(self, get): @@ -568,9 +546,9 @@ class CombinedOpenEndedV1Module(): Output: Dictionary to be rendered via ajax that contains the result html. """ context = { - 'legend_list' : LEGEND_LIST, - } - html = self.system.render_template('combined_open_ended_legend.html', context) + 'legend_list': LEGEND_LIST, + } + html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context) return {'html': html, 'success': True} def get_results(self, get): @@ -580,15 +558,16 @@ class CombinedOpenEndedV1Module(): Output: Dictionary to be rendered via ajax that contains the result html. """ self.update_task_states() - loop_up_to_task = self.current_task_number+1 - all_responses =[] - for i in xrange(0,loop_up_to_task): + loop_up_to_task = self.current_task_number + 1 + all_responses = [] + for i in xrange(0, loop_up_to_task): all_responses.append(self.get_last_response(i)) context_list = [] for ri in all_responses: - for i in xrange(0,len(ri['rubric_scores'])): - feedback = ri['feedback_dicts'][i].get('feedback','') - rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i]) + for i in xrange(0, len(ri['rubric_scores'])): + feedback = ri['feedback_dicts'][i].get('feedback', '') + rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), + ri['rubric_scores'][i]) if rubric_data['success']: rubric_html = rubric_data['html'] else: @@ -596,24 +575,24 @@ class CombinedOpenEndedV1Module(): context = { 'rubric_html': rubric_html, 'grader_type': ri['grader_type'], - 'feedback' : feedback, - 'grader_id' : ri['grader_ids'][i], - 'submission_id' : ri['submission_ids'][i], + 
'feedback': feedback, + 'grader_id': ri['grader_ids'][i], + 'submission_id': ri['submission_ids'][i], } context_list.append(context) - feedback_table = self.system.render_template('open_ended_result_table.html', { - 'context_list' : context_list, - 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT, - 'human_grader_types' : HUMAN_GRADER_TYPE, + feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), { + 'context_list': context_list, + 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, + 'human_grader_types': HUMAN_GRADER_TYPE, 'rows': 50, 'cols': 50, }) context = { 'results': feedback_table, - 'task_name' : "Feedback", - 'class_name' : "result-container", - } - html = self.system.render_template('combined_open_ended_results.html', context) + 'task_name': "Feedback", + 'class_name': "result-container", + } + html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context) return {'html': html, 'success': True} def get_status_ajax(self, get): @@ -641,8 +620,8 @@ class CombinedOpenEndedV1Module(): 'reset': self.reset, 'get_results': self.get_results, 'get_combined_rubric': self.get_rubric, - 'get_status' : self.get_status_ajax, - 'get_legend' : self.get_legend, + 'get_status': self.get_status_ajax, + 'get_legend': self.get_legend, } if dispatch not in handlers: @@ -659,7 +638,7 @@ class CombinedOpenEndedV1Module(): Output: Dictionary to be rendered """ self.update_task_states() - return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.allow_reset} + return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.ready_to_reset} def reset(self, get): """ @@ -668,23 +647,26 @@ class CombinedOpenEndedV1Module(): Output: AJAX dictionary to tbe rendered """ if self.state != self.DONE: - if not self.allow_reset: + if not self.ready_to_reset: return self.out_of_sync_error(get) - if self.attempts > self.max_attempts: + if self.student_attempts 
> self.attempts: return { 'success': False, - 'error': 'Too many attempts.' + #This is a student_facing_error + 'error': ('You have attempted this question {0} times. ' + 'You are only allowed to attempt it {1} times.').format( + self.student_attempts, self.attempts) } self.state = self.INITIAL - self.allow_reset = False + self.ready_to_reset = False for i in xrange(0, len(self.task_xml)): self.current_task_number = i self.setup_next_task(reset=True) self.current_task.reset(self.system) self.task_states[self.current_task_number] = self.current_task.get_instance_state() self.current_task_number = 0 - self.allow_reset = False + self.ready_to_reset = False self.setup_next_task() return {'success': True, 'html': self.get_html_nonsystem()} @@ -700,9 +682,9 @@ class CombinedOpenEndedV1Module(): 'current_task_number': self.current_task_number, 'state': self.state, 'task_states': self.task_states, - 'attempts': self.attempts, - 'ready_to_reset': self.allow_reset, - } + 'student_attempts': self.student_attempts, + 'ready_to_reset': self.ready_to_reset, + } return json.dumps(state) @@ -720,11 +702,12 @@ class CombinedOpenEndedV1Module(): context = { 'status_list': status, - 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT, - 'legend_list' : LEGEND_LIST, - 'render_via_ajax' : render_via_ajax, + 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, + 'legend_list': LEGEND_LIST, + 'render_via_ajax': render_via_ajax, } - status_html = self.system.render_template("combined_open_ended_status.html", context) + status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), + context) return status_html @@ -735,7 +718,7 @@ class CombinedOpenEndedV1Module(): entirely, in which case they will be in the self.DONE state), and if it is scored or not. @return: Boolean corresponding to the above. 
""" - return (self.state == self.DONE or self.allow_reset) and self.is_scored + return (self.state == self.DONE or self.ready_to_reset) and self.is_scored def get_score(self): """ @@ -757,7 +740,7 @@ class CombinedOpenEndedV1Module(): score_dict = { 'score': score, 'total': max_score, - } + } return score_dict @@ -786,7 +769,7 @@ class CombinedOpenEndedV1Module(): return progress_object -class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): +class CombinedOpenEndedV1Descriptor(): """ Module for adding combined open ended questions """ @@ -798,8 +781,8 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "combinedopenended" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" + def __init__(self, system): + self.system =system @classmethod def definition_from_xml(cls, xml_object, system): @@ -816,7 +799,10 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): expected_children = ['task', 'rubric', 'prompt'] for child in expected_children: if len(xml_object.xpath(child)) == 0: - raise ValueError("Combined Open Ended definition must include at least one '{0}' tag".format(child)) + #This is a staff_facing_error + raise ValueError( + "Combined Open Ended definition must include at least one '{0}' tag. 
Contact the learning sciences group for assistance.".format( + child)) def parse_task(k): """Assumes that xml_object has child k""" @@ -841,4 +827,4 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): for child in ['task']: add_child(child) - return elt \ No newline at end of file + return elt diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py similarity index 56% rename from common/lib/xmodule/xmodule/combined_open_ended_rubric.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py index 7c00c5f029..bceb12e444 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py @@ -4,26 +4,26 @@ from lxml import etree log = logging.getLogger(__name__) GRADER_TYPE_IMAGE_DICT = { - '8B' : '/static/images/random_grading_icon.png', - 'SA' : '/static/images/self_assessment_icon.png', - 'PE' : '/static/images/peer_grading_icon.png', - 'ML' : '/static/images/ml_grading_icon.png', - 'IN' : '/static/images/peer_grading_icon.png', - 'BC' : '/static/images/ml_grading_icon.png', - } + 'SA': '/static/images/self_assessment_icon.png', + 'PE': '/static/images/peer_grading_icon.png', + 'ML': '/static/images/ml_grading_icon.png', + 'IN': '/static/images/peer_grading_icon.png', + 'BC': '/static/images/ml_grading_icon.png', +} HUMAN_GRADER_TYPE = { - '8B' : 'Magic-8-Ball-Assessment', - 'SA' : 'Self-Assessment', - 'PE' : 'Peer-Assessment', - 'IN' : 'Instructor-Assessment', - 'ML' : 'AI-Assessment', - 'BC' : 'AI-Assessment', - } + 'SA': 'Self-Assessment', + 'PE': 'Peer-Assessment', + 'IN': 'Instructor-Assessment', + 'ML': 'AI-Assessment', + 'BC': 'AI-Assessment', +} DO_NOT_DISPLAY = ['BC', 'IN'] -LEGEND_LIST = [{'name' : HUMAN_GRADER_TYPE[k], 'image' : GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() if k not in 
DO_NOT_DISPLAY ] +LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() + if k not in DO_NOT_DISPLAY] + class RubricParsingError(Exception): def __init__(self, msg): @@ -31,13 +31,14 @@ class RubricParsingError(Exception): class CombinedOpenEndedRubric(object): + TEMPLATE_DIR = "combinedopenended/openended" - def __init__ (self, system, view_only = False): + def __init__(self, system, view_only=False): self.has_score = False self.view_only = view_only self.system = system - def render_rubric(self, rubric_xml, score_list = None): + def render_rubric(self, rubric_xml, score_list=None): ''' render_rubric: takes in an xml string and outputs the corresponding html for that xml, given the type of rubric we're generating @@ -50,38 +51,42 @@ class CombinedOpenEndedRubric(object): success = False try: rubric_categories = self.extract_categories(rubric_xml) - if score_list and len(score_list)==len(rubric_categories): - for i in xrange(0,len(rubric_categories)): + if score_list and len(score_list) == len(rubric_categories): + for i in xrange(0, len(rubric_categories)): category = rubric_categories[i] - for j in xrange(0,len(category['options'])): - if score_list[i]==j: + for j in xrange(0, len(category['options'])): + if score_list[i] == j: rubric_categories[i]['options'][j]['selected'] = True rubric_scores = [cat['score'] for cat in rubric_categories] max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) max_score = max(max_scores) - rubric_template = 'open_ended_rubric.html' + rubric_template = '{0}/open_ended_rubric.html'.format(self.TEMPLATE_DIR) if self.view_only: - rubric_template = 'open_ended_view_only_rubric.html' + rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR) html = self.system.render_template(rubric_template, - {'categories': rubric_categories, - 'has_score': self.has_score, - 'view_only': self.view_only, - 'max_score': max_score, - 
'combined_rubric' : False - }) + {'categories': rubric_categories, + 'has_score': self.has_score, + 'view_only': self.view_only, + 'max_score': max_score, + 'combined_rubric': False + }) success = True except: - error_message = "[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml) - log.error(error_message) + #This is a staff_facing_error + error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format( + rubric_xml) + log.exception(error_message) raise RubricParsingError(error_message) - return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores} + return {'success': success, 'html': html, 'rubric_scores': rubric_scores} def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score): rubric_dict = self.render_rubric(rubric_string) success = rubric_dict['success'] rubric_feedback = rubric_dict['html'] if not success: - error_message = "Could not parse rubric : {0} for location {1}".format(rubric_string, location.url()) + #This is a staff_facing_error + error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format( + rubric_string, location.url()) log.error(error_message) raise RubricParsingError(error_message) @@ -90,14 +95,16 @@ class CombinedOpenEndedRubric(object): for category in rubric_categories: total = total + len(category['options']) - 1 if len(category['options']) > (max_score_allowed + 1): - error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}".format( + #This is a staff_facing_error + error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}. 
Contact the learning sciences group for assistance.".format( len(category['options']), max_score_allowed) log.error(error_message) raise RubricParsingError(error_message) - if total != max_score: - error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}".format( - max_score, location, total) + if int(total) != int(max_score): + #This is a staff_facing_error + error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}. Contact the learning sciences group for assistance.".format( + max_score, location, total) log.error(error_msg) raise RubricParsingError(error_msg) @@ -118,12 +125,14 @@ class CombinedOpenEndedRubric(object): categories = [] for category in element: if category.tag != 'category': - raise RubricParsingError("[extract_categories] Expected a tag: got {0} instead".format(category.tag)) + #This is a staff_facing_error + raise RubricParsingError( + "[extract_categories] Expected a tag: got {0} instead. Contact the learning sciences group for assistance.".format( + category.tag)) else: categories.append(self.extract_category(category)) return categories - def extract_category(self, category): ''' construct an individual category @@ -144,12 +153,18 @@ class CombinedOpenEndedRubric(object): self.has_score = True # if we are missing the score tag and we are expecting one elif self.has_score: - raise RubricParsingError("[extract_category] Category {0} is missing a score".format(descriptionxml.text)) + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category] Category {0} is missing a score. 
Contact the learning sciences group for assistance.".format( + descriptionxml.text)) # parse description if descriptionxml.tag != 'description': - raise RubricParsingError("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag)) + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format( + descriptionxml.tag)) description = descriptionxml.text @@ -159,7 +174,10 @@ class CombinedOpenEndedRubric(object): # parse options for option in optionsxml: if option.tag != 'option': - raise RubricParsingError("[extract_category]: expected option tag, got {0} instead".format(option.tag)) + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format( + option.tag)) else: pointstr = option.get("points") if pointstr: @@ -168,13 +186,17 @@ class CombinedOpenEndedRubric(object): try: points = int(pointstr) except ValueError: - raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead".format(pointstr)) + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format( + pointstr)) elif autonumbering: # use the generated one if we're in the right mode points = cur_points cur_points = cur_points + 1 else: - raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.") + raise Exception( + "[extract_category]: missing points attribute. 
Cannot continue to auto-create points values after a points value is explicitly defined.") selected = score == points optiontext = option.text @@ -184,48 +206,51 @@ class CombinedOpenEndedRubric(object): options = sorted(options, key=lambda option: option['points']) CombinedOpenEndedRubric.validate_options(options) - return {'description': description, 'options': options, 'score' : score} + return {'description': description, 'options': options, 'score': score} - def render_combined_rubric(self,rubric_xml,scores,score_types,feedback_types): - success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores,score_types,feedback_types) + def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types): + success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types, + feedback_types) rubric_categories = self.extract_categories(rubric_xml) max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) max_score = max(max_scores) - for i in xrange(0,len(rubric_categories)): + for i in xrange(0, len(rubric_categories)): category = rubric_categories[i] - for j in xrange(0,len(category['options'])): + for j in xrange(0, len(category['options'])): rubric_categories[i]['options'][j]['grader_types'] = [] for tuple in score_tuples: - if tuple[1] == i and tuple[2] ==j: + if tuple[1] == i and tuple[2] == j: for grader_type in tuple[3]: rubric_categories[i]['options'][j]['grader_types'].append(grader_type) - log.debug(rubric_categories) - html = self.system.render_template('open_ended_combined_rubric.html', - {'categories': rubric_categories, - 'has_score': True, - 'view_only': True, - 'max_score': max_score, - 'combined_rubric' : True, - 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT, - 'human_grader_types' : HUMAN_GRADER_TYPE, - }) + html = self.system.render_template('{0}/open_ended_combined_rubric.html'.format(self.TEMPLATE_DIR), + {'categories': rubric_categories, + 'has_score': 
True, + 'view_only': True, + 'max_score': max_score, + 'combined_rubric': True, + 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, + 'human_grader_types': HUMAN_GRADER_TYPE, + }) return html - @staticmethod def validate_options(options): ''' Validates a set of options. This can and should be extended to filter out other bad edge cases ''' if len(options) == 0: - raise RubricParsingError("[extract_category]: no options associated with this category") + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.") if len(options) == 1: return prev = options[0]['points'] for option in options[1:]: if prev == option['points']: - raise RubricParsingError("[extract_category]: found duplicate point values between two different options") + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.") else: prev = option['points'] @@ -240,36 +265,39 @@ class CombinedOpenEndedRubric(object): @return: """ success = False - if len(scores)==0: - log.error("Score length is 0.") + if len(scores) == 0: + #This is a dev_facing_error + log.error("Score length is 0 when trying to reformat rubric scores for rendering.") return success, "" if len(scores) != len(score_types) or len(feedback_types) != len(scores): - log.error("Length mismatches.") + #This is a dev_facing_error + log.error("Length mismatches when trying to reformat rubric scores for rendering. 
" + "Scores: {0}, Score Types: {1} Feedback Types: {2}".format(scores, score_types, feedback_types)) return success, "" score_lists = [] score_type_list = [] feedback_type_list = [] - for i in xrange(0,len(scores)): + for i in xrange(0, len(scores)): score_cont_list = scores[i] - for j in xrange(0,len(score_cont_list)): + for j in xrange(0, len(score_cont_list)): score_list = score_cont_list[j] score_lists.append(score_list) score_type_list.append(score_types[i][j]) feedback_type_list.append(feedback_types[i][j]) score_list_len = len(score_lists[0]) - for i in xrange(0,len(score_lists)): + for i in xrange(0, len(score_lists)): score_list = score_lists[i] - if len(score_list)!=score_list_len: + if len(score_list) != score_list_len: return success, "" score_tuples = [] - for i in xrange(0,len(score_lists)): - for j in xrange(0,len(score_lists[i])): - tuple = [1,j,score_lists[i][j],[],[]] - score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples,tuple) + for i in xrange(0, len(score_lists)): + for j in xrange(0, len(score_lists[i])): + tuple = [1, j, score_lists[i][j], [], []] + score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, tuple) score_tuples[tup_ind][0] += 1 score_tuples[tup_ind][3].append(score_type_list[i]) score_tuples[tup_ind][4].append(feedback_type_list[i]) @@ -289,18 +317,12 @@ class CombinedOpenEndedRubric(object): category = tuple[1] score = tuple[2] tup_ind = -1 - for t in xrange(0,len(tuples)): + for t in xrange(0, len(tuples)): if tuples[t][1] == category and tuples[t][2] == score: tup_ind = t break if tup_ind == -1: - tuples.append([0,category,score,[],[]]) - tup_ind = len(tuples)-1 + tuples.append([0, category, score, [], []]) + tup_ind = len(tuples) - 1 return tuples, tup_ind - - - - - - diff --git a/lms/djangoapps/open_ended_grading/controller_query_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py similarity index 76% rename from 
lms/djangoapps/open_ended_grading/controller_query_service.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py index 83d5617bd2..08f2a95387 100644 --- a/lms/djangoapps/open_ended_grading/controller_query_service.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py @@ -1,14 +1,5 @@ -import json import logging -import requests -from requests.exceptions import RequestException, ConnectionError, HTTPError -import sys -from xmodule.grading_service_module import GradingService, GradingServiceError - -from django.conf import settings -from django.http import HttpResponse, Http404 -from xmodule.x_module import ModuleSystem -from mitxmako.shortcuts import render_to_string +from .grading_service_module import GradingService log = logging.getLogger(__name__) @@ -17,9 +8,12 @@ class ControllerQueryService(GradingService): """ Interface to staff grading backend. """ - def __init__(self, config): - config['system'] = ModuleSystem(None, None, None, render_to_string, None) + + def __init__(self, config, system): + config['system'] = system super(ControllerQueryService, self).__init__(config) + self.url = config['url'] + config['grading_controller'] + self.login_url = self.url + '/login/' self.check_eta_url = self.url + '/get_submission_eta/' self.is_unique_url = self.url + '/is_name_unique/' self.combined_notifications_url = self.url + '/combined_notifications/' @@ -66,7 +60,7 @@ class ControllerQueryService(GradingService): def get_flagged_problem_list(self, course_id): params = { 'course_id': course_id, - } + } response = self.get(self.flagged_problem_list_url, params) return response @@ -77,7 +71,21 @@ class ControllerQueryService(GradingService): 'student_id': student_id, 'submission_id': submission_id, 'action_type': action_type - } + } response = self.post(self.take_action_on_flags_url, params) return response + + +def convert_seconds_to_human_readable(seconds): + if seconds < 60: + human_string 
= "{0} seconds".format(seconds) + elif seconds < 60 * 60: + human_string = "{0} minutes".format(round(seconds / 60, 1)) + elif seconds < (24 * 60 * 60): + human_string = "{0} hours".format(round(seconds / (60 * 60), 1)) + else: + human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1)) + + eta_string = "{0}".format(human_string) + return eta_string diff --git a/common/lib/xmodule/xmodule/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py similarity index 83% rename from common/lib/xmodule/xmodule/grading_service_module.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py index 9af28a72c5..f3f6568b1e 100644 --- a/common/lib/xmodule/xmodule/grading_service_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py @@ -5,7 +5,7 @@ import requests from requests.exceptions import RequestException, ConnectionError, HTTPError import sys -from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError +from .combined_open_ended_rubric import CombinedOpenEndedRubric from lxml import etree log = logging.getLogger(__name__) @@ -19,11 +19,10 @@ class GradingService(object): """ Interface to staff grading backend. """ + def __init__(self, config): self.username = config['username'] self.password = config['password'] - self.url = config['url'] - self.login_url = self.url + '/login/' self.session = requests.session() self.system = config['system'] @@ -36,8 +35,8 @@ class GradingService(object): Returns the decoded json dict of the response. 
""" response = self.session.post(self.login_url, - {'username': self.username, - 'password': self.password, }) + {'username': self.username, + 'password': self.password, }) response.raise_for_status() @@ -49,10 +48,12 @@ class GradingService(object): """ try: op = lambda: self.session.post(url, data=data, - allow_redirects=allow_redirects) + allow_redirects=allow_redirects) r = self._try_with_login(op) except (RequestException, ConnectionError, HTTPError) as err: # reraise as promised GradingServiceError, but preserve stacktrace. + #This is a dev_facing_error + log.error("Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)) raise GradingServiceError, str(err), sys.exc_info()[2] return r.text @@ -63,12 +64,14 @@ class GradingService(object): """ log.debug(params) op = lambda: self.session.get(url, - allow_redirects=allow_redirects, - params=params) + allow_redirects=allow_redirects, + params=params) try: r = self._try_with_login(op) except (RequestException, ConnectionError, HTTPError) as err: # reraise as promised GradingServiceError, but preserve stacktrace. + #This is a dev_facing_error + log.error("Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)) raise GradingServiceError, str(err), sys.exc_info()[2] return r.text @@ -90,7 +93,7 @@ class GradingService(object): r = self._login() if r and not r.get('success'): log.warning("Couldn't log into staff_grading backend. Response: %s", - r) + r) # try again response = operation() response.raise_for_status() @@ -121,11 +124,13 @@ class GradingService(object): return response_json # if we can't parse the rubric into HTML, except etree.XMLSyntaxError, RubricParsingError: + #This is a dev_facing_error log.exception("Cannot parse rubric string. 
Raw string: {0}" .format(rubric)) return {'success': False, 'error': 'Error displaying submission'} except ValueError: + #This is a dev_facing_error log.exception("Error parsing response: {0}".format(response)) return {'success': False, 'error': "Error displaying submission"} diff --git a/common/lib/xmodule/xmodule/open_ended_image_submission.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py similarity index 91% rename from common/lib/xmodule/xmodule/open_ended_image_submission.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py index 66500146ed..6956f336a5 100644 --- a/common/lib/xmodule/xmodule/open_ended_image_submission.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py @@ -5,6 +5,7 @@ to send them to S3. try: from PIL import Image + ENABLE_PIL = True except: ENABLE_PIL = False @@ -13,11 +14,6 @@ from urlparse import urlparse import requests from boto.s3.connection import S3Connection from boto.s3.key import Key -#TODO: Settings import is needed now in order to specify the URL and keys for amazon s3 (to upload images). -#Eventually, the goal is to replace the global django settings import with settings specifically -#for this module. There is no easy way to do this now, so piggybacking on the django settings -#makes sense. -from django.conf import settings import pickle import logging import re @@ -56,6 +52,7 @@ class ImageProperties(object): """ Class to check properties of an image and to validate if they are allowed. 
""" + def __init__(self, image_data): """ Initializes class variables @@ -97,7 +94,7 @@ class ImageProperties(object): g = rgb[1] b = rgb[2] check_r = (r > 60) - check_g = (r * 0.4) < g < (r * 0.85) + check_g = (r * 0.4) < g < (r * 0.85) check_b = (r * 0.2) < b < (r * 0.7) colors_okay = check_r and check_b and check_g except: @@ -146,6 +143,7 @@ class URLProperties(object): Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable links to the peer grading image functionality of the external grading service. """ + def __init__(self, url_string): self.url_string = url_string @@ -217,11 +215,11 @@ def run_image_tests(image): success = image_properties.run_tests() except: log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image," - "or an issue with the deployment configuration of PIL/Pillow") + "or an issue with the deployment configuration of PIL/Pillow") return success -def upload_to_s3(file_to_upload, keyname): +def upload_to_s3(file_to_upload, keyname, s3_interface): ''' Upload file to S3 using provided keyname. @@ -237,8 +235,8 @@ def upload_to_s3(file_to_upload, keyname): #im.save(out_im, 'PNG') try: - conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY) - bucketname = str(settings.AWS_STORAGE_BUCKET_NAME) + conn = S3Connection(s3_interface['access_key'], s3_interface['secret_access_key']) + bucketname = str(s3_interface['storage_bucket_name']) bucket = conn.create_bucket(bucketname.lower()) k = Key(bucket) @@ -256,8 +254,10 @@ def upload_to_s3(file_to_upload, keyname): return True, public_url except: - error_message = "Could not connect to S3." - log.exception(error_message) + #This is a dev_facing_error + error_message = "Could not connect to S3 to upload peer grading image. 
Trying to utilize bucket: {0}".format( + bucketname.lower()) + log.error(error_message) return False, error_message diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py similarity index 79% rename from common/lib/xmodule/xmodule/open_ended_module.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py index 98260f3401..1f84d2ab8c 100644 --- a/common/lib/xmodule/xmodule/open_ended_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py @@ -5,28 +5,16 @@ hints, answers, and assessment judgment (currently only correct/incorrect). Parses xml definition file--see below for exact format. """ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree -from lxml.html import rewrite_links -from path import path -import os -import sys -import hashlib import capa.xqueue_interface as xqueue_interface -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location +from xmodule.capa_module import ComplexEncoder +from xmodule.editing_module import EditingDescriptor +from xmodule.progress import Progress +from xmodule.stringify import stringify_children +from xmodule.xml_module import XmlDescriptor from capa.util import * import openendedchild @@ -34,7 +22,7 @@ from numpy import median from datetime import datetime -from combined_open_ended_rubric import CombinedOpenEndedRubric +from .combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("mitx.courseware") @@ -52,6 +40,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild): """ + TEMPLATE_DIR = 
"combinedopenended/openended" + def setup_response(self, system, location, definition, descriptor): """ Sets up the response type. @@ -71,19 +61,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild): self.submission_id = None self.grader_id = None + error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance." if oeparam is None: - raise ValueError("No oeparam found in problem xml.") - if self.prompt is None: - raise ValueError("No prompt found in problem xml.") - if self.rubric is None: - raise ValueError("No rubric found in problem xml.") + #This is a staff_facing_error + raise ValueError(error_message.format('oeparam')) + if self.child_prompt is None: + raise ValueError(error_message.format('prompt')) + if self.child_rubric is None: + raise ValueError(error_message.format('rubric')) - self._parse(oeparam, self.prompt, self.rubric, system) + self._parse(oeparam, self.child_prompt, self.child_rubric, system) - if self.created == True and self.state == self.ASSESSING: - self.created = False + if self.child_created == True and self.child_state == self.ASSESSING: + self.child_created = False self.send_to_grader(self.latest_answer(), system) - self.created = False + self.child_created = False def _parse(self, oeparam, prompt, rubric, system): ''' @@ -97,8 +89,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload prompt_string = stringify_children(prompt) rubric_string = stringify_children(rubric) - self.prompt = prompt_string - self.rubric = rubric_string + self.child_prompt = prompt_string + self.child_rubric = rubric_string grader_payload = oeparam.find('grader_payload') grader_payload = grader_payload.text if grader_payload is not None else '' @@ -110,19 +102,23 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # __init__ adds it (easiest way to get problem location into # response types) except TypeError, 
ValueError: - log.exception("Grader payload %r is not a json object!", grader_payload) + #This is a dev_facing_error + log.exception( + "Grader payload from external open ended grading server is not a json object! Object: {0}".format( + grader_payload)) self.initial_display = find_with_default(oeparam, 'initial_display', '') self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.') parsed_grader_payload.update({ - 'location': system.location.url(), + 'location': self.location_string, 'course_id': system.course_id, 'prompt': prompt_string, 'rubric': rubric_string, 'initial_display': self.initial_display, 'answer': self.answer, - 'problem_id': self.display_name + 'problem_id': self.display_name, + 'skip_basic_checks': self.skip_basic_checks, }) updated_grader_payload = json.dumps(parsed_grader_payload) @@ -135,7 +131,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @param system: ModuleSystem @return: Success indicator """ - self.state = self.DONE + self.child_state = self.DONE return {'success': True} def message_post(self, get, system): @@ -145,24 +141,29 @@ class OpenEndedModule(openendedchild.OpenEndedChild): """ event_info = dict() - event_info['problem_id'] = system.location.url() + event_info['problem_id'] = self.location_string event_info['student_id'] = system.anonymous_student_id event_info['survey_responses'] = get survey_responses = event_info['survey_responses'] for tag in ['feedback', 'submission_id', 'grader_id', 'score']: if tag not in survey_responses: - return {'success': False, 'msg': "Could not find needed tag {0}".format(tag)} + #This is a student_facing_error + return {'success': False, + 'msg': "Could not find needed tag {0} in the survey responses. 
Please try submitting again.".format( + tag)} try: submission_id = int(survey_responses['submission_id']) grader_id = int(survey_responses['grader_id']) feedback = str(survey_responses['feedback'].encode('ascii', 'ignore')) score = int(survey_responses['score']) except: + #This is a dev_facing_error error_message = ("Could not parse submission id, grader id, " "or feedback from message_post ajax call. Here is the message data: {0}".format( survey_responses)) log.exception(error_message) + #This is a student_facing_error return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."} qinterface = system.xqueue['interface'] @@ -170,7 +171,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): anonymous_student_id = system.anonymous_student_id queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + anonymous_student_id + - str(len(self.history))) + str(len(self.child_history))) xheader = xqueue_interface.make_xheader( lms_callback_url=system.xqueue['callback_url'], @@ -190,15 +191,16 @@ class OpenEndedModule(openendedchild.OpenEndedChild): } (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) + body=json.dumps(contents)) #Convert error to a success value success = True if error: success = False - self.state = self.DONE + self.child_state = self.DONE + #This is a student_facing_message return {'success': success, 'msg': "Successfully submitted your feedback."} def send_to_grader(self, submission, system): @@ -220,11 +222,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # Generate header queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + anonymous_student_id + - str(len(self.history))) + str(len(self.child_history))) xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'], - lms_key=queuekey, - queue_name=self.queue_name) + lms_key=queuekey, + queue_name=self.queue_name) contents = self.payload.copy() @@ -242,7 +244,7 @@ 
class OpenEndedModule(openendedchild.OpenEndedChild): # Submit request. When successful, 'msg' is the prior length of the queue (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) + body=json.dumps(contents)) # State associated with the queueing request queuestate = {'key': queuekey, @@ -263,11 +265,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): self.record_latest_score(new_score_msg['score']) self.record_latest_post_assessment(score_msg) - self.state = self.POST_ASSESSMENT + self.child_state = self.POST_ASSESSMENT return True - def get_answers(self): """ Gets and shows the answer for this problem. @@ -301,7 +302,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # We want to display available feedback in a particular order. # This dictionary specifies which goes first--lower first. - priorities = { # These go at the start of the feedback + priorities = {# These go at the start of the feedback 'spelling': 0, 'grammar': 1, # needs to be after all the other feedback @@ -348,18 +349,22 @@ class OpenEndedModule(openendedchild.OpenEndedChild): for tag in ['success', 'feedback', 'submission_id', 'grader_id']: if tag not in response_items: - return format_feedback('errors', 'Error getting feedback') + #This is a student_facing_error + return format_feedback('errors', 'Error getting feedback from grader.') feedback_items = response_items['feedback'] try: feedback = json.loads(feedback_items) except (TypeError, ValueError): - log.exception("feedback_items have invalid json %r", feedback_items) - return format_feedback('errors', 'Could not parse feedback') + #This is a dev_facing_error + log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items)) + #This is a student_facing_error + return format_feedback('errors', 'Error getting feedback from grader.') if response_items['success']: if len(feedback) == 0: - return format_feedback('errors', 'No feedback available') + #This is 
a student_facing_error + return format_feedback('errors', 'No feedback available from grader.') for tag in do_not_render: if tag in feedback: @@ -368,6 +373,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): feedback_lst = sorted(feedback.items(), key=get_priority) feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst) else: + #This is a student_facing_error feedback_list_part1 = format_feedback('errors', response_items['feedback']) feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value) @@ -395,10 +401,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): rubric_scores = rubric_dict['rubric_scores'] if not response_items['success']: - return system.render_template("open_ended_error.html", - {'errors': feedback}) + return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR), + {'errors': feedback}) - feedback_template = system.render_template("open_ended_feedback.html", { + feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), { 'grader_type': response_items['grader_type'], 'score': "{0} / {1}".format(response_items['score'], self.max_score()), 'feedback': feedback, @@ -407,7 +413,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild): return feedback_template, rubric_scores - def _parse_score_msg(self, score_msg, system, join_feedback=True): """ Grader reply is a JSON-dump of the following dict @@ -433,24 +438,26 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'valid': False, 'score': 0, 'feedback': '', - 'rubric_scores' : [[0]], - 'grader_types' : [''], - 'feedback_items' : [''], - 'feedback_dicts' : [{}], - 'grader_ids' : [0], - 'submission_ids' : [0], - } + 'rubric_scores': [[0]], + 'grader_types': [''], + 'feedback_items': [''], + 'feedback_dicts': [{}], + 'grader_ids': [0], + 'submission_ids': [0], + } try: score_result = json.loads(score_msg) except (TypeError, ValueError): - error_message = ("External grader 
message should be a JSON-serialized dict." + #This is a dev_facing_error + error_message = ("External open ended grader message should be a JSON-serialized dict." " Received score_msg = {0}".format(score_msg)) log.error(error_message) fail['feedback'] = error_message return fail if not isinstance(score_result, dict): - error_message = ("External grader message should be a JSON-serialized dict." + #This is a dev_facing_error + error_message = ("External open ended grader message should be a JSON-serialized dict." " Received score_result = {0}".format(score_result)) log.error(error_message) fail['feedback'] = error_message @@ -458,12 +465,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']: if tag not in score_result: - error_message = ("External grader message is missing required tag: {0}" + #This is a dev_facing_error + error_message = ("External open ended grader message is missing required tag: {0}" .format(tag)) log.error(error_message) fail['feedback'] = error_message return fail - #This is to support peer grading + #This is to support peer grading if isinstance(score_result['score'], list): feedback_items = [] rubric_scores = [] @@ -520,12 +528,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'valid': True, 'score': score, 'feedback': feedback, - 'rubric_scores' : rubric_scores, - 'grader_types' : grader_types, - 'feedback_items' : feedback_items, - 'feedback_dicts' : feedback_dicts, - 'grader_ids' : grader_ids, - 'submission_ids' : submission_ids, + 'rubric_scores': rubric_scores, + 'grader_types': grader_types, + 'feedback_items': feedback_items, + 'feedback_dicts': feedback_dicts, + 'grader_ids': grader_ids, + 'submission_ids': submission_ids, } def latest_post_assessment(self, system, short_feedback=False, join_feedback=True): @@ -534,16 +542,16 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @param short_feedback: If the long feedback is 
wanted or not @return: Returns formatted feedback """ - if not self.history: + if not self.child_history: return "" - feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system, - join_feedback=join_feedback) + feedback_dict = self._parse_score_msg(self.child_history[-1].get('post_assessment', ""), system, + join_feedback=join_feedback) if not short_feedback: return feedback_dict['feedback'] if feedback_dict['valid'] else '' if feedback_dict['valid']: short_feedback = self._convert_longform_feedback_to_html( - json.loads(self.history[-1].get('post_assessment', ""))) + json.loads(self.child_history[-1].get('post_assessment', ""))) return short_feedback if feedback_dict['valid'] else '' def format_feedback_with_evaluation(self, system, feedback): @@ -553,7 +561,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @return: Rendered html """ context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50} - html = system.render_template('open_ended_evaluation.html', context) + html = system.render_template('{0}/open_ended_evaluation.html'.format(self.TEMPLATE_DIR), context) return html def handle_ajax(self, dispatch, get, system): @@ -575,7 +583,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): } if dispatch not in handlers: - return 'Error' + #This is a dev_facing_error + log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) + #This is a dev_facing_error + return json.dumps({'error': 'Error handling action. 
Please try again.', 'success': False}) before = self.get_progress() d = handlers[dispatch](get, system) @@ -593,7 +604,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @param system: Modulesystem (needed to align with other ajax functions) @return: Returns the current state """ - state = self.state + state = self.child_state return {'state': state} def save_answer(self, get, system): @@ -609,22 +620,28 @@ class OpenEndedModule(openendedchild.OpenEndedChild): if closed: return msg - if self.state != self.INITIAL: + if self.child_state != self.INITIAL: return self.out_of_sync_error(get) # add new history element with answer and empty score and hint. success, get = self.append_image_to_student_answer(get) error_message = "" if success: - get['student_answer'] = OpenEndedModule.sanitize_html(get['student_answer']) - self.new_history_entry(get['student_answer']) - self.send_to_grader(get['student_answer'], system) - self.change_state(self.ASSESSING) + success, allowed_to_submit, error_message = self.check_if_student_can_submit() + if allowed_to_submit: + get['student_answer'] = OpenEndedModule.sanitize_html(get['student_answer']) + self.new_history_entry(get['student_answer']) + self.send_to_grader(get['student_answer'], system) + self.change_state(self.ASSESSING) + else: + #Error message already defined + success = False else: + #This is a student_facing_error error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box." 
return { - 'success': True, + 'success': success, 'error': error_message, 'student_response': get['student_answer'] } @@ -649,21 +666,24 @@ class OpenEndedModule(openendedchild.OpenEndedChild): Output: Rendered HTML """ #set context variables and render template - if self.state != self.INITIAL: + eta_string = None + if self.child_state != self.INITIAL: latest = self.latest_answer() previous_answer = latest if latest is not None else self.initial_display post_assessment = self.latest_post_assessment(system) score = self.latest_score() correct = 'correct' if self.is_submission_correct(score) else 'incorrect' + if self.child_state == self.ASSESSING: + eta_string = self.get_eta() else: post_assessment = "" correct = "" previous_answer = self.initial_display context = { - 'prompt': self.prompt, + 'prompt': self.child_prompt, 'previous_answer': previous_answer, - 'state': self.state, + 'state': self.child_state, 'allow_reset': self._allow_reset(), 'rows': 30, 'cols': 80, @@ -672,12 +692,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'child_type': 'openended', 'correct': correct, 'accept_file_upload': self.accept_file_upload, + 'eta_message': eta_string, } - html = system.render_template('open_ended.html', context) + html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context) return html -class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): +class OpenEndedDescriptor(): """ Module for adding open ended response questions to courses """ @@ -689,8 +710,8 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "openended" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" + def __init__(self, system): + self.system =system @classmethod def definition_from_xml(cls, xml_object, system): @@ -704,13 +725,16 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): """ for child in ['openendedparam']: if 
len(xml_object.xpath(child)) != 1: - raise ValueError("Open Ended definition must include exactly one '{0}' tag".format(child)) + #This is a staff_facing_error + raise ValueError( + "Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format( + child)) def parse(k): """Assumes that xml_object has child k""" return xml_object.xpath(k)[0] - return {'oeparam': parse('openendedparam'), } + return {'oeparam': parse('openendedparam')} def definition_to_xml(self, resource_fs): diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py similarity index 63% rename from common/lib/xmodule/xmodule/openendedchild.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py index c83b0f0ea3..2e49565bec 100644 --- a/common/lib/xmodule/xmodule/openendedchild.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py @@ -1,29 +1,19 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging -from lxml import etree -from lxml.html import rewrite_links from lxml.html.clean import Cleaner, autolink_html -from path import path -import os -import sys -import hashlib -import capa.xqueue_interface as xqueue_interface import re -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .xml_module import XmlDescriptor +from xmodule.capa_module import ComplexEncoder +import open_ended_image_submission +from xmodule.editing_module import EditingDescriptor +from xmodule.html_checker import check_html +from xmodule.progress import Progress +from xmodule.stringify import stringify_children +from xmodule.xml_module import XmlDescriptor from xmodule.modulestore import Location 
from capa.util import * -import open_ended_image_submission +from .peer_grading_service import PeerGradingService, MockPeerGradingService +import controller_query_service from datetime import datetime @@ -74,11 +64,15 @@ class OpenEndedChild(object): 'done': 'Done', } - def __init__(self, system, location, definition, descriptor, static_data, + def __init__(self, system, location, definition, descriptor, static_data, instance_state=None, shared_state=None, **kwargs): # Load instance state + if instance_state is not None: - instance_state = json.loads(instance_state) + try: + instance_state = json.loads(instance_state) + except: + log.error("Could not load instance state for open ended. Setting it to nothing.: {0}".format(instance_state)) else: instance_state = {} @@ -86,24 +80,39 @@ class OpenEndedChild(object): # None for any element, and score and hint can be None for the last (current) # element. # Scores are on scale from 0 to max_score - self.history = instance_state.get('history', []) - self.state = instance_state.get('state', self.INITIAL) + self.child_history=instance_state.get('child_history',[]) + self.child_state=instance_state.get('child_state', self.INITIAL) + self.child_created = instance_state.get('child_created', False) + self.child_attempts = instance_state.get('child_attempts', 0) - self.created = instance_state.get('created', False) - - self.attempts = instance_state.get('attempts', 0) self.max_attempts = static_data['max_attempts'] - - self.prompt = static_data['prompt'] - self.rubric = static_data['rubric'] + self.child_prompt = static_data['prompt'] + self.child_rubric = static_data['rubric'] self.display_name = static_data['display_name'] self.accept_file_upload = static_data['accept_file_upload'] self.close_date = static_data['close_date'] + self.s3_interface = static_data['s3_interface'] + self.skip_basic_checks = static_data['skip_basic_checks'] + self._max_score = static_data['max_score'] # Used for progress / grading. 
Currently get credit just for # completion (doesn't matter if you self-assessed correct/incorrect). - self._max_score = static_data['max_score'] + if system.open_ended_grading_interface: + self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system) + self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface, + system) + else: + self.peer_gs = MockPeerGradingService() + self.controller_qs = None + + self.system = system + + self.location_string = location + try: + self.location_string = self.location_string.url() + except: + pass self.setup_response(system, location, definition, descriptor) @@ -127,41 +136,45 @@ class OpenEndedChild(object): if self.closed(): return True, { 'success': False, - 'error': 'This problem is now closed.' + #This is a student_facing_error + 'error': 'The problem close date has passed, and this problem is now closed.' } - elif self.attempts > self.max_attempts: + elif self.child_attempts > self.max_attempts: return True, { 'success': False, - 'error': 'Too many attempts.' + #This is a student_facing_error + 'error': 'You have attempted this problem {0} times. 
You are allowed {1} attempts.'.format( + self.child_attempts, self.max_attempts + ) } else: return False, {} def latest_answer(self): """Empty string if not available""" - if not self.history: + if not self.child_history: return "" - return self.history[-1].get('answer', "") + return self.child_history[-1].get('answer', "") def latest_score(self): """None if not available""" - if not self.history: + if not self.child_history: return None - return self.history[-1].get('score') + return self.child_history[-1].get('score') def latest_post_assessment(self, system): """Empty string if not available""" - if not self.history: + if not self.child_history: return "" - return self.history[-1].get('post_assessment', "") + return self.child_history[-1].get('post_assessment', "") @staticmethod def sanitize_html(answer): try: answer = autolink_html(answer) cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True, - host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS, - whitelist_tags=set(['embed', 'iframe', 'a', 'img'])) + host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS, + whitelist_tags=set(['embed', 'iframe', 'a', 'img'])) clean_html = cleaner.clean_html(answer) clean_html = re.sub(r'

              $', '', re.sub(r'^

              ', '', clean_html)) except: @@ -175,30 +188,30 @@ class OpenEndedChild(object): @return: None """ answer = OpenEndedChild.sanitize_html(answer) - self.history.append({'answer': answer}) + self.child_history.append({'answer': answer}) def record_latest_score(self, score): """Assumes that state is right, so we're adding a score to the latest history element""" - self.history[-1]['score'] = score + self.child_history[-1]['score'] = score def record_latest_post_assessment(self, post_assessment): """Assumes that state is right, so we're adding a score to the latest history element""" - self.history[-1]['post_assessment'] = post_assessment + self.child_history[-1]['post_assessment'] = post_assessment def change_state(self, new_state): """ A centralized place for state changes--allows for hooks. If the current state matches the old state, don't run any hooks. """ - if self.state == new_state: + if self.child_state == new_state: return - self.state = new_state + self.child_state = new_state - if self.state == self.DONE: - self.attempts += 1 + if self.child_state == self.DONE: + self.child_attempts += 1 def get_instance_state(self): """ @@ -207,17 +220,17 @@ class OpenEndedChild(object): state = { 'version': self.STATE_VERSION, - 'history': self.history, - 'state': self.state, + 'child_history': self.child_history, + 'child_state': self.child_state, 'max_score': self._max_score, - 'attempts': self.attempts, - 'created': False, + 'child_attempts': self.child_attempts, + 'child_created': False, } return json.dumps(state) def _allow_reset(self): """Can the module be reset?""" - return (self.state == self.DONE and self.attempts < self.max_attempts) + return (self.child_state == self.DONE and self.child_attempts < self.max_attempts) def max_score(self): """ @@ -249,9 +262,10 @@ class OpenEndedChild(object): ''' if self._max_score > 0: try: - return Progress(self.get_score()['score'], self._max_score) + return Progress(int(self.get_score()['score']), 
int(self._max_score)) except Exception as err: - log.exception("Got bad progress") + #This is a dev_facing_error + log.exception("Got bad progress from open ended child module. Max Score: {0}".format(self._max_score)) return None return None @@ -259,10 +273,12 @@ class OpenEndedChild(object): """ return dict out-of-sync error message, and also log. """ - log.warning("Assessment module state out sync. state: %r, get: %r. %s", - self.state, get, msg) + #This is a dev_facing_error + log.warning("Open ended child state out sync. state: %r, get: %r. %s", + self.child_state, get, msg) + #This is a student_facing_error return {'success': False, - 'error': 'The problem state got out-of-sync'} + 'error': 'The problem state got out-of-sync. Please try reloading the page.'} def get_html(self): """ @@ -285,7 +301,7 @@ class OpenEndedChild(object): @return: Boolean correct. """ correct = False - if(isinstance(score, (int, long, float, complex))): + if (isinstance(score, (int, long, float, complex))): score_ratio = int(score) / float(self.max_score()) correct = (score_ratio >= 0.66) return correct @@ -319,7 +335,8 @@ class OpenEndedChild(object): try: image_data.seek(0) - success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key) + success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, + self.s3_interface) except: log.exception("Could not upload image to S3.") @@ -340,6 +357,10 @@ class OpenEndedChild(object): if get_data['can_upload_files'] in ['true', '1']: has_file_to_upload = True file = get_data['student_file'][0] + if self.system.track_fuction: + self.system.track_function('open_ended_image_upload', {'filename': file.name}) + else: + log.info("No tracking function found when uploading image.") uploaded_to_s3, image_ok, s3_public_url = self.upload_image_to_s3(file) if uploaded_to_s3: image_tag = self.generate_image_tag_from_url(s3_public_url, file.name) @@ -377,9 +398,9 @@ class OpenEndedChild(object): #In this 
case, an image was submitted by the student, but the image could not be uploaded to S3. Likely #a config issue (development vs deployment). For now, just treat this as a "success" log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, " - "but the image was not able to be uploaded to S3. This could indicate a config" - "issue with this deployment, but it could also indicate a problem with S3 or with the" - "student image itself.") + "but the image was not able to be uploaded to S3. This could indicate a config" + "issue with this deployment, but it could also indicate a problem with S3 or with the" + "student image itself.") overall_success = True elif not has_file_to_upload: #If there is no file to upload, probably the student has embedded the link in the answer text @@ -408,3 +429,57 @@ class OpenEndedChild(object): success = True return success, string + + def check_if_student_can_submit(self): + location = self.location_string + + student_id = self.system.anonymous_student_id + success = False + allowed_to_submit = True + response = {} + #This is a student_facing_error + error_string = ("You need to peer grade {0} more in order to make another submission. " + "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.") + try: + response = self.peer_gs.get_data_for_location(self.location_string, student_id) + count_graded = response['count_graded'] + count_required = response['count_required'] + student_sub_count = response['student_sub_count'] + success = True + except: + #This is a dev_facing_error + log.error("Could not contact external open ended graders for location {0} and student {1}".format( + self.location_string, student_id)) + #This is a student_facing_error + error_message = "Could not contact the graders. Please notify course staff." 
+ return success, allowed_to_submit, error_message + if count_graded >= count_required: + return success, allowed_to_submit, "" + else: + allowed_to_submit = False + #This is a student_facing_error + error_message = error_string.format(count_required - count_graded, count_graded, count_required, + student_sub_count) + return success, allowed_to_submit, error_message + + def get_eta(self): + if self.controller_qs: + response = self.controller_qs.check_for_eta(self.location_string) + try: + response = json.loads(response) + except: + pass + else: + return "" + + success = response['success'] + if isinstance(success, basestring): + success = (success.lower() == "true") + + if success: + eta = controller_query_service.convert_seconds_to_human_readable(response['eta']) + eta_string = "Please check back for your response in at most {0}.".format(eta) + else: + eta_string = "" + + return eta_string diff --git a/common/lib/xmodule/xmodule/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py similarity index 78% rename from common/lib/xmodule/xmodule/peer_grading_service.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py index 8c50b6ff0a..85c7a98132 100644 --- a/common/lib/xmodule/xmodule/peer_grading_service.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py @@ -1,18 +1,7 @@ import json import logging -import requests -from requests.exceptions import RequestException, ConnectionError, HTTPError -import sys -#TODO: Settings import is needed now in order to specify the URL where to find the peer grading service. -#Eventually, the goal is to replace the global django settings import with settings specifically -#for this xmodule. There is no easy way to do this now, so piggybacking on the django settings -#makes sense. 
-from django.conf import settings - -from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError -from lxml import etree -from grading_service_module import GradingService, GradingServiceError +from .grading_service_module import GradingService log = logging.getLogger(__name__) @@ -25,9 +14,12 @@ class PeerGradingService(GradingService): """ Interface with the grading controller for peer grading """ + def __init__(self, config, system): config['system'] = system super(PeerGradingService, self).__init__(config) + self.url = config['url'] + config['peer_grading'] + self.login_url = self.url + '/login/' self.get_next_submission_url = self.url + '/get_next_submission/' self.save_grade_url = self.url + '/save_grade/' self.is_student_calibrated_url = self.url + '/is_student_calibrated/' @@ -39,16 +31,17 @@ class PeerGradingService(GradingService): self.system = system def get_data_for_location(self, problem_location, student_id): - response = self.get(self.get_data_for_location_url, - {'location': problem_location, 'student_id': student_id}) + params = {'location': problem_location, 'student_id': student_id} + response = self.get(self.get_data_for_location_url, params) return self.try_to_decode(response) def get_next_submission(self, problem_location, grader_id): response = self.get(self.get_next_submission_url, - {'location': problem_location, 'grader_id': grader_id}) + {'location': problem_location, 'grader_id': grader_id}) return self.try_to_decode(self._render_rubric(response)) - def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged): + def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, + submission_flagged): data = {'grader_id': grader_id, 'submission_id': submission_id, 'score': score, @@ -98,6 +91,7 @@ class PeerGradingService(GradingService): pass return text + """ This is a mock peer grading service that can be used 
for unit tests without making actual service calls to the grading controller @@ -115,7 +109,7 @@ class MockPeerGradingService(object): 'max_score': 4}) def save_grade(self, location, grader_id, submission_id, - score, feedback, submission_key): + score, feedback, submission_key, rubric_scores, submission_flagged): return json.dumps({'success': True}) def is_student_calibrated(self, problem_location, grader_id): @@ -131,7 +125,8 @@ class MockPeerGradingService(object): 'max_score': 4}) def save_calibration_essay(self, problem_location, grader_id, - calibration_essay_id, submission_key, score, feedback): + calibration_essay_id, submission_key, score, + feedback, rubric_scores): return {'success': True, 'actual_score': 2} def get_problem_list(self, course_id, grader_id): @@ -142,25 +137,3 @@ class MockPeerGradingService(object): json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5}) ]}) - -_service = None - - -def peer_grading_service(system): - """ - Return a peer grading service instance--if settings.MOCK_PEER_GRADING is True, - returns a mock one, otherwise a real one. - - Caches the result, so changing the setting after the first call to this - function will have no effect. 
- """ - global _service - if _service is not None: - return _service - - if settings.MOCK_PEER_GRADING: - _service = MockPeerGradingService() - else: - _service = PeerGradingService(settings.PEER_GRADING_INTERFACE, system) - - return _service diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py similarity index 70% rename from common/lib/xmodule/xmodule/self_assessment_module.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py index 0d1092f96f..5fb901d49c 100644 --- a/common/lib/xmodule/xmodule/self_assessment_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py @@ -1,27 +1,13 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree -from lxml.html import rewrite_links -from path import path -import os -import sys -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .x_module import XModule -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location +from xmodule.capa_module import ComplexEncoder +from xmodule.progress import Progress +from xmodule.stringify import stringify_children import openendedchild -from combined_open_ended_rubric import CombinedOpenEndedRubric +from .combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("mitx.courseware") @@ -43,6 +29,12 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): """ + TEMPLATE_DIR = "combinedopenended/selfassessment" + # states + INITIAL = 'initial' + ASSESSING = 'assessing' + REQUEST_HINT = 'request_hint' + DONE = 'done' def setup_response(self, system, location, definition, descriptor): """ @@ -53,8 
+45,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): @param descriptor: SelfAssessmentDescriptor @return: None """ - self.prompt = stringify_children(self.prompt) - self.rubric = stringify_children(self.rubric) + self.child_prompt = stringify_children(self.child_prompt) + self.child_rubric = stringify_children(self.child_rubric) def get_html(self, system): """ @@ -63,27 +55,26 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): @return: Rendered HTML """ #set context variables and render template - if self.state != self.INITIAL: + if self.child_state != self.INITIAL: latest = self.latest_answer() previous_answer = latest if latest is not None else '' else: previous_answer = '' context = { - 'prompt': self.prompt, + 'prompt': self.child_prompt, 'previous_answer': previous_answer, 'ajax_url': system.ajax_url, 'initial_rubric': self.get_rubric_html(system), - 'state': self.state, + 'state': self.child_state, 'allow_reset': self._allow_reset(), 'child_type': 'selfassessment', 'accept_file_upload': self.accept_file_upload, } - html = system.render_template('self_assessment_prompt.html', context) + html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context) return html - def handle_ajax(self, dispatch, get, system): """ This is called by courseware.module_render, to handle an AJAX call. @@ -102,7 +93,10 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): } if dispatch not in handlers: - return 'Error' + #This is a dev_facing_error + log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) + #This is a dev_facing_error + return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) before = self.get_progress() d = handlers[dispatch](get, system) @@ -117,11 +111,11 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): """ Return the appropriate version of the rubric, based on the state. 
""" - if self.state == self.INITIAL: + if self.child_state == self.INITIAL: return '' rubric_renderer = CombinedOpenEndedRubric(system, False) - rubric_dict = rubric_renderer.render_rubric(self.rubric) + rubric_dict = rubric_renderer.render_rubric(self.child_rubric) success = rubric_dict['success'] rubric_html = rubric_dict['html'] @@ -130,23 +124,24 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): 'max_score': self._max_score, } - if self.state == self.ASSESSING: + if self.child_state == self.ASSESSING: context['read_only'] = False - elif self.state in (self.POST_ASSESSMENT, self.DONE): + elif self.child_state in (self.POST_ASSESSMENT, self.DONE): context['read_only'] = True else: - raise ValueError("Illegal state '%r'" % self.state) + #This is a dev_facing_error + raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state)) - return system.render_template('self_assessment_rubric.html', context) + return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context) def get_hint_html(self, system): """ Return the appropriate version of the hint view, based on state. 
""" - if self.state in (self.INITIAL, self.ASSESSING): + if self.child_state in (self.INITIAL, self.ASSESSING): return '' - if self.state == self.DONE: + if self.child_state == self.DONE: # display the previous hint latest = self.latest_post_assessment(system) hint = latest if latest is not None else '' @@ -155,15 +150,15 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): context = {'hint': hint} - if self.state == self.POST_ASSESSMENT: + if self.child_state == self.POST_ASSESSMENT: context['read_only'] = False - elif self.state == self.DONE: + elif self.child_state == self.DONE: context['read_only'] = True else: - raise ValueError("Illegal state '%r'" % self.state) - - return system.render_template('self_assessment_hint.html', context) + #This is a dev_facing_error + raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state)) + return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context) def save_answer(self, get, system): """ @@ -182,17 +177,23 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): if closed: return msg - if self.state != self.INITIAL: + if self.child_state != self.INITIAL: return self.out_of_sync_error(get) error_message = "" # add new history element with answer and empty score and hint. 
success, get = self.append_image_to_student_answer(get) if success: - get['student_answer'] = SelfAssessmentModule.sanitize_html(get['student_answer']) - self.new_history_entry(get['student_answer']) - self.change_state(self.ASSESSING) + success, allowed_to_submit, error_message = self.check_if_student_can_submit() + if allowed_to_submit: + get['student_answer'] = SelfAssessmentModule.sanitize_html(get['student_answer']) + self.new_history_entry(get['student_answer']) + self.change_state(self.ASSESSING) + else: + #Error message already defined + success = False else: + #This is a student_facing_error error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box." return { @@ -217,16 +218,19 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): 'message_html' only if success is true """ - if self.state != self.ASSESSING: + if self.child_state != self.ASSESSING: return self.out_of_sync_error(get) try: score = int(get['assessment']) score_list = get.getlist('score_list[]') - for i in xrange(0,len(score_list)): + for i in xrange(0, len(score_list)): score_list[i] = int(score_list[i]) except ValueError: - return {'success': False, 'error': "Non-integer score value, or no score list"} + #This is a dev_facing_error + log.error("Non-integer score value passed to save_assessment ,or no score list present.") + #This is a student_facing_error + return {'success': False, 'error': "Error saving your score. 
Please notify course staff."} #Record score as assessment and rubric scores as post assessment self.record_latest_score(score) @@ -237,7 +241,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): self.change_state(self.DONE) d['allow_reset'] = self._allow_reset() - d['state'] = self.state + d['state'] = self.child_state return d def save_hint(self, get, system): @@ -251,7 +255,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): with the error key only present if success is False and message_html only if True. ''' - if self.state != self.POST_ASSESSMENT: + if self.child_state != self.POST_ASSESSMENT: # Note: because we only ask for hints on wrong answers, may not have # the same number of hints and answers. return self.out_of_sync_error(get) @@ -264,16 +268,17 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): 'allow_reset': self._allow_reset()} def latest_post_assessment(self, system): - latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system) + latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system) try: rubric_scores = json.loads(latest_post_assessment) except: + #This is a dev_facing_error log.error("Cannot parse rubric scores in self assessment module from {0}".format(latest_post_assessment)) rubric_scores = [] return [rubric_scores] -class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): +class SelfAssessmentDescriptor(): """ Module for adding self assessment questions to courses """ @@ -285,9 +290,8 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "selfassessment" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" - css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/html/edit.scss')]} + def __init__(self, system): + self.system =system @classmethod def definition_from_xml(cls, 
xml_object, system): @@ -303,7 +307,10 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): expected_children = [] for child in expected_children: if len(xml_object.xpath(child)) != 1: - raise ValueError("Self assessment definition must include exactly one '{0}' tag".format(child)) + #This is a staff_facing_error + raise ValueError( + "Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format( + child)) def parse(k): """Assumes that xml_object has child k""" @@ -316,7 +323,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): elt = etree.Element('selfassessment') def add_child(k): - child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_str = '<{tag}>{body}'.format(tag=k, body=getattr(self, k)) child_node = etree.fromstring(child_str) elt.append(child_node) diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index 20f71f3b3c..e18f2ceca3 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -1,39 +1,20 @@ -""" -This module provides an interface on the grading-service backend -for peer grading - -Use peer_grading_service() to get the version specified -in settings.PEER_GRADING_INTERFACE - -""" import json import logging -import requests -import sys -from django.conf import settings - -from combined_open_ended_rubric import CombinedOpenEndedRubric from lxml import etree -import copy -import itertools -import json -import logging -from lxml.html import rewrite_links -import os - +from datetime import datetime from pkg_resources import resource_string -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress +from .capa_module import ComplexEncoder from .stringify import stringify_children from .x_module import XModule 
-from .xml_module import XmlDescriptor +from xmodule.raw_module import RawDescriptor from xmodule.modulestore import Location +from xmodule.modulestore.django import modulestore +from .timeinfo import TimeInfo +from xblock.core import Object, Integer, Boolean, String, Scope -from peer_grading_service import peer_grading_service, GradingServiceError +from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, GradingServiceError, MockPeerGradingService log = logging.getLogger(__name__) @@ -43,58 +24,83 @@ TRUE_DICT = [True, "True", "true", "TRUE"] MAX_SCORE = 1 IS_GRADED = True +EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff." -class PeerGradingModule(XModule): + +class PeerGradingFields(object): + use_for_single_location = Boolean(help="Whether to use this for a single location or as a panel.", default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings) + link_to_location = String(help="The location this problem is linked to.", default=LINK_TO_LOCATION, scope=Scope.settings) + is_graded = Boolean(help="Whether or not this module is scored.",default=IS_GRADED, scope=Scope.settings) + display_due_date_string = String(help="Due date that should be displayed.", default=None, scope=Scope.settings) + grace_period_string = String(help="Amount of grace to give on the due date.", default=None, scope=Scope.settings) + max_grade = Integer(help="The maximum grade that a student can receieve for this problem.", default=MAX_SCORE, scope=Scope.settings) + student_data_for_location = Object(help="Student data for a given peer grading problem.", default=json.dumps({}),scope=Scope.student_state) + + +class PeerGradingModule(PeerGradingFields, XModule): _VERSION = 1 js = {'coffee': [resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'), resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 
'js/src/javascript_loader.coffee'), - ]} + ]} js_module_name = "PeerGrading" css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} + def __init__(self, system, location, descriptor, model_data): + XModule.__init__(self, system, location, descriptor, model_data) #We need to set the location here so the child modules can use it system.set('location', location) self.system = system - self.peer_gs = peer_grading_service(self.system) + if (self.system.open_ended_grading_interface): + self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system) + else: + self.peer_gs = MockPeerGradingService() - self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION) - if isinstance(self.use_for_single_location, basestring): - self.use_for_single_location = (self.use_for_single_location in TRUE_DICT) + if self.use_for_single_location in TRUE_DICT: + try: + self.linked_problem = modulestore().get_instance(self.system.course_id, self.link_to_location) + except: + log.error("Linked location {0} for peer grading module {1} does not exist".format( + self.link_to_location, self.location)) + raise + due_date = self.linked_problem._model_data.get('peer_grading_due', None) + if due_date: + self._model_data['due'] = due_date - self.is_graded = self.metadata.get('is_graded', IS_GRADED) - if isinstance(self.is_graded, basestring): - self.is_graded = (self.is_graded in TRUE_DICT) + try: + self.timeinfo = TimeInfo(self.display_due_date_string, self.grace_period_string) + except: + log.error("Error parsing due date information in location 
{0}".format(location)) + raise - self.link_to_location = self.metadata.get('link_to_location', USE_FOR_SINGLE_LOCATION) - if self.use_for_single_location == True: - #This will raise an exception if the location is invalid - link_to_location_object = Location(self.link_to_location) + self.display_due_date = self.timeinfo.display_due_date + + try: + self.student_data_for_location = json.loads(self.student_data_for_location) + except: + pass self.ajax_url = self.system.ajax_url if not self.ajax_url.endswith("/"): self.ajax_url = self.ajax_url + "/" - self.student_data_for_location = instance_state.get('student_data_for_location', {}) - self.max_grade = instance_state.get('max_grade', MAX_SCORE) if not isinstance(self.max_grade, (int, long)): #This could result in an exception, but not wrapping in a try catch block so it moves up the stack self.max_grade = int(self.max_grade) + def closed(self): + return self._closed(self.timeinfo) + + def _closed(self, timeinfo): + if timeinfo.close_date is not None and datetime.utcnow() > timeinfo.close_date: + return True + return False + + def _err_response(self, msg): """ Return a HttpResponse with a json dump with success=False, and the given error message. @@ -114,7 +120,9 @@ class PeerGradingModule(XModule): Needs to be implemented by inheritors. Renders the HTML that students see. 
@return: """ - if not self.use_for_single_location: + if self.closed(): + return self.peer_grading_closed() + if self.use_for_single_location not in TRUE_DICT: return self.peer_grading() else: return self.peer_grading_problem({'location': self.link_to_location})['html'] @@ -131,10 +139,13 @@ class PeerGradingModule(XModule): 'save_grade': self.save_grade, 'save_calibration_essay': self.save_calibration_essay, 'problem': self.peer_grading_problem, - } + } if dispatch not in handlers: - return 'Error' + #This is a dev_facing_error + log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) + #This is a dev_facing_error + return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) d = handlers[dispatch](get) @@ -142,7 +153,7 @@ class PeerGradingModule(XModule): def query_data_for_location(self): student_id = self.system.anonymous_student_id - location = self.system.location + location = self.link_to_location success = False response = {} @@ -152,6 +163,7 @@ class PeerGradingModule(XModule): count_required = response['count_required'] success = True except GradingServiceError: + #This is a dev_facing_error log.exception("Error getting location data from controller for location {0}, student {1}" .format(location, student_id)) @@ -161,7 +173,7 @@ class PeerGradingModule(XModule): pass def get_score(self): - if not self.use_for_single_location or not self.is_graded: + if self.use_for_single_location not in TRUE_DICT or self.is_graded not in TRUE_DICT: return None try: @@ -170,19 +182,21 @@ class PeerGradingModule(XModule): except: success, response = self.query_data_for_location() if not success: - log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format( - self.system.location, self.system.anonymous_student_id - )) + log.exception( + "No instance data found and could not get data from controller for loc {0} student {1}".format( + 
self.system.location.url(), self.system.anonymous_student_id + )) return None count_graded = response['count_graded'] count_required = response['count_required'] if count_required > 0 and count_graded >= count_required: + #Ensures that once a student receives a final score for peer grading, that it does not change. self.student_data_for_location = response score_dict = { 'score': int(count_graded >= count_required), 'total': self.max_grade, - } + } return score_dict @@ -193,7 +207,7 @@ class PeerGradingModule(XModule): randomization, and 5/7 on another ''' max_grade = None - if self.use_for_single_location and self.is_graded: + if self.use_for_single_location in TRUE_DICT and self.is_graded in TRUE_DICT: max_grade = self.max_grade return max_grade @@ -226,10 +240,12 @@ class PeerGradingModule(XModule): response = self.peer_gs.get_next_submission(location, grader_id) return response except GradingServiceError: + #This is a dev_facing_error log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}" .format(self.peer_gs.url, location, grader_id)) + #This is a student_facing_error return {'success': False, - 'error': 'Could not connect to grading service'} + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR} def save_grade(self, get): """ @@ -247,7 +263,8 @@ class PeerGradingModule(XModule): error: if there was an error in the submission, this is the error message """ - required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged']) + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', + 'submission_flagged']) success, message = self._check_required(get, required) if not success: return self._err_response(message) @@ -263,17 +280,19 @@ class PeerGradingModule(XModule): try: response = self.peer_gs.save_grade(location, grader_id, submission_id, - score, feedback, submission_key, rubric_scores, submission_flagged) + score, feedback, 
submission_key, rubric_scores, submission_flagged) return response except GradingServiceError: - log.exception("""Error saving grade. server url: {0}, location: {1}, submission_id:{2}, + #This is a dev_facing_error + log.exception("""Error saving grade to open ended grading service. server url: {0}, location: {1}, submission_id:{2}, submission_key: {3}, score: {4}""" .format(self.peer_gs.url, - location, submission_id, submission_key, score) + location, submission_id, submission_key, score) ) + #This is a student_facing_error return { 'success': False, - 'error': 'Could not connect to grading service' + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR } def is_student_calibrated(self, get): @@ -306,11 +325,13 @@ class PeerGradingModule(XModule): response = self.peer_gs.is_student_calibrated(location, grader_id) return response except GradingServiceError: - log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}" + #This is a dev_facing_error + log.exception("Error from open ended grading service. server url: {0}, grader_id: {0}, location: {1}" .format(self.peer_gs.url, grader_id, location)) + #This is a student_facing_error return { 'success': False, - 'error': 'Could not connect to grading service' + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR } def show_calibration_essay(self, get): @@ -349,16 +370,20 @@ class PeerGradingModule(XModule): response = self.peer_gs.show_calibration_essay(location, grader_id) return response except GradingServiceError: - log.exception("Error from grading service. server url: {0}, location: {0}" + #This is a dev_facing_error + log.exception("Error from open ended grading service. 
server url: {0}, location: {0}" .format(self.peer_gs.url, location)) + #This is a student_facing_error return {'success': False, - 'error': 'Could not connect to grading service'} + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR} # if we can't parse the rubric into HTML, except etree.XMLSyntaxError: + #This is a dev_facing_error log.exception("Cannot parse rubric string. Raw string: {0}" .format(rubric)) + #This is a student_facing_error return {'success': False, - 'error': 'Error displaying submission'} + 'error': 'Error displaying submission. Please notify course staff.'} def save_calibration_essay(self, get): @@ -394,11 +419,25 @@ class PeerGradingModule(XModule): try: response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id, - submission_key, score, feedback, rubric_scores) + submission_key, score, feedback, rubric_scores) return response except GradingServiceError: - log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id)) - return self._err_response('Could not connect to grading service') + #This is a dev_facing_error + log.exception( + "Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format( + location, submission_id, submission_key, grader_id)) + #This is a student_facing_error + return self._err_response('There was an error saving your score. 
Please notify course staff.') + + def peer_grading_closed(self): + ''' + Show the Peer grading closed template + ''' + html = self.system.render_template('peer_grading/peer_grading_closed.html', { + 'use_for_single_location': self.use_for_single_location + }) + return html + def peer_grading(self, get=None): ''' @@ -419,12 +458,52 @@ class PeerGradingModule(XModule): problem_list = problem_list_dict['problem_list'] except GradingServiceError: - error_text = "Error occured while contacting the grading service" + #This is a student_facing_error + error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR + log.error(error_text) success = False # catch error if if the json loads fails except ValueError: - error_text = "Could not get problem list" + #This is a student_facing_error + error_text = "Could not get list of problems to peer grade. Please notify course staff." + log.error(error_text) success = False + except: + log.exception("Could not contact peer grading service.") + success = False + + + def _find_corresponding_module_for_location(location): + ''' + find the peer grading module that links to the given location + ''' + try: + return modulestore().get_instance(self.system.course_id, location) + except: + # the linked problem doesn't exist + log.error("Problem {0} does not exist in this course".format(location)) + raise + + + for problem in problem_list: + problem_location = problem['location'] + descriptor = _find_corresponding_module_for_location(problem_location) + if descriptor: + problem['due'] = descriptor._model_data.get('peer_grading_due', None) + grace_period_string = descriptor._model_data.get('graceperiod', None) + try: + problem_timeinfo = TimeInfo(problem['due'], grace_period_string) + except: + log.error("Malformed due date or grace period string for location {0}".format(problem_location)) + raise + if self._closed(problem_timeinfo): + problem['closed'] = True + else: + problem['closed'] = False + else: + # if we can't find the due date, assume that it 
doesn't have one + problem['due'] = None + problem['closed'] = False ajax_url = self.ajax_url html = self.system.render_template('peer_grading/peer_grading.html', { @@ -436,7 +515,7 @@ class PeerGradingModule(XModule): # Checked above 'staff_access': False, 'use_single_location': self.use_for_single_location, - }) + }) return html @@ -444,9 +523,12 @@ class PeerGradingModule(XModule): ''' Show individual problem interface ''' - if get == None or get.get('location') == None: - if not self.use_for_single_location: + if get is None or get.get('location') is None: + if self.use_for_single_location not in TRUE_DICT: #This is an error case, because it must be set to use a single location to be called without get parameters + #This is a dev_facing_error + log.error( + "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.") return {'html': "", 'success': False} problem_location = self.link_to_location @@ -462,7 +544,7 @@ class PeerGradingModule(XModule): # Checked above 'staff_access': False, 'use_single_location': self.use_for_single_location, - }) + }) return {'html': html, 'success': True} @@ -475,65 +557,19 @@ class PeerGradingModule(XModule): state = { 'student_data_for_location': self.student_data_for_location, - } + } return json.dumps(state) -class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor): +class PeerGradingDescriptor(PeerGradingFields, RawDescriptor): """ - Module for adding combined open ended questions + Module for adding peer grading questions """ - mako_template = "widgets/html-edit.html" + mako_template = "widgets/raw-edit.html" module_class = PeerGradingModule filename_extension = "xml" stores_state = True has_score = True template_dir_name = "peer_grading" - - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" - - @classmethod - def definition_from_xml(cls, xml_object, system): - """ - Pull out the individual tasks, the 
rubric, and the prompt, and parse - - Returns: - { - 'rubric': 'some-html', - 'prompt': 'some-html', - 'task_xml': dictionary of xml strings, - } - """ - log.debug("In definition") - expected_children = [] - for child in expected_children: - if len(xml_object.xpath(child)) == 0: - raise ValueError("Peer grading definition must include at least one '{0}' tag".format(child)) - - def parse_task(k): - """Assumes that xml_object has child k""" - return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))] - - def parse(k): - """Assumes that xml_object has child k""" - return xml_object.xpath(k)[0] - - return {} - - - def definition_to_xml(self, resource_fs): - '''Return an xml element representing this definition.''' - elt = etree.Element('peergrading') - - def add_child(k): - child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) - child_node = etree.fromstring(child_str) - elt.append(child_node) - - for child in ['task']: - add_child(child) - - return elt diff --git a/common/lib/xmodule/xmodule/plugin.py b/common/lib/xmodule/xmodule/plugin.py new file mode 100644 index 0000000000..5cf9c647aa --- /dev/null +++ b/common/lib/xmodule/xmodule/plugin.py @@ -0,0 +1,64 @@ +import pkg_resources +import logging + +log = logging.getLogger(__name__) + +class PluginNotFoundError(Exception): + pass + + +class Plugin(object): + """ + Base class for a system that uses entry_points to load plugins. + + Implementing classes are expected to have the following attributes: + + entry_point: The name of the entry point to load plugins from + """ + + _plugin_cache = None + + @classmethod + def load_class(cls, identifier, default=None): + """ + Loads a single class instance specified by identifier. If identifier + specifies more than a single class, then logs a warning and returns the + first class identified. + + If default is not None, will return default if no entry_point matching + identifier is found. 
Otherwise, will raise a ModuleMissingError + """ + if cls._plugin_cache is None: + cls._plugin_cache = {} + + if identifier not in cls._plugin_cache: + identifier = identifier.lower() + classes = list(pkg_resources.iter_entry_points( + cls.entry_point, name=identifier)) + + if len(classes) > 1: + log.warning("Found multiple classes for {entry_point} with " + "identifier {id}: {classes}. " + "Returning the first one.".format( + entry_point=cls.entry_point, + id=identifier, + classes=", ".join( + class_.module_name for class_ in classes))) + + if len(classes) == 0: + if default is not None: + return default + raise PluginNotFoundError(identifier) + + cls._plugin_cache[identifier] = classes[0].load() + return cls._plugin_cache[identifier] + + @classmethod + def load_classes(cls): + """ + Returns a list of containing the identifiers and their corresponding classes for all + of the available instances of this plugin + """ + return [(class_.name, class_.load()) + for class_ + in pkg_resources.iter_entry_points(cls.entry_point)] diff --git a/common/lib/xmodule/xmodule/poll_module.py b/common/lib/xmodule/xmodule/poll_module.py new file mode 100644 index 0000000000..0fb3bfb496 --- /dev/null +++ b/common/lib/xmodule/xmodule/poll_module.py @@ -0,0 +1,205 @@ +"""Poll module is ungraded xmodule used by students to +to do set of polls. + +On the client side we show: +If student does not yet anwered - Question with set of choices. +If student have answered - Question with statistics for each answers. + +Student can't change his answer. 
+""" + +import cgi +import json +import logging +from copy import deepcopy +from collections import OrderedDict + +from lxml import etree +from pkg_resources import resource_string + +from xmodule.x_module import XModule +from xmodule.stringify import stringify_children +from xmodule.mako_module import MakoModuleDescriptor +from xmodule.xml_module import XmlDescriptor +from xblock.core import Scope, String, Object, Boolean, List + +log = logging.getLogger(__name__) + + +class PollFields(object): + # Name of poll to use in links to this poll + display_name = String(help="Display name for this module", scope=Scope.settings) + + voted = Boolean(help="Whether this student has voted on the poll", scope=Scope.student_state, default=False) + poll_answer = String(help="Student answer", scope=Scope.student_state, default='') + poll_answers = Object(help="All possible answers for the poll fro other students", scope=Scope.content) + + answers = List(help="Poll answers from xml", scope=Scope.content, default=[]) + question = String(help="Poll question", scope=Scope.content, default='') + + +class PollModule(PollFields, XModule): + """Poll Module""" + js = { + 'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')], + 'js': [resource_string(__name__, 'js/src/poll/logme.js'), + resource_string(__name__, 'js/src/poll/poll.js'), + resource_string(__name__, 'js/src/poll/poll_main.js')] + } + css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]} + js_module_name = "Poll" + + def handle_ajax(self, dispatch, get): + """Ajax handler. + + Args: + dispatch: string request slug + get: dict request get parameters + + Returns: + json string + """ + if dispatch in self.poll_answers and not self.voted: + # FIXME: fix this, when xblock will support mutable types. + # Now we use this hack. 
+ temp_poll_answers = self.poll_answers + temp_poll_answers[dispatch] += 1 + self.poll_answers = temp_poll_answers + + self.voted = True + self.poll_answer = dispatch + return json.dumps({'poll_answers': self.poll_answers, + 'total': sum(self.poll_answers.values()), + 'callback': {'objectName': 'Conditional'} + }) + elif dispatch == 'get_state': + return json.dumps({'poll_answer': self.poll_answer, + 'poll_answers': self.poll_answers, + 'total': sum(self.poll_answers.values()) + }) + elif dispatch == 'reset_poll' and self.voted and \ + self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false': + self.voted = False + + # FIXME: fix this, when xblock will support mutable types. + # Now we use this hack. + temp_poll_answers = self.poll_answers + temp_poll_answers[self.poll_answer] -= 1 + self.poll_answers = temp_poll_answers + + self.poll_answer = '' + return json.dumps({'status': 'success'}) + else: # return error message + return json.dumps({'error': 'Unknown Command!'}) + + def get_html(self): + """Renders parameters to template.""" + params = { + 'element_id': self.location.html_id(), + 'element_class': self.location.category, + 'ajax_url': self.system.ajax_url, + 'configuration_json': self.dump_poll(), + } + self.content = self.system.render_template('poll.html', params) + return self.content + + def dump_poll(self): + """Dump poll information. + + Returns: + string - Serialize json. + """ + # FIXME: hack for resolving caching `default={}` during definition + # poll_answers field + if self.poll_answers is None: + self.poll_answers = {} + + answers_to_json = OrderedDict() + + # FIXME: fix this, when xblock support mutable types. + # Now we use this hack. + temp_poll_answers = self.poll_answers + + # Fill self.poll_answers, prepare data for template context. + for answer in self.answers: + # Set default count for answer = 0. 
+ if answer['id'] not in temp_poll_answers: + temp_poll_answers[answer['id']] = 0 + answers_to_json[answer['id']] = cgi.escape(answer['text']) + self.poll_answers = temp_poll_answers + + return json.dumps({'answers': answers_to_json, + 'question': cgi.escape(self.question), + # to show answered poll after reload: + 'poll_answer': self.poll_answer, + 'poll_answers': self.poll_answers if self.voted else {}, + 'total': sum(self.poll_answers.values()) if self.voted else 0, + 'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()}) + + +class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor): + _tag_name = 'poll_question' + _child_tag_name = 'answer' + + module_class = PollModule + template_dir_name = 'poll' + stores_state = True + + @classmethod + def definition_from_xml(cls, xml_object, system): + """Pull out the data into dictionary. + + Args: + xml_object: xml from file. + system: `system` object. + + Returns: + (definition, children) - tuple + definition - dict: + { + 'answers': , + 'question': + } + """ + # Check for presense of required tags in xml. 
+ if len(xml_object.xpath(cls._child_tag_name)) == 0: + raise ValueError("Poll_question definition must include \ + at least one 'answer' tag") + + xml_object_copy = deepcopy(xml_object) + answers = [] + for element_answer in xml_object_copy.findall(cls._child_tag_name): + answer_id = element_answer.get('id', None) + if answer_id: + answers.append({ + 'id': answer_id, + 'text': stringify_children(element_answer) + }) + xml_object_copy.remove(element_answer) + + definition = { + 'answers': answers, + 'question': stringify_children(xml_object_copy) + } + children = [] + + return (definition, children) + + def definition_to_xml(self, resource_fs): + """Return an xml element representing to this definition.""" + poll_str = '<{tag_name}>{text}'.format( + tag_name=self._tag_name, text=self.question) + xml_object = etree.fromstring(poll_str) + xml_object.set('display_name', self.display_name) + + def add_child(xml_obj, answer): + child_str = '<{tag_name} id="{id}">{text}'.format( + tag_name=self._child_tag_name, id=answer['id'], + text=answer['text']) + child_node = etree.fromstring(child_str) + xml_object.append(child_node) + + for answer in self.answers: + add_child(xml_object, answer) + + return xml_object diff --git a/common/lib/xmodule/xmodule/randomize_module.py b/common/lib/xmodule/xmodule/randomize_module.py index b336789193..6620ab3cf7 100644 --- a/common/lib/xmodule/xmodule/randomize_module.py +++ b/common/lib/xmodule/xmodule/randomize_module.py @@ -1,19 +1,19 @@ -import json import logging import random -from xmodule.mako_module import MakoModuleDescriptor from xmodule.x_module import XModule -from xmodule.xml_module import XmlDescriptor -from xmodule.modulestore import Location from xmodule.seq_module import SequenceDescriptor -from pkg_resources import resource_string +from xblock.core import Scope, Integer log = logging.getLogger('mitx.' 
+ __name__) -class RandomizeModule(XModule): +class RandomizeFields(object): + choice = Integer(help="Which random child was chosen", scope=Scope.student_state) + + +class RandomizeModule(RandomizeFields, XModule): """ Chooses a random child module. Chooses the same one every time for each student. @@ -35,30 +35,23 @@ class RandomizeModule(XModule): grading interaction is a tangle between super and subclasses of descriptors and modules. """ - - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) # NOTE: calling self.get_children() creates a circular reference-- # it calls get_child_descriptors() internally, but that doesn't work until # we've picked a choice num_choices = len(self.descriptor.get_children()) - self.choice = None - if instance_state is not None: - state = json.loads(instance_state) - self.choice = state.get('choice', None) - if self.choice > num_choices: - # Oops. Children changed. Reset. - self.choice = None + if self.choice > num_choices: + # Oops. Children changed. Reset. + self.choice = None if self.choice is None: # choose one based on the system seed, or randomly if that's not available if num_choices > 0: - if system.seed is not None: - self.choice = system.seed % num_choices + if self.system.seed is not None: + self.choice = self.system.seed % num_choices else: self.choice = random.randrange(0, num_choices) @@ -72,11 +65,6 @@ class RandomizeModule(XModule): self.child_descriptor = None self.child = None - - def get_instance_state(self): - return json.dumps({'choice': self.choice}) - - def get_child_descriptors(self): """ For grading--return just the chosen child. 
@@ -98,7 +86,7 @@ class RandomizeModule(XModule): return self.child.get_icon_class() if self.child else 'other' -class RandomizeDescriptor(SequenceDescriptor): +class RandomizeDescriptor(RandomizeFields, SequenceDescriptor): # the editing interface can be the same as for sequences -- just a container module_class = RandomizeModule @@ -107,6 +95,7 @@ class RandomizeDescriptor(SequenceDescriptor): stores_state = True def definition_to_xml(self, resource_fs): + xml_object = etree.Element('randomize') for child in self.get_children(): xml_object.append( diff --git a/common/lib/xmodule/xmodule/raw_module.py b/common/lib/xmodule/xmodule/raw_module.py index 4a2bfbceaf..2c6e157018 100644 --- a/common/lib/xmodule/xmodule/raw_module.py +++ b/common/lib/xmodule/xmodule/raw_module.py @@ -3,6 +3,7 @@ from xmodule.editing_module import XMLEditingDescriptor from xmodule.xml_module import XmlDescriptor import logging import sys +from xblock.core import String, Scope log = logging.getLogger(__name__) @@ -12,17 +13,19 @@ class RawDescriptor(XmlDescriptor, XMLEditingDescriptor): Module that provides a raw editing view of its data and children. It requires that the definition xml is valid. """ + data = String(help="XML data for the module", scope=Scope.content) + @classmethod def definition_from_xml(cls, xml_object, system): - return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')} + return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, [] def definition_to_xml(self, resource_fs): try: - return etree.fromstring(self.definition['data']) + return etree.fromstring(self.data) except etree.XMLSyntaxError as err: # Can't recover here, so just add some info and # re-raise - lines = self.definition['data'].split('\n') + lines = self.data.split('\n') line, offset = err.position msg = ("Unable to create xml for problem {loc}. 
" "Context: '{context}'".format( diff --git a/common/lib/xmodule/xmodule/schematic_module.py b/common/lib/xmodule/xmodule/schematic_module.py index 21dd33a897..d15d629c24 100644 --- a/common/lib/xmodule/xmodule/schematic_module.py +++ b/common/lib/xmodule/xmodule/schematic_module.py @@ -1,6 +1,6 @@ import json -from x_module import XModule, XModuleDescriptor +from .x_module import XModule, XModuleDescriptor class ModuleDescriptor(XModuleDescriptor): diff --git a/common/lib/xmodule/xmodule/seq_module.py b/common/lib/xmodule/xmodule/seq_module.py index 36011744f5..f8e982f1a0 100644 --- a/common/lib/xmodule/xmodule/seq_module.py +++ b/common/lib/xmodule/xmodule/seq_module.py @@ -8,6 +8,7 @@ from xmodule.xml_module import XmlDescriptor from xmodule.x_module import XModule from xmodule.progress import Progress from xmodule.exceptions import NotFoundError +from xblock.core import Integer, Scope from pkg_resources import resource_string log = logging.getLogger(__name__) @@ -17,7 +18,15 @@ log = logging.getLogger(__name__) class_priority = ['video', 'problem'] -class SequenceModule(XModule): +class SequenceFields(object): + has_children = True + + # NOTE: Position is 1-indexed. This is silly, but there are now student + # positions saved on prod, so it's not easy to fix. + position = Integer(help="Last tab viewed in this sequence", scope=Scope.student_state) + + +class SequenceModule(SequenceFields, XModule): ''' Layout module which lays out content in a temporal sequence ''' js = {'coffee': [resource_string(__name__, @@ -26,22 +35,13 @@ class SequenceModule(XModule): css = {'scss': [resource_string(__name__, 'css/sequence/display.scss')]} js_module_name = "Sequence" - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - # NOTE: Position is 1-indexed. 
This is silly, but there are now student - # positions saved on prod, so it's not easy to fix. - self.position = 1 - if instance_state is not None: - state = json.loads(instance_state) - if 'position' in state: - self.position = int(state['position']) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) # if position is specified in system, then use that instead - if system.get('position'): - self.position = int(system.get('position')) + if self.system.get('position'): + self.position = int(self.system.get('position')) self.rendered = False @@ -70,6 +70,11 @@ class SequenceModule(XModule): raise NotFoundError('Unexpected dispatch type') def render(self): + # If we're rendering this sequence, but no position is set yet, + # default the position to the first element + if self.position is None: + self.position = 1 + if self.rendered: return ## Returns a set of all types of all sub-children @@ -79,9 +84,9 @@ class SequenceModule(XModule): childinfo = { 'content': child.get_html(), 'title': "\n".join( - grand_child.display_name.strip() + grand_child.display_name for grand_child in child.get_children() - if 'display_name' in grand_child.metadata + if grand_child.display_name is not None ), 'progress_status': Progress.to_js_status_str(progress), 'progress_detail': Progress.to_js_detail_str(progress), @@ -89,7 +94,7 @@ class SequenceModule(XModule): 'id': child.id, } if childinfo['title'] == '': - childinfo['title'] = child.metadata.get('display_name', '') + childinfo['title'] = child.display_name_with_default contents.append(childinfo) params = {'items': contents, @@ -112,11 +117,11 @@ class SequenceModule(XModule): return new_class -class SequenceDescriptor(MakoModuleDescriptor, XmlDescriptor): +class SequenceDescriptor(SequenceFields, MakoModuleDescriptor, XmlDescriptor): mako_template = 'widgets/sequence-edit.html' module_class = SequenceModule - stores_state = True # For remembering where in the sequence the student is + stores_state = True 
# For remembering where in the sequence the student is js = {'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')]} js_module_name = "SequenceDescriptor" @@ -132,7 +137,7 @@ class SequenceDescriptor(MakoModuleDescriptor, XmlDescriptor): if system.error_tracker is not None: system.error_tracker("ERROR: " + str(e)) continue - return {'children': children} + return {}, children def definition_to_xml(self, resource_fs): xml_object = etree.Element('sequential') diff --git a/common/lib/xmodule/xmodule/stringify.py b/common/lib/xmodule/xmodule/stringify.py index 5a640e91b1..35587d3b09 100644 --- a/common/lib/xmodule/xmodule/stringify.py +++ b/common/lib/xmodule/xmodule/stringify.py @@ -1,4 +1,5 @@ -from itertools import chain +# -*- coding: utf-8 -*- + from lxml import etree diff --git a/common/lib/xmodule/xmodule/template_module.py b/common/lib/xmodule/xmodule/template_module.py index 5f376945eb..d79d2a163e 100644 --- a/common/lib/xmodule/xmodule/template_module.py +++ b/common/lib/xmodule/xmodule/template_module.py @@ -28,11 +28,6 @@ class CustomTagModule(XModule): More information given in the text """ - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - def get_html(self): return self.descriptor.rendered_html @@ -62,19 +57,15 @@ class CustomTagDescriptor(RawDescriptor): # cdodge: look up the template as a module template_loc = self.location._replace(category='custom_tag_template', name=template_name) - template_module = self.system.load_item(template_loc) - template_module_data = template_module.definition['data'] + template_module = modulestore().get_instance(system.course_id, template_loc) + template_module_data = template_module.data template = Template(template_module_data) return template.render(**params) - def __init__(self, system, definition, **kwargs): - '''Render and save 
the template for this descriptor instance''' - super(CustomTagDescriptor, self).__init__(system, definition, **kwargs) - @property def rendered_html(self): - return self.render_template(self.system, self.definition['data']) + return self.render_template(self.system, self.data) def export_to_file(self): """ diff --git a/common/lib/xmodule/xmodule/templates/annotatable/default.yaml b/common/lib/xmodule/xmodule/templates/annotatable/default.yaml new file mode 100644 index 0000000000..31dd489fb4 --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/annotatable/default.yaml @@ -0,0 +1,20 @@ +--- +metadata: + display_name: 'Annotation' +data: | + + +

              Enter your (optional) instructions for the exercise in HTML format.

              +

              Annotations are specified by an <annotation> tag which may may have the following attributes:

              +
                +
              • title (optional). Title of the annotation. Defaults to Commentary if omitted.
              • +
              • body (required). Text of the annotation.
              • +
              • problem (optional). Numeric index of the problem associated with this annotation. This is a zero-based index, so the first problem on the page would have problem="0".
              • +
              • highlight (optional). Possible values: yellow, red, orange, green, blue, or purple. Defaults to yellow if this attribute is omitted.
              • +
              + +

              Add your HTML with annotation spans here.

              +

              Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut sodales laoreet est, egestas gravida felis egestas nec. Aenean at volutpat erat. Cras commodo viverra nibh in aliquam.

              +

              Nulla facilisi. Pellentesque id vestibulum libero. Suspendisse potenti. Morbi scelerisque nisi vitae felis dictum mattis. Nam sit amet magna elit. Nullam volutpat cursus est, sit amet sagittis odio vulputate et. Curabitur euismod, orci in vulputate imperdiet, augue lorem tempor purus, id aliquet augue turpis a est. Aenean a sagittis libero. Praesent fringilla pretium magna, non condimentum risus elementum nec. Pellentesque faucibus elementum pharetra. Pellentesque vitae metus eros.

              + +children: [] diff --git a/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml new file mode 100644 index 0000000000..f2aba0e18b --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml @@ -0,0 +1,44 @@ +--- +metadata: + display_name: Open Ended Response + max_attempts: 1 + max_score: 1 + is_graded: False + version: 1 + display_name: Open Ended Response + skip_spelling_checks: False + accept_file_upload: False +data: | + + + + + Category 1 + + + + + + +

              Why is the sky blue?

              +
              + + + + + + + Enter essay here. + This is the answer. + {"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"} + + + +
              + + +children: [] diff --git a/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml b/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml new file mode 100644 index 0000000000..cb8e29dfa2 --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml @@ -0,0 +1,13 @@ +--- +metadata: + display_name: Peer Grading Interface + attempts: 1 + use_for_single_location: False + link_to_location: None + is_graded: False + max_grade: 1 +data: | + + + +children: [] diff --git a/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml b/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml index 434354e4c7..81cb9dc353 100644 --- a/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml @@ -1,7 +1,7 @@ --- metadata: display_name: Problem Written in LaTeX - source_processor_url: https://qisx.mit.edu:5443/latex2edx + source_processor_url: https://studio-input-filter.mitx.mit.edu/latex2edx source_code: | % Nearly any kind of edX problem can be authored using Latex as % the source language. Write latex as usual, including equations. The diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index 04e7ee19b1..1a10654f6c 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -19,20 +19,45 @@ import xmodule from xmodule.x_module import ModuleSystem from mock import Mock -test_system = ModuleSystem( - ajax_url='courses/course_id/modx/a_location', - track_function=Mock(), - get_module=Mock(), - # "render" to just the context... 
- render_template=lambda template, context: str(context), - replace_urls=Mock(), - user=Mock(is_staff=False), - filestore=Mock(), - debug=True, - xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10}, - node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), - anonymous_student_id='student' -) +open_ended_grading_interface = { + 'url': 'http://sandbox-grader-001.m.edx.org/peer_grading', + 'username': 'incorrect_user', + 'password': 'incorrect_pass', + 'staff_grading' : 'staff_grading', + 'peer_grading' : 'peer_grading', + 'grading_controller' : 'grading_controller' + } + + +def test_system(): + """ + Construct a test ModuleSystem instance. + + By default, the render_template() method simply returns + the context it is passed as a string. + You can override this behavior by monkey patching: + + system = test_system() + system.render_template = my_render_func + + where my_render_func is a function of the form + my_render_func(template, context) + """ + return ModuleSystem( + ajax_url='courses/course_id/modx/a_location', + track_function=Mock(), + get_module=Mock(), + render_template=lambda template, context: str(context), + replace_urls=lambda html: str(html), + user=Mock(is_staff=False), + filestore=Mock(), + debug=True, + xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10}, + node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), + xblock_model_data=lambda descriptor: descriptor._model_data, + anonymous_student_id='student', + open_ended_grading_interface= open_ended_grading_interface + ) class ModelsTest(unittest.TestCase): diff --git a/common/lib/xmodule/xmodule/tests/test_annotatable_module.py b/common/lib/xmodule/xmodule/tests/test_annotatable_module.py new file mode 100644 index 0000000000..43eae8e43e --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_annotatable_module.py @@ -0,0 +1,127 @@ +"""Module annotatable tests""" + +import 
unittest + +from lxml import etree +from mock import Mock + +from xmodule.annotatable_module import AnnotatableModule +from xmodule.modulestore import Location + +from . import test_system + +class AnnotatableModuleTestCase(unittest.TestCase): + location = Location(["i4x", "edX", "toy", "annotatable", "guided_discussion"]) + sample_xml = ''' + + Read the text. +

              + Sing, + O goddess, + the anger of Achilles son of Peleus, + that brought countless ills upon the Achaeans. Many a brave soul did it send + hurrying down to Hades, and many a hero did it yield a prey to dogs and +

              vultures, for so were the counsels + of Jove fulfilled from the day on which the son of Atreus, king of men, and great + Achilles, first fell out with one another.
              +

              + The Iliad of Homer by Samuel Butler +
              + ''' + descriptor = Mock() + module_data = {'data': sample_xml} + + def setUp(self): + self.annotatable = AnnotatableModule(test_system(), self.location, self.descriptor, self.module_data) + + def test_annotation_data_attr(self): + el = etree.fromstring('test') + + expected_attr = { + 'data-comment-body': {'value': 'foo', '_delete': 'body' }, + 'data-comment-title': {'value': 'bar', '_delete': 'title'}, + 'data-problem-id': {'value': '0', '_delete': 'problem'} + } + + actual_attr = self.annotatable._get_annotation_data_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_default(self): + xml = 'test' + el = etree.fromstring(xml) + + expected_attr = { 'class': { 'value': 'annotatable-span highlight' } } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_with_valid_highlight(self): + xml = 'test' + + for color in self.annotatable.highlight_colors: + el = etree.fromstring(xml.format(highlight=color)) + value = 'annotatable-span highlight highlight-{highlight}'.format(highlight=color) + + expected_attr = { 'class': { + 'value': value, + '_delete': 'highlight' } + } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_with_invalid_highlight(self): + xml = 'test' + + for invalid_color in ['rainbow', 'blink', 'invisible', '', None]: + el = etree.fromstring(xml.format(highlight=invalid_color)) + expected_attr = { 'class': { + 'value': 'annotatable-span highlight', + '_delete': 'highlight' } + } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def 
test_render_annotation(self): + expected_html = 'z' + expected_el = etree.fromstring(expected_html) + + actual_el = etree.fromstring('z') + self.annotatable._render_annotation(0, actual_el) + + self.assertEqual(expected_el.tag, actual_el.tag) + self.assertEqual(expected_el.text, actual_el.text) + self.assertDictEqual(dict(expected_el.attrib), dict(actual_el.attrib)) + + def test_render_content(self): + content = self.annotatable._render_content() + el = etree.fromstring(content) + + self.assertEqual('div', el.tag, 'root tag is a div') + + expected_num_annotations = 5 + actual_num_annotations = el.xpath('count(//span[contains(@class,"annotatable-span")])') + self.assertEqual(expected_num_annotations, actual_num_annotations, 'check number of annotations') + + def test_get_html(self): + context = self.annotatable.get_html() + for key in ['display_name', 'element_id', 'content_html', 'instructions_html']: + self.assertIn(key, context) + + def test_extract_instructions(self): + xmltree = etree.fromstring(self.sample_xml) + + expected_xml = u"
              Read the text.
              " + actual_xml = self.annotatable._extract_instructions(xmltree) + self.assertIsNotNone(actual_xml) + self.assertEqual(expected_xml.strip(), actual_xml.strip()) + + xmltree = etree.fromstring('foo') + actual = self.annotatable._extract_instructions(xmltree) + self.assertIsNone(actual) diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py index a22fcdb5f6..d2458cb3d0 100644 --- a/common/lib/xmodule/xmodule/tests/test_capa_module.py +++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py @@ -1,13 +1,18 @@ import datetime import json -from mock import Mock +from mock import Mock, MagicMock, patch from pprint import pprint import unittest +import random +import xmodule +import capa from xmodule.capa_module import CapaModule from xmodule.modulestore import Location from lxml import etree +from django.http import QueryDict + from . import test_system @@ -33,6 +38,18 @@ class CapaFactory(object): CapaFactory.num += 1 return CapaFactory.num + @staticmethod + def input_key(): + """ Return the input key to use when passing GET parameters """ + return ("input_" + CapaFactory.answer_key()) + + @staticmethod + def answer_key(): + """ Return the key stored in the capa problem answer dict """ + return ("-".join(['i4x', 'edX', 'capa_test', 'problem', + 'SampleProblem%d' % CapaFactory.num]) + + "_2_1") + @staticmethod def create(graceperiod=None, due=None, @@ -42,6 +59,8 @@ class CapaFactory(object): force_save_button=None, attempts=None, problem_state=None, + correct=False, + done=None ): """ All parameters are optional, and are added to the created problem if specified. @@ -59,40 +78,42 @@ class CapaFactory(object): attempts: also added to instance state. Will be converted to an int. 
""" - definition = {'data': CapaFactory.sample_problem_xml, } location = Location(["i4x", "edX", "capa_test", "problem", "SampleProblem{0}".format(CapaFactory.next_num())]) - metadata = {} - if graceperiod is not None: - metadata['graceperiod'] = graceperiod - if due is not None: - metadata['due'] = due - if max_attempts is not None: - metadata['attempts'] = max_attempts - if showanswer is not None: - metadata['showanswer'] = showanswer - if force_save_button is not None: - metadata['force_save_button'] = force_save_button - if rerandomize is not None: - metadata['rerandomize'] = rerandomize + model_data = {'data': CapaFactory.sample_problem_xml} + if graceperiod is not None: + model_data['graceperiod'] = graceperiod + if due is not None: + model_data['due'] = due + if max_attempts is not None: + model_data['max_attempts'] = max_attempts + if showanswer is not None: + model_data['showanswer'] = showanswer + if force_save_button is not None: + model_data['force_save_button'] = force_save_button + if rerandomize is not None: + model_data['rerandomize'] = rerandomize + if done is not None: + model_data['done'] = done descriptor = Mock(weight="1") - instance_state_dict = {} if problem_state is not None: - instance_state_dict = problem_state + model_data.update(problem_state) if attempts is not None: # converting to int here because I keep putting "0" and "1" in the tests # since everything else is a string. - instance_state_dict['attempts'] = int(attempts) - if len(instance_state_dict) > 0: - instance_state = json.dumps(instance_state_dict) - else: - instance_state = None + model_data['attempts'] = int(attempts) - module = CapaModule(test_system, location, - definition, descriptor, - instance_state, None, metadata=metadata) + system = test_system() + system.render_template = Mock(return_value="
              Test Template HTML
              ") + module = CapaModule(system, location, descriptor, model_data) + + if correct: + # TODO: probably better to actually set the internal state properly, but... + module.get_score = lambda: {'score': 1, 'total': 1} + else: + module.get_score = lambda: {'score': 0, 'total': 1} return module @@ -100,7 +121,6 @@ class CapaFactory(object): class CapaModuleTest(unittest.TestCase): - def setUp(self): now = datetime.datetime.now() day_delta = datetime.timedelta(days=1) @@ -120,6 +140,20 @@ class CapaModuleTest(unittest.TestCase): self.assertNotEqual(module.url_name, other_module.url_name, "Factory should be creating unique names for each problem") + + + + def test_correct(self): + """ + Check that the factory creates correct and incorrect problems properly. + """ + module = CapaFactory.create() + self.assertEqual(module.get_score()['score'], 0) + + other_module = CapaFactory.create(correct=True) + self.assertEqual(other_module.get_score()['score'], 1) + + def test_showanswer_default(self): """ Make sure the show answer logic does the right thing. @@ -152,6 +186,7 @@ class CapaModuleTest(unittest.TestCase): max_attempts="1", attempts="0", due=self.yesterday_str) + self.assertTrue(after_due_date.answer_available()) @@ -178,7 +213,7 @@ class CapaModuleTest(unittest.TestCase): for everyone--e.g. after due date + grace period. """ - # can see after attempts used up, even with due date in the future + # can't see after attempts used up, even with due date in the future used_all_attempts = CapaFactory.create(showanswer='past_due', max_attempts="1", attempts="1", @@ -209,3 +244,618 @@ class CapaModuleTest(unittest.TestCase): due=self.yesterday_str, graceperiod=self.two_day_delta_str) self.assertFalse(still_in_grace.answer_available()) + + def test_showanswer_finished(self): + """ + With showanswer="finished" should show answer after the problem is closed, + or after the answer is correct. 
+ """ + + # can see after attempts used up, even with due date in the future + used_all_attempts = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="1", + due=self.tomorrow_str) + self.assertTrue(used_all_attempts.answer_available()) + + + # can see after due date + past_due_date = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="0", + due=self.yesterday_str) + self.assertTrue(past_due_date.answer_available()) + + + # can't see because attempts left and wrong + attempts_left_open = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="0", + due=self.tomorrow_str) + self.assertFalse(attempts_left_open.answer_available()) + + # _can_ see because attempts left and right + correct_ans = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="0", + due=self.tomorrow_str, + correct=True) + self.assertTrue(correct_ans.answer_available()) + + + # Can see even though grace period hasn't expired, because have no more + # attempts. 
+ still_in_grace = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="1", + due=self.yesterday_str, + graceperiod=self.two_day_delta_str) + self.assertTrue(still_in_grace.answer_available()) + + + def test_closed(self): + + # Attempts < Max attempts --> NOT closed + module = CapaFactory.create(max_attempts="1", attempts="0") + self.assertFalse(module.closed()) + + # Attempts < Max attempts --> NOT closed + module = CapaFactory.create(max_attempts="2", attempts="1") + self.assertFalse(module.closed()) + + # Attempts = Max attempts --> closed + module = CapaFactory.create(max_attempts="1", attempts="1") + self.assertTrue(module.closed()) + + # Attempts > Max attempts --> closed + module = CapaFactory.create(max_attempts="1", attempts="2") + self.assertTrue(module.closed()) + + # Max attempts = 0 --> closed + module = CapaFactory.create(max_attempts="0", attempts="2") + self.assertTrue(module.closed()) + + # Past due --> closed + module = CapaFactory.create(max_attempts="1", attempts="0", + due=self.yesterday_str) + self.assertTrue(module.closed()) + + + def test_parse_get_params(self): + + # We have to set up Django settings in order to use QueryDict + from django.conf import settings + settings.configure() + + # Valid GET param dict + valid_get_dict = self._querydict_from_dict({'input_1': 'test', + 'input_1_2': 'test', + 'input_1_2_3': 'test', + 'input_[]_3': 'test', + 'input_4': None, + 'input_5': [], + 'input_6': 5}) + + result = CapaModule.make_dict_of_responses(valid_get_dict) + + # Expect that we get a dict with "input" stripped from key names + # and that we get the same values back + for key in result.keys(): + original_key = "input_" + key + self.assertTrue(original_key in valid_get_dict, + "Output dict should have key %s" % original_key) + self.assertEqual(valid_get_dict[original_key], result[key]) + + + # Valid GET param dict with list keys + valid_get_dict = self._querydict_from_dict({'input_2[]': ['test1', 'test2']}) + result = 
CapaModule.make_dict_of_responses(valid_get_dict) + self.assertTrue('2' in result) + self.assertEqual(['test1', 'test2'], result['2']) + + # If we use [] at the end of a key name, we should always + # get a list, even if there's just one value + valid_get_dict = self._querydict_from_dict({'input_1[]': 'test'}) + result = CapaModule.make_dict_of_responses(valid_get_dict) + self.assertEqual(result['1'], ['test']) + + # If we have no underscores in the name, then the key is invalid + invalid_get_dict = self._querydict_from_dict({'input': 'test'}) + with self.assertRaises(ValueError): + result = CapaModule.make_dict_of_responses(invalid_get_dict) + + + # Two equivalent names (one list, one non-list) + # One of the values would overwrite the other, so detect this + # and raise an exception + invalid_get_dict = self._querydict_from_dict({'input_1[]': 'test 1', + 'input_1': 'test 2'}) + with self.assertRaises(ValueError): + result = CapaModule.make_dict_of_responses(invalid_get_dict) + + def _querydict_from_dict(self, param_dict): + """ Create a Django QueryDict from a Python dictionary """ + + # QueryDict objects are immutable by default, so we make + # a copy that we can update. 
+ querydict = QueryDict('') + copyDict = querydict.copy() + + for (key, val) in param_dict.items(): + + # QueryDicts handle lists differently from ordinary values, + # so we have to specifically tell the QueryDict that + # this is a list + if type(val) is list: + copyDict.setlist(key, val) + else: + copyDict[key] = val + + return copyDict + + + def test_check_problem_correct(self): + + module = CapaFactory.create(attempts=1) + + # Simulate that all answers are marked correct, no matter + # what the input is, by patching CorrectMap.is_correct() + # Also simulate rendering the HTML + with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct,\ + patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html: + mock_is_correct.return_value = True + mock_html.return_value = "Test HTML" + + # Check the problem + get_request_dict = { CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + # Expect that the problem is marked correct + self.assertEqual(result['success'], 'correct') + + # Expect that we get the (mocked) HTML + self.assertEqual(result['contents'], 'Test HTML') + + # Expect that the number of attempts is incremented by 1 + self.assertEqual(module.attempts, 2) + + def test_check_problem_incorrect(self): + + module = CapaFactory.create(attempts=0) + + # Simulate marking the input incorrect + with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct: + mock_is_correct.return_value = False + + # Check the problem + get_request_dict = { CapaFactory.input_key(): '0'} + result = module.check_problem(get_request_dict) + + # Expect that the problem is marked correct + self.assertEqual(result['success'], 'incorrect') + + # Expect that the number of attempts is incremented by 1 + self.assertEqual(module.attempts, 1) + + + def test_check_problem_closed(self): + module = CapaFactory.create(attempts=3) + + # Problem closed -- cannot submit + # Simulate that CapaModule.closed() always returns True + with 
patch('xmodule.capa_module.CapaModule.closed') as mock_closed: + mock_closed.return_value = True + with self.assertRaises(xmodule.exceptions.NotFoundError): + get_request_dict = { CapaFactory.input_key(): '3.14'} + module.check_problem(get_request_dict) + + # Expect that number of attempts NOT incremented + self.assertEqual(module.attempts, 3) + + def test_check_problem_resubmitted_with_randomize(self): + # Randomize turned on + module = CapaFactory.create(rerandomize='always', attempts=0) + + # Simulate that the problem is completed + module.done = True + + # Expect that we cannot submit + with self.assertRaises(xmodule.exceptions.NotFoundError): + get_request_dict = {CapaFactory.input_key(): '3.14'} + module.check_problem(get_request_dict) + + # Expect that number of attempts NOT incremented + self.assertEqual(module.attempts, 0) + + def test_check_problem_resubmitted_no_randomize(self): + # Randomize turned off + module = CapaFactory.create(rerandomize='never', attempts=0, done=True) + + # Expect that we can submit successfully + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + self.assertEqual(result['success'], 'correct') + + # Expect that number of attempts IS incremented + self.assertEqual(module.attempts, 1) + + def test_check_problem_queued(self): + module = CapaFactory.create(attempts=1) + + # Simulate that the problem is queued + with patch('capa.capa_problem.LoncapaProblem.is_queued') \ + as mock_is_queued,\ + patch('capa.capa_problem.LoncapaProblem.get_recentmost_queuetime') \ + as mock_get_queuetime: + + mock_is_queued.return_value = True + mock_get_queuetime.return_value = datetime.datetime.now() + + get_request_dict = { CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + # Expect an AJAX alert message in 'success' + self.assertTrue('You must wait' in result['success']) + + # Expect that the number of attempts is NOT incremented + 
self.assertEqual(module.attempts, 1) + + + def test_check_problem_student_input_error(self): + module = CapaFactory.create(attempts=1) + + # Simulate a student input exception + with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade: + mock_grade.side_effect = capa.responsetypes.StudentInputError('test error') + + get_request_dict = { CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + # Expect an AJAX alert message in 'success' + self.assertTrue('test error' in result['success']) + + # Expect that the number of attempts is NOT incremented + self.assertEqual(module.attempts, 1) + + + def test_reset_problem(self): + module = CapaFactory.create(done=True) + module.new_lcp = Mock(wraps=module.new_lcp) + + # Stub out HTML rendering + with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html: + mock_html.return_value = "
              Test HTML
              " + + # Reset the problem + get_request_dict = {} + result = module.reset_problem(get_request_dict) + + # Expect that the request was successful + self.assertTrue('success' in result and result['success']) + + # Expect that the problem HTML is retrieved + self.assertTrue('html' in result) + self.assertEqual(result['html'], "
              Test HTML
              ") + + # Expect that the problem was reset + module.new_lcp.assert_called_once_with({'seed': None}) + + + def test_reset_problem_closed(self): + module = CapaFactory.create() + + # Simulate that the problem is closed + with patch('xmodule.capa_module.CapaModule.closed') as mock_closed: + mock_closed.return_value = True + + # Try to reset the problem + get_request_dict = {} + result = module.reset_problem(get_request_dict) + + # Expect that the problem was NOT reset + self.assertTrue('success' in result and not result['success']) + + + def test_reset_problem_not_done(self): + # Simulate that the problem is NOT done + module = CapaFactory.create(done=False) + + # Try to reset the problem + get_request_dict = {} + result = module.reset_problem(get_request_dict) + + # Expect that the problem was NOT reset + self.assertTrue('success' in result and not result['success']) + + + def test_save_problem(self): + module = CapaFactory.create(done=False) + + # Save the problem + get_request_dict = { CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that answers are saved to the problem + expected_answers = { CapaFactory.answer_key(): '3.14'} + self.assertEqual(module.lcp.student_answers, expected_answers) + + # Expect that the result is success + self.assertTrue('success' in result and result['success']) + + + def test_save_problem_closed(self): + module = CapaFactory.create(done=False) + + # Simulate that the problem is closed + with patch('xmodule.capa_module.CapaModule.closed') as mock_closed: + mock_closed.return_value = True + + # Try to save the problem + get_request_dict = { CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that the result is failure + self.assertTrue('success' in result and not result['success']) + + + def test_save_problem_submitted_with_randomize(self): + module = CapaFactory.create(rerandomize='always', done=True) + + # Try to save + 
get_request_dict = { CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that we cannot save + self.assertTrue('success' in result and not result['success']) + + + def test_save_problem_submitted_no_randomize(self): + module = CapaFactory.create(rerandomize='never', done=True) + + # Try to save + get_request_dict = { CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that we succeed + self.assertTrue('success' in result and result['success']) + + def test_check_button_name(self): + + # If last attempt, button name changes to "Final Check" + # Just in case, we also check what happens if we have + # more attempts than allowed. + attempts = random.randint(1, 10) + module = CapaFactory.create(attempts=attempts -1, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Final Check") + + module = CapaFactory.create(attempts=attempts, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Final Check") + + module = CapaFactory.create(attempts=attempts + 1, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Final Check") + + # Otherwise, button name is "Check" + module = CapaFactory.create(attempts=attempts -2, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Check") + + module = CapaFactory.create(attempts=attempts -3, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Check") + + # If no limit on attempts, then always show "Check" + module = CapaFactory.create(attempts=attempts -3) + self.assertEqual(module.check_button_name(), "Check") + + module = CapaFactory.create(attempts=0) + self.assertEqual(module.check_button_name(), "Check") + + def test_should_show_check_button(self): + + attempts = random.randint(1, 10) + + # If we're after the deadline, do NOT show check button + module = CapaFactory.create(due=self.yesterday_str) + self.assertFalse(module.should_show_check_button()) + + # If user 
is out of attempts, do NOT show the check button + module = CapaFactory.create(attempts=attempts, max_attempts=attempts) + self.assertFalse(module.should_show_check_button()) + + # If survey question (max_attempts = 0), do NOT show the check button + module = CapaFactory.create(max_attempts=0) + self.assertFalse(module.should_show_check_button()) + + # If user submitted a problem but hasn't reset, + # do NOT show the check button + # Note: we can only reset when rerandomize="always" + module = CapaFactory.create(rerandomize="always", done=True) + self.assertFalse(module.should_show_check_button()) + + # Otherwise, DO show the check button + module = CapaFactory.create() + self.assertTrue(module.should_show_check_button()) + + # If the user has submitted the problem + # and we do NOT have a reset button, then we can show the check button + # Setting rerandomize to "never" ensures that the reset button + # is not shown + module = CapaFactory.create(rerandomize="never", done=True) + self.assertTrue(module.should_show_check_button()) + + + def test_should_show_reset_button(self): + + attempts = random.randint(1, 10) + + # If we're after the deadline, do NOT show the reset button + module = CapaFactory.create(due=self.yesterday_str, done=True) + self.assertFalse(module.should_show_reset_button()) + + # If the user is out of attempts, do NOT show the reset button + module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True) + self.assertFalse(module.should_show_reset_button()) + + # If we're NOT randomizing, then do NOT show the reset button + module = CapaFactory.create(rerandomize="never", done=True) + self.assertFalse(module.should_show_reset_button()) + + # If the user hasn't submitted an answer yet, + # then do NOT show the reset button + module = CapaFactory.create(done=False) + self.assertFalse(module.should_show_reset_button()) + + # Otherwise, DO show the reset button + module = CapaFactory.create(done=True) + 
self.assertTrue(module.should_show_reset_button()) + + # If survey question for capa (max_attempts = 0), + # DO show the reset button + module = CapaFactory.create(max_attempts=0, done=True) + self.assertTrue(module.should_show_reset_button()) + + + def test_should_show_save_button(self): + + attempts = random.randint(1, 10) + + # If we're after the deadline, do NOT show the save button + module = CapaFactory.create(due=self.yesterday_str, done=True) + self.assertFalse(module.should_show_save_button()) + + # If the user is out of attempts, do NOT show the save button + module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True) + self.assertFalse(module.should_show_save_button()) + + # If user submitted a problem but hasn't reset, do NOT show the save button + module = CapaFactory.create(rerandomize="always", done=True) + self.assertFalse(module.should_show_save_button()) + + # If the user has unlimited attempts and we are not randomizing, + # then do NOT show a save button + # because they can keep using "Check" + module = CapaFactory.create(max_attempts=None, rerandomize="never", done=False) + self.assertFalse(module.should_show_save_button()) + + module = CapaFactory.create(max_attempts=None, rerandomize="never", done=True) + self.assertFalse(module.should_show_save_button()) + + # Otherwise, DO show the save button + module = CapaFactory.create(done=False) + self.assertTrue(module.should_show_save_button()) + + # If we're not randomizing and we have limited attempts, then we can save + module = CapaFactory.create(rerandomize="never", max_attempts=2, done=True) + self.assertTrue(module.should_show_save_button()) + + # If survey question for capa (max_attempts = 0), + # DO show the save button + module = CapaFactory.create(max_attempts=0, done=False) + self.assertTrue(module.should_show_save_button()) + + def test_should_show_save_button_force_save_button(self): + # If we're after the deadline, do NOT show the save button + # even though 
we're forcing a save + module = CapaFactory.create(due=self.yesterday_str, + force_save_button="true", + done=True) + self.assertFalse(module.should_show_save_button()) + + # If the user is out of attempts, do NOT show the save button + attempts = random.randint(1, 10) + module = CapaFactory.create(attempts=attempts, + max_attempts=attempts, + force_save_button="true", + done=True) + self.assertFalse(module.should_show_save_button()) + + # Otherwise, if we force the save button, + # then show it even if we would ordinarily + # require a reset first + module = CapaFactory.create(force_save_button="true", + rerandomize="always", + done=True) + self.assertTrue(module.should_show_save_button()) + + def test_no_max_attempts(self): + module = CapaFactory.create(max_attempts='') + html = module.get_problem_html() + # assert that we got here without exploding + + + def test_get_problem_html(self): + module = CapaFactory.create() + + # We've tested the show/hide button logic in other tests, + # so here we hard-wire the values + show_check_button = bool(random.randint(0, 1) % 2) + show_reset_button = bool(random.randint(0, 1) % 2) + show_save_button = bool(random.randint(0, 1) % 2) + + module.should_show_check_button = Mock(return_value=show_check_button) + module.should_show_reset_button = Mock(return_value=show_reset_button) + module.should_show_save_button = Mock(return_value=show_save_button) + + # Mock the system rendering function + module.system.render_template = Mock(return_value="
              Test Template HTML
              ") + + # Patch the capa problem's HTML rendering + with patch('capa.capa_problem.LoncapaProblem.get_html') as mock_html: + mock_html.return_value = "
              Test Problem HTML
              " + + # Render the problem HTML + html = module.get_problem_html(encapsulate=False) + + # Also render the problem encapsulated in a
              + html_encapsulated = module.get_problem_html(encapsulate=True) + + # Expect that we get the rendered template back + self.assertEqual(html, "
              Test Template HTML
              ") + + # Check the rendering context + render_args, _ = module.system.render_template.call_args + self.assertEqual(len(render_args), 2) + + template_name = render_args[0] + self.assertEqual(template_name, "problem.html") + + context = render_args[1] + self.assertEqual(context['problem']['html'], "
              Test Problem HTML
              ") + self.assertEqual(bool(context['check_button']), show_check_button) + self.assertEqual(bool(context['reset_button']), show_reset_button) + self.assertEqual(bool(context['save_button']), show_save_button) + + # Assert that the encapsulated html contains the original html + self.assertTrue(html in html_encapsulated) + + + def test_get_problem_html_error(self): + """ + In production, when an error occurs with the problem HTML + rendering, a "dummy" problem is created with an error + message to display to the user. + """ + module = CapaFactory.create() + + # Save the original problem so we can compare it later + original_problem = module.lcp + + # Simulate throwing an exception when the capa problem + # is asked to render itself as HTML + module.lcp.get_html = Mock(side_effect=Exception("Test")) + + # Stub out the test_system rendering function + module.system.render_template = Mock(return_value="
              Test Template HTML
              ") + + # Turn off DEBUG + module.system.DEBUG = False + + # Try to render the module with DEBUG turned off + html = module.get_problem_html() + + # Check the rendering context + render_args, _ = module.system.render_template.call_args + context = render_args[1] + self.assertTrue("error" in context['problem']['html']) + + # Expect that the module has created a new dummy problem with the error + self.assertNotEqual(original_problem, module.lcp) diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index c2b27e4953..09c86baf27 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -2,9 +2,9 @@ import json from mock import Mock, MagicMock, ANY import unittest -from xmodule.openendedchild import OpenEndedChild -from xmodule.open_ended_module import OpenEndedModule -from xmodule.combined_open_ended_modulev1 import CombinedOpenEndedV1Module +from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild +from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule +from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module from xmodule.modulestore import Location from lxml import etree @@ -12,6 +12,9 @@ import capa.xqueue_interface as xqueue_interface from datetime import datetime from . 
import test_system + +import test_util_open_ended + """ Tests for the various pieces of the CombinedOpenEndedGrading system @@ -37,37 +40,38 @@ class OpenEndedChildTest(unittest.TestCase): max_score = 1 static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'close_date': None - } + 'max_attempts': 20, + 'prompt': prompt, + 'rubric': rubric, + 'max_score': max_score, + 'display_name': 'Name', + 'accept_file_upload': False, + 'close_date': None, + 's3_interface': "", + 'open_ended_grading_interface': {}, + 'skip_basic_checks': False, + } definition = Mock() descriptor = Mock() def setUp(self): - self.openendedchild = OpenEndedChild(test_system, self.location, - self.definition, self.descriptor, self.static_data, self.metadata) + self.test_system = test_system() + self.openendedchild = OpenEndedChild(self.test_system, self.location, + self.definition, self.descriptor, self.static_data, self.metadata) def test_latest_answer_empty(self): answer = self.openendedchild.latest_answer() self.assertEqual(answer, "") - def test_latest_score_empty(self): answer = self.openendedchild.latest_score() self.assertEqual(answer, None) - def test_latest_post_assessment_empty(self): - answer = self.openendedchild.latest_post_assessment(test_system) + answer = self.openendedchild.latest_post_assessment(self.test_system) self.assertEqual(answer, "") - def test_new_history_entry(self): new_answer = "New Answer" self.openendedchild.new_history_entry(new_answer) @@ -93,7 +97,6 @@ class OpenEndedChildTest(unittest.TestCase): score = self.openendedchild.latest_score() self.assertEqual(score, 4) - def test_record_latest_post_assessment(self): new_answer = "New Answer" self.openendedchild.new_history_entry(new_answer) @@ -101,7 +104,7 @@ class OpenEndedChildTest(unittest.TestCase): post_assessment = "Post assessment" self.openendedchild.record_latest_post_assessment(post_assessment) 
self.assertEqual(post_assessment, - self.openendedchild.latest_post_assessment(test_system)) + self.openendedchild.latest_post_assessment(self.test_system)) def test_get_score(self): new_answer = "New Answer" @@ -118,24 +121,22 @@ class OpenEndedChildTest(unittest.TestCase): self.assertEqual(score['score'], new_score) self.assertEqual(score['total'], self.static_data['max_score']) - def test_reset(self): - self.openendedchild.reset(test_system) + self.openendedchild.reset(self.test_system) state = json.loads(self.openendedchild.get_instance_state()) - self.assertEqual(state['state'], OpenEndedChild.INITIAL) - + self.assertEqual(state['child_state'], OpenEndedChild.INITIAL) def test_is_last_response_correct(self): new_answer = "New Answer" self.openendedchild.new_history_entry(new_answer) self.openendedchild.record_latest_score(self.static_data['max_score']) self.assertEqual(self.openendedchild.is_last_response_correct(), - 'correct') + 'correct') self.openendedchild.new_history_entry(new_answer) self.openendedchild.record_latest_score(0) self.assertEqual(self.openendedchild.is_last_response_correct(), - 'incorrect') + 'incorrect') class OpenEndedModuleTest(unittest.TestCase): @@ -153,15 +154,18 @@ class OpenEndedModuleTest(unittest.TestCase): max_score = 4 static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'rewrite_content_links' : "", - 'close_date': None, - } + 'max_attempts': 20, + 'prompt': prompt, + 'rubric': rubric, + 'max_score': max_score, + 'display_name': 'Name', + 'accept_file_upload': False, + 'rewrite_content_links': "", + 'close_date': None, + 's3_interface': test_util_open_ended.S3_INTERFACE, + 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, + 'skip_basic_checks': False, + } oeparam = etree.XML(''' @@ -174,90 +178,93 @@ class OpenEndedModuleTest(unittest.TestCase): descriptor = Mock() def setUp(self): - 
test_system.location = self.location + self.test_system = test_system() + + self.test_system.location = self.location self.mock_xqueue = MagicMock() self.mock_xqueue.send_to_queue.return_value = (None, "Message") - test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 1} - self.openendedmodule = OpenEndedModule(test_system, self.location, - self.definition, self.descriptor, self.static_data, self.metadata) + self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue', + 'waittime': 1} + self.openendedmodule = OpenEndedModule(self.test_system, self.location, + self.definition, self.descriptor, self.static_data, self.metadata) def test_message_post(self): get = {'feedback': 'feedback text', - 'submission_id': '1', - 'grader_id': '1', - 'score': 3} + 'submission_id': '1', + 'grader_id': '1', + 'score': 3} qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) - student_info = {'anonymous_student_id': test_system.anonymous_student_id, - 'submission_time': qtime} + student_info = {'anonymous_student_id': self.test_system.anonymous_student_id, + 'submission_time': qtime} contents = { - 'feedback': get['feedback'], - 'submission_id': int(get['submission_id']), - 'grader_id': int(get['grader_id']), - 'score': get['score'], - 'student_info': json.dumps(student_info) - } + 'feedback': get['feedback'], + 'submission_id': int(get['submission_id']), + 'grader_id': int(get['grader_id']), + 'score': get['score'], + 'student_info': json.dumps(student_info) + } - result = self.openendedmodule.message_post(get, test_system) + result = self.openendedmodule.message_post(get, self.test_system) self.assertTrue(result['success']) # make sure it's actually sending something we want to the queue self.mock_xqueue.send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY) state = json.loads(self.openendedmodule.get_instance_state()) - 
self.assertIsNotNone(state['state'], OpenEndedModule.DONE) + self.assertIsNotNone(state['child_state'], OpenEndedModule.DONE) def test_send_to_grader(self): submission = "This is a student submission" qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) - student_info = {'anonymous_student_id': test_system.anonymous_student_id, - 'submission_time': qtime} + student_info = {'anonymous_student_id': self.test_system.anonymous_student_id, + 'submission_time': qtime} contents = self.openendedmodule.payload.copy() contents.update({ 'student_info': json.dumps(student_info), 'student_response': submission, 'max_score': self.max_score - }) - result = self.openendedmodule.send_to_grader(submission, test_system) + }) + result = self.openendedmodule.send_to_grader(submission, self.test_system) self.assertTrue(result) self.mock_xqueue.send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY) def update_score_single(self): self.openendedmodule.new_history_entry("New Entry") score_msg = { - 'correct': True, - 'score': 4, - 'msg': 'Grader Message', - 'feedback': "Grader Feedback" - } + 'correct': True, + 'score': 4, + 'msg': 'Grader Message', + 'feedback': "Grader Feedback" + } get = {'queuekey': "abcd", - 'xqueue_body': score_msg} - self.openendedmodule.update_score(get, test_system) + 'xqueue_body': score_msg} + self.openendedmodule.update_score(get, self.test_system) def update_score_single(self): self.openendedmodule.new_history_entry("New Entry") feedback = { - "success": True, - "feedback": "Grader Feedback" - } + "success": True, + "feedback": "Grader Feedback" + } score_msg = { - 'correct': True, - 'score': 4, - 'msg': 'Grader Message', - 'feedback': json.dumps(feedback), - 'grader_type': 'IN', - 'grader_id': '1', - 'submission_id': '1', - 'success': True, - 'rubric_scores': [0], - 'rubric_scores_complete': True, - 'rubric_xml': etree.tostring(self.rubric) - } + 'correct': True, + 'score': 4, + 'msg': 'Grader Message', + 'feedback': 
json.dumps(feedback), + 'grader_type': 'IN', + 'grader_id': '1', + 'submission_id': '1', + 'success': True, + 'rubric_scores': [0], + 'rubric_scores_complete': True, + 'rubric_xml': etree.tostring(self.rubric) + } get = {'queuekey': "abcd", - 'xqueue_body': json.dumps(score_msg)} - self.openendedmodule.update_score(get, test_system) + 'xqueue_body': json.dumps(score_msg)} + self.openendedmodule.update_score(get, self.test_system) def test_latest_post_assessment(self): self.update_score_single() - assessment = self.openendedmodule.latest_post_assessment(test_system) + assessment = self.openendedmodule.latest_post_assessment(self.test_system) self.assertFalse(assessment == '') # check for errors self.assertFalse('errors' in assessment) @@ -285,15 +292,18 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): metadata = {'attempts': '10', 'max_score': max_score} static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload' : False, - 'rewrite_content_links' : "", - 'close_date' : "", - } + 'max_attempts': 20, + 'prompt': prompt, + 'rubric': rubric, + 'max_score': max_score, + 'display_name': 'Name', + 'accept_file_upload': False, + 'rewrite_content_links': "", + 'close_date': "", + 's3_interface': test_util_open_ended.S3_INTERFACE, + 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, + 'skip_basic_checks': False, + } oeparam = etree.XML(''' @@ -315,17 +325,26 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): ''' task_xml2 = ''' - - Enter essay here. - This is the answer. - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - ''' + + Enter essay here. + This is the answer. 
+ {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} + + ''' definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]} descriptor = Mock() def setUp(self): - self.combinedoe = CombinedOpenEndedV1Module(test_system, self.location, self.definition, self.descriptor, static_data = self.static_data, metadata=self.metadata) + self.test_system = test_system() + # TODO: this constructor call is definitely wrong, but neither branch + # of the merge matches the module constructor. Someone (Vik?) should fix this. + self.combinedoe = CombinedOpenEndedV1Module(self.test_system, + self.location, + self.definition, + self.descriptor, + static_data=self.static_data, + metadata=self.metadata, + instance_state={}) def test_get_tag_name(self): name = self.combinedoe.get_tag_name("Tag") diff --git a/common/lib/xmodule/xmodule/tests/test_conditional.py b/common/lib/xmodule/xmodule/tests/test_conditional.py index 361a6ea785..1b2da0b74a 100644 --- a/common/lib/xmodule/xmodule/tests/test_conditional.py +++ b/common/lib/xmodule/xmodule/tests/test_conditional.py @@ -56,6 +56,9 @@ class ConditionalModuleTest(unittest.TestCase): '''Get a dummy system''' return DummySystem(load_error_modules) + def setUp(self): + self.test_system = test_system() + def get_course(self, name): """Get a test course by directory name. 
If there's more than one, error.""" print "Importing {0}".format(name) @@ -70,52 +73,51 @@ class ConditionalModuleTest(unittest.TestCase): """Make sure that conditional module works""" print "Starting import" - course = self.get_course('conditional') + course = self.get_course('conditional_and_poll') print "Course: ", course print "id: ", course.id - instance_states = dict(problem=None) - shared_state = None - def inner_get_module(descriptor): if isinstance(descriptor, Location): location = descriptor descriptor = self.modulestore.get_instance(course.id, location, depth=None) location = descriptor.location - instance_state = instance_states.get(location.category, None) - print "inner_get_module, location=%s, inst_state=%s" % (location, instance_state) - return descriptor.xmodule_constructor(test_system)(instance_state, shared_state) + return descriptor.xmodule(self.test_system) - location = Location(["i4x", "edX", "cond_test", "conditional", "condone"]) + # edx - HarvardX + # cond_test - ER22x + location = Location(["i4x", "HarvardX", "ER22x", "conditional", "condone"]) def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None): return text - test_system.replace_urls = replace_urls - test_system.get_module = inner_get_module + self.test_system.replace_urls = replace_urls + self.test_system.get_module = inner_get_module module = inner_get_module(location) print "module: ", module - print "module definition: ", module.definition + print "module.conditions_map: ", module.conditions_map print "module children: ", module.get_children() print "module display items (children): ", module.get_display_items() html = module.get_html() print "html type: ", type(html) print "html: ", html - html_expect = "{'ajax_url': 'courses/course_id/modx/a_location', 'element_id': 'i4x-edX-cond_test-conditional-condone', 'id': 'i4x://edX/cond_test/conditional/condone'}" + html_expect = "{'ajax_url': 'courses/course_id/modx/a_location', 'element_id': 
'i4x-HarvardX-ER22x-conditional-condone', 'id': 'i4x://HarvardX/ER22x/conditional/condone', 'depends': 'i4x-HarvardX-ER22x-problem-choiceprob'}" self.assertEqual(html, html_expect) gdi = module.get_display_items() print "gdi=", gdi ajax = json.loads(module.handle_ajax('', '')) - self.assertTrue('xmodule.conditional_module' in ajax['html']) print "ajax: ", ajax + html = ajax['html'] + self.assertFalse(any(['This is a secret' in item for item in html])) # now change state of the capa problem to make it completed - instance_states['problem'] = json.dumps({'attempts': 1}) + inner_get_module(Location('i4x://HarvardX/ER22x/problem/choiceprob')).attempts = 1 ajax = json.loads(module.handle_ajax('', '')) - self.assertTrue('This is a secret' in ajax['html']) print "post-attempt ajax: ", ajax + html = ajax['html'] + self.assertTrue(any(['This is a secret' in item for item in html])) diff --git a/common/lib/xmodule/xmodule/tests/test_content.py b/common/lib/xmodule/xmodule/tests/test_content.py index 1bcd2f4ebe..e73c33197c 100644 --- a/common/lib/xmodule/xmodule/tests/test_content.py +++ b/common/lib/xmodule/xmodule/tests/test_content.py @@ -19,9 +19,14 @@ class ContentTest(unittest.TestCase): content = StaticContent('loc', 'name', 'content_type', 'data') self.assertIsNone(content.thumbnail_location) - def test_generate_thumbnail_nonimage(self): + def test_generate_thumbnail_image(self): contentStore = ContentStore() - content = Content(Location(u'c4x', u'mitX', u'800', u'asset', u'monsters.jpg'), None) + content = Content(Location(u'c4x', u'mitX', u'800', u'asset', u'monsters__.jpg'), None) (thumbnail_content, thumbnail_file_location) = contentStore.generate_thumbnail(content) self.assertIsNone(thumbnail_content) - self.assertEqual(Location(u'c4x', u'mitX', u'800', u'thumbnail', u'monsters.jpg'), thumbnail_file_location) + self.assertEqual(Location(u'c4x', u'mitX', u'800', u'thumbnail', u'monsters__.jpg'), thumbnail_file_location) + def test_compute_location(self): + # We 
had a bug that __ got converted into a single _. Make sure that substitution of INVALID_CHARS (like space) + # still happen. + asset_location = StaticContent.compute_location('mitX', '400', 'subs__1eo_jXvZnE .srt.sjson') + self.assertEqual(Location(u'c4x', u'mitX', u'400', u'asset', u'subs__1eo_jXvZnE_.srt.sjson', None), asset_location) diff --git a/common/lib/xmodule/xmodule/tests/test_course_module.py b/common/lib/xmodule/xmodule/tests/test_course_module.py index 712b095696..59099b0dff 100644 --- a/common/lib/xmodule/xmodule/tests/test_course_module.py +++ b/common/lib/xmodule/xmodule/tests/test_course_module.py @@ -39,7 +39,7 @@ class DummySystem(ImportSystem): class IsNewCourseTestCase(unittest.TestCase): """Make sure the property is_new works on courses""" @staticmethod - def get_dummy_course(start, announcement=None, is_new=None): + def get_dummy_course(start, announcement=None, is_new=None, advertised_start=None): """Get a dummy course""" system = DummySystem(load_error_modules=True) @@ -49,71 +49,87 @@ class IsNewCourseTestCase(unittest.TestCase): is_new = to_attrb('is_new', is_new) announcement = to_attrb('announcement', announcement) + advertised_start = to_attrb('advertised_start', advertised_start) start_xml = ''' + {is_new} + {advertised_start}> Two houses, ... 
'''.format(org=ORG, course=COURSE, start=start, is_new=is_new, - announcement=announcement) + announcement=announcement, advertised_start=advertised_start) return system.process_xml(start_xml) @patch('xmodule.course_module.time.gmtime') def test_sorting_score(self, gmtime_mock): gmtime_mock.return_value = NOW - dates = [('2012-10-01T12:00', '2012-09-01T12:00'), # 0 - ('2012-12-01T12:00', '2012-11-01T12:00'), # 1 - ('2013-02-01T12:00', '2012-12-01T12:00'), # 2 - ('2013-02-01T12:00', '2012-11-10T12:00'), # 3 - ('2013-02-01T12:00', None), # 4 - ('2013-03-01T12:00', None), # 5 - ('2013-04-01T12:00', None), # 6 - ('2012-11-01T12:00', None), # 7 - ('2012-09-01T12:00', None), # 8 - ('1990-01-01T12:00', None), # 9 - ('2013-01-02T12:00', None), # 10 - ('2013-01-10T12:00', '2012-12-31T12:00'), # 11 - ('2013-01-10T12:00', '2013-01-01T12:00'), # 12 + + day1 = '2012-01-01T12:00' + day2 = '2012-01-02T12:00' + + dates = [ + # Announce date takes priority over actual start + # and courses announced on a later date are newer + # than courses announced for an earlier date + ((day1, day2, None), (day1, day1, None), self.assertLess), + ((day1, day1, None), (day2, day1, None), self.assertEqual), + + # Announce dates take priority over advertised starts + ((day1, day2, day1), (day1, day1, day1), self.assertLess), + ((day1, day1, day2), (day2, day1, day2), self.assertEqual), + + # Later start == newer course + ((day2, None, None), (day1, None, None), self.assertLess), + ((day1, None, None), (day1, None, None), self.assertEqual), + + # Non-parseable advertised starts are ignored in preference + # to actual starts + ((day2, None, "Spring 2013"), (day1, None, "Fall 2012"), self.assertLess), + ((day1, None, "Spring 2013"), (day1, None, "Fall 2012"), self.assertEqual), + + # Parseable advertised starts take priority over start dates + ((day1, None, day2), (day1, None, day1), self.assertLess), + ((day2, None, day2), (day1, None, day2), self.assertEqual), + ] data = [] - for i, d in 
enumerate(dates): - descriptor = self.get_dummy_course(start=d[0], announcement=d[1]) - score = descriptor.sorting_score - data.append((score, i)) + for a, b, assertion in dates: + a_score = self.get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score + b_score = self.get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score + print "Comparing %s to %s" % (a, b) + assertion(a_score, b_score) - result = [d[1] for d in sorted(data)] - assert(result == [12, 11, 2, 3, 1, 0, 6, 5, 4, 10, 7, 8, 9]) @patch('xmodule.course_module.time.gmtime') - def test_is_new(self, gmtime_mock): + def test_is_newish(self, gmtime_mock): gmtime_mock.return_value = NOW descriptor = self.get_dummy_course(start='2012-12-02T12:00', is_new=True) - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=False) - assert(descriptor.is_new is False) + assert(descriptor.is_newish is False) descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=True) - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) descriptor = self.get_dummy_course(start='2013-01-15T12:00') - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) descriptor = self.get_dummy_course(start='2013-03-00T12:00') - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) descriptor = self.get_dummy_course(start='2012-10-15T12:00') - assert(descriptor.is_new is False) + assert(descriptor.is_newish is False) descriptor = self.get_dummy_course(start='2012-12-31T12:00') - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) diff --git a/common/lib/xmodule/xmodule/tests/test_export.py b/common/lib/xmodule/xmodule/tests/test_export.py index da1b04bd94..443014f9ef 100644 --- a/common/lib/xmodule/xmodule/tests/test_export.py +++ b/common/lib/xmodule/xmodule/tests/test_export.py @@ -4,7 +4,7 @@ from fs.osfs import OSFS 
from nose.tools import assert_equals, assert_true from path import path from tempfile import mkdtemp -from shutil import copytree +import shutil from xmodule.modulestore.xml import XMLModuleStore @@ -18,27 +18,16 @@ TEST_DIR = TEST_DIR / 'test' DATA_DIR = TEST_DIR / 'data' -def strip_metadata(descriptor, key): - """ - Recursively strips tag from all children. - """ - print "strip {key} from {desc}".format(key=key, desc=descriptor.location.url()) - descriptor.metadata.pop(key, None) - for d in descriptor.get_children(): - strip_metadata(d, key) - - def strip_filenames(descriptor): """ Recursively strips 'filename' from all children's definitions. """ print "strip filename from {desc}".format(desc=descriptor.location.url()) - descriptor.definition.pop('filename', None) + descriptor._model_data.pop('filename', None) for d in descriptor.get_children(): strip_filenames(d) - class RoundTripTestCase(unittest.TestCase): ''' Check that our test courses roundtrip properly. Same course imported , than exported, then imported again. @@ -46,11 +35,11 @@ class RoundTripTestCase(unittest.TestCase): Thus we make sure that export and import work properly. ''' def check_export_roundtrip(self, data_dir, course_dir): - root_dir = path(mkdtemp()) + root_dir = path(self.temp_dir) print "Copying test course to temp dir {0}".format(root_dir) data_dir = path(data_dir) - copytree(data_dir / course_dir, root_dir / course_dir) + shutil.copytree(data_dir / course_dir, root_dir / course_dir) print "Starting import" initial_import = XMLModuleStore(root_dir, course_dirs=[course_dir]) @@ -77,10 +66,6 @@ class RoundTripTestCase(unittest.TestCase): exported_course = courses2[0] print "Checking course equality" - # HACK: data_dir metadata tags break equality because they - # aren't real metadata, and depend on paths. Remove them. 
- strip_metadata(initial_course, 'data_dir') - strip_metadata(exported_course, 'data_dir') # HACK: filenames change when changing file formats # during imports from old-style courses. Ignore them. @@ -105,9 +90,10 @@ class RoundTripTestCase(unittest.TestCase): self.assertEquals(initial_import.modules[course_id][location], second_import.modules[course_id][location]) - def setUp(self): self.maxDiff = None + self.temp_dir = mkdtemp() + self.addCleanup(shutil.rmtree, self.temp_dir) def test_toy_roundtrip(self): self.check_export_roundtrip(DATA_DIR, "toy") @@ -118,6 +104,9 @@ class RoundTripTestCase(unittest.TestCase): def test_full_roundtrip(self): self.check_export_roundtrip(DATA_DIR, "full") + def test_conditional_and_poll_roundtrip(self): + self.check_export_roundtrip(DATA_DIR, "conditional_and_poll") + def test_selfassessment_roundtrip(self): #Test selfassessment xmodule to see if it exports correctly self.check_export_roundtrip(DATA_DIR, "self_assessment") diff --git a/common/lib/xmodule/xmodule/tests/test_import.py b/common/lib/xmodule/xmodule/tests/test_import.py index 42072ffe4d..37b1d35938 100644 --- a/common/lib/xmodule/xmodule/tests/test_import.py +++ b/common/lib/xmodule/xmodule/tests/test_import.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + from path import path import unittest from fs.memoryfs import MemoryFS @@ -12,6 +14,7 @@ from xmodule.errortracker import make_error_tracker from xmodule.modulestore import Location from xmodule.modulestore.xml import ImportSystem, XMLModuleStore from xmodule.modulestore.exceptions import ItemNotFoundError +from xmodule.modulestore.inheritance import compute_inherited_metadata from .test_export import DATA_DIR @@ -75,7 +78,6 @@ class ImportTestCase(BaseCourseTestCase): self.assertEqual(descriptor.__class__.__name__, 'ErrorDescriptor') - def test_unique_url_names(self): '''Check that each error gets its very own url_name''' bad_xml = '''''' @@ -87,7 +89,6 @@ class ImportTestCase(BaseCourseTestCase): 
self.assertNotEqual(descriptor1.location, descriptor2.location) - def test_reimport(self): '''Make sure an already-exported error xml tag loads properly''' @@ -103,8 +104,10 @@ class ImportTestCase(BaseCourseTestCase): self.assertEqual(re_import_descriptor.__class__.__name__, 'ErrorDescriptor') - self.assertEqual(descriptor.definition['data'], - re_import_descriptor.definition['data']) + self.assertEqual(descriptor.contents, + re_import_descriptor.contents) + self.assertEqual(descriptor.error_msg, + re_import_descriptor.error_msg) def test_fixed_xml_tag(self): """Make sure a tag that's been fixed exports as the original tag type""" @@ -138,23 +141,20 @@ class ImportTestCase(BaseCourseTestCase): url_name = 'test1' start_xml = ''' + due="{due}" url_name="{url_name}" unicorn="purple"> Two houses, ... - '''.format(grace=v, org=ORG, course=COURSE, url_name=url_name) + '''.format(due=v, org=ORG, course=COURSE, url_name=url_name) descriptor = system.process_xml(start_xml) + compute_inherited_metadata(descriptor) - print descriptor, descriptor.metadata - self.assertEqual(descriptor.metadata['graceperiod'], v) - self.assertEqual(descriptor.metadata['unicorn'], 'purple') + print descriptor, descriptor._model_data + self.assertEqual(descriptor.lms.due, v) - # Check that the child inherits graceperiod correctly + # Check that the child inherits due correctly child = descriptor.get_children()[0] - self.assertEqual(child.metadata['graceperiod'], v) - - # check that the child does _not_ inherit any unicorns - self.assertTrue('unicorn' not in child.metadata) + self.assertEqual(child.lms.due, v) # Now export and check things resource_fs = MemoryFS() @@ -181,12 +181,12 @@ class ImportTestCase(BaseCourseTestCase): # did we successfully strip the url_name from the definition contents? self.assertTrue('url_name' not in course_xml.attrib) - # Does the chapter tag now have a graceperiod attribute? + # Does the chapter tag now have a due attribute? 
# hardcoded path to child with resource_fs.open('chapter/ch.xml') as f: chapter_xml = etree.fromstring(f.read()) self.assertEqual(chapter_xml.tag, 'chapter') - self.assertFalse('graceperiod' in chapter_xml.attrib) + self.assertFalse('due' in chapter_xml.attrib) def test_is_pointer_tag(self): """ @@ -224,13 +224,12 @@ class ImportTestCase(BaseCourseTestCase): def check_for_key(key, node): "recursive check for presence of key" print "Checking {0}".format(node.location.url()) - self.assertTrue(key in node.metadata) + self.assertTrue(key in node._model_data) for c in node.get_children(): check_for_key(key, c) check_for_key('graceperiod', course) - def test_policy_loading(self): """Make sure that when two courses share content with the same org and course names, policy applies to the right one.""" @@ -252,8 +251,7 @@ class ImportTestCase(BaseCourseTestCase): # Also check that keys from policy are run through the # appropriate attribute maps -- 'graded' should be True, not 'true' - self.assertEqual(toy.metadata['graded'], True) - + self.assertEqual(toy.lms.graded, True) def test_definition_loading(self): """When two courses share the same org and course name and @@ -271,9 +269,8 @@ class ImportTestCase(BaseCourseTestCase): location = Location(["i4x", "edX", "toy", "video", "Welcome"]) toy_video = modulestore.get_instance(toy_id, location) two_toy_video = modulestore.get_instance(two_toy_id, location) - self.assertEqual(toy_video.metadata['youtube'], "1.0:p2Q6BrNhdh8") - self.assertEqual(two_toy_video.metadata['youtube'], "1.0:p2Q6BrNhdh9") - + self.assertEqual(etree.fromstring(toy_video.data).get('youtube'), "1.0:p2Q6BrNhdh8") + self.assertEqual(etree.fromstring(two_toy_video.data).get('youtube'), "1.0:p2Q6BrNhdh9") def test_colon_in_url_name(self): """Ensure that colons in url_names convert to file paths properly""" @@ -331,6 +328,22 @@ class ImportTestCase(BaseCourseTestCase): self.assertEqual(len(video.url_name), len('video_') + 12) + def 
test_poll_and_conditional_xmodule(self): + modulestore = XMLModuleStore(DATA_DIR, course_dirs=['conditional_and_poll']) + + course = modulestore.get_courses()[0] + chapters = course.get_children() + ch1 = chapters[0] + sections = ch1.get_children() + + self.assertEqual(len(sections), 1) + + location = course.location + location = Location(location.tag, location.org, location.course, + 'sequential', 'Problem_Demos') + module = modulestore.get_instance(course.id, location) + self.assertEqual(len(module.children), 2) + def test_error_on_import(self): '''Check that when load_error_module is false, an exception is raised, rather than returning an ErrorModule''' @@ -354,7 +367,7 @@ class ImportTestCase(BaseCourseTestCase): render_string_from_sample_gst_xml = """ \ """.strip() - self.assertEqual(gst_sample.definition['render'], render_string_from_sample_gst_xml) + self.assertEqual(gst_sample.render, render_string_from_sample_gst_xml) def test_cohort_config(self): """ @@ -370,13 +383,13 @@ class ImportTestCase(BaseCourseTestCase): self.assertFalse(course.is_cohorted) # empty config -> False - course.metadata['cohort_config'] = {} + course.cohort_config = {} self.assertFalse(course.is_cohorted) # false config -> False - course.metadata['cohort_config'] = {'cohorted': False} + course.cohort_config = {'cohorted': False} self.assertFalse(course.is_cohorted) # and finally... 
- course.metadata['cohort_config'] = {'cohorted': True} + course.cohort_config = {'cohorted': True} self.assertTrue(course.is_cohorted) diff --git a/common/lib/xmodule/xmodule/tests/test_logic.py b/common/lib/xmodule/xmodule/tests/test_logic.py new file mode 100644 index 0000000000..018b40427e --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_logic.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- + +import json +import unittest + +from xmodule.poll_module import PollDescriptor +from xmodule.conditional_module import ConditionalDescriptor + + +class LogicTest(unittest.TestCase): + """Base class for testing xmodule logic.""" + descriptor_class = None + raw_model_data = {} + + def setUp(self): + class EmptyClass: pass + + self.system = None + self.location = None + self.descriptor = EmptyClass() + + self.xmodule_class = self.descriptor_class.module_class + self.xmodule = self.xmodule_class(self.system, self.location, + self.descriptor, self.raw_model_data) + + def ajax_request(self, dispatch, get): + return json.loads(self.xmodule.handle_ajax(dispatch, get)) + + +class PollModuleTest(LogicTest): + descriptor_class = PollDescriptor + raw_model_data = { + 'poll_answers': {'Yes': 1, 'Dont_know': 0, 'No': 0}, + 'voted': False, + 'poll_answer': '' + } + + def test_bad_ajax_request(self): + response = self.ajax_request('bad_answer', {}) + self.assertDictEqual(response, {'error': 'Unknown Command!'}) + + def test_good_ajax_request(self): + response = self.ajax_request('No', {}) + + poll_answers = response['poll_answers'] + total = response['total'] + callback = response['callback'] + + self.assertDictEqual(poll_answers, {'Yes': 1, 'Dont_know': 0, 'No': 1}) + self.assertEqual(total, 2) + self.assertDictEqual(callback, {'objectName': 'Conditional'}) + self.assertEqual(self.xmodule.poll_answer, 'No') + + +class ConditionalModuleTest(LogicTest): + descriptor_class = ConditionalDescriptor + + def test_ajax_request(self): + # Mock is_condition_satisfied + 
self.xmodule.is_condition_satisfied = lambda: True + setattr(self.xmodule.descriptor, 'get_children', lambda: []) + + response = self.ajax_request('No', {}) + html = response['html'] + + self.assertEqual(html, []) diff --git a/common/lib/xmodule/xmodule/tests/test_randomize_module.py b/common/lib/xmodule/xmodule/tests/test_randomize_module.py index 456fd379a5..59cf5a59f3 100644 --- a/common/lib/xmodule/xmodule/tests/test_randomize_module.py +++ b/common/lib/xmodule/xmodule/tests/test_randomize_module.py @@ -13,7 +13,7 @@ COURSE = 'test_course' START = '2013-01-01T01:00:00' -from test_course_module import DummySystem as DummyImportSystem +from .test_course_module import DummySystem as DummyImportSystem from . import test_system diff --git a/common/lib/xmodule/xmodule/tests/test_self_assessment.py b/common/lib/xmodule/xmodule/tests/test_self_assessment.py index 0ebfe64bfb..593b3fea01 100644 --- a/common/lib/xmodule/xmodule/tests/test_self_assessment.py +++ b/common/lib/xmodule/xmodule/tests/test_self_assessment.py @@ -1,17 +1,17 @@ import json -from mock import Mock +from mock import Mock, MagicMock import unittest -from xmodule.self_assessment_module import SelfAssessmentModule +from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule from xmodule.modulestore import Location from lxml import etree -from nose.plugins.skip import SkipTest from . 
import test_system +import test_util_open_ended + class SelfAssessmentTest(unittest.TestCase): - rubric = ''' Response Quality @@ -24,13 +24,11 @@ class SelfAssessmentTest(unittest.TestCase): 'prompt': prompt, 'submitmessage': 'Shall we submit now?', 'hintprompt': 'Consider this...', - } + } location = Location(["i4x", "edX", "sa_test", "selfassessment", "SampleQuestion"]) - metadata = {'attempts': '10'} - descriptor = Mock() def setUp(self): @@ -41,40 +39,62 @@ class SelfAssessmentTest(unittest.TestCase): 'attempts': 2}) static_data = { - 'max_attempts': 10, - 'rubric': etree.XML(self.rubric), - 'prompt': self.prompt, - 'max_score': 1, - 'display_name': "Name", - 'accept_file_upload': False, - 'close_date': None - } + 'max_attempts': 10, + 'rubric': etree.XML(self.rubric), + 'prompt': self.prompt, + 'max_score': 1, + 'display_name': "Name", + 'accept_file_upload': False, + 'close_date': None, + 's3_interface': test_util_open_ended.S3_INTERFACE, + 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, + 'skip_basic_checks': False, + } - self.module = SelfAssessmentModule(test_system, self.location, - self.definition, self.descriptor, - static_data, - state, metadata=self.metadata) + self.module = SelfAssessmentModule(test_system(), self.location, + self.definition, + self.descriptor, + static_data) def test_get_html(self): - html = self.module.get_html(test_system) + html = self.module.get_html(self.module.system) self.assertTrue("This is sample prompt text" in html) def test_self_assessment_flow(self): - raise SkipTest() + responses = {'assessment': '0', 'score_list[]': ['0', '0']} + + def get_fake_item(name): + return responses[name] + + def get_data_for_location(self, location, student): + return { + 'count_graded': 0, + 'count_required': 0, + 'student_sub_count': 0, + } + + mock_query_dict = MagicMock() + mock_query_dict.__getitem__.side_effect = get_fake_item + mock_query_dict.getlist = get_fake_item + + 
self.module.peer_gs.get_data_for_location = get_data_for_location + self.assertEqual(self.module.get_score()['score'], 0) - self.module.save_answer({'student_answer': "I am an answer"}, test_system) - self.assertEqual(self.module.state, self.module.ASSESSING) - - self.module.save_assessment({'assessment': '0'}, test_system) - self.assertEqual(self.module.state, self.module.DONE) + self.module.save_answer({'student_answer': "I am an answer"}, + self.module.system) + self.assertEqual(self.module.child_state, self.module.ASSESSING) + self.module.save_assessment(mock_query_dict, self.module.system) + self.assertEqual(self.module.child_state, self.module.DONE) d = self.module.reset({}) self.assertTrue(d['success']) - self.assertEqual(self.module.state, self.module.INITIAL) + self.assertEqual(self.module.child_state, self.module.INITIAL) # if we now assess as right, skip the REQUEST_HINT state - self.module.save_answer({'student_answer': 'answer 4'}, test_system) - self.module.save_assessment({'assessment': '1'}, test_system) - self.assertEqual(self.module.state, self.module.DONE) + self.module.save_answer({'student_answer': 'answer 4'}, + self.module.system) + responses['assessment'] = '1' + self.module.save_assessment(mock_query_dict, self.module.system) + self.assertEqual(self.module.child_state, self.module.DONE) diff --git a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py new file mode 100644 index 0000000000..db580f1e0e --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py @@ -0,0 +1,14 @@ +OPEN_ENDED_GRADING_INTERFACE = { + 'url': 'http://127.0.0.1:3033/', + 'username': 'incorrect', + 'password': 'incorrect', + 'staff_grading': 'staff_grading', + 'peer_grading': 'peer_grading', + 'grading_controller': 'grading_controller' +} + +S3_INTERFACE = { + 'aws_access_key': "", + 'aws_secret_key': "", + "aws_bucket_name": "", +} \ No newline at end of file diff --git 
a/common/lib/xmodule/xmodule/timeinfo.py b/common/lib/xmodule/xmodule/timeinfo.py new file mode 100644 index 0000000000..615a7b2c73 --- /dev/null +++ b/common/lib/xmodule/xmodule/timeinfo.py @@ -0,0 +1,39 @@ +import dateutil +import dateutil.parser +import datetime +from .timeparse import parse_timedelta + +import logging +log = logging.getLogger(__name__) + +class TimeInfo(object): + """ + This is a simple object that calculates and stores datetime information for an XModule + based on the due date string and the grace period string + + So far it parses out three different pieces of time information: + self.display_due_date - the 'official' due date that gets displayed to students + self.grace_period - the length of the grace period + self.close_date - the real due date + + """ + def __init__(self, display_due_date_string, grace_period_string): + if display_due_date_string is not None: + try: + self.display_due_date = dateutil.parser.parse(display_due_date_string) + except ValueError: + log.error("Could not parse due date {0}".format(display_due_date_string)) + raise + else: + self.display_due_date = None + + if grace_period_string is not None and self.display_due_date: + try: + self.grace_period = parse_timedelta(grace_period_string) + self.close_date = self.display_due_date + self.grace_period + except: + log.error("Error parsing the grace period {0}".format(grace_period_string)) + raise + else: + self.grace_period = None + self.close_date = self.display_due_date diff --git a/common/lib/xmodule/xmodule/timelimit_module.py b/common/lib/xmodule/xmodule/timelimit_module.py index 9abb5d183f..efa47a5dca 100644 --- a/common/lib/xmodule/xmodule/timelimit_module.py +++ b/common/lib/xmodule/xmodule/timelimit_module.py @@ -9,35 +9,31 @@ from xmodule.xml_module import XmlDescriptor from xmodule.x_module import XModule from xmodule.progress import Progress from xmodule.exceptions import NotFoundError +from xblock.core import Float, String, Boolean, Scope log = 
logging.getLogger(__name__) -class TimeLimitModule(XModule): - ''' + +class TimeLimitFields(object): + beginning_at = Float(help="The time this timer was started", scope=Scope.student_state) + ending_at = Float(help="The time this timer will end", scope=Scope.student_state) + accomodation_code = String(help="A code indicating accommodations to be given the student", scope=Scope.student_state) + time_expired_redirect_url = String(help="Url to redirect users to after the timelimit has expired", scope=Scope.settings) + duration = Float(help="The length of this timer", scope=Scope.settings) + suppress_toplevel_navigation = Boolean(help="Whether the toplevel navigation should be suppressed when viewing this module", scope=Scope.settings) + + +class TimeLimitModule(TimeLimitFields, XModule): + ''' Wrapper module which imposes a time constraint for the completion of its child. ''' - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) self.rendered = False - self.beginning_at = None - self.ending_at = None - self.accommodation_code = None - - if instance_state is not None: - state = json.loads(instance_state) - if 'beginning_at' in state: - self.beginning_at = state['beginning_at'] - if 'ending_at' in state: - self.ending_at = state['ending_at'] - if 'accommodation_code' in state: - self.accommodation_code = state['accommodation_code'] - # For a timed activity, we are only interested here # in time-related accommodations, and these should be disjoint. # (For proctored exams, it is possible to have multiple accommodations @@ -50,7 +46,7 @@ class TimeLimitModule(XModule): ) def _get_accommodated_duration(self, duration): - ''' + ''' Get duration for activity, as adjusted for accommodations. 
Input and output are expressed in seconds. ''' @@ -70,35 +66,25 @@ class TimeLimitModule(XModule): @property def has_begun(self): return self.beginning_at is not None - - @property + + @property def has_ended(self): if not self.ending_at: return False return self.ending_at < time() - + def begin(self, duration): - ''' + ''' Sets the starting time and ending time for the activity, based on the duration provided (in seconds). ''' self.beginning_at = time() modified_duration = self._get_accommodated_duration(duration) self.ending_at = self.beginning_at + modified_duration - + def get_remaining_time_in_ms(self): return int((self.ending_at - time()) * 1000) - def get_instance_state(self): - state = {} - if self.beginning_at: - state['beginning_at'] = self.beginning_at - if self.ending_at: - state['ending_at'] = self.ending_at - if self.accommodation_code: - state['accommodation_code'] = self.accommodation_code - return json.dumps(state) - def get_html(self): self.render() return self.content @@ -133,12 +119,12 @@ class TimeLimitModule(XModule): else: return "other" -class TimeLimitDescriptor(XMLEditingDescriptor, XmlDescriptor): +class TimeLimitDescriptor(TimeLimitFields, XMLEditingDescriptor, XmlDescriptor): module_class = TimeLimitModule # For remembering when a student started, and when they should end - stores_state = True + stores_state = True @classmethod def definition_from_xml(cls, xml_object, system): @@ -151,7 +137,7 @@ class TimeLimitDescriptor(XMLEditingDescriptor, XmlDescriptor): if system.error_tracker is not None: system.error_tracker("ERROR: " + str(e)) continue - return {'children': children} + return {}, children def definition_to_xml(self, resource_fs): xml_object = etree.Element('timelimit') diff --git a/common/lib/xmodule/xmodule/vertical_module.py b/common/lib/xmodule/xmodule/vertical_module.py index 5827ea96a9..610d180c11 100644 --- a/common/lib/xmodule/xmodule/vertical_module.py +++ b/common/lib/xmodule/xmodule/vertical_module.py @@ -8,11 +8,15 
@@ from pkg_resources import resource_string class_priority = ['video', 'problem'] -class VerticalModule(XModule): +class VerticalFields(object): + has_children = True + + +class VerticalModule(VerticalFields, XModule): ''' Layout module for laying out submodules vertically.''' - def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, instance_state, shared_state, **kwargs) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) self.contents = None def get_html(self): @@ -42,7 +46,7 @@ class VerticalModule(XModule): return new_class -class VerticalDescriptor(SequenceDescriptor): +class VerticalDescriptor(VerticalFields, SequenceDescriptor): module_class = VerticalModule js = {'coffee': [resource_string(__name__, 'js/src/vertical/edit.coffee')]} diff --git a/common/lib/xmodule/xmodule/video_module.py b/common/lib/xmodule/xmodule/video_module.py index 27388f7630..0203299b40 100644 --- a/common/lib/xmodule/xmodule/video_module.py +++ b/common/lib/xmodule/xmodule/video_module.py @@ -8,9 +8,8 @@ from django.http import Http404 from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor -from xmodule.modulestore.xml import XMLModuleStore -from xmodule.modulestore.django import modulestore from xmodule.contentstore.content import StaticContent +from xblock.core import Integer, Scope, String import datetime import time @@ -18,7 +17,13 @@ import time log = logging.getLogger(__name__) -class VideoModule(XModule): +class VideoFields(object): + data = String(help="XML data for the problem", scope=Scope.content) + position = Integer(help="Current position in the video", scope=Scope.student_state, default=0) + display_name = String(help="Display name for this module", scope=Scope.settings) + + +class VideoModule(VideoFields, XModule): video_time = 0 icon_class = 'video' @@ -32,23 +37,16 @@ class 
VideoModule(XModule): css = {'scss': [resource_string(__name__, 'css/video/display.scss')]} js_module_name = "Video" - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - xmltree = etree.fromstring(self.definition['data']) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + + xmltree = etree.fromstring(self.data) self.youtube = xmltree.get('youtube') - self.position = 0 self.show_captions = xmltree.get('show_captions', 'true') self.source = self._get_source(xmltree) self.track = self._get_track(xmltree) self.start_time, self.end_time = self._get_timeframe(xmltree) - if instance_state is not None: - state = json.loads(instance_state) - if 'position' in state: - self.position = int(float(state['position'])) - def _get_source(self, xmltree): # find the first valid source return self._get_first_external(xmltree, 'source') @@ -120,13 +118,6 @@ class VideoModule(XModule): return self.youtube def get_html(self): - if isinstance(modulestore(), XMLModuleStore): - # VS[compat] - # cdodge: filesystem static content support. 
- caption_asset_path = "/static/{0}/subs/".format(self.metadata['data_dir']) - else: - caption_asset_path = StaticContent.get_base_url_path_for_course_assets(self.location) + '/subs_' - # We normally let JS parse this, but in the case that we need a hacked # out player because YouTube has broken their \ No newline at end of file diff --git a/common/test/data/conditional_and_poll/chapter/Staff.xml b/common/test/data/conditional_and_poll/chapter/Staff.xml new file mode 100644 index 0000000000..e1d5216f6d --- /dev/null +++ b/common/test/data/conditional_and_poll/chapter/Staff.xml @@ -0,0 +1,3 @@ + + + diff --git a/common/test/data/conditional_and_poll/conditional/condone.xml b/common/test/data/conditional_and_poll/conditional/condone.xml new file mode 100644 index 0000000000..80b061e244 --- /dev/null +++ b/common/test/data/conditional_and_poll/conditional/condone.xml @@ -0,0 +1,3 @@ + + + diff --git a/common/test/data/conditional_and_poll/course.xml b/common/test/data/conditional_and_poll/course.xml new file mode 120000 index 0000000000..f4f5c17b87 --- /dev/null +++ b/common/test/data/conditional_and_poll/course.xml @@ -0,0 +1 @@ +roots/2013_Spring.xml \ No newline at end of file diff --git a/common/test/data/conditional_and_poll/course/2013_Spring.xml b/common/test/data/conditional_and_poll/course/2013_Spring.xml new file mode 100644 index 0000000000..2eea422a2f --- /dev/null +++ b/common/test/data/conditional_and_poll/course/2013_Spring.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + diff --git a/common/test/data/conditional_and_poll/creating_course.xml b/common/test/data/conditional_and_poll/creating_course.xml new file mode 100644 index 0000000000..4c90f1c2ec --- /dev/null +++ b/common/test/data/conditional_and_poll/creating_course.xml @@ -0,0 +1,8 @@ + diff --git a/common/test/data/conditional_and_poll/html/secret_page.xml b/common/test/data/conditional_and_poll/html/secret_page.xml new file mode 100644 index 0000000000..63be3cfa8d --- /dev/null +++ 
b/common/test/data/conditional_and_poll/html/secret_page.xml @@ -0,0 +1,4 @@ + +

              This is a secret!

              + + diff --git a/common/test/data/conditional_and_poll/info/2013_Spring/handouts.html b/common/test/data/conditional_and_poll/info/2013_Spring/handouts.html new file mode 100644 index 0000000000..35f2c89474 --- /dev/null +++ b/common/test/data/conditional_and_poll/info/2013_Spring/handouts.html @@ -0,0 +1,3 @@ +
                +
              1. A list of course handouts, or an empty file if there are none.
              2. +
              diff --git a/common/test/data/conditional_and_poll/info/2013_Spring/updates.html b/common/test/data/conditional_and_poll/info/2013_Spring/updates.html new file mode 100644 index 0000000000..9744c1699d --- /dev/null +++ b/common/test/data/conditional_and_poll/info/2013_Spring/updates.html @@ -0,0 +1,10 @@ + +
                + +
              1. December 9

                +
                +

                Announcement text

                +
                +
              2. + +
              diff --git a/common/test/data/conditional_and_poll/policies/2013_Spring/policy.json b/common/test/data/conditional_and_poll/policies/2013_Spring/policy.json new file mode 100644 index 0000000000..e2a204815c --- /dev/null +++ b/common/test/data/conditional_and_poll/policies/2013_Spring/policy.json @@ -0,0 +1,8 @@ +{ + "course/2013_Spring": { + "start": "2099-01-01T00:00", + "advertised_start" : "Spring 2013", + "display_name": "Justice" + } + +} diff --git a/common/test/data/conditional_and_poll/problem/choiceprob.xml b/common/test/data/conditional_and_poll/problem/choiceprob.xml new file mode 100644 index 0000000000..fa91954977 --- /dev/null +++ b/common/test/data/conditional_and_poll/problem/choiceprob.xml @@ -0,0 +1,22 @@ + + + +

              Consider a hypothetical magnetic field pointing out of your computer screen. Now imagine an electron traveling from right to left in the plane of your screen. A diagram of this situation is show below…

              +
              + +

              a. The magnitude of the force experienced by the electron is proportional the product of which of the following? (Select all that apply.)

              + + + + +Magnetic field strength… +Electric field strength… +Electric charge of the electron… +Radius of the electron… +Mass of the electron… +Velocity of the electron… + + + + +
              diff --git a/common/test/data/conditional_and_poll/roots/2013_Spring.xml b/common/test/data/conditional_and_poll/roots/2013_Spring.xml new file mode 100644 index 0000000000..1b97a5a714 --- /dev/null +++ b/common/test/data/conditional_and_poll/roots/2013_Spring.xml @@ -0,0 +1,2 @@ + + diff --git a/common/test/data/conditional_and_poll/sequential/Problem_Demos.xml b/common/test/data/conditional_and_poll/sequential/Problem_Demos.xml new file mode 100644 index 0000000000..e10298336d --- /dev/null +++ b/common/test/data/conditional_and_poll/sequential/Problem_Demos.xml @@ -0,0 +1,31 @@ + + + +

              What's the Right Thing to Do?

              +

              Suppose four shipwrecked sailors are stranded at sea in a lifeboat, without + food or water. Would it be wrong for three of them to kill and eat the cabin + boy, in order to save their own lives?

              + Yes + No + Don't know +
              + +

              What's the Right Thing to Do?

              +

              Suppose four shipwrecked sailors are stranded at sea in a lifeboat, without + food or water. Would it be wrong for three of them to kill and eat the cabin + boy, in order to save their own lives?

              + Yes + No + Don't know +
              +
              + + + + Condition: first_poll - Yes + + In first condition. + + + +
              diff --git a/common/test/data/conditional_and_poll/static/README b/common/test/data/conditional_and_poll/static/README new file mode 100644 index 0000000000..e22f378b5e --- /dev/null +++ b/common/test/data/conditional_and_poll/static/README @@ -0,0 +1,5 @@ +Images, handouts, and other statically-served content should go ONLY +in this directory. + +Images for the front page should go in static/images. The frontpage +banner MUST be named course_image.jpg \ No newline at end of file diff --git a/common/test/data/conditional_and_poll/static/images/course_image.jpg b/common/test/data/conditional_and_poll/static/images/course_image.jpg new file mode 100644 index 0000000000..b6a64b9396 Binary files /dev/null and b/common/test/data/conditional_and_poll/static/images/course_image.jpg differ diff --git a/common/test/data/conditional_and_poll/static/images/professor-sandel.jpg b/common/test/data/conditional_and_poll/static/images/professor-sandel.jpg new file mode 100644 index 0000000000..41bde60165 Binary files /dev/null and b/common/test/data/conditional_and_poll/static/images/professor-sandel.jpg differ diff --git a/common/test/data/simple/course.xml b/common/test/data/simple/course.xml index 86dc8df45c..660411384f 100644 --- a/common/test/data/simple/course.xml +++ b/common/test/data/simple/course.xml @@ -15,7 +15,7 @@
              - +