diff --git a/.gitignore b/.gitignore index 2fd1ca0181..8fb170c30f 100644 --- a/.gitignore +++ b/.gitignore @@ -27,4 +27,7 @@ lms/lib/comment_client/python nosetests.xml cover_html/ .idea/ -chromedriver.log \ No newline at end of file +.redcar/ +chromedriver.log +/nbproject +ghostdriver.log diff --git a/.pylintrc b/.pylintrc index ce2f2e3b87..9ea1e62ad4 100644 --- a/.pylintrc +++ b/.pylintrc @@ -12,7 +12,7 @@ profile=no # Add files or directories to the blacklist. They should be base names, not # paths. -ignore=CVS +ignore=CVS, migrations # Pickle collected data for later comparisons. persistent=yes @@ -33,7 +33,16 @@ load-plugins= # can either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). -disable=E1102,W0142 +disable= +# W0141: Used builtin function 'map' +# W0142: Used * or ** magic +# R0201: Method could be a function +# R0901: Too many ancestors +# R0902: Too many instance attributes +# R0903: Too few public methods (1/2) +# R0904: Too many public methods +# R0913: Too many arguments + W0141,W0142,R0201,R0901,R0902,R0903,R0904,R0913 [REPORTS] @@ -43,7 +52,7 @@ disable=E1102,W0142 output-format=text # Include message's id in output -include-ids=no +include-ids=yes # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. 
Reports (if any) will be @@ -97,7 +106,7 @@ bad-functions=map,filter,apply,input module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|log|urlpatterns)$ # Regular expression which should only match correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ @@ -106,7 +115,7 @@ class-rgx=[A-Z_][a-zA-Z0-9]+$ function-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ +method-rgx=([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$ # Regular expression which should only match correct instance attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ @@ -129,7 +138,7 @@ bad-names=foo,bar,baz,toto,tutu,tata # Regular expression which should only match functions or classes name which do # not require a docstring -no-docstring-rgx=__.*__ +no-docstring-rgx=(__.*__|test_.*) [MISCELLANEOUS] diff --git a/.ruby-version b/.ruby-version index dd472cffa2..311baaf3e2 100644 --- a/.ruby-version +++ b/.ruby-version @@ -1 +1 @@ -1.8.7-p371 \ No newline at end of file +1.9.3-p374 diff --git a/Gemfile b/Gemfile index 43a9f6e2b1..7f7b146978 100644 --- a/Gemfile +++ b/Gemfile @@ -1,4 +1,4 @@ -source :rubygems +source 'https://rubygems.org' gem 'rake', '~> 10.0.3' gem 'sass', '3.1.15' gem 'bourbon', '~> 1.3.6' diff --git a/cms/.coveragerc b/cms/.coveragerc index b7ae181e99..4f0dbebe79 100644 --- a/cms/.coveragerc +++ b/cms/.coveragerc @@ -2,7 +2,7 @@ [run] data_file = reports/cms/.coverage source = cms,common/djangoapps -omit = cms/envs/*, cms/manage.py +omit = cms/envs/*, cms/manage.py, common/djangoapps/terrain/*, common/djangoapps/*/migrations/* [report] ignore_errors = True diff --git a/cms/djangoapps/contentstore/__init__.py b/cms/djangoapps/contentstore/__init__.py index e8dccbbf60..8b13789179 100644 --- 
a/cms/djangoapps/contentstore/__init__.py +++ b/cms/djangoapps/contentstore/__init__.py @@ -1,3 +1 @@ -from xmodule.templates import update_templates -update_templates() diff --git a/cms/djangoapps/contentstore/course_info_model.py b/cms/djangoapps/contentstore/course_info_model.py index 153d13dd13..589db4ac56 100644 --- a/cms/djangoapps/contentstore/course_info_model.py +++ b/cms/djangoapps/contentstore/course_info_model.py @@ -5,9 +5,11 @@ from lxml import html import re from django.http import HttpResponseBadRequest import logging +import django.utils -## TODO store as array of { date, content } and override course_info_module.definition_from_xml -## This should be in a class which inherits from XmlDescriptor +# # TODO store as array of { date, content } and override course_info_module.definition_from_xml +# # This should be in a class which inherits from XmlDescriptor +log = logging.getLogger(__name__) def get_course_updates(location): @@ -26,9 +28,11 @@ def get_course_updates(location): # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break. try: - course_html_parsed = html.fromstring(course_updates.definition['data']) + course_html_parsed = html.fromstring(course_updates.data) except: - course_html_parsed = html.fromstring("
    ") + log.error("Cannot parse: " + course_updates.data) + escaped = django.utils.html.escape(course_updates.data) + course_html_parsed = html.fromstring("
    1. " + escaped + "
    ") # Confirm that root is
      , iterate over
    1. , pull out

      subs and then rest of val course_upd_collection = [] @@ -60,13 +64,15 @@ def update_course_updates(location, update, passed_id=None): try: course_updates = modulestore('direct').get_item(location) except ItemNotFoundError: - return HttpResponseBadRequest + return HttpResponseBadRequest() # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break. try: - course_html_parsed = html.fromstring(course_updates.definition['data']) + course_html_parsed = html.fromstring(course_updates.data) except: - course_html_parsed = html.fromstring("
        ") + log.error("Cannot parse: " + course_updates.data) + escaped = django.utils.html.escape(course_updates.data) + course_html_parsed = html.fromstring("
        1. " + escaped + "
        ") # No try/catch b/c failure generates an error back to client new_html_parsed = html.fromstring('
      1. ' + update['date'] + '

        ' + update['content'] + '
      2. ') @@ -85,12 +91,18 @@ def update_course_updates(location, update, passed_id=None): passed_id = course_updates.location.url() + "/" + str(idx) # update db record - course_updates.definition['data'] = html.tostring(course_html_parsed) - modulestore('direct').update_item(location, course_updates.definition['data']) + course_updates.data = html.tostring(course_html_parsed) + modulestore('direct').update_item(location, course_updates.data) + + if (len(new_html_parsed) == 1): + content = new_html_parsed[0].tail + else: + content = "\n".join([html.tostring(ele) + for ele in new_html_parsed[1:]]) return {"id": passed_id, "date": update['date'], - "content": update['content']} + "content": content} def delete_course_update(location, update, passed_id): @@ -99,19 +111,21 @@ def delete_course_update(location, update, passed_id): Returns the resulting course_updates b/c their ids change. """ if not passed_id: - return HttpResponseBadRequest + return HttpResponseBadRequest() try: course_updates = modulestore('direct').get_item(location) except ItemNotFoundError: - return HttpResponseBadRequest + return HttpResponseBadRequest() # TODO use delete_blank_text parser throughout and cache as a static var in a class # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break. try: - course_html_parsed = html.fromstring(course_updates.definition['data']) + course_html_parsed = html.fromstring(course_updates.data) except: - course_html_parsed = html.fromstring("
          ") + log.error("Cannot parse: " + course_updates.data) + escaped = django.utils.html.escape(course_updates.data) + course_html_parsed = html.fromstring("
          1. " + escaped + "
          ") if course_html_parsed.tag == 'ol': # ??? Should this use the id in the json or in the url or does it matter? @@ -122,9 +136,9 @@ def delete_course_update(location, update, passed_id): course_html_parsed.remove(element_to_delete) # update db record - course_updates.definition['data'] = html.tostring(course_html_parsed) + course_updates.data = html.tostring(course_html_parsed) store = modulestore('direct') - store.update_item(location, course_updates.definition['data']) + store.update_item(location, course_updates.data) return get_course_updates(location) @@ -133,7 +147,6 @@ def get_idx(passed_id): """ From the url w/ idx appended, get the idx. """ - # TODO compile this regex into a class static and reuse for each call - idx_matcher = re.search(r'.*/(\d+)$', passed_id) + idx_matcher = re.search(r'.*?/?(\d+)$', passed_id) if idx_matcher: return int(idx_matcher.group(1)) diff --git a/cms/djangoapps/contentstore/features/advanced-settings.feature b/cms/djangoapps/contentstore/features/advanced-settings.feature new file mode 100644 index 0000000000..db7294c14c --- /dev/null +++ b/cms/djangoapps/contentstore/features/advanced-settings.feature @@ -0,0 +1,42 @@ +Feature: Advanced (manual) course policy + In order to specify course policy settings for which no custom user interface exists + I want to be able to manually enter JSON key /value pairs + + Scenario: A course author sees default advanced settings + Given I have opened a new course in Studio + When I select the Advanced Settings + Then I see default advanced settings + + Scenario: Add new entries, and they appear alphabetically after save + Given I am on the Advanced Course Settings page in Studio + Then the settings are alphabetized + + Scenario: Test cancel editing key value + Given I am on the Advanced Course Settings page in Studio + When I edit the value of a policy key + And I press the "Cancel" notification button + Then the policy key value is unchanged + And I reload the page + Then the policy 
key value is unchanged + + Scenario: Test editing key value + Given I am on the Advanced Course Settings page in Studio + When I edit the value of a policy key + And I press the "Save" notification button + Then the policy key value is changed + And I reload the page + Then the policy key value is changed + + Scenario: Test how multi-line input appears + Given I am on the Advanced Course Settings page in Studio + When I create a JSON object as a value + Then it is displayed as formatted + And I reload the page + Then it is displayed as formatted + + Scenario: Test automatic quoting of non-JSON values + Given I am on the Advanced Course Settings page in Studio + When I create a non-JSON value not in quotes + Then it is displayed as a string + And I reload the page + Then it is displayed as a string diff --git a/cms/djangoapps/contentstore/features/advanced-settings.py b/cms/djangoapps/contentstore/features/advanced-settings.py new file mode 100644 index 0000000000..16562b6b15 --- /dev/null +++ b/cms/djangoapps/contentstore/features/advanced-settings.py @@ -0,0 +1,132 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + +from lettuce import world, step +from common import * +import time +from terrain.steps import reload_the_page + +from nose.tools import assert_true, assert_false, assert_equal + +""" +http://selenium.googlecode.com/svn/trunk/docs/api/py/webdriver/selenium.webdriver.common.keys.html +""" +from selenium.webdriver.common.keys import Keys + +KEY_CSS = '.key input.policy-key' +VALUE_CSS = 'textarea.json' +DISPLAY_NAME_KEY = "display_name" +DISPLAY_NAME_VALUE = '"Robot Super Course"' + +############### ACTIONS #################### + +@step('I select the Advanced Settings$') +def i_select_advanced_settings(step): + expand_icon_css = 'li.nav-course-settings i.icon-expand' + if world.browser.is_element_present_by_css(expand_icon_css): + world.css_click(expand_icon_css) + link_css = 'li.nav-course-settings-advanced a' + world.css_click(link_css) + + +@step('I 
am on the Advanced Course Settings page in Studio$') +def i_am_on_advanced_course_settings(step): + step.given('I have opened a new course in Studio') + step.given('I select the Advanced Settings') + + +@step(u'I press the "([^"]*)" notification button$') +def press_the_notification_button(step, name): + css = 'a.%s-button' % name.lower() + world.css_click_at(css) + + +@step(u'I edit the value of a policy key$') +def edit_the_value_of_a_policy_key(step): + """ + It is hard to figure out how to get into the CodeMirror + area, so cheat and do it from the policy key field :) + """ + e = world.css_find(KEY_CSS)[get_index_of(DISPLAY_NAME_KEY)] + e._element.send_keys(Keys.TAB, Keys.END, Keys.ARROW_LEFT, ' ', 'X') + + +@step('I create a JSON object as a value$') +def create_JSON_object(step): + change_display_name_value(step, '{"key": "value", "key_2": "value_2"}') + + +@step('I create a non-JSON value not in quotes$') +def create_value_not_in_quotes(step): + change_display_name_value(step, 'quote me') + + +############### RESULTS #################### +@step('I see default advanced settings$') +def i_see_default_advanced_settings(step): + # Test only a few of the existing properties (there are around 34 of them) + assert_policy_entries( + ["advanced_modules", DISPLAY_NAME_KEY, "show_calculator"], ["[]", DISPLAY_NAME_VALUE, "false"]) + + +@step('the settings are alphabetized$') +def they_are_alphabetized(step): + key_elements = world.css_find(KEY_CSS) + all_keys = [] + for key in key_elements: + all_keys.append(key.value) + + assert_equal(sorted(all_keys), all_keys, "policy keys were not sorted") + + +@step('it is displayed as formatted$') +def it_is_formatted(step): + assert_policy_entries([DISPLAY_NAME_KEY], ['{\n "key": "value",\n "key_2": "value_2"\n}']) + + +@step('it is displayed as a string') +def it_is_formatted(step): + assert_policy_entries([DISPLAY_NAME_KEY], ['"quote me"']) + + +@step(u'the policy key value is unchanged$') +def 
the_policy_key_value_is_unchanged(step): + assert_equal(get_display_name_value(), DISPLAY_NAME_VALUE) + + +@step(u'the policy key value is changed$') +def the_policy_key_value_is_changed(step): + assert_equal(get_display_name_value(), '"Robot Super Course X"') + + +############# HELPERS ############### +def assert_policy_entries(expected_keys, expected_values): + for counter in range(len(expected_keys)): + index = get_index_of(expected_keys[counter]) + assert_false(index == -1, "Could not find key: " + expected_keys[counter]) + assert_equal(expected_values[counter], world.css_find(VALUE_CSS)[index].value, "value is incorrect") + + +def get_index_of(expected_key): + for counter in range(len(world.css_find(KEY_CSS))): + # Sometimes get stale reference if I hold on to the array of elements + key = world.css_find(KEY_CSS)[counter].value + if key == expected_key: + return counter + + return -1 + + +def get_display_name_value(): + index = get_index_of(DISPLAY_NAME_KEY) + return world.css_find(VALUE_CSS)[index].value + + +def change_display_name_value(step, new_value): + e = world.css_find(KEY_CSS)[get_index_of(DISPLAY_NAME_KEY)] + display_name = get_display_name_value() + for count in range(len(display_name)): + e._element.send_keys(Keys.TAB, Keys.END, Keys.BACK_SPACE) + # Must delete "" before typing the JSON value + e._element.send_keys(Keys.TAB, Keys.END, Keys.BACK_SPACE, Keys.BACK_SPACE, new_value) + press_the_notification_button(step, "Save") diff --git a/cms/djangoapps/contentstore/features/checklists.feature b/cms/djangoapps/contentstore/features/checklists.feature new file mode 100644 index 0000000000..bccb80b8d7 --- /dev/null +++ b/cms/djangoapps/contentstore/features/checklists.feature @@ -0,0 +1,24 @@ +Feature: Course checklists + + Scenario: A course author sees checklists defined by edX + Given I have opened a new course in Studio + When I select Checklists from the Tools menu + Then I see the four default edX checklists + + Scenario: A course author can 
mark tasks as complete + Given I have opened Checklists + Then I can check and uncheck tasks in a checklist + And They are correctly selected after I reload the page + + Scenario: A task can link to a location within Studio + Given I have opened Checklists + When I select a link to the course outline + Then I am brought to the course outline page + And I press the browser back button + Then I am brought back to the course outline in the correct state + + Scenario: A task can link to a location outside Studio + Given I have opened Checklists + When I select a link to help page + Then I am brought to the help page in a new window + diff --git a/cms/djangoapps/contentstore/features/checklists.py b/cms/djangoapps/contentstore/features/checklists.py new file mode 100644 index 0000000000..dc399f5fac --- /dev/null +++ b/cms/djangoapps/contentstore/features/checklists.py @@ -0,0 +1,123 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + +from lettuce import world, step +from nose.tools import assert_true, assert_equal +from terrain.steps import reload_the_page +from selenium.common.exceptions import StaleElementReferenceException + +############### ACTIONS #################### +@step('I select Checklists from the Tools menu$') +def i_select_checklists(step): + expand_icon_css = 'li.nav-course-tools i.icon-expand' + if world.browser.is_element_present_by_css(expand_icon_css): + world.css_click(expand_icon_css) + link_css = 'li.nav-course-tools-checklists a' + world.css_click(link_css) + + +@step('I have opened Checklists$') +def i_have_opened_checklists(step): + step.given('I have opened a new course in Studio') + step.given('I select Checklists from the Tools menu') + + +@step('I see the four default edX checklists$') +def i_see_default_checklists(step): + checklists = world.css_find('.checklist-title') + assert_equal(4, len(checklists)) + assert_true(checklists[0].text.endswith('Getting Started With Studio')) + assert_true(checklists[1].text.endswith('Draft a Rough 
Course Outline')) + assert_true(checklists[2].text.endswith("Explore edX\'s Support Tools")) + assert_true(checklists[3].text.endswith('Draft Your Course About Page')) + + +@step('I can check and uncheck tasks in a checklist$') +def i_can_check_and_uncheck_tasks(step): + # Use the 2nd checklist as a reference + verifyChecklist2Status(0, 7, 0) + toggleTask(1, 0) + verifyChecklist2Status(1, 7, 14) + toggleTask(1, 3) + verifyChecklist2Status(2, 7, 29) + toggleTask(1, 6) + verifyChecklist2Status(3, 7, 43) + toggleTask(1, 3) + verifyChecklist2Status(2, 7, 29) + + +@step('They are correctly selected after I reload the page$') +def tasks_correctly_selected_after_reload(step): + reload_the_page(step) + verifyChecklist2Status(2, 7, 29) + # verify that task 7 is still selected by toggling its checkbox state and making sure that it deselects + toggleTask(1, 6) + verifyChecklist2Status(1, 7, 14) + + +@step('I select a link to the course outline$') +def i_select_a_link_to_the_course_outline(step): + clickActionLink(1, 0, 'Edit Course Outline') + + +@step('I am brought to the course outline page$') +def i_am_brought_to_course_outline(step): + assert_equal('Course Outline', world.css_find('.outline .title-1')[0].text) + assert_equal(1, len(world.browser.windows)) + + +@step('I am brought back to the course outline in the correct state$') +def i_am_brought_back_to_course_outline(step): + step.given('I see the four default edX checklists') + # In a previous step, we selected (1, 0) in order to click the 'Edit Course Outline' link. + # Make sure the task is still showing as selected (there was a caching bug with the collection). 
+ verifyChecklist2Status(1, 7, 14) + + +@step('I select a link to help page$') +def i_select_a_link_to_the_help_page(step): + clickActionLink(2, 0, 'Visit Studio Help') + + +@step('I am brought to the help page in a new window$') +def i_am_brought_to_help_page_in_new_window(step): + step.given('I see the four default edX checklists') + windows = world.browser.windows + assert_equal(2, len(windows)) + world.browser.switch_to_window(windows[1]) + assert_equal('http://help.edge.edx.org/', world.browser.url) + + + + +############### HELPER METHODS #################### +def verifyChecklist2Status(completed, total, percentage): + def verify_count(driver): + try: + statusCount = world.css_find('#course-checklist1 .status-count').first + return statusCount.text == str(completed) + except StaleElementReferenceException: + return False + + world.wait_for(verify_count) + assert_equal(str(total), world.css_find('#course-checklist1 .status-amount').first.text) + # Would like to check the CSS width, but not sure how to do that. 
+ assert_equal(str(percentage), world.css_find('#course-checklist1 .viz-checklist-status-value .int').first.text) + + +def toggleTask(checklist, task): + world.css_click('#course-checklist' + str(checklist) +'-task' + str(task)) + + +def clickActionLink(checklist, task, actionText): + # toggle checklist item to make sure that the link button is showing + toggleTask(checklist, task) + action_link = world.css_find('#course-checklist' + str(checklist) + ' a')[task] + + # text will be empty initially, wait for it to populate + def verify_action_link_text(driver): + return action_link.text == actionText + + world.wait_for(verify_action_link_text) + action_link.click() + diff --git a/cms/djangoapps/contentstore/features/common.py b/cms/djangoapps/contentstore/features/common.py index f868b598a8..3878340af3 100644 --- a/cms/djangoapps/contentstore/features/common.py +++ b/cms/djangoapps/contentstore/features/common.py @@ -1,26 +1,27 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + from lettuce import world, step -from factories import * -from django.core.management import call_command -from lettuce.django import django_url -from django.conf import settings -from django.core.management import call_command from nose.tools import assert_true from nose.tools import assert_equal -import xmodule.modulestore.django + +from xmodule.modulestore.django import _MODULESTORES, modulestore +from xmodule.templates import update_templates +from auth.authz import get_user_by_email from logging import getLogger logger = getLogger(__name__) ########### STEP HELPERS ############## - @step('I (?:visit|access|open) the Studio homepage$') def i_visit_the_studio_homepage(step): # To make this go to port 8001, put # LETTUCE_SERVER_PORT = 8001 # in your settings.py file. 
- world.browser.visit(django_url('/')) - assert world.browser.is_element_present_by_css('body.no-header', 10) + world.visit('/') + signin_css = 'a.action-signin' + assert world.is_css_present(signin_css) @step('I am logged into Studio$') @@ -41,17 +42,23 @@ def i_press_the_category_delete_icon(step, category): css = 'a.delete-button.delete-subsection-button span.delete-icon' else: assert False, 'Invalid category: %s' % category - css_click(css) + world.css_click(css) + + +@step('I have opened a new course in Studio$') +def i_have_opened_a_new_course(step): + world.clear_courses() + log_into_studio() + create_a_course() + ####### HELPER FUNCTIONS ############## - - def create_studio_user( uname='robot', email='robot+studio@edx.org', password='test', is_staff=False): - studio_user = UserFactory.build( + studio_user = world.UserFactory.build( username=uname, email=email, password=password, @@ -59,50 +66,20 @@ def create_studio_user( studio_user.set_password(password) studio_user.save() - registration = RegistrationFactory(user=studio_user) + registration = world.RegistrationFactory(user=studio_user) registration.register(studio_user) registration.activate() - user_profile = UserProfileFactory(user=studio_user) - - -def flush_xmodule_store(): - # Flush and initialize the module store - # It needs the templates because it creates new records - # by cloning from the template. 
- # Note that if your test module gets in some weird state - # (though it shouldn't), do this manually - # from the bash shell to drop it: - # $ mongo test_xmodule --eval "db.dropDatabase()" - xmodule.modulestore.django._MODULESTORES = {} - xmodule.modulestore.django.modulestore().collection.drop() - xmodule.templates.update_templates() - - -def assert_css_with_text(css, text): - assert_true(world.browser.is_element_present_by_css(css, 5)) - assert_equal(world.browser.find_by_css(css).text, text) - - -def css_click(css): - world.browser.find_by_css(css).first.click() - - -def css_fill(css, value): - world.browser.find_by_css(css).first.fill(value) - - -def clear_courses(): - flush_xmodule_store() + user_profile = world.UserProfileFactory(user=studio_user) def fill_in_course_info( name='Robot Super Course', org='MITx', num='101'): - css_fill('.new-course-name', name) - css_fill('.new-course-org', org) - css_fill('.new-course-number', num) + world.css_fill('.new-course-name', name) + world.css_fill('.new-course-org', org) + world.css_fill('.new-course-number', num) def log_into_studio( @@ -110,39 +87,56 @@ def log_into_studio( email='robot+studio@edx.org', password='test', is_staff=False): + create_studio_user(uname=uname, email=email, is_staff=is_staff) + world.browser.cookies.delete() - world.browser.visit(django_url('/')) - world.browser.is_element_present_by_css('body.no-header', 10) + world.visit('/') + + signin_css = 'a.action-signin' + world.is_css_present(signin_css) + world.css_click(signin_css) login_form = world.browser.find_by_css('form#login_form') login_form.find_by_name('email').fill(email) login_form.find_by_name('password').fill(password) login_form.find_by_name('submit').click() - assert_true(world.browser.is_element_present_by_css('.new-course-button', 5)) + assert_true(world.is_css_present('.new-course-button')) def create_a_course(): - css_click('a.new-course-button') - fill_in_course_info() - css_click('input.new-course-save') - 
assert_true(world.browser.is_element_present_by_css('a#courseware-tab', 5)) + c = world.CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course') + + # Add the user to the instructor group of the course + # so they will have the permissions to see it in studio + g = world.GroupFactory.create(name='instructor_MITx/999/Robot_Super_Course') + u = get_user_by_email('robot+studio@edx.org') + u.groups.add(g) + u.save() + world.browser.reload() + + course_link_css = 'span.class-name' + world.css_click(course_link_css) + course_title_css = 'span.course-title' + assert_true(world.is_css_present(course_title_css)) def add_section(name='My Section'): link_css = 'a.new-courseware-section-button' - css_click(link_css) - name_css = '.new-section-name' - save_css = '.new-section-name-save' - css_fill(name_css, name) - css_click(save_css) + world.css_click(link_css) + name_css = 'input.new-section-name' + save_css = 'input.new-section-name-save' + world.css_fill(name_css, name) + world.css_click(save_css) + span_css = 'span.section-name-span' + assert_true(world.is_css_present(span_css)) def add_subsection(name='Subsection One'): css = 'a.new-subsection-item' - css_click(css) + world.css_click(css) name_css = 'input.new-subsection-name-input' save_css = 'input.new-subsection-name-save' - css_fill(name_css, name) - css_click(save_css) + world.css_fill(name_css, name) + world.css_click(save_css) diff --git a/cms/djangoapps/contentstore/features/course-settings.feature b/cms/djangoapps/contentstore/features/course-settings.feature new file mode 100644 index 0000000000..e869bfe47a --- /dev/null +++ b/cms/djangoapps/contentstore/features/course-settings.feature @@ -0,0 +1,25 @@ +Feature: Course Settings + As a course author, I want to be able to configure my course settings. 
+ + Scenario: User can set course dates + Given I have opened a new course in Studio + When I select Schedule and Details + And I set course dates + Then I see the set dates on refresh + + Scenario: User can clear previously set course dates (except start date) + Given I have set course dates + And I clear all the dates except start + Then I see cleared dates on refresh + + Scenario: User cannot clear the course start date + Given I have set course dates + And I clear the course start date + Then I receive a warning about course start date + And The previously set start date is shown on refresh + + Scenario: User can correct the course start date warning + Given I have tried to clear the course start + And I have entered a new course start date + Then The warning about course start date goes away + And My new course start date is shown on refresh diff --git a/cms/djangoapps/contentstore/features/course-settings.py b/cms/djangoapps/contentstore/features/course-settings.py new file mode 100644 index 0000000000..9eb5b0951d --- /dev/null +++ b/cms/djangoapps/contentstore/features/course-settings.py @@ -0,0 +1,165 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + +from lettuce import world, step +from terrain.steps import reload_the_page +from selenium.webdriver.common.keys import Keys +import time + +from nose.tools import assert_true, assert_false, assert_equal + +COURSE_START_DATE_CSS = "#course-start-date" +COURSE_END_DATE_CSS = "#course-end-date" +ENROLLMENT_START_DATE_CSS = "#course-enrollment-start-date" +ENROLLMENT_END_DATE_CSS = "#course-enrollment-end-date" + +COURSE_START_TIME_CSS = "#course-start-time" +COURSE_END_TIME_CSS = "#course-end-time" +ENROLLMENT_START_TIME_CSS = "#course-enrollment-start-time" +ENROLLMENT_END_TIME_CSS = "#course-enrollment-end-time" + +DUMMY_TIME = "3:30pm" +DEFAULT_TIME = "12:00am" + + +############### ACTIONS #################### +@step('I select Schedule and Details$') +def test_i_select_schedule_and_details(step): + 
expand_icon_css = 'li.nav-course-settings i.icon-expand' + if world.browser.is_element_present_by_css(expand_icon_css): + world.css_click(expand_icon_css) + link_css = 'li.nav-course-settings-schedule a' + world.css_click(link_css) + + +@step('I have set course dates$') +def test_i_have_set_course_dates(step): + step.given('I have opened a new course in Studio') + step.given('I select Schedule and Details') + step.given('And I set course dates') + + +@step('And I set course dates$') +def test_and_i_set_course_dates(step): + set_date_or_time(COURSE_START_DATE_CSS, '12/20/2013') + set_date_or_time(COURSE_END_DATE_CSS, '12/26/2013') + set_date_or_time(ENROLLMENT_START_DATE_CSS, '12/1/2013') + set_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013') + + set_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME) + set_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME) + + pause() + + +@step('Then I see the set dates on refresh$') +def test_then_i_see_the_set_dates_on_refresh(step): + reload_the_page(step) + verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013') + verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013') + verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013') + verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013') + + verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME) + # Unset times get set to 12 AM once the corresponding date has been set. 
+ verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME) + verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME) + verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME) + + +@step('And I clear all the dates except start$') +def test_and_i_clear_all_the_dates_except_start(step): + set_date_or_time(COURSE_END_DATE_CSS, '') + set_date_or_time(ENROLLMENT_START_DATE_CSS, '') + set_date_or_time(ENROLLMENT_END_DATE_CSS, '') + + pause() + + +@step('Then I see cleared dates on refresh$') +def test_then_i_see_cleared_dates_on_refresh(step): + reload_the_page(step) + verify_date_or_time(COURSE_END_DATE_CSS, '') + verify_date_or_time(ENROLLMENT_START_DATE_CSS, '') + verify_date_or_time(ENROLLMENT_END_DATE_CSS, '') + + verify_date_or_time(COURSE_END_TIME_CSS, '') + verify_date_or_time(ENROLLMENT_START_TIME_CSS, '') + verify_date_or_time(ENROLLMENT_END_TIME_CSS, '') + + # Verify course start date (required) and time still there + verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013') + verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME) + + +@step('I clear the course start date$') +def test_i_clear_the_course_start_date(step): + set_date_or_time(COURSE_START_DATE_CSS, '') + + +@step('I receive a warning about course start date$') +def test_i_receive_a_warning_about_course_start_date(step): + assert_true(world.css_has_text('.message-error', 'The course must have an assigned start date.')) + assert_true('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class')) + assert_true('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class')) + + +@step('The previously set start date is shown on refresh$') +def test_the_previously_set_start_date_is_shown_on_refresh(step): + reload_the_page(step) + verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013') + verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME) + + +@step('Given I have tried to clear the course start$') +def 
test_i_have_tried_to_clear_the_course_start(step): + step.given("I have set course dates") + step.given("I clear the course start date") + step.given("I receive a warning about course start date") + + +@step('I have entered a new course start date$') +def test_i_have_entered_a_new_course_start_date(step): + set_date_or_time(COURSE_START_DATE_CSS, '12/22/2013') + pause() + + +@step('The warning about course start date goes away$') +def test_the_warning_about_course_start_date_goes_away(step): + assert_equal(0, len(world.css_find('.message-error'))) + assert_false('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class')) + assert_false('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class')) + + +@step('My new course start date is shown on refresh$') +def test_my_new_course_start_date_is_shown_on_refresh(step): + reload_the_page(step) + verify_date_or_time(COURSE_START_DATE_CSS, '12/22/2013') + # Time should have stayed from before attempt to clear date. + verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME) + + +############### HELPER METHODS #################### +def set_date_or_time(css, date_or_time): + """ + Sets date or time field. + """ + world.css_fill(css, date_or_time) + e = world.css_find(css).first + # hit Enter to apply the changes + e._element.send_keys(Keys.ENTER) + + +def verify_date_or_time(css, date_or_time): + """ + Verifies date or time field. + """ + assert_equal(date_or_time, world.css_find(css).first.value) + + +def pause(): + """ + Must sleep briefly to allow last time save to finish, + else refresh of browser will fail. 
+ """ + time.sleep(float(1)) diff --git a/cms/djangoapps/contentstore/features/courses.feature b/cms/djangoapps/contentstore/features/courses.feature index 39d39b50aa..455313b0e2 100644 --- a/cms/djangoapps/contentstore/features/courses.feature +++ b/cms/djangoapps/contentstore/features/courses.feature @@ -10,4 +10,4 @@ Feature: Create Course And I fill in the new course information And I press the "Save" button Then the Courseware page has loaded in Studio - And I see a link for adding a new section \ No newline at end of file + And I see a link for adding a new section diff --git a/cms/djangoapps/contentstore/features/courses.py b/cms/djangoapps/contentstore/features/courses.py index d2d038a928..5da7720945 100644 --- a/cms/djangoapps/contentstore/features/courses.py +++ b/cms/djangoapps/contentstore/features/courses.py @@ -1,3 +1,6 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + from lettuce import world, step from common import * @@ -6,12 +9,12 @@ from common import * @step('There are no courses$') def no_courses(step): - clear_courses() + world.clear_courses() @step('I click the New Course button$') def i_click_new_course(step): - css_click('.new-course-button') + world.css_click('.new-course-button') @step('I fill in the new course information$') @@ -27,36 +30,36 @@ def i_create_a_course(step): @step('I click the course link in My Courses$') def i_click_the_course_link_in_my_courses(step): course_css = 'span.class-name' - css_click(course_css) + world.css_click(course_css) ############ ASSERTIONS ################### @step('the Courseware page has loaded in Studio$') def courseware_page_has_loaded_in_studio(step): - courseware_css = 'a#courseware-tab' - assert world.browser.is_element_present_by_css(courseware_css) + course_title_css = 'span.course-title' + assert world.is_css_present(course_title_css) @step('I see the course listed in My Courses$') def i_see_the_course_in_my_courses(step): course_css = 'span.class-name' - assert_css_with_text(course_css, 
'Robot Super Course') + assert world.css_has_text(course_css, 'Robot Super Course') @step('the course is loaded$') def course_is_loaded(step): class_css = 'a.class-name' - assert_css_with_text(class_css, 'Robot Super Course') + assert world.css_has_text(class_css, 'Robot Super Course') @step('I am on the "([^"]*)" tab$') def i_am_on_tab(step, tab_name): header_css = 'div.inner-wrapper h1' - assert_css_with_text(header_css, tab_name) + assert world.css_has_text(header_css, tab_name) @step('I see a link for adding a new section$') def i_see_new_section_link(step): link_css = 'a.new-courseware-section-button' - assert_css_with_text(link_css, 'New Section') + assert world.css_has_text(link_css, '+ New Section') diff --git a/cms/djangoapps/contentstore/features/factories.py b/cms/djangoapps/contentstore/features/factories.py deleted file mode 100644 index 087ceaaa2d..0000000000 --- a/cms/djangoapps/contentstore/features/factories.py +++ /dev/null @@ -1,34 +0,0 @@ -import factory -from student.models import User, UserProfile, Registration -from datetime import datetime -import uuid - - -class UserProfileFactory(factory.Factory): - FACTORY_FOR = UserProfile - - user = None - name = 'Robot Studio' - courseware = 'course.xml' - - -class RegistrationFactory(factory.Factory): - FACTORY_FOR = Registration - - user = None - activation_key = uuid.uuid4().hex - - -class UserFactory(factory.Factory): - FACTORY_FOR = User - - username = 'robot-studio' - email = 'robot+studio@edx.org' - password = 'test' - first_name = 'Robot' - last_name = 'Studio' - is_staff = False - is_active = True - is_superuser = False - last_login = datetime.now() - date_joined = datetime.now() diff --git a/cms/djangoapps/contentstore/features/section.feature b/cms/djangoapps/contentstore/features/section.feature index ad00ba2911..08d38367bc 100644 --- a/cms/djangoapps/contentstore/features/section.feature +++ b/cms/djangoapps/contentstore/features/section.feature @@ -11,6 +11,14 @@ Feature: Create Section
And I see a release date for my section And I see a link to create a new subsection + Scenario: Add a new section (with a quote in the name) to a course (bug #216) + Given I have opened a new course in Studio + When I click the New Section link + And I enter a section name with a quote and click save + Then I see my section name with a quote on the Courseware page + And I click to edit the section name + Then I see the complete section name with a quote in the editor + Scenario: Edit section release date Given I have opened a new course in Studio And I have added a new section @@ -18,9 +26,10 @@ Feature: Create Section And I save a new section release date Then the section release date is updated + @skip-phantom Scenario: Delete section Given I have opened a new course in Studio And I have added a new section When I press the "section" delete icon And I confirm the alert - Then the section does not exist \ No newline at end of file + Then the section does not exist diff --git a/cms/djangoapps/contentstore/features/section.py b/cms/djangoapps/contentstore/features/section.py index 3bcaeab6c4..0c0f5536a0 100644 --- a/cms/djangoapps/contentstore/features/section.py +++ b/cms/djangoapps/contentstore/features/section.py @@ -1,28 +1,29 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + from lettuce import world, step from common import * +from nose.tools import assert_equal +from selenium.webdriver.common.keys import Keys +import time ############### ACTIONS #################### -@step('I have opened a new course in Studio$') -def i_have_opened_a_new_course(step): - clear_courses() - log_into_studio() - create_a_course() - - @step('I click the new section link$') def i_click_new_section_link(step): link_css = 'a.new-courseware-section-button' - css_click(link_css) + world.css_click(link_css) @step('I enter the section name and click save$') def i_save_section_name(step): - name_css = '.new-section-name' - save_css = '.new-section-name-save' - css_fill(name_css, 'My 
Section') - css_click(save_css) + save_section_name('My Section') + + +@step('I enter a section name with a quote and click save$') +def i_save_section_name_with_quote(step): + save_section_name('Section with "Quote"') @step('I have added a new section$') @@ -33,26 +34,47 @@ def i_have_added_new_section(step): @step('I click the Edit link for the release date$') def i_click_the_edit_link_for_the_release_date(step): button_css = 'div.section-published-date a.edit-button' - css_click(button_css) + world.css_click(button_css) @step('I save a new section release date$') def i_save_a_new_section_release_date(step): date_css = 'input.start-date.date.hasDatepicker' time_css = 'input.start-time.time.ui-timepicker-input' - css_fill(date_css, '12/25/2013') - # click here to make the calendar go away - css_click(time_css) - css_fill(time_css, '12:00am') - css_click('a.save-button') + world.css_fill(date_css, '12/25/2013') + # hit TAB to get to the time field + e = world.css_find(date_css).first + e._element.send_keys(Keys.TAB) + world.css_fill(time_css, '12:00am') + e = world.css_find(time_css).first + e._element.send_keys(Keys.TAB) + time.sleep(float(1)) + world.browser.click_link_by_text('Save') + ############ ASSERTIONS ################### @step('I see my section on the Courseware page$') def i_see_my_section_on_the_courseware_page(step): - section_css = 'span.section-name-span' - assert_css_with_text(section_css, 'My Section') + see_my_section_on_the_courseware_page('My Section') + + +@step('I see my section name with a quote on the Courseware page$') +def i_see_my_section_name_with_quote_on_the_courseware_page(step): + see_my_section_on_the_courseware_page('Section with "Quote"') + + +@step('I click to edit the section name$') +def i_click_to_edit_section_name(step): + world.css_click('span.section-name-span') + + +@step('I see the complete section name with a quote in the editor$') +def i_see_complete_section_name_with_quote_in_editor(step): + css = '.edit-section-name' 
+ assert world.is_css_present(css) + assert_equal(world.browser.find_by_css(css).value, 'Section with "Quote"') @step('the section does not exist$') @@ -66,7 +88,7 @@ def i_see_a_release_date_for_my_section(step): import re css = 'span.published-status' - assert world.browser.is_element_present_by_css(css) + assert world.is_css_present(css) status_text = world.browser.find_by_css(css).text # e.g. 11/06/2012 at 16:25 @@ -80,17 +102,31 @@ def i_see_a_release_date_for_my_section(step): @step('I see a link to create a new subsection$') def i_see_a_link_to_create_a_new_subsection(step): css = 'a.new-subsection-item' - assert world.browser.is_element_present_by_css(css) + assert world.is_css_present(css) @step('the section release date picker is not visible$') def the_section_release_date_picker_not_visible(step): css = 'div.edit-subsection-publish-settings' - assert False, world.browser.find_by_css(css).visible + assert not world.css_visible(css) @step('the section release date is updated$') def the_section_release_date_is_updated(step): css = 'span.published-status' - status_text = world.browser.find_by_css(css).text - assert status_text == 'Will Release: 12/25/2013 at 12:00am' + status_text = world.css_text(css) + assert_equal(status_text, 'Will Release: 12/25/2013 at 12:00am') + + +############ HELPER METHODS ################### + +def save_section_name(name): + name_css = '.new-section-name' + save_css = '.new-section-name-save' + world.css_fill(name_css, name) + world.css_click(save_css) + + +def see_my_section_on_the_courseware_page(name): + section_css = 'span.section-name-span' + assert world.css_has_text(section_css, name) diff --git a/cms/djangoapps/contentstore/features/signup.feature b/cms/djangoapps/contentstore/features/signup.feature index 8a6f93d33b..03a1c9524a 100644 --- a/cms/djangoapps/contentstore/features/signup.feature +++ b/cms/djangoapps/contentstore/features/signup.feature @@ -5,8 +5,8 @@ Feature: Sign in Scenario: Sign up from the homepage 
Given I visit the Studio homepage - When I click the link with the text "Sign up" + When I click the link with the text "Sign Up" And I fill in the registration form - And I press the "Create My Account" button on the registration form + And I press the Create My Account button on the registration form Then I should see be on the studio home page - And I should see the message "please click on the activation link in your email." \ No newline at end of file + And I should see the message "please click on the activation link in your email." diff --git a/cms/djangoapps/contentstore/features/signup.py b/cms/djangoapps/contentstore/features/signup.py index e105b674f7..6ca358183b 100644 --- a/cms/djangoapps/contentstore/features/signup.py +++ b/cms/djangoapps/contentstore/features/signup.py @@ -1,4 +1,8 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + from lettuce import world, step +from common import * @step('I fill in the registration form$') @@ -11,10 +15,13 @@ def i_fill_in_the_registration_form(step): register_form.find_by_name('terms_of_service').check() -@step('I press the "([^"]*)" button on the registration form$') -def i_press_the_button_on_the_registration_form(step, button): - register_form = world.browser.find_by_css('form#register_form') - register_form.find_by_value(button).click() +@step('I press the Create My Account button on the registration form$') +def i_press_the_button_on_the_registration_form(step): + submit_css = 'form#register_form button#submit' + # Workaround for click not working on ubuntu + # for some unknown reason. 
+ e = world.css_find(submit_css) + e.type(' ') @step('I should see be on the studio home page$') diff --git a/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature b/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature index 5276b90d12..762dea6838 100644 --- a/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature +++ b/cms/djangoapps/contentstore/features/studio-overview-togglesection.feature @@ -1,29 +1,30 @@ Feature: Overview Toggle Section In order to quickly view the details of a course's section or to scan the inventory of sections - As a course author - I want to toggle the visibility of each section's subsection details in the overview listing + As a course author + I want to toggle the visibility of each section's subsection details in the overview listing Scenario: The default layout for the overview page is to show sections in expanded view Given I have a course with multiple sections - When I navigate to the course overview page - Then I see the "Collapse All Sections" link - And all sections are expanded + When I navigate to the course overview page + Then I see the "Collapse All Sections" link + And all sections are expanded - Scenario: Expand/collapse for a course with no sections + Scenario: Expand /collapse for a course with no sections Given I have a course with no sections - When I navigate to the course overview page - Then I do not see the "Collapse All Sections" link + When I navigate to the course overview page + Then I do not see the "Collapse All Sections" link Scenario: Collapse link appears after creating first section of a course Given I have a course with no sections - When I navigate to the course overview page - And I add a section - Then I see the "Collapse All Sections" link - And all sections are expanded + When I navigate to the course overview page + And I add a section + Then I see the "Collapse All Sections" link + And all sections are expanded + @skip-phantom 
Scenario: Collapse link is not removed after last section of a course is deleted Given I have a course with 1 section - And I navigate to the course overview page + And I navigate to the course overview page When I press the "section" delete icon And I confirm the alert Then I see the "Collapse All Sections" link diff --git a/cms/djangoapps/contentstore/features/studio-overview-togglesection.py b/cms/djangoapps/contentstore/features/studio-overview-togglesection.py index 00aa39455d..7f717b731c 100644 --- a/cms/djangoapps/contentstore/features/studio-overview-togglesection.py +++ b/cms/djangoapps/contentstore/features/studio-overview-togglesection.py @@ -1,5 +1,7 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + from lettuce import world, step -from terrain.factories import * from common import * from nose.tools import assert_true, assert_false, assert_equal @@ -9,16 +11,16 @@ logger = getLogger(__name__) @step(u'I have a course with no sections$') def have_a_course(step): - clear_courses() - course = CourseFactory.create() + world.clear_courses() + course = world.CourseFactory.create() @step(u'I have a course with 1 section$') def have_a_course_with_1_section(step): - clear_courses() - course = CourseFactory.create() - section = ItemFactory.create(parent_location=course.location) - subsection1 = ItemFactory.create( + world.clear_courses() + course = world.CourseFactory.create() + section = world.ItemFactory.create(parent_location=course.location) + subsection1 = world.ItemFactory.create( parent_location=section.location, template='i4x://edx/templates/sequential/Empty', display_name='Subsection One',) @@ -26,21 +28,21 @@ def have_a_course_with_1_section(step): @step(u'I have a course with multiple sections$') def have_a_course_with_two_sections(step): - clear_courses() - course = CourseFactory.create() - section = ItemFactory.create(parent_location=course.location) - subsection1 = ItemFactory.create( + world.clear_courses() + course = world.CourseFactory.create() 
+ section = world.ItemFactory.create(parent_location=course.location) + subsection1 = world.ItemFactory.create( parent_location=section.location, template='i4x://edx/templates/sequential/Empty', display_name='Subsection One',) - section2 = ItemFactory.create( + section2 = world.ItemFactory.create( parent_location=course.location, display_name='Section Two',) - subsection2 = ItemFactory.create( + subsection2 = world.ItemFactory.create( parent_location=section2.location, template='i4x://edx/templates/sequential/Empty', display_name='Subsection Alpha',) - subsection3 = ItemFactory.create( + subsection3 = world.ItemFactory.create( parent_location=section2.location, template='i4x://edx/templates/sequential/Empty', display_name='Subsection Beta',) @@ -50,7 +52,7 @@ def have_a_course_with_two_sections(step): def navigate_to_the_course_overview_page(step): log_into_studio(is_staff=True) course_locator = '.class-name' - css_click(course_locator) + world.css_click(course_locator) @step(u'I navigate to the courseware page of a course with multiple sections') @@ -67,44 +69,44 @@ def i_add_a_section(step): @step(u'I click the "([^"]*)" link$') def i_click_the_text_span(step, text): span_locator = '.toggle-button-sections span' - assert_true(world.browser.is_element_present_by_css(span_locator, 5)) + assert_true(world.browser.is_element_present_by_css(span_locator)) # first make sure that the expand/collapse text is the one you expected assert_equal(world.browser.find_by_css(span_locator).value, text) - css_click(span_locator) + world.css_click(span_locator) @step(u'I collapse the first section$') def i_collapse_a_section(step): collapse_locator = 'section.courseware-section a.collapse' - css_click(collapse_locator) + world.css_click(collapse_locator) @step(u'I expand the first section$') def i_expand_a_section(step): expand_locator = 'section.courseware-section a.expand' - css_click(expand_locator) + world.css_click(expand_locator) @step(u'I see the "([^"]*)" link$') def 
i_see_the_span_with_text(step, text): span_locator = '.toggle-button-sections span' - assert_true(world.browser.is_element_present_by_css(span_locator, 5)) - assert_equal(world.browser.find_by_css(span_locator).value, text) - assert_true(world.browser.find_by_css(span_locator).visible) + assert_true(world.is_css_present(span_locator)) + assert_equal(world.css_find(span_locator).value, text) + assert_true(world.css_visible(span_locator)) @step(u'I do not see the "([^"]*)" link$') def i_do_not_see_the_span_with_text(step, text): # Note that the span will exist on the page but not be visible span_locator = '.toggle-button-sections span' - assert_true(world.browser.is_element_present_by_css(span_locator)) - assert_false(world.browser.find_by_css(span_locator).visible) + assert_true(world.is_css_present(span_locator)) + assert_false(world.css_visible(span_locator)) @step(u'all sections are expanded$') def all_sections_are_expanded(step): subsection_locator = 'div.subsection-list' - subsections = world.browser.find_by_css(subsection_locator) + subsections = world.css_find(subsection_locator) for s in subsections: assert_true(s.visible) @@ -112,6 +114,6 @@ def all_sections_are_expanded(step): @step(u'all sections are collapsed$') def all_sections_are_expanded(step): subsection_locator = 'div.subsection-list' - subsections = world.browser.find_by_css(subsection_locator) + subsections = world.css_find(subsection_locator) for s in subsections: assert_false(s.visible) diff --git a/cms/djangoapps/contentstore/features/subsection.feature b/cms/djangoapps/contentstore/features/subsection.feature index 5acb5bfe44..e913c6a4bf 100644 --- a/cms/djangoapps/contentstore/features/subsection.feature +++ b/cms/djangoapps/contentstore/features/subsection.feature @@ -9,6 +9,23 @@ Feature: Create Subsection And I enter the subsection name and click save Then I see my subsection on the Courseware page + Scenario: Add a new subsection (with a name containing a quote) to a section (bug #216) + 
Given I have opened a new course section in Studio + When I click the New Subsection link + And I enter a subsection name with a quote and click save + Then I see my subsection name with a quote on the Courseware page + And I click to edit the subsection name + Then I see the complete subsection name with a quote in the editor + + Scenario: Assign grading type to a subsection and verify it is still shown after refresh (bug #258) + Given I have opened a new course section in Studio + And I have added a new subsection + And I mark it as Homework + Then I see it marked as Homework + And I reload the page + Then I see it marked as Homework + + @skip-phantom Scenario: Delete a subsection Given I have opened a new course section in Studio And I have added a new subsection diff --git a/cms/djangoapps/contentstore/features/subsection.py b/cms/djangoapps/contentstore/features/subsection.py index e2041b8dbf..4ab27fcb49 100644 --- a/cms/djangoapps/contentstore/features/subsection.py +++ b/cms/djangoapps/contentstore/features/subsection.py @@ -1,12 +1,16 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + from lettuce import world, step from common import * +from nose.tools import assert_equal ############### ACTIONS #################### @step('I have opened a new course section in Studio$') def i_have_opened_a_new_course_section(step): - clear_courses() + world.clear_courses() log_into_studio() create_a_course() add_section() @@ -14,34 +18,77 @@ def i_have_opened_a_new_course_section(step): @step('I click the New Subsection link') def i_click_the_new_subsection_link(step): - css = 'a.new-subsection-item' - css_click(css) + world.css_click('a.new-subsection-item') @step('I enter the subsection name and click save$') def i_save_subsection_name(step): - name_css = 'input.new-subsection-name-input' - save_css = 'input.new-subsection-name-save' - css_fill(name_css, 'Subsection One') - css_click(save_css) + save_subsection_name('Subsection One') + + +@step('I enter a subsection 
name with a quote and click save$') +def i_save_subsection_name_with_quote(step): + save_subsection_name('Subsection With "Quote"') + + +@step('I click to edit the subsection name$') +def i_click_to_edit_subsection_name(step): + world.css_click('span.subsection-name-value') + + +@step('I see the complete subsection name with a quote in the editor$') +def i_see_complete_subsection_name_with_quote_in_editor(step): + css = '.subsection-display-name-input' + assert world.is_css_present(css) + assert_equal(world.css_find(css).value, 'Subsection With "Quote"') @step('I have added a new subsection$') def i_have_added_a_new_subsection(step): add_subsection() + +@step('I mark it as Homework$') +def i_mark_it_as_homework(step): + world.css_click('a.menu-toggle') + world.browser.click_link_by_text('Homework') + + +@step('I see it marked as Homework$') +def i_see_it_marked__as_homework(step): + assert_equal(world.css_find(".status-label").value, 'Homework') + + ############ ASSERTIONS ################### @step('I see my subsection on the Courseware page$') def i_see_my_subsection_on_the_courseware_page(step): - css = 'span.subsection-name' - assert world.browser.is_element_present_by_css(css) - css = 'span.subsection-name-value' - assert_css_with_text(css, 'Subsection One') + see_subsection_name('Subsection One') + + +@step('I see my subsection name with a quote on the Courseware page$') +def i_see_my_subsection_name_with_quote_on_the_courseware_page(step): + see_subsection_name('Subsection With "Quote"') @step('the subsection does not exist$') def the_subsection_does_not_exist(step): css = 'span.subsection-name' assert world.browser.is_element_not_present_by_css(css) + + +############ HELPER METHODS ################### + +def save_subsection_name(name): + name_css = 'input.new-subsection-name-input' + save_css = 'input.new-subsection-name-save' + world.css_fill(name_css, name) + world.css_click(save_css) + + +def see_subsection_name(name): + css = 'span.subsection-name' + 
assert world.is_css_present(css) + css = 'span.subsection-name-value' + assert world.css_has_text(css, name) diff --git a/cms/djangoapps/contentstore/management/commands/delete_course.py b/cms/djangoapps/contentstore/management/commands/delete_course.py index bb38e72d44..fc92205030 100644 --- a/cms/djangoapps/contentstore/management/commands/delete_course.py +++ b/cms/djangoapps/contentstore/management/commands/delete_course.py @@ -7,7 +7,7 @@ from xmodule.modulestore.django import modulestore from xmodule.contentstore.django import contentstore from xmodule.modulestore import Location from xmodule.course_module import CourseDescriptor -from prompt import query_yes_no +from .prompt import query_yes_no from auth.authz import _delete_course_group @@ -17,22 +17,29 @@ from auth.authz import _delete_course_group class Command(BaseCommand): - help = \ -'''Delete a MongoDB backed course''' + help = '''Delete a MongoDB backed course''' def handle(self, *args, **options): - if len(args) != 1: - raise CommandError("delete_course requires one argument: ") + if len(args) != 1 and len(args) != 2: + raise CommandError("delete_course requires one or more arguments: |commit|") loc_str = args[0] + commit = False + if len(args) == 2: + commit = args[1] == 'commit' + + if commit: + print 'Actually going to delete the course from DB....' + ms = modulestore('direct') cs = contentstore() if query_yes_no("Deleting course {0}. Confirm?".format(loc_str), default="no"): - if query_yes_no("Are you sure. This action cannot be undone!", default="no"): - loc = CourseDescriptor.id_to_location(loc_str) - if delete_course(ms, cs, loc) == True: - print 'removing User permissions from course....' - # in the django layer, we need to remove all the user permissions groups associated with this course - _delete_course_group(loc) + if query_yes_no("Are you sure. 
This action cannot be undone!", default="no"): + loc = CourseDescriptor.id_to_location(loc_str) + if delete_course(ms, cs, loc, commit) == True: + print 'removing User permissions from course....' + # in the django layer, we need to remove all the user permissions groups associated with this course + if commit: + _delete_course_group(loc) diff --git a/cms/djangoapps/contentstore/management/commands/prompt.py b/cms/djangoapps/contentstore/management/commands/prompt.py index 211c48406c..40a39d0a11 100644 --- a/cms/djangoapps/contentstore/management/commands/prompt.py +++ b/cms/djangoapps/contentstore/management/commands/prompt.py @@ -13,7 +13,7 @@ def query_yes_no(question, default="yes"): """ valid = {"yes":True, "y":True, "ye":True, "no":False, "n":False} - if default == None: + if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " diff --git a/cms/djangoapps/contentstore/management/commands/update_templates.py b/cms/djangoapps/contentstore/management/commands/update_templates.py new file mode 100644 index 0000000000..b30d30480a --- /dev/null +++ b/cms/djangoapps/contentstore/management/commands/update_templates.py @@ -0,0 +1,9 @@ +from xmodule.templates import update_templates +from django.core.management.base import BaseCommand + +class Command(BaseCommand): + help = \ +'''Imports and updates the Studio component templates from the code pack and put in the DB''' + + def handle(self, *args, **options): + update_templates() \ No newline at end of file diff --git a/cms/djangoapps/contentstore/module_info_model.py b/cms/djangoapps/contentstore/module_info_model.py index 7ed4505c94..8ea6add88d 100644 --- a/cms/djangoapps/contentstore/module_info_model.py +++ b/cms/djangoapps/contentstore/module_info_model.py @@ -15,10 +15,10 @@ def get_module_info(store, location, parent_location=None, rewrite_static_links= template_location = Location(['i4x', 'edx', 'templates', location.category, 'Empty']) module = store.clone_item(template_location, 
location) - data = module.definition['data'] + data = module.data if rewrite_static_links: data = replace_static_urls( - module.definition['data'], + module.data, None, course_namespace=Location([ module.location.tag, @@ -32,7 +32,8 @@ def get_module_info(store, location, parent_location=None, rewrite_static_links= return { 'id': module.location.url(), 'data': data, - 'metadata': module.metadata + # TODO (cpennington): This really shouldn't have to do this much reaching in to get the metadata + 'metadata': module._model_data._kvs._metadata } @@ -70,23 +71,23 @@ def set_module_info(store, location, post_data): # 'apply' the submitted metadata, so we don't end up deleting system metadata if post_data.get('metadata') is not None: posted_metadata = post_data['metadata'] - + # update existing metadata with submitted metadata (which can be partial) # IMPORTANT NOTE: if the client passed pack 'null' (None) for a piece of metadata that means 'remove it' - for metadata_key in posted_metadata.keys(): - + for metadata_key, value in posted_metadata.items(): + # let's strip out any metadata fields from the postback which have been identified as system metadata # and therefore should not be user-editable, so we should accept them back from the client if metadata_key in module.system_metadata_fields: del posted_metadata[metadata_key] elif posted_metadata[metadata_key] is None: # remove both from passed in collection as well as the collection read in from the modulestore - if metadata_key in module.metadata: - del module.metadata[metadata_key] + if metadata_key in module._model_data: + del module._model_data[metadata_key] del posted_metadata[metadata_key] - - # overlay the new metadata over the modulestore sourced collection to support partial updates - module.metadata.update(posted_metadata) - + else: + module._model_data[metadata_key] = value + # commit to datastore - store.update_metadata(location, module.metadata) + # TODO (cpennington): This really shouldn't have to do this 
much reaching in to get the metadata + store.update_metadata(location, module._model_data._kvs._metadata) diff --git a/cms/djangoapps/contentstore/tests/test_checklists.py b/cms/djangoapps/contentstore/tests/test_checklists.py new file mode 100644 index 0000000000..f0889b0861 --- /dev/null +++ b/cms/djangoapps/contentstore/tests/test_checklists.py @@ -0,0 +1,96 @@ +""" Unit tests for checklist methods in views.py. """ +from contentstore.utils import get_modulestore, get_url_reverse +from contentstore.tests.test_course_settings import CourseTestCase +from xmodule.modulestore.inheritance import own_metadata +from xmodule.modulestore.tests.factories import CourseFactory +from django.core.urlresolvers import reverse +import json + + +class ChecklistTestCase(CourseTestCase): + """ Test for checklist get and put methods. """ + def setUp(self): + """ Creates the test course. """ + super(ChecklistTestCase, self).setUp() + self.course = CourseFactory.create(org='mitX', number='333', display_name='Checklists Course') + + def get_persisted_checklists(self): + """ Returns the checklists as persisted in the modulestore. """ + modulestore = get_modulestore(self.course.location) + return modulestore.get_item(self.course.location).checklists + + def test_get_checklists(self): + """ Tests the get checklists method. """ + checklists_url = get_url_reverse('Checklists', self.course) + response = self.client.get(checklists_url) + self.assertContains(response, "Getting Started With Studio") + payload = response.content + + # Now delete the checklists from the course and verify they get repopulated (for courses + # created before checklists were introduced). 
+ self.course.checklists = None + modulestore = get_modulestore(self.course.location) + modulestore.update_metadata(self.course.location, own_metadata(self.course)) + self.assertEquals(self.get_persisted_checklists(), None) + response = self.client.get(checklists_url) + self.assertEquals(payload, response.content) + + def test_update_checklists_no_index(self): + """ No checklist index, should return all of them. """ + update_url = reverse('checklists_updates', kwargs={ + 'org': self.course.location.org, + 'course': self.course.location.course, + 'name': self.course.location.name}) + + returned_checklists = json.loads(self.client.get(update_url).content) + self.assertListEqual(self.get_persisted_checklists(), returned_checklists) + + def test_update_checklists_index_ignored_on_get(self): + """ Checklist index ignored on get. """ + update_url = reverse('checklists_updates', kwargs={'org': self.course.location.org, + 'course': self.course.location.course, + 'name': self.course.location.name, + 'checklist_index': 1}) + + returned_checklists = json.loads(self.client.get(update_url).content) + self.assertListEqual(self.get_persisted_checklists(), returned_checklists) + + def test_update_checklists_post_no_index(self): + """ No checklist index, will error on post. """ + update_url = reverse('checklists_updates', kwargs={'org': self.course.location.org, + 'course': self.course.location.course, + 'name': self.course.location.name}) + response = self.client.post(update_url) + self.assertContains(response, 'Could not save checklist', status_code=400) + + def test_update_checklists_index_out_of_range(self): + """ Checklist index out of range, will error on post. 
""" + update_url = reverse('checklists_updates', kwargs={'org': self.course.location.org, + 'course': self.course.location.course, + 'name': self.course.location.name, + 'checklist_index': 100}) + response = self.client.post(update_url) + self.assertContains(response, 'Could not save checklist', status_code=400) + + def test_update_checklists_index(self): + """ Check that an update of a particular checklist works. """ + update_url = reverse('checklists_updates', kwargs={'org': self.course.location.org, + 'course': self.course.location.course, + 'name': self.course.location.name, + 'checklist_index': 2}) + payload = self.course.checklists[2] + self.assertFalse(payload.get('is_checked')) + payload['is_checked'] = True + + returned_checklist = json.loads(self.client.post(update_url, json.dumps(payload), "application/json").content) + self.assertTrue(returned_checklist.get('is_checked')) + self.assertEqual(self.get_persisted_checklists()[2], returned_checklist) + + def test_update_checklists_delete_unsupported(self): + """ Delete operation is not supported. 
""" + update_url = reverse('checklists_updates', kwargs={'org': self.course.location.org, + 'course': self.course.location.course, + 'name': self.course.location.name, + 'checklist_index': 100}) + response = self.client.delete(update_url) + self.assertContains(response, 'Unsupported request', status_code=400) \ No newline at end of file diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py index 7622ee7661..49a609a879 100644 --- a/cms/djangoapps/contentstore/tests/test_contentstore.py +++ b/cms/djangoapps/contentstore/tests/test_contentstore.py @@ -5,29 +5,28 @@ from django.test.utils import override_settings from django.conf import settings from django.core.urlresolvers import reverse from path import path -from tempfile import mkdtemp +from tempdir import mkdtemp_clean +from datetime import timedelta import json from fs.osfs import OSFS import copy -from mock import Mock -from json import dumps, loads +from json import loads -from student.models import Registration from django.contrib.auth.models import User -from cms.djangoapps.contentstore.utils import get_modulestore +from contentstore.utils import get_modulestore -from utils import ModuleStoreTestCase, parse_json +from .utils import ModuleStoreTestCase, parse_json from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.modulestore import Location from xmodule.modulestore.store_utilities import clone_course from xmodule.modulestore.store_utilities import delete_course -from xmodule.modulestore.django import modulestore, _MODULESTORES +from xmodule.modulestore.django import modulestore from xmodule.contentstore.django import contentstore from xmodule.templates import update_templates from xmodule.modulestore.xml_exporter import export_to_xml -from xmodule.modulestore.xml_importer import import_from_xml -from xmodule.templates import update_templates +from xmodule.modulestore.xml_importer import 
import_from_xml, perform_xlint +from xmodule.modulestore.inheritance import own_metadata from xmodule.capa_module import CapaDescriptor from xmodule.course_module import CourseDescriptor @@ -38,6 +37,14 @@ TEST_DATA_MODULESTORE = copy.deepcopy(settings.MODULESTORE) TEST_DATA_MODULESTORE['default']['OPTIONS']['fs_root'] = path('common/test/data') TEST_DATA_MODULESTORE['direct']['OPTIONS']['fs_root'] = path('common/test/data') +class MongoCollectionFindWrapper(object): + def __init__(self, original): + self.original = original + self.counter = 0 + + def find(self, query, *args, **kwargs): + self.counter = self.counter+1 + return self.original(query, *args, **kwargs) @override_settings(MODULESTORE=TEST_DATA_MODULESTORE) class ContentStoreToyCourseTest(ModuleStoreTestCase): @@ -63,7 +70,6 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.client = Client() self.client.login(username=uname, password=password) - def check_edit_unit(self, test_course_name): import_from_xml(modulestore(), 'common/test/data/', [test_course_name]) @@ -79,11 +85,48 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): def test_edit_unit_full(self): self.check_edit_unit('full') + def _get_draft_counts(self, item): + cnt = 1 if getattr(item, 'is_draft', False) else 0 + for child in item.get_children(): + cnt = cnt + self._get_draft_counts(child) + + return cnt + + def test_get_depth_with_drafts(self): + import_from_xml(modulestore(), 'common/test/data/', ['simple']) + + course = modulestore('draft').get_item(Location(['i4x', 'edX', 'simple', + 'course', '2012_Fall', None]), depth=None) + + # make sure no draft items have been returned + num_drafts = self._get_draft_counts(course) + self.assertEqual(num_drafts, 0) + + problem = modulestore('draft').get_item(Location(['i4x', 'edX', 'simple', + 'problem', 'ps01-simple', None])) + + # put into draft + modulestore('draft').clone_item(problem.location, problem.location) + + # make sure we can query that item and verify that it is a 
draft + draft_problem = modulestore('draft').get_item(Location(['i4x', 'edX', 'simple', + 'problem', 'ps01-simple', None])) + self.assertTrue(getattr(draft_problem,'is_draft', False)) + + #now requery with depth + course = modulestore('draft').get_item(Location(['i4x', 'edX', 'simple', + 'course', '2012_Fall', None]), depth=None) + + # make sure just one draft item have been returned + num_drafts = self._get_draft_counts(course) + self.assertEqual(num_drafts, 1) + + def test_static_tab_reordering(self): import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) + module_store = modulestore('direct') + course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) # reverse the ordering reverse_tabs = [] @@ -91,9 +134,9 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): if tab['type'] == 'static_tab': reverse_tabs.insert(0, 'i4x://edX/full/static_tab/{0}'.format(tab['url_slug'])) - resp = self.client.post(reverse('reorder_static_tabs'), json.dumps({'tabs': reverse_tabs}), "application/json") + self.client.post(reverse('reorder_static_tabs'), json.dumps({'tabs': reverse_tabs}), "application/json") - course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) + course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) # compare to make sure that the tabs information is in the expected order after the server call course_tabs = [] @@ -103,29 +146,77 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.assertEqual(reverse_tabs, course_tabs) + def test_import_polls(self): + import_from_xml(modulestore(), 'common/test/data/', ['full']) + + module_store = modulestore('direct') + found = False + + item = None + items = module_store.get_items(['i4x', 'edX', 'full', 'poll_question', None, None]) + found = len(items) > 0 + + 
self.assertTrue(found) + # check that there's actually content in the 'question' field + self.assertGreater(len(items[0].question),0) + + def test_xlint_fails(self): + err_cnt = perform_xlint('common/test/data', ['full']) + self.assertGreater(err_cnt, 0) + + def test_delete(self): + import_from_xml(modulestore(), 'common/test/data/', ['full']) + + module_store = modulestore('direct') + + sequential = module_store.get_item(Location(['i4x', 'edX', 'full', 'sequential', 'Administrivia_and_Circuit_Elements', None])) + + chapter = module_store.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None])) + + # make sure the parent no longer points to the child object which was deleted + self.assertTrue(sequential.location.url() in chapter.children) + + self.client.post(reverse('delete_item'), + json.dumps({'id': sequential.location.url(), 'delete_children': 'true', 'delete_all_versions': 'true'}), + "application/json") + + found = False + try: + module_store.get_item(Location(['i4x', 'edX', 'full', 'sequential', 'Administrivia_and_Circuit_Elements', None])) + found = True + except ItemNotFoundError: + pass + + self.assertFalse(found) + + chapter = module_store.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None])) + + # make sure the parent no longer points to the child object which was deleted + self.assertFalse(sequential.location.url() in chapter.children) + def test_about_overrides(self): ''' This test case verifies that a course can use specialized override for about data, e.g. 
/about/Fall_2012/effort.html while there is a base definition in /about/effort.html ''' import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - effort = ms.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None])) - self.assertEqual(effort.definition['data'], '6 hours') + module_store = modulestore('direct') + effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None])) + self.assertEqual(effort.data, '6 hours') # this one should be in a non-override folder - effort = ms.get_item(Location(['i4x', 'edX', 'full', 'about', 'end_date', None])) - self.assertEqual(effort.definition['data'], 'TBD') + effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'end_date', None])) + self.assertEqual(effort.data, 'TBD') def test_remove_hide_progress_tab(self): import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() source_location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') - course = ms.get_item(source_location) - self.assertNotIn('hide_progress_tab', course.metadata) + course = module_store.get_item(source_location) + self.assertFalse(course.hide_progress_tab) def test_clone_course(self): @@ -143,19 +234,19 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): data = parse_json(resp) self.assertEqual(data['id'], 'i4x://MITx/999/course/Robot_Super_Course') - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() source_location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') dest_location = CourseDescriptor.id_to_location('MITx/999/Robot_Super_Course') - clone_course(ms, cs, source_location, dest_location) + clone_course(module_store, content_store, source_location, dest_location) # now loop through all the units in the course and verify that the clone can 
render them, which # means the objects are at least present - items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) + items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) self.assertGreater(len(items), 0) - clone_items = ms.get_items(Location(['i4x', 'MITx', '999', 'vertical', None])) + clone_items = module_store.get_items(Location(['i4x', 'MITx', '999', 'vertical', None])) self.assertGreater(len(clone_items), 0) for descriptor in items: new_loc = descriptor.location._replace(org='MITx', course='999') @@ -163,17 +254,21 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): resp = self.client.get(reverse('edit_unit', kwargs={'location': new_loc.url()})) self.assertEqual(resp.status_code, 200) + def test_bad_contentstore_request(self): + resp = self.client.get('http://localhost:8001/c4x/CDX/123123/asset/&images_circuits_Lab7Solution2.png') + self.assertEqual(resp.status_code, 400) + def test_delete_course(self): import_from_xml(modulestore(), 'common/test/data/', ['full']) - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') - delete_course(ms, cs, location) + delete_course(module_store, content_store, location, commit=True) - items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) + items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) self.assertEqual(len(items), 0) def verify_content_existence(self, modulestore, root_dir, location, dirname, category_name, filename_suffix=''): @@ -188,54 +283,54 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): self.assertTrue(fs.exists(item.location.name + filename_suffix)) def test_export_course(self): - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() - import_from_xml(ms, 'common/test/data/', ['full']) + 
import_from_xml(module_store, 'common/test/data/', ['full']) location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') - root_dir = path(mkdtemp()) + root_dir = path(mkdtemp_clean()) print 'Exporting to tempdir = {0}'.format(root_dir) # export out to a tempdir - export_to_xml(ms, cs, location, root_dir, 'test_export') + export_to_xml(module_store, content_store, location, root_dir, 'test_export') # check for static tabs - self.verify_content_existence(ms, root_dir, location, 'tabs', 'static_tab', '.html') + self.verify_content_existence(module_store, root_dir, location, 'tabs', 'static_tab', '.html') # check for custom_tags - self.verify_content_existence(ms, root_dir, location, 'info', 'course_info', '.html') + self.verify_content_existence(module_store, root_dir, location, 'info', 'course_info', '.html') # check for custom_tags - self.verify_content_existence(ms, root_dir, location, 'custom_tags', 'custom_tag_template') + self.verify_content_existence(module_store, root_dir, location, 'custom_tags', 'custom_tag_template') # check for graiding_policy.json fs = OSFS(root_dir / 'test_export/policies/6.002_Spring_2012') self.assertTrue(fs.exists('grading_policy.json')) - course = ms.get_item(location) + course = module_store.get_item(location) # compare what's on disk compared to what we have in our course - with fs.open('grading_policy.json','r') as grading_policy: - on_disk = loads(grading_policy.read()) - self.assertEqual(on_disk, course.definition['data']['grading_policy']) + with fs.open('grading_policy.json', 'r') as grading_policy: + on_disk = loads(grading_policy.read()) + self.assertEqual(on_disk, course.grading_policy) #check for policy.json self.assertTrue(fs.exists('policy.json')) # compare what's on disk to what we have in the course module - with fs.open('policy.json','r') as course_policy: + with fs.open('policy.json', 'r') as course_policy: on_disk = loads(course_policy.read()) self.assertIn('course/6.002_Spring_2012', on_disk) - 
self.assertEqual(on_disk['course/6.002_Spring_2012'], course.metadata) + self.assertEqual(on_disk['course/6.002_Spring_2012'], own_metadata(course)) # remove old course - delete_course(ms, cs, location) + delete_course(module_store, content_store, location) # reimport - import_from_xml(ms, root_dir, ['test_export']) + import_from_xml(module_store, root_dir, ['test_export']) - items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) + items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None])) self.assertGreater(len(items), 0) for descriptor in items: print "Checking {0}....".format(descriptor.location.url()) @@ -245,11 +340,11 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): shutil.rmtree(root_dir) def test_course_handouts_rewrites(self): - ms = modulestore('direct') - cs = contentstore() + module_store = modulestore('direct') + content_store = contentstore() # import a test course - import_from_xml(ms, 'common/test/data/', ['full']) + import_from_xml(module_store, 'common/test/data/', ['full']) handout_location = Location(['i4x', 'edX', 'full', 'course_info', 'handouts']) @@ -263,6 +358,56 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase): # note, we know the link it should be because that's what in the 'full' course in the test data self.assertContains(resp, '/c4x/edX/full/asset/handouts_schematic_tutorial.pdf') + def test_prefetch_children(self): + import_from_xml(modulestore(), 'common/test/data/', ['full']) + module_store = modulestore('direct') + location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') + + wrapper = MongoCollectionFindWrapper(module_store.collection.find) + module_store.collection.find = wrapper.find + course = module_store.get_item(location, depth=2) + + # make sure we haven't done too many round trips to DB + # note we say 4 round trips here for 1) the course, 2 & 3) for the chapters and sequentials, and + # 4) because of the RT due to calculating the inherited metadata + 
self.assertEqual(wrapper.counter, 4) + + # make sure we pre-fetched a known sequential which should be at depth=2 + self.assertTrue(Location(['i4x', 'edX', 'full', 'sequential', + 'Administrivia_and_Circuit_Elements', None]) in course.system.module_data) + + # make sure we don't have a specific vertical which should be at depth=3 + self.assertFalse(Location(['i4x', 'edX', 'full', 'vertical', 'vertical_58', + None]) in course.system.module_data) + + def test_export_course_with_unknown_metadata(self): + module_store = modulestore('direct') + content_store = contentstore() + + import_from_xml(module_store, 'common/test/data/', ['full']) + location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012') + + root_dir = path(mkdtemp_clean()) + + course = module_store.get_item(location) + + metadata = own_metadata(course) + # add a bool piece of unknown metadata so we can verify we don't throw an exception + metadata['new_metadata'] = True + + module_store.update_metadata(location, metadata) + + print 'Exporting to tempdir = {0}'.format(root_dir) + + # export out to a tempdir + exported = False + try: + export_to_xml(module_store, content_store, location, root_dir, 'test_export') + exported = True + except Exception: + pass + + self.assertTrue(exported) class ContentStoreTest(ModuleStoreTestCase): """ @@ -342,7 +487,7 @@ class ContentStoreTest(ModuleStoreTestCase): # Create a course so there is something to view resp = self.client.get(reverse('index')) self.assertContains(resp, - '

          My Courses

          ', + '

          My Courses

          ', status_code=200, html=True) @@ -378,7 +523,7 @@ class ContentStoreTest(ModuleStoreTestCase): resp = self.client.get(reverse('course_index', kwargs=data)) self.assertContains(resp, - 'Robot Super Course', + '
          ', status_code=200, html=True) @@ -405,7 +550,7 @@ class ContentStoreTest(ModuleStoreTestCase): problem_data = { 'parent_location': 'i4x://MITx/999/course/Robot_Super_Course', - 'template': 'i4x://edx/templates/problem/Empty' + 'template': 'i4x://edx/templates/problem/Blank_Common_Problem' } resp = self.client.post(reverse('clone_item'), problem_data) @@ -418,22 +563,77 @@ class ContentStoreTest(ModuleStoreTestCase): self.assertIsInstance(problem, CapaDescriptor, "New problem is not a CapaDescriptor") context = problem.get_context() self.assertIn('markdown', context, "markdown is missing from context") - self.assertIn('markdown', problem.metadata, "markdown is missing from metadata") self.assertNotIn('markdown', problem.editable_metadata_fields, "Markdown slipped into the editable metadata fields") + def test_import_metadata_with_attempts_empty_string(self): + import_from_xml(modulestore(), 'common/test/data/', ['simple']) + module_store = modulestore('direct') + did_load_item = False + try: + module_store.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None])) + did_load_item = True + except ItemNotFoundError: + pass + + # make sure we found the item (e.g. 
it didn't error while loading) + self.assertTrue(did_load_item) + + def test_metadata_inheritance(self): + import_from_xml(modulestore(), 'common/test/data/', ['full']) + + module_store = modulestore('direct') + course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None])) + + verticals = module_store.get_items(['i4x', 'edX', 'full', 'vertical', None, None]) + + # let's assert on the metadata_inheritance on an existing vertical + for vertical in verticals: + self.assertEqual(course.lms.xqa_key, vertical.lms.xqa_key) + + self.assertGreater(len(verticals), 0) + + new_component_location = Location('i4x', 'edX', 'full', 'html', 'new_component') + source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Blank_HTML_Page') + + # crate a new module and add it as a child to a vertical + module_store.clone_item(source_template_location, new_component_location) + parent = verticals[0] + module_store.update_children(parent.location, parent.children + [new_component_location.url()]) + + # flush the cache + module_store.refresh_cached_metadata_inheritance_tree(new_component_location) + new_module = module_store.get_item(new_component_location) + + # check for grace period definition which should be defined at the course level + self.assertEqual(parent.lms.graceperiod, new_module.lms.graceperiod) + + self.assertEqual(course.lms.xqa_key, new_module.lms.xqa_key) + + # + # now let's define an override at the leaf node level + # + new_module.lms.graceperiod = timedelta(1) + module_store.update_metadata(new_module.location, own_metadata(new_module)) + + # flush the cache and refetch + module_store.refresh_cached_metadata_inheritance_tree(new_component_location) + new_module = module_store.get_item(new_component_location) + + self.assertEqual(timedelta(1), new_module.lms.graceperiod) + class TemplateTestCase(ModuleStoreTestCase): - def test_template_cleanup(self): - ms = modulestore('direct') + def test_template_cleanup(self): + 
module_store = modulestore('direct') # insert a bogus template in the store bogus_template_location = Location('i4x', 'edx', 'templates', 'html', 'bogus') - source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Empty') - - ms.clone_item(source_template_location, bogus_template_location) + source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Blank_HTML_Page') - verify_create = ms.get_item(bogus_template_location) + module_store.clone_item(source_template_location, bogus_template_location) + + verify_create = module_store.get_item(bogus_template_location) self.assertIsNotNone(verify_create) # now run cleanup @@ -442,10 +642,8 @@ class TemplateTestCase(ModuleStoreTestCase): # now try to find dangling template, it should not be in DB any longer asserted = False try: - verify_create = ms.get_item(bogus_template_location) + verify_create = module_store.get_item(bogus_template_location) except ItemNotFoundError: asserted = True - self.assertTrue(asserted) - - + self.assertTrue(asserted) diff --git a/cms/djangoapps/contentstore/tests/test_course_settings.py b/cms/djangoapps/contentstore/tests/test_course_settings.py index 925b2431b9..fe90ad18aa 100644 --- a/cms/djangoapps/contentstore/tests/test_course_settings.py +++ b/cms/djangoapps/contentstore/tests/test_course_settings.py @@ -1,46 +1,25 @@ import datetime -import time import json -import calendar import copy -from util import converters -from util.converters import jsdate_to_time from django.contrib.auth.models import User from django.test.client import Client from django.core.urlresolvers import reverse from django.utils.timezone import UTC -import xmodule from xmodule.modulestore import Location -from cms.djangoapps.models.settings.course_details import (CourseDetails, +from models.settings.course_details import (CourseDetails, CourseSettingsEncoder) -from cms.djangoapps.models.settings.course_grading import CourseGradingModel -from cms.djangoapps.contentstore.utils import 
get_modulestore +from models.settings.course_grading import CourseGradingModel +from contentstore.utils import get_modulestore -from django.test import TestCase -from utils import ModuleStoreTestCase +from .utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory - -# YYYY-MM-DDThh:mm:ss.s+/-HH:MM -class ConvertersTestCase(TestCase): - @staticmethod - def struct_to_datetime(struct_time): - return datetime.datetime(struct_time.tm_year, struct_time.tm_mon, struct_time.tm_mday, struct_time.tm_hour, - struct_time.tm_min, struct_time.tm_sec, tzinfo=UTC()) - - def compare_dates(self, date1, date2, expected_delta): - dt1 = ConvertersTestCase.struct_to_datetime(date1) - dt2 = ConvertersTestCase.struct_to_datetime(date2) - self.assertEqual(dt1 - dt2, expected_delta, str(date1) + "-" + str(date2) + "!=" + str(expected_delta)) - - def test_iso_to_struct(self): - self.compare_dates(converters.jsdate_to_time("2013-01-01"), converters.jsdate_to_time("2012-12-31"), datetime.timedelta(days=1)) - self.compare_dates(converters.jsdate_to_time("2013-01-01T00"), converters.jsdate_to_time("2012-12-31T23"), datetime.timedelta(hours=1)) - self.compare_dates(converters.jsdate_to_time("2013-01-01T00:00"), converters.jsdate_to_time("2012-12-31T23:59"), datetime.timedelta(minutes=1)) - self.compare_dates(converters.jsdate_to_time("2013-01-01T00:00:00"), converters.jsdate_to_time("2012-12-31T23:59:59"), datetime.timedelta(seconds=1)) - +from models.settings.course_metadata import CourseMetadata +from xmodule.modulestore.xml_importer import import_from_xml +from xmodule.modulestore.django import modulestore +from xmodule.fields import Date class CourseTestCase(ModuleStoreTestCase): def setUp(self): @@ -103,7 +82,7 @@ class CourseDetailsTestCase(CourseTestCase): self.assertIsNone(jsondetails['effort'], "effort somehow initialized") def test_update_and_fetch(self): - ## NOTE: I couldn't figure out how to validly test time setting w/ all the conversions + # # 
NOTE: I couldn't figure out how to validly test time setting w/ all the conversions jsondetails = CourseDetails.fetch(self.course_location) jsondetails.syllabus = "bar" # encode - decode to convert date fields and other data which changes form @@ -143,10 +122,6 @@ class CourseDetailsViewTest(CourseTestCase): def test_update_and_fetch(self): details = CourseDetails.fetch(self.course_location) - resp = self.client.get(reverse('course_settings', kwargs={'org': self.course_location.org, 'course': self.course_location.course, - 'name': self.course_location.name})) - self.assertContains(resp, '
        1. Course Details
        2. ', status_code=200, html=True) - # resp s/b json from here on url = reverse('course_settings', kwargs={'org': self.course_location.org, 'course': self.course_location.course, 'name': self.course_location.name, 'section': 'details'}) @@ -173,19 +148,26 @@ class CourseDetailsViewTest(CourseTestCase): self.assertEqual(details['intro_video'], encoded.get('intro_video', None), context + " intro_video not ==") self.assertEqual(details['effort'], encoded['effort'], context + " efforts not ==") + @staticmethod + def struct_to_datetime(struct_time): + return datetime.datetime(struct_time.tm_year, struct_time.tm_mon, + struct_time.tm_mday, struct_time.tm_hour, + struct_time.tm_min, struct_time.tm_sec, tzinfo=UTC()) + def compare_date_fields(self, details, encoded, context, field): if details[field] is not None: + date = Date() if field in encoded and encoded[field] is not None: - encoded_encoded = jsdate_to_time(encoded[field]) - dt1 = ConvertersTestCase.struct_to_datetime(encoded_encoded) + encoded_encoded = date.from_json(encoded[field]) + dt1 = CourseDetailsViewTest.struct_to_datetime(encoded_encoded) if isinstance(details[field], datetime.datetime): dt2 = details[field] else: - details_encoded = jsdate_to_time(details[field]) - dt2 = ConvertersTestCase.struct_to_datetime(details_encoded) + details_encoded = date.from_json(details[field]) + dt2 = CourseDetailsViewTest.struct_to_datetime(details_encoded) - expected_delta = datetime.timedelta(0) + expected_delta = datetime.timedelta(0) self.assertEqual(dt1 - dt2, expected_delta, str(dt1) + "!=" + str(dt2) + " at " + context) else: self.fail(field + " missing from encoded but in details at " + context) @@ -249,8 +231,9 @@ class CourseGradingTest(CourseTestCase): altered_grader = CourseGradingModel.update_from_json(test_grader.__dict__) self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "cutoff add D") - test_grader.grace_period = {'hours' : 4, 'minutes' : 5, 'seconds': 0} + 
test_grader.grace_period = {'hours': 4, 'minutes': 5, 'seconds': 0} altered_grader = CourseGradingModel.update_from_json(test_grader.__dict__) + print test_grader.grace_period, altered_grader.grace_period self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "4 hour grace period") def test_update_grader_from_json(self): @@ -265,3 +248,64 @@ class CourseGradingTest(CourseTestCase): test_grader.graders[1]['drop_count'] = test_grader.graders[1].get('drop_count') + 1 altered_grader = CourseGradingModel.update_grader_from_json(test_grader.course_location, test_grader.graders[1]) self.assertDictEqual(test_grader.graders[1], altered_grader, "drop_count[1] + 2") + +class CourseMetadataEditingTest(CourseTestCase): + def setUp(self): + CourseTestCase.setUp(self) + # add in the full class too + import_from_xml(modulestore(), 'common/test/data/', ['full']) + self.fullcourse_location = Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]) + + + def test_fetch_initial_fields(self): + test_model = CourseMetadata.fetch(self.course_location) + self.assertIn('display_name', test_model, 'Missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Robot Super Course', "not expected value") + + test_model = CourseMetadata.fetch(self.fullcourse_location) + self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in') + self.assertIn('display_name', test_model, 'full missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Testing', "not expected value") + self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field') + self.assertIn('showanswer', test_model, 'showanswer field ') + self.assertIn('xqa_key', test_model, 'xqa_key field ') + + def test_update_from_json(self): + test_model = CourseMetadata.update_from_json(self.course_location, + { "advertised_start" : "start A", + "testcenter_info" : { "c" : "test" }, + "days_early_for_beta" : 2}) + self.update_check(test_model) + # try 
fresh fetch to ensure persistence + test_model = CourseMetadata.fetch(self.course_location) + self.update_check(test_model) + # now change some of the existing metadata + test_model = CourseMetadata.update_from_json(self.course_location, + { "advertised_start" : "start B", + "display_name" : "jolly roger"}) + self.assertIn('display_name', test_model, 'Missing editable metadata field') + self.assertEqual(test_model['display_name'], 'jolly roger', "not expected value") + self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field') + self.assertEqual(test_model['advertised_start'], 'start B', "advertised_start not expected value") + + def update_check(self, test_model): + self.assertIn('display_name', test_model, 'Missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Robot Super Course', "not expected value") + self.assertIn('advertised_start', test_model, 'Missing new advertised_start metadata field') + self.assertEqual(test_model['advertised_start'], 'start A', "advertised_start not expected value") + self.assertIn('testcenter_info', test_model, 'Missing testcenter_info metadata field') + self.assertDictEqual(test_model['testcenter_info'], { "c" : "test" }, "testcenter_info not expected value") + self.assertIn('days_early_for_beta', test_model, 'Missing days_early_for_beta metadata field') + self.assertEqual(test_model['days_early_for_beta'], 2, "days_early_for_beta not expected value") + + + def test_delete_key(self): + test_model = CourseMetadata.delete_key(self.fullcourse_location, { 'deleteKeys' : ['doesnt_exist', 'showanswer', 'xqa_key']}) + # ensure no harm + self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in') + self.assertIn('display_name', test_model, 'full missing editable metadata field') + self.assertEqual(test_model['display_name'], 'Testing', "not expected value") + self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field') + # check for deletion 
effectiveness + self.assertEqual('closed', test_model['showanswer'], 'showanswer field still in') + self.assertEqual(None, test_model['xqa_key'], 'xqa_key field still in') diff --git a/cms/djangoapps/contentstore/tests/test_course_updates.py b/cms/djangoapps/contentstore/tests/test_course_updates.py index c57f1322f5..80d4f0bbc2 100644 --- a/cms/djangoapps/contentstore/tests/test_course_updates.py +++ b/cms/djangoapps/contentstore/tests/test_course_updates.py @@ -1,31 +1,145 @@ -from cms.djangoapps.contentstore.tests.test_course_settings import CourseTestCase +'''unit tests for course_info views and models.''' +from contentstore.tests.test_course_settings import CourseTestCase from django.core.urlresolvers import reverse import json class CourseUpdateTest(CourseTestCase): + '''The do all and end all of unit test cases.''' def test_course_update(self): + '''Go through each interface and ensure it works.''' # first get the update to force the creation - url = reverse('course_info', kwargs={'org': self.course_location.org, 'course': self.course_location.course, - 'name': self.course_location.name}) + url = reverse('course_info', + kwargs={'org': self.course_location.org, + 'course': self.course_location.course, + 'name': self.course_location.name}) self.client.get(url) - content = '' + init_content = '' payload = {'content': content, 'date': 'January 8, 2013'} - url = reverse('course_info', kwargs={'org': self.course_location.org, 'course': self.course_location.course, - 'provided_id': ''}) + url = reverse('course_info_json', + kwargs={'org': self.course_location.org, + 'course': self.course_location.course, + 'provided_id': ''}) resp = self.client.post(url, json.dumps(payload), "application/json") payload = json.loads(resp.content) - self.assertHTMLEqual(content, payload['content'], "single iframe") + self.assertHTMLEqual(payload['content'], content) - url = reverse('course_info', kwargs={'org': self.course_location.org, 'course': self.course_location.course, - 
'provided_id': payload['id']}) - content += '
          div

          p

          ' + first_update_url = reverse('course_info_json', + kwargs={'org': self.course_location.org, + 'course': self.course_location.course, + 'provided_id': payload['id']}) + content += '
          div

          p

          ' payload['content'] = content + resp = self.client.post(first_update_url, json.dumps(payload), + "application/json") + + self.assertHTMLEqual(content, json.loads(resp.content)['content'], + "iframe w/ div") + + # now put in an evil update + content = '
            ' + payload = {'content': content, + 'date': 'January 11, 2013'} + url = reverse('course_info_json', + kwargs={'org': self.course_location.org, + 'course': self.course_location.course, + 'provided_id': ''}) + resp = self.client.post(url, json.dumps(payload), "application/json") - self.assertHTMLEqual(content, json.loads(resp.content)['content'], "iframe w/ div") + payload = json.loads(resp.content) + + self.assertHTMLEqual(content, payload['content'], "self closing ol") + + url = reverse('course_info_json', + kwargs={'org': self.course_location.org, + 'course': self.course_location.course, + 'provided_id': ''}) + resp = self.client.get(url) + payload = json.loads(resp.content) + self.assertTrue(len(payload) == 2) + + # can't test non-json paylod b/c expect_json throws error + # try json w/o required fields + self.assertContains( + self.client.post(url, json.dumps({'garbage': 1}), + "application/json"), + 'Failed to save', status_code=400) + + # now try to update a non-existent update + url = reverse('course_info_json', + kwargs={'org': self.course_location.org, + 'course': self.course_location.course, + 'provided_id': '9'}) + content = 'blah blah' + payload = {'content': content, + 'date': 'January 21, 2013'} + self.assertContains( + self.client.post(url, json.dumps(payload), "application/json"), + 'Failed to save', status_code=400) + + # update w/ malformed html + content = 'error' + payload = {'content': content, + 'date': 'January 11, 2013'} + url = reverse('course_info_json', kwargs={'org': self.course_location.org, + 'course': self.course_location.course, + 'provided_id': ''}) + + self.assertContains( + self.client.post(url, json.dumps(payload), "application/json"), + ' 1: - raise BaseException('Found more than one course at {0}. There should only be one!!! Dump = {1}'.format(course_search_location, courses)) + raise Exception('Found more than one course at {0}. There should only be one!!! 
Dump = {1}'.format(course_search_location, courses)) location = courses[0].location @@ -75,12 +80,20 @@ def get_course_for_item(location): return courses[0] -def get_lms_link_for_item(location, preview=False): +def get_lms_link_for_item(location, preview=False, course_id=None): + if course_id is None: + course_id = get_course_id(location) + if settings.LMS_BASE is not None: - lms_link = "//{preview}{lms_base}/courses/{course_id}/jump_to/{location}".format( - preview='preview.' if preview else '', - lms_base=settings.LMS_BASE, - course_id=get_course_id(location), + if preview: + lms_base = settings.MITX_FEATURES.get('PREVIEW_LMS_BASE', + 'preview.' + settings.LMS_BASE) + else: + lms_base = settings.LMS_BASE + + lms_link = "//{lms_base}/courses/{course_id}/jump_to/{location}".format( + lms_base=lms_base, + course_id=course_id, location=Location(location) ) else: @@ -128,7 +141,7 @@ def compute_unit_state(unit): 'private' content is editabled and not visible in the LMS """ - if unit.metadata.get('is_draft', False): + if getattr(unit, 'is_draft', False): try: modulestore('direct').get_item(unit.location) return UnitState.draft @@ -150,3 +163,67 @@ def update_item(location, value): get_modulestore(location).delete_item(location) else: get_modulestore(location).update_item(location, value) + + +def get_url_reverse(course_page_name, course_module): + """ + Returns the course URL link to the specified location. This value is suitable to use as an href link. + + course_page_name should correspond to an attribute in CoursePageNames (for example, 'ManageUsers' + or 'SettingsDetails'), or else it will simply be returned. This method passes back unknown values of + course_page_names so that it can also be used for absolute (known) URLs. + + course_module is used to obtain the location, org, course, and name properties for a course, if + course_page_name corresponds to an attribute in CoursePageNames. 
+ """ + url_name = getattr(CoursePageNames, course_page_name, None) + ctx_loc = course_module.location + + if CoursePageNames.ManageUsers == url_name: + return reverse(url_name, kwargs={"location": ctx_loc}) + elif url_name in [CoursePageNames.SettingsDetails, CoursePageNames.SettingsGrading, + CoursePageNames.CourseOutline, CoursePageNames.Checklists]: + return reverse(url_name, kwargs={'org': ctx_loc.org, 'course': ctx_loc.course, 'name': ctx_loc.name}) + else: + return course_page_name + + +class CoursePageNames: + """ Constants for pages that are recognized by get_url_reverse method. """ + ManageUsers = "manage_users" + SettingsDetails = "settings_details" + SettingsGrading = "settings_grading" + CourseOutline = "course_index" + Checklists = "checklists" + +def add_open_ended_panel_tab(course): + """ + Used to add the open ended panel tab to a course if it does not exist. + @param course: A course object from the modulestore. + @return: Boolean indicating whether or not a tab was added and a list of tabs for the course. + """ + #Copy course tabs + course_tabs = copy.copy(course.tabs) + changed = False + #Check to see if open ended panel is defined in the course + if OPEN_ENDED_PANEL not in course_tabs: + #Add panel to the tabs if it is not defined + course_tabs.append(OPEN_ENDED_PANEL) + changed = True + return changed, course_tabs + +def remove_open_ended_panel_tab(course): + """ + Used to remove the open ended panel tab from a course if it exists. + @param course: A course object from the modulestore. + @return: Boolean indicating whether or not a tab was added and a list of tabs for the course. 
+ """ + #Copy course tabs + course_tabs = copy.copy(course.tabs) + changed = False + #Check to see if open ended panel is defined in the course + if OPEN_ENDED_PANEL in course_tabs: + #Add panel to the tabs if it is not defined + course_tabs = [ct for ct in course_tabs if ct!=OPEN_ENDED_PANEL] + changed = True + return changed, course_tabs diff --git a/cms/djangoapps/contentstore/views.py b/cms/djangoapps/contentstore/views.py index af62276ec4..f8960dd65d 100644 --- a/cms/djangoapps/contentstore/views.py +++ b/cms/djangoapps/contentstore/views.py @@ -18,7 +18,8 @@ from django.core.files.temp import NamedTemporaryFile # to install PIL on MacOSX: 'easy_install http://dist.repoze.org/PIL-1.1.6.tar.gz' from PIL import Image -from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden +from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError +from django.http import HttpResponseNotFound from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.core.context_processors import csrf @@ -28,16 +29,20 @@ from django.conf import settings from xmodule.modulestore import Location from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError +from xmodule.modulestore.inheritance import own_metadata +from xblock.core import Scope +from xblock.runtime import KeyValueStore, DbModel, InvalidScopeError from xmodule.x_module import ModuleSystem from xmodule.error_module import ErrorDescriptor from xmodule.errortracker import exc_info_to_str import static_replace from external_auth.views import ssl_login_shortcut +from xmodule.modulestore.mongo import MongoUsage from mitxmako.shortcuts import render_to_response, render_to_string from xmodule.modulestore.django import modulestore from xmodule_modifiers import replace_static_urls, wrap_xmodule -from xmodule.exceptions import NotFoundError +from xmodule.exceptions 
import NotFoundError, ProcessingError from functools import partial from xmodule.contentstore.django import contentstore @@ -46,19 +51,21 @@ from xmodule.contentstore.content import StaticContent from auth.authz import is_user_in_course_group_role, get_users_in_course_group_by_role from auth.authz import get_user_by_email, add_user_to_course_group, remove_user_from_course_group from auth.authz import INSTRUCTOR_ROLE_NAME, STAFF_ROLE_NAME, create_all_course_groups -from .utils import get_course_location_for_item, get_lms_link_for_item, compute_unit_state, get_date_display, UnitState, get_course_for_item +from .utils import get_course_location_for_item, get_lms_link_for_item, compute_unit_state, \ + get_date_display, UnitState, get_course_for_item, get_url_reverse, add_open_ended_panel_tab, \ + remove_open_ended_panel_tab from xmodule.modulestore.xml_importer import import_from_xml -from contentstore.course_info_model import get_course_updates,\ +from contentstore.course_info_model import get_course_updates, \ update_course_updates, delete_course_update from cache_toolbox.core import del_cached_content -from xmodule.timeparse import stringify_time from contentstore.module_info_model import get_module_info, set_module_info -from cms.djangoapps.models.settings.course_details import CourseDetails,\ +from models.settings.course_details import CourseDetails, \ CourseSettingsEncoder -from cms.djangoapps.models.settings.course_grading import CourseGradingModel -from cms.djangoapps.contentstore.utils import get_modulestore -from lxml import etree +from models.settings.course_grading import CourseGradingModel +from contentstore.utils import get_modulestore +from django.shortcuts import redirect +from models.settings.course_metadata import CourseMetadata # to install PIL on MacOSX: 'easy_install http://dist.repoze.org/PIL-1.1.6.tar.gz' @@ -67,6 +74,11 @@ log = logging.getLogger(__name__) COMPONENT_TYPES = ['customtag', 'discussion', 'html', 'problem', 'video'] 
+OPEN_ENDED_COMPONENT_TYPES = ["combinedopenended", "peergrading"] +ADVANCED_COMPONENT_TYPES = ['annotatable'] + OPEN_ENDED_COMPONENT_TYPES +ADVANCED_COMPONENT_CATEGORY = 'advanced' +ADVANCED_COMPONENT_POLICY_KEY = 'advanced_modules' + # cdodge: these are categories which should not be parented, they are detached from the hierarchy DETACHED_CATEGORIES = ['about', 'static_tab', 'course_info'] @@ -82,6 +94,13 @@ def signup(request): return render_to_response('signup.html', {'csrf': csrf_token}) +def old_login_redirect(request): + ''' + Redirect to the active login url. + ''' + return redirect('login', permanent=True) + + @ssl_login_shortcut @ensure_csrf_cookie def login_page(request): @@ -95,15 +114,22 @@ def login_page(request): }) +def howitworks(request): + if request.user.is_authenticated(): + return index(request) + else: + return render_to_response('howitworks.html', {}) + # ==== Views for any logged-in user ================================== + @login_required @ensure_csrf_cookie def index(request): """ List all courses available to the logged in user """ - courses = modulestore().get_items(['i4x', None, None, 'course', None]) + courses = modulestore('direct').get_items(['i4x', None, None, 'course', None]) # filter out courses that we don't have access too def course_filter(course): @@ -116,11 +142,9 @@ def index(request): return render_to_response('index.html', { 'new_course_template': Location('i4x', 'edx', 'templates', 'course', 'Empty'), - 'courses': [(course.metadata.get('display_name'), - reverse('course_index', args=[ - course.location.org, - course.location.course, - course.location.name])) + 'courses': [(course.display_name, + get_url_reverse('CourseOutline', course), + get_lms_link_for_item(course.location, course_id=course.location.course_id)) for course in courses], 'user': request.user, 'disable_course_creation': settings.MITX_FEATURES.get('DISABLE_COURSE_CREATION', False) and not request.user.is_staff @@ -129,6 +153,7 @@ def index(request): # ==== 
Views with per-item permissions================================ + def has_access(user, location, role=STAFF_ROLE_NAME): ''' Return True if user allowed to access this piece of data @@ -155,24 +180,23 @@ def course_index(request, org, course, name): org, course, name: Attributes of the Location for the item to edit """ - location = ['i4x', org, course, 'course', name] + location = get_location_and_verify_access(request, org, course, name) - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() + lms_link = get_lms_link_for_item(location) upload_asset_callback_url = reverse('upload_asset', kwargs={ - 'org': org, - 'course': course, - 'coursename': name - }) + 'org': org, + 'course': course, + 'coursename': name + }) - course = modulestore().get_item(location) + course = modulestore().get_item(location, depth=3) sections = course.get_children() return render_to_response('overview.html', { 'active_tab': 'courseware', 'context_course': course, + 'lms_link': lms_link, 'sections': sections, 'course_graders': json.dumps(CourseGradingModel.fetch(course.location).graders), 'parent_location': course.location, @@ -186,19 +210,14 @@ def course_index(request, org, course, name): @login_required def edit_subsection(request, location): # check that we have permissions to edit this item - if not has_access(request.user, location): + course = get_course_for_item(location) + if not has_access(request.user, course.location): raise PermissionDenied() - item = modulestore().get_item(location) + item = modulestore().get_item(location, depth=1) - # TODO: we need a smarter way to figure out what course an item is in - for course in modulestore().get_courses(): - if (course.location.org == item.location.org and - course.location.course == item.location.course): - break - - lms_link = get_lms_link_for_item(location) - preview_link = get_lms_link_for_item(location, preview=True) + lms_link = 
get_lms_link_for_item(location, course_id=course.location.course_id) + preview_link = get_lms_link_for_item(location, course_id=course.location.course_id, preview=True) # make sure that location references a 'sequential', otherwise return BadRequest if item.location.category != 'sequential': @@ -215,8 +234,13 @@ def edit_subsection(request, location): # remove all metadata from the generic dictionary that is presented in a more normalized UI - policy_metadata = dict((key, value) for key, value in item.metadata.iteritems() - if key not in ['display_name', 'start', 'due', 'format'] and key not in item.system_metadata_fields) + policy_metadata = dict( + (field.name, field.read_from(item)) + for field + in item.fields + if field.name not in ['display_name', 'start', 'due', 'format'] and + field.scope == Scope.settings + ) can_view_live = False subsection_units = item.get_children() @@ -227,18 +251,18 @@ def edit_subsection(request, location): break return render_to_response('edit_subsection.html', - {'subsection': item, - 'context_course': course, - 'create_new_unit_template': Location('i4x', 'edx', 'templates', 'vertical', 'Empty'), - 'lms_link': lms_link, - 'preview_link': preview_link, - 'course_graders': json.dumps(CourseGradingModel.fetch(course.location).graders), - 'parent_location': course.location, - 'parent_item': parent, - 'policy_metadata': policy_metadata, - 'subsection_units': subsection_units, - 'can_view_live': can_view_live - }) + {'subsection': item, + 'context_course': course, + 'create_new_unit_template': Location('i4x', 'edx', 'templates', 'vertical', 'Empty'), + 'lms_link': lms_link, + 'preview_link': preview_link, + 'course_graders': json.dumps(CourseGradingModel.fetch(course.location).graders), + 'parent_location': course.location, + 'parent_item': parent, + 'policy_metadata': policy_metadata, + 'subsection_units': subsection_units, + 'can_view_live': can_view_live + }) @login_required @@ -250,30 +274,44 @@ def edit_unit(request, location): id: 
A Location URL """ - # check that we have permissions to edit this item - if not has_access(request.user, location): + course = get_course_for_item(location) + if not has_access(request.user, course.location): raise PermissionDenied() - item = modulestore().get_item(location) + item = modulestore().get_item(location, depth=1) - # TODO: we need a smarter way to figure out what course an item is in - for course in modulestore().get_courses(): - if (course.location.org == item.location.org and - course.location.course == item.location.course): - break - - lms_link = get_lms_link_for_item(item.location) + lms_link = get_lms_link_for_item(item.location, course_id=course.location.course_id) component_templates = defaultdict(list) + # Check if there are any advanced modules specified in the course policy. These modules + # should be specified as a list of strings, where the strings are the names of the modules + # in ADVANCED_COMPONENT_TYPES that should be enabled for the course. + course_advanced_keys = course.advanced_modules + + # Set component types according to course policy file + component_types = list(COMPONENT_TYPES) + if isinstance(course_advanced_keys, list): + course_advanced_keys = [c for c in course_advanced_keys if c in ADVANCED_COMPONENT_TYPES] + if len(course_advanced_keys) > 0: + component_types.append(ADVANCED_COMPONENT_CATEGORY) + else: + log.error("Improper format for course advanced keys! 
{0}".format(course_advanced_keys)) + templates = modulestore().get_items(Location('i4x', 'edx', 'templates')) for template in templates: - if template.location.category in COMPONENT_TYPES: - component_templates[template.location.category].append(( - template.display_name, + category = template.location.category + + if category in course_advanced_keys: + category = ADVANCED_COMPONENT_CATEGORY + + if category in component_types: + # This is a hack to create categories for different xmodules + component_templates[category].append(( + template.display_name_with_default, template.location.url(), - 'markdown' in template.metadata, - template.location.name == 'Empty' + hasattr(template, 'markdown') and template.markdown is not None, + template.cms.empty, )) components = [ @@ -302,8 +340,11 @@ def edit_unit(request, location): break index = index + 1 - preview_lms_link = '//{preview}{lms_base}/courses/{org}/{course}/{course_name}/courseware/{section}/{subsection}/{index}'.format( - preview='preview.', + preview_lms_base = settings.MITX_FEATURES.get('PREVIEW_LMS_BASE', + 'preview.' 
+ settings.LMS_BASE) + + preview_lms_link = '//{preview_lms_base}/courses/{org}/{course}/{course_name}/courseware/{section}/{subsection}/{index}'.format( + preview_lms_base=preview_lms_base, lms_base=settings.LMS_BASE, org=course.location.org, course=course.location.course, @@ -314,11 +355,6 @@ def edit_unit(request, location): unit_state = compute_unit_state(item) - try: - published_date = time.strftime('%B %d, %Y', item.metadata.get('published_date')) - except TypeError: - published_date = None - return render_to_response('unit.html', { 'context_course': course, 'active_tab': 'courseware', @@ -329,11 +365,11 @@ def edit_unit(request, location): 'draft_preview_link': preview_lms_link, 'published_preview_link': lms_link, 'subsection': containing_subsection, - 'release_date': get_date_display(datetime.fromtimestamp(time.mktime(containing_subsection.start))) if containing_subsection.start is not None else None, + 'release_date': get_date_display(datetime.fromtimestamp(time.mktime(containing_subsection.lms.start))) if containing_subsection.lms.start is not None else None, 'section': containing_section, 'create_new_unit_template': Location('i4x', 'edx', 'templates', 'vertical', 'Empty'), 'unit_state': unit_state, - 'published_date': published_date, + 'published_date': item.cms.published_date.strftime('%B %d, %Y') if item.cms.published_date is not None else None, }) @@ -365,7 +401,7 @@ def assignment_type_update(request, org, course, category, name): if request.method == 'GET': return HttpResponse(json.dumps(CourseGradingModel.get_section_grader_type(location)), mimetype="application/json") - elif request.method == 'POST': # post or put, doesn't matter. + elif request.method == 'POST': # post or put, doesn't matter. 
return HttpResponse(json.dumps(CourseGradingModel.update_section_grader_type(location, request.POST)), mimetype="application/json") @@ -398,59 +434,28 @@ def preview_dispatch(request, preview_id, location, dispatch=None): dispatch: The action to execute """ - instance_state, shared_state = load_preview_state(request, preview_id, location) descriptor = modulestore().get_item(location) - instance = load_preview_module(request, preview_id, descriptor, instance_state, shared_state) + instance = load_preview_module(request, preview_id, descriptor) # Let the module handle the AJAX try: ajax_return = instance.handle_ajax(dispatch, request.POST) + except NotFoundError: log.exception("Module indicating to user that request doesn't exist") raise Http404 + + except ProcessingError: + log.warning("Module raised an error while processing AJAX request", + exc_info=True) + return HttpResponseBadRequest() + except: log.exception("error processing ajax call") raise - save_preview_state(request, preview_id, location, instance.get_instance_state(), instance.get_shared_state()) return HttpResponse(ajax_return) -def load_preview_state(request, preview_id, location): - """ - Load the state of a preview module from the request - - preview_id (str): An identifier specifying which preview this module is used for - location: The Location of the module to dispatch to - """ - if 'preview_states' not in request.session: - request.session['preview_states'] = defaultdict(dict) - - instance_state = request.session['preview_states'][preview_id, location].get('instance') - shared_state = request.session['preview_states'][preview_id, location].get('shared') - - return instance_state, shared_state - - -def save_preview_state(request, preview_id, location, instance_state, shared_state): - """ - Save the state of a preview module to the request - - preview_id (str): An identifier specifying which preview this module is used for - location: The Location of the module to dispatch to - instance_state: The 
instance state to save - shared_state: The shared state to save - """ - if 'preview_states' not in request.session: - request.session['preview_states'] = defaultdict(dict) - - # request.session doesn't notice indirect changes; so, must set its dict w/ every change to get - # it to persist: http://www.djangobook.com/en/2.0/chapter14.html - preview_states = request.session['preview_states'] - preview_states[preview_id, location]['instance'] = instance_state - preview_states[preview_id, location]['shared'] = shared_state - request.session['preview_states'] = preview_states # make session mgmt notice the update - - def render_from_lms(template_name, dictionary, context=None, namespace='main'): """ Render a template using the LMS MAKO_TEMPLATES @@ -458,6 +463,33 @@ def render_from_lms(template_name, dictionary, context=None, namespace='main'): return render_to_string(template_name, dictionary, context, namespace="lms." + namespace) +class SessionKeyValueStore(KeyValueStore): + def __init__(self, request, model_data): + self._model_data = model_data + self._session = request.session + + def get(self, key): + try: + return self._model_data[key.field_name] + except (KeyError, InvalidScopeError): + return self._session[tuple(key)] + + def set(self, key, value): + try: + self._model_data[key.field_name] = value + except (KeyError, InvalidScopeError): + self._session[tuple(key)] = value + + def delete(self, key): + try: + del self._model_data[key.field_name] + except (KeyError, InvalidScopeError): + del self._session[tuple(key)] + + def has(self, key): + return key in self._model_data or key in self._session + + def preview_module_system(request, preview_id, descriptor): """ Returns a ModuleSystem for the specified descriptor that is specialized for @@ -468,6 +500,14 @@ def preview_module_system(request, preview_id, descriptor): descriptor: An XModuleDescriptor """ + def preview_model_data(descriptor): + return DbModel( + SessionKeyValueStore(request, descriptor._model_data), 
+ descriptor.module_class, + preview_id, + MongoUsage(preview_id, descriptor.location.url()), + ) + return ModuleSystem( ajax_url=reverse('preview_dispatch', args=[preview_id, descriptor.location.url(), '']).rstrip('/'), # TODO (cpennington): Do we want to track how instructors are using the preview problems? @@ -478,6 +518,7 @@ def preview_module_system(request, preview_id, descriptor): debug=True, replace_urls=partial(static_replace.replace_static_urls, data_directory=None, course_namespace=descriptor.location), user=request.user, + xblock_model_data=preview_model_data, ) @@ -490,11 +531,11 @@ def get_preview_module(request, preview_id, descriptor): preview_id (str): An identifier specifying which preview this module is used for location: A Location """ - instance_state, shared_state = descriptor.get_sample_state()[0] - return load_preview_module(request, preview_id, descriptor, instance_state, shared_state) + + return load_preview_module(request, preview_id, descriptor) -def load_preview_module(request, preview_id, descriptor, instance_state, shared_state): +def load_preview_module(request, preview_id, descriptor): """ Return a preview XModule instantiated from the supplied descriptor, instance_state, and shared_state @@ -506,12 +547,13 @@ def load_preview_module(request, preview_id, descriptor, instance_state, shared_ """ system = preview_module_system(request, preview_id, descriptor) try: - module = descriptor.xmodule_constructor(system)(instance_state, shared_state) + module = descriptor.xmodule(system) except: + log.debug("Unable to load preview module", exc_info=True) module = ErrorDescriptor.from_descriptor( descriptor, error_msg=exc_info_to_str(sys.exc_info()) - ).xmodule_constructor(system)(None, None) + ).xmodule(system) # cdodge: Special case if module.location.category == 'static_tab': @@ -529,11 +571,9 @@ def load_preview_module(request, preview_id, descriptor, instance_state, shared_ module.get_html = replace_static_urls( module.get_html, - 
module.metadata.get('data_dir', module.location.course), + getattr(module, 'data_dir', module.location.course), course_namespace=Location([module.location.tag, module.location.org, module.location.course, None, None]) ) - save_preview_state(request, preview_id, descriptor.location.url(), - module.get_instance_state(), module.get_shared_state()) return module @@ -547,7 +587,7 @@ def get_module_previews(request, descriptor): """ preview_html = [] for idx, (instance_state, shared_state) in enumerate(descriptor.get_sample_state()): - module = load_preview_module(request, str(idx), descriptor, instance_state, shared_state) + module = load_preview_module(request, str(idx), descriptor) preview_html.append(module.get_html()) return preview_html @@ -594,6 +634,19 @@ def delete_item(request): if item.location.revision is None and item.location.category == 'vertical' and delete_all_versions: modulestore('direct').delete_item(item.location) + # cdodge: we need to remove our parent's pointer to us so that it is no longer dangling + if delete_all_versions: + parent_locs = modulestore('direct').get_parent_locations(item_loc, None) + + for parent_loc in parent_locs: + parent = modulestore('direct').get_item(parent_loc) + item_url = item_loc.url() + if item_url in parent.children: + children = parent.children + children.remove(item_url) + parent.children = children + modulestore('direct').update_children(parent.location, parent.children) + return HttpResponse() @@ -631,7 +684,7 @@ def save_item(request): # update existing metadata with submitted metadata (which can be partial) # IMPORTANT NOTE: if the client passed pack 'null' (None) for a piece of metadata that means 'remove it' - for metadata_key in posted_metadata.keys(): + for metadata_key, value in posted_metadata.items(): # let's strip out any metadata fields from the postback which have been identified as system metadata # and therefore should not be user-editable, so we should accept them back from the client @@ -639,15 
+692,15 @@ def save_item(request): del posted_metadata[metadata_key] elif posted_metadata[metadata_key] is None: # remove both from passed in collection as well as the collection read in from the modulestore - if metadata_key in existing_item.metadata: - del existing_item.metadata[metadata_key] + if metadata_key in existing_item._model_data: + del existing_item._model_data[metadata_key] del posted_metadata[metadata_key] - - # overlay the new metadata over the modulestore sourced collection to support partial updates - existing_item.metadata.update(posted_metadata) + else: + existing_item._model_data[metadata_key] = value # commit to datastore - store.update_metadata(item_location, existing_item.metadata) + # TODO (cpennington): This really shouldn't have to do this much reaching in to get the metadata + store.update_metadata(item_location, own_metadata(existing_item)) return HttpResponse() @@ -714,23 +767,17 @@ def clone_item(request): new_item = get_modulestore(template).clone_item(template, dest_location) - # TODO: This needs to be deleted when we have proper storage for static content - new_item.metadata['data_dir'] = parent.metadata['data_dir'] - # replace the display name with an optional parameter passed in from the caller if display_name is not None: - new_item.metadata['display_name'] = display_name + new_item.display_name = display_name - get_modulestore(template).update_metadata(new_item.location.url(), new_item.own_metadata) + get_modulestore(template).update_metadata(new_item.location.url(), own_metadata(new_item)) if new_item.location.category not in DETACHED_CATEGORIES: - get_modulestore(parent.location).update_children(parent_location, parent.definition.get('children', []) + [new_item.location.url()]) + get_modulestore(parent.location).update_children(parent_location, parent.children + [new_item.location.url()]) return HttpResponse(json.dumps({'id': dest_location.url()})) -#@login_required -#@ensure_csrf_cookie - def upload_asset(request, org, 
course, coursename): ''' @@ -742,9 +789,7 @@ def upload_asset(request, org, course, coursename): return HttpResponseBadRequest() # construct a location from the passed in path - location = ['i4x', org, course, 'course', coursename] - if not has_access(request.user, location): - return HttpResponseForbidden() + location = get_location_and_verify_access(request, org, course, coursename) # Does the course actually exist?!? Get anything from it to prove its existance @@ -775,7 +820,7 @@ def upload_asset(request, org, course, coursename): if thumbnail_content is not None: content.thumbnail_location = thumbnail_location - #then commit the content + # then commit the content contentstore().save(content) del_cached_content(content.location) @@ -793,11 +838,10 @@ def upload_asset(request, org, course, coursename): response['asset_url'] = StaticContent.get_url_path_from_location(content.location) return response + ''' This view will return all CMS users who are editors for the specified course ''' - - @login_required @ensure_csrf_cookie def manage_users(request, location): @@ -827,12 +871,11 @@ def create_json_response(errmsg=None): return resp + ''' This POST-back view will add a user - specified by email - to the list of editors for the specified course ''' - - @expect_json @login_required @ensure_csrf_cookie @@ -861,12 +904,11 @@ def add_user(request, location): return create_json_response() + ''' This POST-back view will remove a user - specified by email - from the list of editors for the specified course ''' - - @expect_json @login_required @ensure_csrf_cookie @@ -899,11 +941,7 @@ def landing(request, org, course, coursename): @ensure_csrf_cookie def static_pages(request, org, course, coursename): - location = ['i4x', org, course, 'course', coursename] - - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() + location = get_location_and_verify_access(request, org, course, coursename) course = 
modulestore().get_item(location) @@ -949,7 +987,7 @@ def reorder_static_tabs(request): for tab in course.tabs: if tab['type'] == 'static_tab': reordered_tabs.append({'type': 'static_tab', - 'name': tab_items[static_tab_idx].metadata.get('display_name'), + 'name': tab_items[static_tab_idx].display_name, 'url_slug': tab_items[static_tab_idx].location.name}) static_tab_idx += 1 else: @@ -958,7 +996,7 @@ def reorder_static_tabs(request): # OK, re-assemble the static tabs in the new order course.tabs = reordered_tabs - modulestore('direct').update_metadata(course.location, course.metadata) + modulestore('direct').update_metadata(course.location, own_metadata(course)) return HttpResponse() @@ -1015,11 +1053,7 @@ def course_info(request, org, course, name, provided_id=None): org, course, name: Attributes of the Location for the item to edit """ - location = ['i4x', org, course, 'course', name] - - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() + location = get_location_and_verify_access(request, org, course, name) course_module = modulestore().get_item(location) @@ -1058,21 +1092,25 @@ def course_info_updates(request, org, course, provided_id=None): if not has_access(request.user, location): raise PermissionDenied() - # NB: we're setting Backbone.emulateHTTP to true on the client so everything comes as a post!!! 
- if request.method == 'POST' and 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META: - real_method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] - else: - real_method = request.method + real_method = get_request_method(request) if request.method == 'GET': - return HttpResponse(json.dumps(get_course_updates(location)), mimetype="application/json") - elif real_method == 'DELETE': # coming as POST need to pull from Request Header X-HTTP-Method-Override DELETE - return HttpResponse(json.dumps(delete_course_update(location, request.POST, provided_id)), mimetype="application/json") + return HttpResponse(json.dumps(get_course_updates(location)), + mimetype="application/json") + elif real_method == 'DELETE': + try: + return HttpResponse(json.dumps(delete_course_update(location, + request.POST, provided_id)), mimetype="application/json") + except: + return HttpResponseBadRequest("Failed to delete", + content_type="text/plain") elif request.method == 'POST': try: - return HttpResponse(json.dumps(update_course_updates(location, request.POST, provided_id)), mimetype="application/json") + return HttpResponse(json.dumps(update_course_updates(location, + request.POST, provided_id)), mimetype="application/json") except: - return HttpResponseBadRequest("Failed to save: malformed html", content_type="text/plain") + return HttpResponseBadRequest("Failed to save", + content_type="text/plain") @expect_json @@ -1085,11 +1123,7 @@ def module_info(request, module_location): if not has_access(request.user, location): raise PermissionDenied() - # NB: we're setting Backbone.emulateHTTP to true on the client so everything comes as a post!!! 
- if request.method == 'POST' and 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META: - real_method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] - else: - real_method = request.method + real_method = get_request_method(request) rewrite_static_links = request.GET.get('rewrite_url_links', 'True') in ['True', 'true'] logging.debug('rewrite_static_links = {0} {1}'.format(request.GET.get('rewrite_url_links', 'False'), rewrite_static_links)) @@ -1114,22 +1148,60 @@ def get_course_settings(request, org, course, name): org, course, name: Attributes of the Location for the item to edit """ - location = ['i4x', org, course, 'course', name] - - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() + location = get_location_and_verify_access(request, org, course, name) course_module = modulestore().get_item(location) - course_details = CourseDetails.fetch(location) return render_to_response('settings.html', { - 'active_tab': 'settings', 'context_course': course_module, + 'course_location': location, + 'details_url': reverse(course_settings_updates, + kwargs={"org": org, + "course": course, + "name": name, + "section": "details"}) + }) + + +@login_required +@ensure_csrf_cookie +def course_config_graders_page(request, org, course, name): + """ + Send models and views as well as html for editing the course settings to the client. 
+ + org, course, name: Attributes of the Location for the item to edit + """ + location = get_location_and_verify_access(request, org, course, name) + + course_module = modulestore().get_item(location) + course_details = CourseGradingModel.fetch(location) + + return render_to_response('settings_graders.html', { + 'context_course': course_module, + 'course_location' : location, 'course_details': json.dumps(course_details, cls=CourseSettingsEncoder) }) +@login_required +@ensure_csrf_cookie +def course_config_advanced_page(request, org, course, name): + """ + Send models and views as well as html for editing the advanced course settings to the client. + + org, course, name: Attributes of the Location for the item to edit + """ + location = get_location_and_verify_access(request, org, course, name) + + course_module = modulestore().get_item(location) + + return render_to_response('settings_advanced.html', { + 'context_course': course_module, + 'course_location' : location, + 'advanced_dict' : json.dumps(CourseMetadata.fetch(location)), + }) + + @expect_json @login_required @ensure_csrf_cookie @@ -1141,11 +1213,7 @@ def course_settings_updates(request, org, course, name, section): org, course: Attributes of the Location for the item to edit section: one of details, faculty, grading, problems, discussions """ - location = ['i4x', org, course, 'course', name] - - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() + get_location_and_verify_access(request, org, course, name) if section == 'details': manager = CourseDetails @@ -1157,7 +1225,7 @@ def course_settings_updates(request, org, course, name, section): # Cannot just do a get w/o knowing the course name :-( return HttpResponse(json.dumps(manager.fetch(Location(['i4x', org, course, 'course', name])), cls=CourseSettingsEncoder), mimetype="application/json") - elif request.method == 'POST': # post or put, doesn't matter. 
+ elif request.method == 'POST': # post or put, doesn't matter. return HttpResponse(json.dumps(manager.update_from_json(request.POST), cls=CourseSettingsEncoder), mimetype="application/json") @@ -1173,30 +1241,168 @@ def course_grader_updates(request, org, course, name, grader_index=None): org, course: Attributes of the Location for the item to edit """ - location = ['i4x', org, course, 'course', name] + location = get_location_and_verify_access(request, org, course, name) - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() - - if request.method == 'POST' and 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META: - real_method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] - else: - real_method = request.method + real_method = get_request_method(request) if real_method == 'GET': # Cannot just do a get w/o knowing the course name :-( - return HttpResponse(json.dumps(CourseGradingModel.fetch_grader(Location(['i4x', org, course, 'course', name]), grader_index)), + return HttpResponse(json.dumps(CourseGradingModel.fetch_grader(Location(location), grader_index)), mimetype="application/json") elif real_method == "DELETE": - # ??? Shoudl this return anything? Perhaps success fail? - CourseGradingModel.delete_grader(Location(['i4x', org, course, 'course', name]), grader_index) + # ??? Should this return anything? Perhaps success fail? + CourseGradingModel.delete_grader(Location(location), grader_index) return HttpResponse() elif request.method == 'POST': # post or put, doesn't matter. 
- return HttpResponse(json.dumps(CourseGradingModel.update_grader_from_json(Location(['i4x', org, course, 'course', name]), request.POST)), + return HttpResponse(json.dumps(CourseGradingModel.update_grader_from_json(Location(location), request.POST)), mimetype="application/json") +# # NB: expect_json failed on ["key", "key2"] and json payload +@login_required +@ensure_csrf_cookie +def course_advanced_updates(request, org, course, name): + """ + restful CRUD operations on metadata. The payload is a json rep of the metadata dicts. For delete, otoh, + the payload is either a key or a list of keys to delete. + + org, course: Attributes of the Location for the item to edit + """ + location = get_location_and_verify_access(request, org, course, name) + + real_method = get_request_method(request) + + if real_method == 'GET': + return HttpResponse(json.dumps(CourseMetadata.fetch(location)), mimetype="application/json") + elif real_method == 'DELETE': + return HttpResponse(json.dumps(CourseMetadata.delete_key(location, json.loads(request.body))), + mimetype="application/json") + elif real_method == 'POST' or real_method == 'PUT': + # NOTE: request.POST is messed up because expect_json cloned_request.POST.copy() is creating a defective entry w/ the whole payload as the key + request_body = json.loads(request.body) + #Whether or not to filter the tabs key out of the settings metadata + filter_tabs = True + #Check to see if the user instantiated any advanced components. This is a hack to add the open ended panel tab + #to a course automatically if the user has indicated that they want to edit the combinedopenended or peergrading + #module, and to remove it if they have removed the open ended elements. 
+ if ADVANCED_COMPONENT_POLICY_KEY in request_body: + #Check to see if the user instantiated any open ended components + found_oe_type = False + #Get the course so that we can scrape current tabs + course_module = modulestore().get_item(location) + for oe_type in OPEN_ENDED_COMPONENT_TYPES: + if oe_type in request_body[ADVANCED_COMPONENT_POLICY_KEY]: + #Add an open ended tab to the course if needed + changed, new_tabs = add_open_ended_panel_tab(course_module) + #If a tab has been added to the course, then send the metadata along to CourseMetadata.update_from_json + if changed: + request_body.update({'tabs': new_tabs}) + #Indicate that tabs should not be filtered out of the metadata + filter_tabs = False + #Set this flag to avoid the open ended tab removal code below. + found_oe_type = True + break + #If we did not find an open ended module type in the advanced settings, + # we may need to remove the open ended tab from the course. + if not found_oe_type: + #Remove open ended tab to the course if needed + changed, new_tabs = remove_open_ended_panel_tab(course_module) + if changed: + request_body.update({'tabs': new_tabs}) + #Indicate that tabs should not be filtered out of the metadata + filter_tabs = False + response_json = json.dumps(CourseMetadata.update_from_json(location, request_body, filter_tabs=filter_tabs)) + return HttpResponse(response_json, mimetype="application/json") + +@ensure_csrf_cookie +@login_required +def get_checklists(request, org, course, name): + """ + Send models, views, and html for displaying the course checklists. 
+ + org, course, name: Attributes of the Location for the item to edit + """ + location = get_location_and_verify_access(request, org, course, name) + + modulestore = get_modulestore(location) + course_module = modulestore.get_item(location) + new_course_template = Location('i4x', 'edx', 'templates', 'course', 'Empty') + template_module = modulestore.get_item(new_course_template) + + # If course was created before checklists were introduced, copy them over from the template. + copied = False + if not course_module.checklists: + course_module.checklists = template_module.checklists + copied = True + + checklists, modified = expand_checklist_action_urls(course_module) + if copied or modified: + modulestore.update_metadata(location, own_metadata(course_module)) + return render_to_response('checklists.html', + { + 'context_course': course_module, + 'checklists': checklists + }) + + +@ensure_csrf_cookie +@login_required +def update_checklist(request, org, course, name, checklist_index=None): + """ + restful CRUD operations on course checklists. The payload is a json rep of + the modified checklist. For PUT or POST requests, the index of the + checklist being modified must be included; the returned payload will + be just that one checklist. For GET requests, the returned payload + is a json representation of the list of all checklists. 
+ + org, course, name: Attributes of the Location for the item to edit + """ + location = get_location_and_verify_access(request, org, course, name) + modulestore = get_modulestore(location) + course_module = modulestore.get_item(location) + + real_method = get_request_method(request) + if real_method == 'POST' or real_method == 'PUT': + if checklist_index is not None and 0 <= int(checklist_index) < len(course_module.checklists): + index = int(checklist_index) + course_module.checklists[index] = json.loads(request.body) + checklists, modified = expand_checklist_action_urls(course_module) + modulestore.update_metadata(location, own_metadata(course_module)) + return HttpResponse(json.dumps(checklists[index]), mimetype="application/json") + else: + return HttpResponseBadRequest( + "Could not save checklist state because the checklist index was out of range or unspecified.", + content_type="text/plain") + elif request.method == 'GET': + # In the JavaScript view initialize method, we do a fetch to get all the checklists. + checklists, modified = expand_checklist_action_urls(course_module) + if modified: + modulestore.update_metadata(location, own_metadata(course_module)) + return HttpResponse(json.dumps(checklists), mimetype="application/json") + else: + return HttpResponseBadRequest("Unsupported request.", content_type="text/plain") + + +def expand_checklist_action_urls(course_module): + """ + Gets the checklists out of the course module and expands their action urls + if they have not yet been expanded. + + Returns the checklists with modified urls, as well as a boolean + indicating whether or not the checklists were modified. 
+ """ + checklists = course_module.checklists + modified = False + for checklist in checklists: + if not checklist.get('action_urls_expanded', False): + for item in checklist.get('items'): + item['action_url'] = get_url_reverse(item.get('action_url'), course_module) + checklist['action_urls_expanded'] = True + modified = True + + return checklists, modified + + @login_required @ensure_csrf_cookie def asset_index(request, org, course, name): @@ -1205,18 +1411,13 @@ def asset_index(request, org, course, name): org, course, name: Attributes of the Location for the item to edit """ - location = ['i4x', org, course, 'course', name] - - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() - + location = get_location_and_verify_access(request, org, course, name) upload_asset_callback_url = reverse('upload_asset', kwargs={ - 'org': org, - 'course': course, - 'coursename': name - }) + 'org': org, + 'course': course, + 'coursename': name + }) course_module = modulestore().get_item(location) @@ -1298,13 +1499,10 @@ def create_new_course(request): new_course = modulestore('direct').clone_item(template, dest_location) if display_name is not None: - new_course.metadata['display_name'] = display_name - - # we need a 'data_dir' for legacy reasons - new_course.metadata['data_dir'] = uuid4().hex + new_course.display_name = display_name # set a default start date to now - new_course.metadata['start'] = stringify_time(time.gmtime()) + new_course.start = time.gmtime() initialize_course_tabs(new_course) @@ -1323,23 +1521,19 @@ def initialize_course_tabs(course): # This logic is repeated in xmodule/modulestore/tests/factories.py # so if you change anything here, you need to also change it there. 
course.tabs = [{"type": "courseware"}, - {"type": "course_info", "name": "Course Info"}, + {"type": "course_info", "name": "Course Info"}, {"type": "discussion", "name": "Discussion"}, {"type": "wiki", "name": "Wiki"}, {"type": "progress", "name": "Progress"}] - modulestore('direct').update_metadata(course.location.url(), course.own_metadata) + modulestore('direct').update_metadata(course.location.url(), own_metadata(course)) @ensure_csrf_cookie @login_required def import_course(request, org, course, name): - location = ['i4x', org, course, 'course', name] - - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() + location = get_location_and_verify_access(request, org, course, name) if request.method == 'POST': filename = request.FILES['course-data'].name @@ -1402,20 +1596,14 @@ def import_course(request, org, course, name): return render_to_response('import.html', { 'context_course': course_module, 'active_tab': 'import', - 'successful_import_redirect_url': reverse('course_index', args=[ - course_module.location.org, - course_module.location.course, - course_module.location.name]) + 'successful_import_redirect_url': get_url_reverse('CourseOutline', course_module) }) @ensure_csrf_cookie @login_required def generate_export_course(request, org, course, name): - location = ['i4x', org, course, 'course', name] - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() + location = get_location_and_verify_access(request, org, course, name) loc = Location(location) export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz") @@ -1448,11 +1636,9 @@ def generate_export_course(request, org, course, name): @login_required def export_course(request, org, course, name): - location = ['i4x', org, course, 'course', name] + location = get_location_and_verify_access(request, org, course, name) + course_module = 
modulestore().get_item(location) - # check that logged in user has permissions to this item - if not has_access(request.user, location): - raise PermissionDenied() return render_to_response('export.html', { 'context_course': course_module, @@ -1467,3 +1653,39 @@ def event(request): console logs don't get distracted :-) ''' return HttpResponse(True) + + +def render_404(request): + return HttpResponseNotFound(render_to_string('404.html', {})) + + +def render_500(request): + return HttpResponseServerError(render_to_string('500.html', {})) + + +def get_location_and_verify_access(request, org, course, name): + """ + Create the location tuple verify that the user has permissions + to view the location. Returns the location. + """ + location = ['i4x', org, course, 'course', name] + + # check that logged in user has permissions to this item + if not has_access(request.user, location): + raise PermissionDenied() + + return location + + +def get_request_method(request): + """ + Using HTTP_X_HTTP_METHOD_OVERRIDE, in the request metadata, determine + what type of request came from the client, and return it. + """ + # NB: we're setting Backbone.emulateHTTP to true on the client so everything comes as a post!!! 
+ if request.method == 'POST' and 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META: + real_method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] + else: + real_method = request.method + + return real_method diff --git a/cms/djangoapps/models/settings/course_details.py b/cms/djangoapps/models/settings/course_details.py index b27f4e3804..876000c7fe 100644 --- a/cms/djangoapps/models/settings/course_details.py +++ b/cms/djangoapps/models/settings/course_details.py @@ -1,13 +1,13 @@ -from xmodule.modulestore.django import modulestore from xmodule.modulestore import Location from xmodule.modulestore.exceptions import ItemNotFoundError +from xmodule.modulestore.inheritance import own_metadata import json from json.encoder import JSONEncoder import time from contentstore.utils import get_modulestore -from util.converters import jsdate_to_time, time_to_date -from cms.djangoapps.models.settings import course_grading -from cms.djangoapps.contentstore.utils import update_item +from models.settings import course_grading +from contentstore.utils import update_item +from xmodule.fields import Date import re import logging @@ -43,25 +43,25 @@ class CourseDetails(object): temploc = course_location._replace(category='about', name='syllabus') try: - course.syllabus = get_modulestore(temploc).get_item(temploc).definition['data'] + course.syllabus = get_modulestore(temploc).get_item(temploc).data except ItemNotFoundError: pass temploc = temploc._replace(name='overview') try: - course.overview = get_modulestore(temploc).get_item(temploc).definition['data'] + course.overview = get_modulestore(temploc).get_item(temploc).data except ItemNotFoundError: pass temploc = temploc._replace(name='effort') try: - course.effort = get_modulestore(temploc).get_item(temploc).definition['data'] + course.effort = get_modulestore(temploc).get_item(temploc).data except ItemNotFoundError: pass temploc = temploc._replace(name='video') try: - raw_video = 
get_modulestore(temploc).get_item(temploc).definition['data'] + raw_video = get_modulestore(temploc).get_item(temploc).data course.intro_video = CourseDetails.parse_video_tag(raw_video) except ItemNotFoundError: pass @@ -80,8 +80,14 @@ class CourseDetails(object): dirty = False + # In the descriptor's setter, the date is converted to JSON using Date's to_json method. + # Calling to_json on something that is already JSON doesn't work. Since reaching directly + # into the model is nasty, convert the JSON Date to a Python date, which is what the + # setter expects as input. + date = Date() + if 'start_date' in jsondict: - converted = jsdate_to_time(jsondict['start_date']) + converted = date.from_json(jsondict['start_date']) else: converted = None if converted != descriptor.start: @@ -89,7 +95,7 @@ class CourseDetails(object): descriptor.start = converted if 'end_date' in jsondict: - converted = jsdate_to_time(jsondict['end_date']) + converted = date.from_json(jsondict['end_date']) else: converted = None @@ -98,7 +104,7 @@ class CourseDetails(object): descriptor.end = converted if 'enrollment_start' in jsondict: - converted = jsdate_to_time(jsondict['enrollment_start']) + converted = date.from_json(jsondict['enrollment_start']) else: converted = None @@ -107,7 +113,7 @@ class CourseDetails(object): descriptor.enrollment_start = converted if 'enrollment_end' in jsondict: - converted = jsdate_to_time(jsondict['enrollment_end']) + converted = date.from_json(jsondict['enrollment_end']) else: converted = None @@ -116,7 +122,7 @@ class CourseDetails(object): descriptor.enrollment_end = converted if dirty: - get_modulestore(course_location).update_metadata(course_location, descriptor.metadata) + get_modulestore(course_location).update_metadata(course_location, own_metadata(descriptor)) # NOTE: below auto writes to the db w/o verifying that any of the fields actually changed # to make faster, could compare against db or could have client send over a list of which fields 
changed. @@ -133,7 +139,6 @@ class CourseDetails(object): recomposed_video_tag = CourseDetails.recompose_video_tag(jsondict['intro_video']) update_item(temploc, recomposed_video_tag) - # Could just generate and return a course obj w/o doing any db reads, but I put the reads in as a means to confirm # it persisted correctly return CourseDetails.fetch(course_location) @@ -178,6 +183,6 @@ class CourseSettingsEncoder(json.JSONEncoder): elif isinstance(obj, Location): return obj.dict() elif isinstance(obj, time.struct_time): - return time_to_date(obj) + return Date().to_json(obj) else: return JSONEncoder.default(self, obj) diff --git a/cms/djangoapps/models/settings/course_grading.py b/cms/djangoapps/models/settings/course_grading.py index 3d0b8f78af..ee9b4ac0eb 100644 --- a/cms/djangoapps/models/settings/course_grading.py +++ b/cms/djangoapps/models/settings/course_grading.py @@ -1,7 +1,6 @@ from xmodule.modulestore import Location from contentstore.utils import get_modulestore -import re -from util import converters +from datetime import timedelta class CourseGradingModel(object): @@ -91,7 +90,7 @@ class CourseGradingModel(object): descriptor.raw_grader = graders_parsed descriptor.grade_cutoffs = jsondict['grade_cutoffs'] - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) CourseGradingModel.update_grace_period_from_json(course_location, jsondict['grace_period']) return CourseGradingModel.fetch(course_location) @@ -119,7 +118,7 @@ class CourseGradingModel(object): else: descriptor.raw_grader.append(grader) - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index]) @@ -134,7 +133,7 @@ class CourseGradingModel(object): 
descriptor = get_modulestore(course_location).get_item(course_location) descriptor.grade_cutoffs = cutoffs - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) return cutoffs @@ -156,11 +155,11 @@ class CourseGradingModel(object): graceperiodjson = graceperiodjson['grace_period'] # lms requires these to be in a fixed order - grace_rep = "{0[hours]:d} hours {0[minutes]:d} minutes {0[seconds]:d} seconds".format(graceperiodjson) + grace_timedelta = timedelta(**graceperiodjson) descriptor = get_modulestore(course_location).get_item(course_location) - descriptor.metadata['graceperiod'] = grace_rep - get_modulestore(course_location).update_metadata(course_location, descriptor.metadata) + descriptor.lms.graceperiod = grace_timedelta + get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata) @staticmethod def delete_grader(course_location, index): @@ -176,7 +175,7 @@ class CourseGradingModel(object): del descriptor.raw_grader[index] # force propagation to definition descriptor.raw_grader = descriptor.raw_grader - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) # NOTE cannot delete cutoffs. 
May be useful to reset @staticmethod @@ -189,7 +188,7 @@ class CourseGradingModel(object): descriptor = get_modulestore(course_location).get_item(course_location) descriptor.grade_cutoffs = descriptor.defaut_grading_policy['GRADE_CUTOFFS'] - get_modulestore(course_location).update_item(course_location, descriptor.definition['data']) + get_modulestore(course_location).update_item(course_location, descriptor._model_data._kvs._data) return descriptor.grade_cutoffs @@ -202,8 +201,8 @@ class CourseGradingModel(object): course_location = Location(course_location) descriptor = get_modulestore(course_location).get_item(course_location) - if 'graceperiod' in descriptor.metadata: del descriptor.metadata['graceperiod'] - get_modulestore(course_location).update_metadata(course_location, descriptor.metadata) + del descriptor.lms.graceperiod + get_modulestore(course_location).update_metadata(course_location, descriptor._model_data._kvs._metadata) @staticmethod def get_section_grader_type(location): @@ -212,7 +211,7 @@ class CourseGradingModel(object): descriptor = get_modulestore(location).get_item(location) return { - "graderType": descriptor.metadata.get('format', u"Not Graded"), + "graderType": descriptor.lms.format if descriptor.lms.format is not None else 'Not Graded', "location": location, "id": 99 # just an arbitrary value to } @@ -224,23 +223,41 @@ class CourseGradingModel(object): descriptor = get_modulestore(location).get_item(location) if 'graderType' in jsondict and jsondict['graderType'] != u"Not Graded": - descriptor.metadata['format'] = jsondict.get('graderType') - descriptor.metadata['graded'] = True + descriptor.lms.format = jsondict.get('graderType') + descriptor.lms.graded = True else: - if 'format' in descriptor.metadata: del descriptor.metadata['format'] - if 'graded' in descriptor.metadata: del descriptor.metadata['graded'] + del descriptor.lms.format + del descriptor.lms.graded - get_modulestore(location).update_metadata(location, descriptor.metadata) + 
get_modulestore(location).update_metadata(location, descriptor._model_data._kvs._metadata) @staticmethod def convert_set_grace_period(descriptor): - # 5 hours 59 minutes 59 seconds => { hours: 5, minutes : 59, seconds : 59} - rawgrace = descriptor.metadata.get('graceperiod', None) + # 5 hours 59 minutes 59 seconds => converted to iso format + rawgrace = descriptor.lms.graceperiod if rawgrace: - parsedgrace = {str(key): int(val) for (val, key) in re.findall('\s*(\d+)\s*(\w+)', rawgrace)} - return parsedgrace - else: return None + hours_from_days = rawgrace.days*24 + seconds = rawgrace.seconds + hours_from_seconds = int(seconds / 3600) + hours = hours_from_days + hours_from_seconds + seconds -= hours_from_seconds * 3600 + minutes = int(seconds / 60) + seconds -= minutes * 60 + + graceperiod = {'hours': 0, 'minutes': 0, 'seconds': 0} + if hours > 0: + graceperiod['hours'] = hours + + if minutes > 0: + graceperiod['minutes'] = minutes + + if seconds > 0: + graceperiod['seconds'] = seconds + + return graceperiod + else: + return None @staticmethod def parse_grader(json_grader): diff --git a/cms/djangoapps/models/settings/course_metadata.py b/cms/djangoapps/models/settings/course_metadata.py new file mode 100644 index 0000000000..70f69315ff --- /dev/null +++ b/cms/djangoapps/models/settings/course_metadata.py @@ -0,0 +1,97 @@ +from xmodule.modulestore import Location +from contentstore.utils import get_modulestore +from xmodule.x_module import XModuleDescriptor +from xmodule.modulestore.inheritance import own_metadata +from xblock.core import Scope +from xmodule.course_module import CourseDescriptor +import copy + +class CourseMetadata(object): + ''' + For CRUD operations on metadata fields which do not have specific editors + on the other pages including any user generated ones. + The objects have no predefined attrs but instead are obj encodings of the + editable metadata. 
+ ''' + FILTERED_LIST = XModuleDescriptor.system_metadata_fields + ['start', 'end', + 'enrollment_start', 'enrollment_end', 'tabs', 'graceperiod', 'checklists'] + + @classmethod + def fetch(cls, course_location): + """ + Fetch the key:value editable course details for the given course from + persistence and return a CourseMetadata model. + """ + if not isinstance(course_location, Location): + course_location = Location(course_location) + + course = {} + + descriptor = get_modulestore(course_location).get_item(course_location) + + for field in descriptor.fields + descriptor.lms.fields: + if field.scope != Scope.settings: + continue + + if field.name not in cls.FILTERED_LIST: + course[field.name] = field.read_json(descriptor) + + return course + + @classmethod + def update_from_json(cls, course_location, jsondict, filter_tabs=True): + """ + Decode the json into CourseMetadata and save any changed attrs to the db. + + Ensures none of the fields are in the blacklist. + """ + descriptor = get_modulestore(course_location).get_item(course_location) + + dirty = False + + #Copy the filtered list to avoid permanently changing the class attribute + filtered_list = copy.copy(cls.FILTERED_LIST) + #Don't filter on the tab attribute if filter_tabs is False + if not filter_tabs: + filtered_list.remove("tabs") + + for k, v in jsondict.iteritems(): + # should it be an error if one of the filtered list items is in the payload? 
+ if k in filtered_list: + continue + + if hasattr(descriptor, k) and getattr(descriptor, k) != v: + dirty = True + value = getattr(CourseDescriptor, k).from_json(v) + setattr(descriptor, k, value) + elif hasattr(descriptor.lms, k) and getattr(descriptor.lms, k) != k: + dirty = True + value = getattr(CourseDescriptor.lms, k).from_json(v) + setattr(descriptor.lms, k, value) + + if dirty: + get_modulestore(course_location).update_metadata(course_location, + own_metadata(descriptor)) + + # Could just generate and return a course obj w/o doing any db reads, + # but I put the reads in as a means to confirm it persisted correctly + return cls.fetch(course_location) + + @classmethod + def delete_key(cls, course_location, payload): + ''' + Remove the given metadata key(s) from the course. payload can be a + single key or [key..] + ''' + descriptor = get_modulestore(course_location).get_item(course_location) + + for key in payload['deleteKeys']: + if hasattr(descriptor, key): + delattr(descriptor, key) + elif hasattr(descriptor.lms, key): + delattr(descriptor.lms, key) + + get_modulestore(course_location).update_metadata(course_location, + own_metadata(descriptor)) + + return cls.fetch(course_location) diff --git a/cms/envs/aws.py b/cms/envs/aws.py index a147f84531..be7816d21f 100644 --- a/cms/envs/aws.py +++ b/cms/envs/aws.py @@ -62,3 +62,6 @@ AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"] DATABASES = AUTH_TOKENS['DATABASES'] MODULESTORE = AUTH_TOKENS['MODULESTORE'] CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE'] + +# Datadog for events! 
+DATADOG_API = AUTH_TOKENS.get("DATADOG_API") \ No newline at end of file diff --git a/cms/envs/common.py b/cms/envs/common.py index 30aac6ea01..12fa09947a 100644 --- a/cms/envs/common.py +++ b/cms/envs/common.py @@ -20,7 +20,6 @@ Longer TODO: """ import sys -import tempfile import os.path import os import lms.envs.common @@ -59,7 +58,8 @@ sys.path.append(COMMON_ROOT / 'lib') ############################# WEB CONFIGURATION ############################# # This is where we stick our compiled template files. -MAKO_MODULE_DIR = tempfile.mkdtemp('mako') +from tempdir import mkdtemp_clean +MAKO_MODULE_DIR = mkdtemp_clean('mako') MAKO_TEMPLATES = {} MAKO_TEMPLATES['main'] = [ PROJECT_ROOT / 'templates', @@ -74,8 +74,8 @@ TEMPLATE_DIRS = MAKO_TEMPLATES['main'] MITX_ROOT_URL = '' -LOGIN_REDIRECT_URL = MITX_ROOT_URL + '/login' -LOGIN_URL = MITX_ROOT_URL + '/login' +LOGIN_REDIRECT_URL = MITX_ROOT_URL + '/signin' +LOGIN_URL = MITX_ROOT_URL + '/signin' TEMPLATE_CONTEXT_PROCESSORS = ( @@ -113,6 +113,7 @@ TEMPLATE_LOADERS = ( MIDDLEWARE_CLASSES = ( 'contentserver.middleware.StaticContentServer', + 'request_cache.middleware.RequestCache', 'django.middleware.cache.UpdateCacheMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', @@ -172,6 +173,9 @@ LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identi USE_I18N = True USE_L10N = True +# Tracking +TRACK_MAX_EVENT = 10000 + # Messages MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' @@ -275,6 +279,10 @@ INSTALLED_APPS = ( 'auth', 'student', # misleading name due to sharing with lms 'course_groups', # not used in cms (yet), but tests run + + # tracking + 'track', + # For asset pipelining 'pipeline', 'staticfiles', diff --git a/cms/envs/dev.py b/cms/envs/dev.py index 3dee93a398..c4465a0e06 100644 --- a/cms/envs/dev.py +++ b/cms/envs/dev.py @@ -4,9 +4,6 @@ This config file runs the simplest dev environment""" from .common import * from 
logsettings import get_logger_config -import logging -import sys - DEBUG = True TEMPLATE_DEBUG = DEBUG LOGGING = get_logger_config(ENV_ROOT / "log", @@ -99,6 +96,13 @@ CACHES = { 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', + }, + + 'mongo_metadata_inheritance': { + 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', + 'LOCATION': '/var/tmp/mongo_metadata_inheritance', + 'TIMEOUT': 300, + 'KEY_FUNCTION': 'util.memcache.safe_key', } } @@ -107,3 +111,39 @@ CACHE_TIMEOUT = 0 # Dummy secret key for dev SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd' + +################################ PIPELINE ################################# + +PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT) + +################################ DEBUG TOOLBAR ################################# +INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo') +MIDDLEWARE_CLASSES += ('django_comment_client.utils.QueryCountDebugMiddleware', + 'debug_toolbar.middleware.DebugToolbarMiddleware',) +INTERNAL_IPS = ('127.0.0.1',) + +DEBUG_TOOLBAR_PANELS = ( + 'debug_toolbar.panels.version.VersionDebugPanel', + 'debug_toolbar.panels.timer.TimerDebugPanel', + 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel', + 'debug_toolbar.panels.headers.HeaderDebugPanel', + 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel', + 'debug_toolbar.panels.sql.SQLDebugPanel', + 'debug_toolbar.panels.signals.SignalDebugPanel', + 'debug_toolbar.panels.logger.LoggingPanel', + 'debug_toolbar_mongo.panel.MongoDebugPanel', + + # Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and + # Django=1.3.1/1.4 where requests to views get duplicated (your method gets + # hit twice). So you can uncomment when you need to diagnose performance + # problems, but you shouldn't leave it on. 
+ # 'debug_toolbar.panels.profiling.ProfilingDebugPanel', + ) + +DEBUG_TOOLBAR_CONFIG = { + 'INTERCEPT_REDIRECTS': False +} + +# To see stacktraces for MongoDB queries, set this to True. +# Stacktraces slow down page loads drastically (for pages with lots of queries). +DEBUG_TOOLBAR_MONGO_STACKTRACES = True diff --git a/cms/envs/test.py b/cms/envs/test.py index 7f39e6818b..59664bfd40 100644 --- a/cms/envs/test.py +++ b/cms/envs/test.py @@ -27,6 +27,9 @@ STATIC_ROOT = TEST_ROOT / "staticfiles" GITHUB_REPO_ROOT = TEST_ROOT / "data" COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data" +# Makes the tests run much faster... +SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead + # TODO (cpennington): We need to figure out how envs/test.py can inject things into common.py so that we don't have to repeat this sort of thing STATICFILES_DIRS = [ COMMON_ROOT / "static", @@ -55,6 +58,10 @@ MODULESTORE = { 'direct': { 'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore', 'OPTIONS': modulestore_options + }, + 'draft': { + 'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore', + 'OPTIONS': modulestore_options } } @@ -95,6 +102,13 @@ CACHES = { 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', + }, + + 'mongo_metadata_inheritance': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'LOCATION': '/var/tmp/mongo_metadata_inheritance', + 'TIMEOUT': 300, + 'KEY_FUNCTION': 'util.memcache.safe_key', } } diff --git a/cms/one_time_startup.py b/cms/one_time_startup.py new file mode 100644 index 0000000000..6e88fed439 --- /dev/null +++ b/cms/one_time_startup.py @@ -0,0 +1,16 @@ +from dogapi import dog_http_api, dog_stats_api +from django.conf import settings +from xmodule.modulestore.django import modulestore +from request_cache.middleware import RequestCache + +from django.core.cache import get_cache, InvalidCacheBackendError + +cache = get_cache('mongo_metadata_inheritance') +for store_name in 
settings.MODULESTORE: + store = modulestore(store_name) + store.metadata_inheritance_cache_subsystem = cache + store.request_cache = RequestCache.get_request_cache() + +if hasattr(settings, 'DATADOG_API'): + dog_http_api.api_key = settings.DATADOG_API + dog_stats_api.start(api_key=settings.DATADOG_API, statsd=True) diff --git a/cms/static/client_templates/advanced_entry.html b/cms/static/client_templates/advanced_entry.html new file mode 100644 index 0000000000..6be22e2116 --- /dev/null +++ b/cms/static/client_templates/advanced_entry.html @@ -0,0 +1,11 @@ +
          1. +
            + + +
            + +
            + + +
            +
          2. \ No newline at end of file diff --git a/cms/static/client_templates/checklist.html b/cms/static/client_templates/checklist.html new file mode 100644 index 0000000000..ec6ff4e892 --- /dev/null +++ b/cms/static/client_templates/checklist.html @@ -0,0 +1,61 @@ +<% var allChecked = itemsChecked == items.length; %> +
            + class="course-checklist is-completed" + <% } else { %> + class="course-checklist" + <% } %> + id="<%= 'course-checklist' + checklistIndex %>"> + <% var widthPercentage = 'width:' + percentChecked + '%;'; %> + + <%= percentChecked %>% of checklist completed +
            +

            + + <%= checklistShortDescription %>

            + + Tasks Completed: <%= itemsChecked %>/<%= items.length %> + + +
            + +
              + <% var taskIndex = 0; %> + <% _.each(items, function(item) { %> + <% var checked = item['is_checked']; %> +
            • + class="task is-completed" + <% } else { %> + class="task" + <% } %> + > + <% var taskId = 'course-checklist' + checklistIndex + '-task' + taskIndex; %> + + checked="checked" + <% } %> + > + + + <% if (item['action_text'] !== '' && item['action_url'] !== '') { %> + + <% } %> +
            • + + <% taskIndex+=1; }) %> + +
            +
            \ No newline at end of file diff --git a/cms/static/client_templates/course_grade_policy.html b/cms/static/client_templates/course_grade_policy.html index c9a21280dd..db129614f6 100644 --- a/cms/static/client_templates/course_grade_policy.html +++ b/cms/static/client_templates/course_grade_policy.html @@ -1,69 +1,37 @@ -
          3. -
            - +
          4. +
            + + + e.g. Homework, Midterm Exams +
            -
            -
            - - e.g. Homework, Labs, Midterm Exams, Final Exam -
            -
            - - -
            - - -
            -
            - - e.g. HW, Midterm, Final -
            -
            -
            - -
            - - -
            -
            - - e.g. 25% -
            -
            -
            - -
            - - -
            -
            - - total exercises assigned -
            -
            -
            - -
            - - -
            -
            - - total exercises that won't be graded -
            -
            -
            - Delete +
            + + + e.g. HW, Midterm +
            + +
            + + + e.g. 25% +
            + +
            + + + total exercises assigned +
            + +
            + + + total exercises that won't be graded +
            + +
            + Delete +
          5. diff --git a/cms/static/coffee/files.json b/cms/static/coffee/files.json index 2249813b04..e7a66b5bc0 100644 --- a/cms/static/coffee/files.json +++ b/cms/static/coffee/files.json @@ -1,12 +1,12 @@ { - "js_files": [ - "/static/js/vendor/RequireJS.js", - "/static/js/vendor/jquery.min.js", - "/static/js/vendor/jquery-ui.min.js", - "/static/js/vendor/jquery.ui.draggable.js", - "/static/js/vendor/jquery.cookie.js", - "/static/js/vendor/json2.js", - "/static/js/vendor/underscore-min.js", - "/static/js/vendor/backbone-min.js" + "static_files": [ + "js/vendor/RequireJS.js", + "js/vendor/jquery.min.js", + "js/vendor/jquery-ui.min.js", + "js/vendor/jquery.ui.draggable.js", + "js/vendor/jquery.cookie.js", + "js/vendor/json2.js", + "js/vendor/underscore-min.js", + "js/vendor/backbone-min.js" ] } diff --git a/cms/static/coffee/src/views/tabs.coffee b/cms/static/coffee/src/views/tabs.coffee index 5a826c1794..9fbe4e5789 100644 --- a/cms/static/coffee/src/views/tabs.coffee +++ b/cms/static/coffee/src/views/tabs.coffee @@ -1,6 +1,4 @@ class CMS.Views.TabsEdit extends Backbone.View - events: - 'click .new-tab': 'addNewTab' initialize: => @$('.component').each((idx, element) => @@ -13,6 +11,7 @@ class CMS.Views.TabsEdit extends Backbone.View ) ) + @options.mast.find('.new-tab').on('click', @addNewTab) @$('.components').sortable( handle: '.drag-handle' update: @tabMoved diff --git a/cms/static/coffee/src/views/unit.coffee b/cms/static/coffee/src/views/unit.coffee index 7f5fa4adce..42127b2800 100644 --- a/cms/static/coffee/src/views/unit.coffee +++ b/cms/static/coffee/src/views/unit.coffee @@ -34,7 +34,10 @@ class CMS.Views.UnitEdit extends Backbone.View @$('.components').sortable( handle: '.drag-handle' - update: (event, ui) => @model.save(children: @components()) + update: (event, ui) => + payload = children : @components() + options = success : => @model.unset('children') + @model.save(payload, options) helper: 'clone' opacity: '0.5' placeholder: 'component-placeholder' 
@@ -109,7 +112,14 @@ class CMS.Views.UnitEdit extends Backbone.View id: $component.data('id') }, => $component.remove() - @model.save(children: @components()) + # b/c we don't vigilantly keep children up to date + # get rid of it before it hurts someone + # sorry for the js, i couldn't figure out the coffee equivalent + `_this.model.save({children: _this.components()}, + {success: function(model) { + model.unset('children'); + }} + );` ) deleteDraft: (event) -> @@ -157,7 +167,7 @@ class CMS.Views.UnitEdit extends Backbone.View class CMS.Views.UnitEdit.NameEdit extends Backbone.View events: - "keyup .unit-display-name-input": "saveName" + 'change .unit-display-name-input': 'saveName' initialize: => @model.on('change:metadata', @render) @@ -180,29 +190,10 @@ class CMS.Views.UnitEdit.NameEdit extends Backbone.View # Treat the metadata dictionary as immutable metadata = $.extend({}, @model.get('metadata')) metadata.display_name = @$('.unit-display-name-input').val() + @model.save(metadata: metadata) + # Update name shown in the right-hand side location summary. 
$('.unit-location .editing .unit-name').html(metadata.display_name) - inputField = this.$el.find('input') - - # add a spinner - @$spinner.css({ - 'position': 'absolute', - 'top': Math.floor(inputField.position().top + (inputField.outerHeight() / 2) + 3), - 'left': inputField.position().left + inputField.outerWidth() - 24, - 'margin-top': '-10px' - }); - inputField.after(@$spinner); - @$spinner.fadeIn(10) - - # save the name after a slight delay - if @timer - clearTimeout @timer - @timer = setTimeout( => - @model.save(metadata: metadata) - @timer = null - @$spinner.delay(500).fadeOut(150) - , 500) - class CMS.Views.UnitEdit.LocationState extends Backbone.View initialize: => @model.on('change:state', @render) diff --git a/cms/static/img/hiw-feature1.png b/cms/static/img/hiw-feature1.png new file mode 100644 index 0000000000..3cfd48d066 Binary files /dev/null and b/cms/static/img/hiw-feature1.png differ diff --git a/cms/static/img/hiw-feature2.png b/cms/static/img/hiw-feature2.png new file mode 100644 index 0000000000..9442325dd5 Binary files /dev/null and b/cms/static/img/hiw-feature2.png differ diff --git a/cms/static/img/hiw-feature3.png b/cms/static/img/hiw-feature3.png new file mode 100644 index 0000000000..fa6b81ae89 Binary files /dev/null and b/cms/static/img/hiw-feature3.png differ diff --git a/cms/static/img/html-icon.png b/cms/static/img/html-icon.png index e739f2fc11..8f576178b2 100644 Binary files a/cms/static/img/html-icon.png and b/cms/static/img/html-icon.png differ diff --git a/cms/static/img/large-advanced-icon.png b/cms/static/img/large-advanced-icon.png new file mode 100644 index 0000000000..c6a19ea5a9 Binary files /dev/null and b/cms/static/img/large-advanced-icon.png differ diff --git a/cms/static/img/large-annotations-icon.png b/cms/static/img/large-annotations-icon.png new file mode 100644 index 0000000000..249193521f Binary files /dev/null and b/cms/static/img/large-annotations-icon.png differ diff --git 
a/cms/static/img/large-discussion-icon.png b/cms/static/img/large-discussion-icon.png index 2f0bfea98f..cebf332769 100644 Binary files a/cms/static/img/large-discussion-icon.png and b/cms/static/img/large-discussion-icon.png differ diff --git a/cms/static/img/large-freeform-icon.png b/cms/static/img/large-freeform-icon.png index b1d195a7ca..0d5e454f58 100644 Binary files a/cms/static/img/large-freeform-icon.png and b/cms/static/img/large-freeform-icon.png differ diff --git a/cms/static/img/large-openended-icon.png b/cms/static/img/large-openended-icon.png new file mode 100644 index 0000000000..4d31815413 Binary files /dev/null and b/cms/static/img/large-openended-icon.png differ diff --git a/cms/static/img/large-problem-icon.png b/cms/static/img/large-problem-icon.png index b962d42b14..a30ab8eac8 100644 Binary files a/cms/static/img/large-problem-icon.png and b/cms/static/img/large-problem-icon.png differ diff --git a/cms/static/img/large-video-icon.png b/cms/static/img/large-video-icon.png index 392851324c..f1ab048b4c 100644 Binary files a/cms/static/img/large-video-icon.png and b/cms/static/img/large-video-icon.png differ diff --git a/cms/static/img/logo-edx-studio-white.png b/cms/static/img/logo-edx-studio-white.png new file mode 100644 index 0000000000..3e3ee63622 Binary files /dev/null and b/cms/static/img/logo-edx-studio-white.png differ diff --git a/cms/static/img/logo-edx-studio.png b/cms/static/img/logo-edx-studio.png new file mode 100644 index 0000000000..006194a195 Binary files /dev/null and b/cms/static/img/logo-edx-studio.png differ diff --git a/cms/static/img/pl-1x1-000.png b/cms/static/img/pl-1x1-000.png new file mode 100644 index 0000000000..b94b7a9746 Binary files /dev/null and b/cms/static/img/pl-1x1-000.png differ diff --git a/cms/static/img/pl-1x1-fff.png b/cms/static/img/pl-1x1-fff.png new file mode 100644 index 0000000000..7081c75d36 Binary files /dev/null and b/cms/static/img/pl-1x1-fff.png differ diff --git 
a/cms/static/img/preview-lms-staticpages.png b/cms/static/img/preview-lms-staticpages.png new file mode 100644 index 0000000000..05a62f7c7f Binary files /dev/null and b/cms/static/img/preview-lms-staticpages.png differ diff --git a/cms/static/img/thumb-hiw-feature1.png b/cms/static/img/thumb-hiw-feature1.png new file mode 100644 index 0000000000..b2dc0c00ee Binary files /dev/null and b/cms/static/img/thumb-hiw-feature1.png differ diff --git a/cms/static/img/thumb-hiw-feature2.png b/cms/static/img/thumb-hiw-feature2.png new file mode 100644 index 0000000000..e96bcad1aa Binary files /dev/null and b/cms/static/img/thumb-hiw-feature2.png differ diff --git a/cms/static/img/thumb-hiw-feature3.png b/cms/static/img/thumb-hiw-feature3.png new file mode 100644 index 0000000000..f694fca516 Binary files /dev/null and b/cms/static/img/thumb-hiw-feature3.png differ diff --git a/cms/static/js/base.js b/cms/static/js/base.js index 7e55d2b8d8..7466233331 100644 --- a/cms/static/js/base.js +++ b/cms/static/js/base.js @@ -5,7 +5,7 @@ var $newComponentItem; var $changedInput; var $spinner; -$(document).ready(function() { +$(document).ready(function () { $body = $('body'); $modal = $('.history-modal'); $modalCover = $(' diff --git a/cms/templates/activation_complete.html b/cms/templates/activation_complete.html index 5d9437ccb3..1e195a632c 100644 --- a/cms/templates/activation_complete.html +++ b/cms/templates/activation_complete.html @@ -5,7 +5,7 @@

            Activation Complete!

            -

            Thanks for activating your account. Log in here.

            +

            Thanks for activating your account. Log in here.

            diff --git a/cms/templates/asset_index.html b/cms/templates/asset_index.html index 01766e2dac..ea759d38af 100644 --- a/cms/templates/asset_index.html +++ b/cms/templates/asset_index.html @@ -1,7 +1,7 @@ <%inherit file="base.html" /> <%! from django.core.urlresolvers import reverse %> -<%block name="bodyclass">assets -<%block name="title">Courseware Assets +<%block name="bodyclass">is-signedin course uploads +<%block name="title">Files & Uploads <%namespace name='static' file='static_content.html'/> @@ -28,17 +28,32 @@ {{uploadDate}} - + +
            +
            +
            + Course Content +

            Files & Uploads

            +
            + + +
            +
            +
            @@ -69,7 +84,7 @@ ${asset['uploadDate']} - + % endfor @@ -100,7 +115,7 @@
            - +
            diff --git a/cms/templates/base.html b/cms/templates/base.html index 84f10fc2d1..e852b5d7fe 100644 --- a/cms/templates/base.html +++ b/cms/templates/base.html @@ -5,23 +5,29 @@ + + <%block name="title"></%block> | + % if context_course: + <% ctx_loc = context_course.location %> + ${context_course.display_name_with_default} | + % endif + edX Studio + + + + <%static:css group='base-style'/> - - - <%block name="title"></%block> - - - + <%block name="header_extras"> - <%include file="widgets/header.html" args="active_tab=active_tab"/> + <%include file="widgets/header.html" /> <%include file="courseware_vendor_js.html"/> @@ -39,6 +45,7 @@ + <%block name="content"> + <%include file="widgets/footer.html" /> <%block name="jsextra"> - diff --git a/cms/templates/checklists.html b/cms/templates/checklists.html new file mode 100644 index 0000000000..67ad6ce640 --- /dev/null +++ b/cms/templates/checklists.html @@ -0,0 +1,74 @@ +<%inherit file="base.html" /> +<%! from django.core.urlresolvers import reverse %> +<%block name="title">Course Checklists +<%block name="bodyclass">is-signedin course uxdesign checklists + +<%namespace name='static' file='static_content.html'/> +<%block name="jsextra"> + + + + + + + + + + +<%block name="content"> +
            +
            +
            + Tools +

            Course Checklists

            +
            +
            +
            + +
            +
            +
            + +

            Current Checklists

            + +
            + + +
            +
            + diff --git a/cms/templates/course_index.html b/cms/templates/course_index.html deleted file mode 100644 index e490ad7817..0000000000 --- a/cms/templates/course_index.html +++ /dev/null @@ -1,14 +0,0 @@ -<%inherit file="base.html" /> -<%block name="title">Course Manager -<%include file="widgets/header.html"/> - -<%block name="content"> -
            - - <%include file="widgets/navigation.html"/> - -
            -
            - -
            - diff --git a/cms/templates/course_info.html b/cms/templates/course_info.html index 83d829efa0..f9166bf166 100644 --- a/cms/templates/course_info.html +++ b/cms/templates/course_info.html @@ -2,8 +2,9 @@ <%namespace name='static' file='static_content.html'/> -<%block name="title">Course Info -<%block name="bodyclass">course-info +<%block name="title">Course Updates +<%block name="bodyclass">is-signedin course course-info updates + <%block name="jsextra"> @@ -19,8 +20,8 @@ <%block name="content"> +
            +
            +
            + Course Content +

            Static Pages

            +
            + + +
            +
            + +
            +
            + +
            +
            +
            -
            -

            Here you can add and manage additional pages for your course

            -

            These pages will be added to the primary navigation menu alongside Courseware, Course Info, Discussion, etc.

            -
            - -
              @@ -43,4 +67,17 @@
            + +
            +

            How Static Pages are Used in Your Course

            +
            + Preview of how Static Pages are used in your course +
            These pages will be presented in your course's main navigation alongside Courseware, Course Info, Discussion, etc.
            +
            + + + + close modal + +
            \ No newline at end of file diff --git a/cms/templates/edit_subsection.html b/cms/templates/edit_subsection.html index d81f577940..eb5a9a9824 100644 --- a/cms/templates/edit_subsection.html +++ b/cms/templates/edit_subsection.html @@ -7,8 +7,9 @@ %> <%! from django.core.urlresolvers import reverse %> -<%block name="bodyclass">subsection <%block name="title">CMS Subsection +<%block name="bodyclass">is-signedin course subsection + <%namespace name="units" file="widgets/units.html" /> <%namespace name='static' file='static_content.html'/> @@ -21,7 +22,7 @@
            - +
            @@ -30,18 +31,6 @@
            - - - - <%block name="jsextra"> @@ -108,7 +98,7 @@ - + \ No newline at end of file diff --git a/cms/templates/index.html b/cms/templates/index.html index 45c4edc176..9482b9d9af 100644 --- a/cms/templates/index.html +++ b/cms/templates/index.html @@ -1,6 +1,7 @@ <%inherit file="base.html" /> -<%block name="bodyclass">index -<%block name="title">Courses + +<%block name="title">My Courses +<%block name="bodyclass">is-signedin index dashboard <%block name="header_extras"> - - + \ No newline at end of file diff --git a/cms/templates/manage_users.html b/cms/templates/manage_users.html index 99ac279bfb..8a6b2fccea 100644 --- a/cms/templates/manage_users.html +++ b/cms/templates/manage_users.html @@ -1,17 +1,31 @@ <%inherit file="base.html" /> -<%block name="title">Course Staff Manager -<%block name="bodyclass">users +<%block name="title">Course Team Settings +<%block name="bodyclass">is-signedin course users settings team + <%block name="content"> +
            +
            +
            + Course Settings +

            Course Team

            +
            + + +
            +
            +
            -
            - %if allow_actions: - - New User - - %endif -

            The following list of users have been designated as course staff. This means that these users will have permissions to modify course content. You may add additional course staff below, if you are the course instructor. Please note that they must have already registered and verified their account.

            diff --git a/cms/templates/new_item.html b/cms/templates/new_item.html index 60da39fd2a..45cb157845 100644 --- a/cms/templates/new_item.html +++ b/cms/templates/new_item.html @@ -8,7 +8,7 @@
            ${module_type}
            % for template in module_templates: - ${template.display_name} + ${template.display_name_with_default} % endfor
            diff --git a/cms/templates/overview.html b/cms/templates/overview.html index 20ddcead01..d45a90093e 100644 --- a/cms/templates/overview.html +++ b/cms/templates/overview.html @@ -6,7 +6,8 @@ from datetime import datetime %> <%! from django.core.urlresolvers import reverse %> -<%block name="title">CMS Courseware Overview +<%block name="title">Course Outline +<%block name="bodyclass">is-signedin course outline <%namespace name='static' file='static_content.html'/> <%namespace name="units" file="widgets/units.html" /> @@ -31,7 +32,7 @@ window.graderTypes.course_location = new CMS.Models.Location('${parent_location}'); window.graderTypes.reset(${course_graders|n}); } - + $(".gradable-status").each(function(index, ele) { var gradeView = new CMS.Views.OverviewAssignmentGrader({ el : ele, @@ -39,7 +40,7 @@ }); }); }); - + @@ -119,13 +120,33 @@
            +
            +
            +
            + Course Content +

            Course Outline

            +
            + + +
            +
            +
            - -
            +
            % for section in sections:
            @@ -133,16 +154,16 @@

            - ${section.display_name} + ${section.display_name_with_default}

            +
            - + - -
            + +
            - +
            - +
            diff --git a/cms/templates/settings.html b/cms/templates/settings.html index c96d5686fd..e4cb4b3743 100644 --- a/cms/templates/settings.html +++ b/cms/templates/settings.html @@ -1,6 +1,6 @@ <%inherit file="base.html" /> -<%block name="bodyclass">settings -<%block name="title">Settings +<%block name="title">Schedule & Details Settings +<%block name="bodyclass">is-signedin course schedule settings <%namespace name='static' file='static_content.html'/> <%! @@ -15,716 +15,223 @@ from contentstore import utils - - - - - + + + + <%block name="content"> - -
            -
            -

            Settings

            -
            - -
            +
            +
            +
            + Settings +

            Schedule & Details

            +
            +
            +
            -
            -

            Course Details

            - -
            +
            +
            +
            +
            +
            -

            Basic Information

            - The nuts and bolts of your course +

            Basic Information

            + The nuts and bolts of your course
            -
            - -
            -
            - - This is used in your course URL, and cannot be changed -
            -
            -
            +
              +
            1. + + +
            2. -
              - -
              -
              - - This is used in your course URL, and cannot be changed -
              -
              -
              +
            3. + + +
            4. -
              - -
              -
              - - This is used in your course URL, and cannot be changed -
              -
              -
              -
            +
          6. + + +
          7. +
          + These are used in your course URL, and cannot be changed +
          -
          +
          -

          Course Schedule

          - Important steps and segments of your course +

          Course Schedule

          + Important steps and segments of your course
          -
          -

          Course Dates:

          - -
          -
          -
          - - +
            +
          1. +
            + + First day the course begins
            -
            - - +
            + +
            -
            -
          +

        3. -
          -
          -
          - - - Last day the course is active +
        4. +
          + + + Last day your course is active
          -
          - - +
          + +
          -
          -
        5. -
          + +
        -
        -

        Enrollment Dates:

        - -
        -
        -
        - - +
          +
        1. +
          + + First day students can enroll
          -
          - - +
          + +
          -
          -
        + -
        -
        -
        - - +
      3. +
        + + Last day students can enroll
        -
        - - +
        + +
        -
        -
      4. -
        - - - + + +
        -
        -
        -

        Introducing Your Course

        - Information for prospective students -
        +
        +
        +

        Introducing Your Course

        + Information for prospective students +
        -
        - -
        -
        - - Introductions, prerequisites, FAQs that are used on your course summary page -
        -
        -
        +
          +
        1. + + + Introductions, prerequisites, FAQs that are used on your course summary page +
        2. -
          - -
          +
        3. +
          - - Video restrictions go here + + Enter your YouTube video's ID (along with any restriction parameters)
          -
        4. -
          -
        + + +

        -
        +
        -

        Requirements

        - Expectations of the students taking this course +

        Requirements

        + Expectations of the students taking this course
        -
        - -
        -
        - +
          +
        1. + + Time spent on all course work -
        -
        -
        -
        -
        - -
        -

        Faculty

        - -
        -
        -

        Faculty Members

        - Individuals instructing and help with this course -
        - -
        -
        -
          -
        • -
          - -
          - -
          -
          - -
          - -
          - -
          -
          - -
          - - -
          - -
          - -
          - - A brief description of your education, experience, and expertise -
          -
          - - Delete Faculty Member
        • - -
        • -
          - -
          - -
          -
          - -
          - -
          - -
          -
          - -
          - -
          -
          - - Upload Faculty Photo - - Max size: 30KB -
          -
          -
          - -
          - -
          -
          - - A brief description of your education, experience, and expertise -
          -
          -
          -
        • -
        - - - New Faculty Member - -
        -
        -
        - -
        - -
        -

        Grading

        - -
        -
        -

        Overall Grade Range

        - Course grade ranges and their values -
        - -
        - -
        - -
        -
        -
          -
        1. 0
        2. -
        3. 10
        4. -
        5. 20
        6. -
        7. 30
        8. -
        9. 40
        10. -
        11. 50
        12. -
        13. 60
        14. -
        15. 70
        16. -
        17. 80
        18. -
        19. 90
        20. -
        21. 100
        -
          -
        -
        -
        -
        - -
        + + -
        -
        -

        General Grading

        - Deadlines and Requirements -
        +
        - -
        -
        -

        Assignment Types

        -
        - - -
        -
        - -
        -

        Problems

        - -
        -
        -

        General Settings

        - Course-wide settings for all problems -
        - -
        -

        Problem Randomization:

        - -
        -
        - - -
        - - randomize all problems -
        -
        - -
        - - -
        - - do not randomize problems -
        -
        - -
        - - -
        - - randomize problems per student -
        -
        -
        -
        - -
        -

        Show Answers:

        - -
        -
        - - -
        - - Answers will be shown after the number of attempts has been met -
        -
        - -
        - - -
        - - Answers will never be shown, regardless of attempts -
        -
        -
        -
        - -
        - - -
        -
        - - Students will this have this number of chances to answer a problem. To set infinite atttempts, use "0" -
        -
        -
        -
        - -
        -
        -

        [Assignment Type Name]

        -
        - -
        -

        Problem Randomization:

        - -
        -
        - - -
        - - randomize all problems -
        -
        - -
        - - -
        - - do not randomize problems -
        +

        Additionally, details provided on this page are also used in edX's catalog of courses, which new and returning students use to choose new courses to study.

        -
        - - -
        - - randomize problems per student -
        -
        -
        -
        - -
        -

        Show Answers:

        - -
        -
        - - -
        - - Answers will be shown after the number of attempts has been met -
        -
        - -
        - - -
        - - Answers will never be shown, regardless of attempts -
        -
        -
        -
        - -
        - - -
        -
        - - Students will this have this number of chances to answer a problem. To set infinite atttempts, use "0" -
        -
        -
        -
        -
        - -
        -

        Discussions

        - -
        -
        -

        General Settings

        - Course-wide settings for online discussion -
        - -
        -

        Anonymous Discussions:

        - -
        -
        - - -
        - - Students and faculty will be able to post anonymously -
        -
        - -
        - - -
        - - Posting anonymously is not allowed. Any previous anonymous posts will be reverted to non-anonymous -
        -
        -
        -
        - -
        -

        Anonymous Discussions:

        - -
        -
        - - -
        - - Students and faculty will be able to post anonymously -
        -
        - -
        - - -
        - - This option is disabled since there are previous discussions that are anonymous. -
        -
        -
        -
        - -
        -

        Discussion Categories

        - -
        - - - - New Discussion Category - -
        -
        -
        -
        -
        - -
        -
        - - +
        + % if context_course: + <% ctx_loc = context_course.location %> + <%! from django.core.urlresolvers import reverse %> +

        Other Course Settings

        + + % endif +
        + + +
        + \ No newline at end of file diff --git a/cms/templates/settings_advanced.html b/cms/templates/settings_advanced.html new file mode 100644 index 0000000000..838af5ada9 --- /dev/null +++ b/cms/templates/settings_advanced.html @@ -0,0 +1,116 @@ +<%inherit file="base.html" /> +<%! from django.core.urlresolvers import reverse %> +<%block name="title">Advanced Settings +<%block name="bodyclass">is-signedin course advanced settings + +<%namespace name='static' file='static_content.html'/> +<%! +from contentstore import utils +%> + +<%block name="jsextra"> + + + + + + + + + + +<%block name="content"> +
        +
        +
        + Settings +

        Advanced Settings

        +
        + +
        +
        + +
        + Your policy changes have been saved. +
        + +
        + There was an error saving your information. Please see below. +
        + +
        +
        +

        Manual Policy Definition

        + Manually Edit Course Policy Values (JSON Key / Value pairs) +
        + +

        Warning: Do not modify these policies unless you are familiar with their purpose.

        + +
          + +
        +
        +
        +
        + + +
        +
        + + +
        +
        +
        + + +

        Note: Your changes will not take effect until you save your + progress. Take care with policy value formatting, as validation is not implemented.

        +
        + +
        + +
        +
        +
        + \ No newline at end of file diff --git a/cms/templates/settings_discussions_faculty.html b/cms/templates/settings_discussions_faculty.html new file mode 100644 index 0000000000..fc30b6eebb --- /dev/null +++ b/cms/templates/settings_discussions_faculty.html @@ -0,0 +1,430 @@ + +<%inherit file="base.html" /> +<%block name="title">Schedule and details +<%block name="bodyclass">is-signedin course settings + + +<%namespace name='static' file='static_content.html'/> +<%! +from contentstore import utils +%> + + +<%block name="jsextra"> + + + + + + + + +<%block name="content"> + +
        +
        +

        Settings

        +
        +
        + +
        +

        Faculty

        + +
        +
        +

        Faculty Members

        + Individuals instructing and help with this course +
        + +
        +
        +
          +
        • +
          + +
          + +
          +
          + +
          + +
          + +
          +
          + +
          + + +
          + +
          + +
          + + A brief description of your education, experience, and expertise +
          +
          + + Delete Faculty Member +
        • + +
        • +
          + +
          + +
          +
          + +
          + +
          + +
          +
          + +
          + +
          +
          + + Upload Faculty Photo + + Max size: 30KB +
          +
          +
          + +
          + +
          +
          + + A brief description of your education, experience, and expertise +
          +
          +
          +
        • +
        + + + New Faculty Member + +
        +
        +
        + +
        + +
        +

        Problems

        + +
        +
        +

        General Settings

        + Course-wide settings for all problems +
        + +
        +

        Problem Randomization:

        + +
        +
        + + +
        + + randomize all problems +
        +
        + +
        + + +
        + + do not randomize problems +
        +
        + +
        + + +
        + + randomize problems per student +
        +
        +
        +
        + +
        +

        Show Answers:

        + +
        +
        + + +
        + + Answers will be shown after the number of attempts has been met +
        +
        + +
        + + +
        + + Answers will never be shown, regardless of attempts +
        +
        +
        +
        + +
        + + +
        +
        + + Students will this have this number of chances to answer a problem. To set infinite atttempts, use "0" +
        +
        +
        +
        + +
        +
        +

        [Assignment Type Name]

        +
        + +
        +

        Problem Randomization:

        + +
        +
        + + +
        + + randomize all problems +
        +
        + +
        + + +
        + + do not randomize problems +
        +
        + +
        + + +
        + + randomize problems per student +
        +
        +
        +
        + +
        +

        Show Answers:

        + +
        +
        + + +
        + + Answers will be shown after the number of attempts has been met +
        +
        + +
        + + +
        + + Answers will never be shown, regardless of attempts +
        +
        +
        +
        + +
        + + +
        +
        + + Students will this have this number of chances to answer a problem. To set infinite atttempts, use "0" +
        +
        +
        +
        +
        + +
        +

        Discussions

        + +
        +
        +

        General Settings

        + Course-wide settings for online discussion +
        + +
        +

        Anonymous Discussions:

        + +
        +
        + + +
        + + Students and faculty will be able to post anonymously +
        +
        + +
        + + +
        + + Posting anonymously is not allowed. Any previous anonymous posts will be reverted to non-anonymous +
        +
        +
        +
        + +
        +

        Anonymous Discussions:

        + +
        +
        + + +
        + + Students and faculty will be able to post anonymously +
        +
        + +
        + + +
        + + This option is disabled since there are previous discussions that are anonymous. +
        +
        +
        +
        + +
        +

        Discussion Categories

        + +
        + + + + New Discussion Category + +
        +
        +
        +
        +
        +
        +
        +
        + + diff --git a/cms/templates/settings_graders.html b/cms/templates/settings_graders.html new file mode 100644 index 0000000000..86be66c950 --- /dev/null +++ b/cms/templates/settings_graders.html @@ -0,0 +1,152 @@ +<%inherit file="base.html" /> +<%block name="title">Grading Settings +<%block name="bodyclass">is-signedin course grading settings + +<%namespace name='static' file='static_content.html'/> +<%! +from contentstore import utils +%> + +<%block name="jsextra"> + + + + + + + + + + + + + +<%block name="content"> +
        +
        +
        + Settings +

        Grading

        +
        +
        +
        + +
        +
        +
        +
        +
        +
        +

        Overall Grade Range

        + Your overall grading scale for student final grades +
        + +
          +
        1. +
          + +
          +
          +
            +
          1. 0
          2. +
          3. 10
          4. +
          5. 20
          6. +
          7. 30
          8. +
          9. 40
          10. +
          11. 50
          12. +
          13. 60
          14. +
          15. 70
          16. +
          17. 80
          18. +
          19. 90
          20. +
          21. 100
          22. +
          +
            +
          +
          +
          +
          +
        2. +
        +
        + +
        + +
        +
        +

        Grading Rules & Policies

        + Deadlines, requirements, and logistics around grading student work +
        + +
          +
        1. + + + Leeway on due dates +
        2. +
        +
        + +
        + +
        +
        +

        Assignment Types

        + Categories and labels for any exercises that are gradable +
        + +
          + +
        + + +
        +
        +
        + + +
        +
        + diff --git a/cms/templates/signup.html b/cms/templates/signup.html index 2c60b758e6..30c5c1cf2b 100644 --- a/cms/templates/signup.html +++ b/cms/templates/signup.html @@ -1,94 +1,141 @@ <%inherit file="base.html" /> <%! from django.core.urlresolvers import reverse %> -<%block name="title">Sign up -<%block name="bodyclass">no-header +<%block name="title">Sign Up +<%block name="bodyclass">not-signedin signup <%block name="content"> -
        +
        +
        +
        +

        Sign Up for edX Studio

        + +
        - +
        +

        I've never authored a course online before. Is there help?

        +

        Absolutely. We have created an online course, edX101, that describes some best practices: from filming video, creating exercises, to the basics of running an online course. Additionally, we're always here to help, just drop us a note.

        +
        + +
        +
        + - + ); + }); + })(this) + \ No newline at end of file diff --git a/cms/templates/unit.html b/cms/templates/unit.html index f3a779604e..e1a020dfca 100644 --- a/cms/templates/unit.html +++ b/cms/templates/unit.html @@ -1,8 +1,9 @@ <%inherit file="base.html" /> <%! from django.core.urlresolvers import reverse %> <%namespace name="units" file="widgets/units.html" /> -<%block name="bodyclass">unit -<%block name="title">CMS Unit +<%block name="title">Individual Unit +<%block name="bodyclass">is-signedin course unit + <%block name="jsextra"> @@ -31,16 +40,16 @@ This unit was originally published on ${published_date}. % endif

        - Preview the published version + View the Live Version
        -

        +

          % for id in components:
        1. % endfor -
        2. +
        3. Add New Component
            @@ -56,38 +65,66 @@
          % for type, templates in sorted(component_templates.items()):
          -

          Select ${type} component type:

          + % if type == "problem": +
          + + % endif +
          +
            + % for name, location, has_markdown, is_empty in templates: + % if has_markdown or type != "problem": + % if is_empty: +
          • + + ${name} + +
          • - +
          + % if type == "problem": +
          + +
        4. + % endif + % endif + % endfor + +
        + + % endif Cancel % endfor @@ -110,13 +147,13 @@

        This unit has been published. To make changes, you must edit a draft.

        This is a draft of the published unit. To update the live version, you must replace it with this draft.

        -
        +
        -

        This unit is scheduled to be released to students +

        This unit is scheduled to be released to students % if release_date is not None: on ${release_date} - % endif - with the subsection "${subsection.display_name}"

        + % endif + with the subsection "${subsection.display_name_with_default}"

        Delete Draft @@ -131,18 +168,18 @@
        1. - ${section.display_name} + ${section.display_name_with_default}
          1. - ${subsection.display_name} + ${subsection.display_name_with_default} ${units.enum_units(subsection, actions=False, selected=unit.location)}
        2. -
        +
        diff --git a/cms/templates/widgets/footer.html b/cms/templates/widgets/footer.html new file mode 100644 index 0000000000..0f265dfc2c --- /dev/null +++ b/cms/templates/widgets/footer.html @@ -0,0 +1,30 @@ +<%! from django.core.urlresolvers import reverse %> + + \ No newline at end of file diff --git a/cms/templates/widgets/header.html b/cms/templates/widgets/header.html index 5f41452339..d601b940f5 100644 --- a/cms/templates/widgets/header.html +++ b/cms/templates/widgets/header.html @@ -1,40 +1,118 @@ <%! from django.core.urlresolvers import reverse %> -<% active_tab_class = 'active-tab-' + active_tab if active_tab else '' %> -
        -
        -
        -
        - % if context_course: - <% ctx_loc = context_course.location %> - › - ${context_course.display_name} › - % endif +
        +
        - -
        + +
        + % if user.is_authenticated(): + + % else: + + % endif +
        + + diff --git a/cms/templates/widgets/metadata-edit.html b/cms/templates/widgets/metadata-edit.html index 590baec3c9..51fe400f88 100644 --- a/cms/templates/widgets/metadata-edit.html +++ b/cms/templates/widgets/metadata-edit.html @@ -1,18 +1,17 @@ -% if metadata: <% import hashlib hlskey = hashlib.md5(module.location.url()).hexdigest() %>
        @@ -22,4 +21,3 @@ % endif
        -% endif diff --git a/cms/templates/widgets/navigation.html b/cms/templates/widgets/navigation.html deleted file mode 100644 index f7e79bceb3..0000000000 --- a/cms/templates/widgets/navigation.html +++ /dev/null @@ -1,101 +0,0 @@ -
        -
        - - - -
        - -
          - % for week in weeks: -
        1. -
          -

          ${week.url_name}

          -
            - % if 'goals' in week.metadata: - % for goal in week.metadata['goals']: -
          • ${goal}
          • - % endfor - % else: -
          • Please create a learning goal for this week
          • - % endif -
          -
          - -
            - % for module in week.get_children(): -
          • - - ${module.display_name} -
          • - % endfor - <%include file="module-dropdown.html"/> -
          -
        2. - %endfor -
        - -
        - + Add New Section - - -
        -
        - diff --git a/cms/templates/widgets/problem-edit.html b/cms/templates/widgets/problem-edit.html index 4ff9d299ab..8ca07a7928 100644 --- a/cms/templates/widgets/problem-edit.html +++ b/cms/templates/widgets/problem-edit.html @@ -1,20 +1,20 @@ <%include file="metadata-edit.html" />
        - %if markdown != '' or data == '\n\n': + %if enable_markdown:
        • -
        • -
        • -
        • -
        • @@ -56,7 +56,7 @@
        -
        Check Multiple
        +
        Checkboxes
        @@ -67,7 +67,7 @@
        -
        String Response
        +
        Text Input
        @@ -76,7 +76,7 @@
        -
        Numerical Response
        +
        Numerical Input
        @@ -85,7 +85,7 @@
        -
        Option Response
        +
        Dropdown
        diff --git a/cms/templates/widgets/sequence-edit.html b/cms/templates/widgets/sequence-edit.html index e9d796784d..c70f2568fa 100644 --- a/cms/templates/widgets/sequence-edit.html +++ b/cms/templates/widgets/sequence-edit.html @@ -40,7 +40,7 @@ ${child.display_name} + data-preview-type="${child.module_class.js_module_name}">${child.display_name_with_default} handle %endfor diff --git a/cms/templates/widgets/source-edit.html b/cms/templates/widgets/source-edit.html index f0922831e1..c7460c9cf7 100644 --- a/cms/templates/widgets/source-edit.html +++ b/cms/templates/widgets/source-edit.html @@ -10,7 +10,7 @@

        High Level Source Editing

        -
        +
        @@ -18,6 +18,9 @@ + + +
        @@ -25,88 +28,148 @@
        - diff --git a/cms/templates/widgets/units.html b/cms/templates/widgets/units.html index 8e23b05bf8..5ac05e79eb 100644 --- a/cms/templates/widgets/units.html +++ b/cms/templates/widgets/units.html @@ -22,7 +22,7 @@ This def will enumerate through a passed in subsection and list all of the units
        - ${unit.display_name} + ${unit.display_name_with_default} % if actions:
        @@ -39,7 +39,7 @@ This def will enumerate through a passed in subsection and list all of the units - + diff --git a/cms/urls.py b/cms/urls.py index ad4dd87d74..e1eae3352a 100644 --- a/cms/urls.py +++ b/cms/urls.py @@ -1,12 +1,14 @@ from django.conf import settings from django.conf.urls import patterns, include, url +from . import one_time_startup # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = ('', - url(r'^$', 'contentstore.views.index', name='index'), + url(r'^$', 'contentstore.views.howitworks', name='homepage'), + url(r'^listing', 'contentstore.views.index', name='index'), url(r'^edit/(?P.*?)$', 'contentstore.views.edit_unit', name='edit_unit'), url(r'^subsection/(?P.*?)$', 'contentstore.views.edit_subsection', name='edit_subsection'), url(r'^preview_component/(?P.*?)$', 'contentstore.views.preview_component', name='preview_component'), @@ -40,31 +42,52 @@ urlpatterns = ('', 'contentstore.views.remove_user', name='remove_user'), url(r'^(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)/remove_user$', 'contentstore.views.remove_user', name='remove_user'), - url(r'^(?P[^/]+)/(?P[^/]+)/info/(?P[^/]+)$', 'contentstore.views.course_info', name='course_info'), - url(r'^(?P[^/]+)/(?P[^/]+)/course_info/updates/(?P.*)$', 'contentstore.views.course_info_updates', name='course_info'), - url(r'^(?P[^/]+)/(?P[^/]+)/settings/(?P[^/]+)$', 'contentstore.views.get_course_settings', name='course_settings'), - url(r'^(?P[^/]+)/(?P[^/]+)/settings/(?P[^/]+)/section/(?P
        [^/]+).*$', 'contentstore.views.course_settings_updates', name='course_settings'), - url(r'^(?P[^/]+)/(?P[^/]+)/grades/(?P[^/]+)/(?P.*)$', 'contentstore.views.course_grader_updates', name='course_settings'), + url(r'^(?P[^/]+)/(?P[^/]+)/info/(?P[^/]+)$', + 'contentstore.views.course_info', name='course_info'), + url(r'^(?P[^/]+)/(?P[^/]+)/course_info/updates/(?P.*)$', + 'contentstore.views.course_info_updates', name='course_info_json'), + url(r'^(?P[^/]+)/(?P[^/]+)/settings-details/(?P[^/]+)$', + 'contentstore.views.get_course_settings', name='settings_details'), + url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)$', + 'contentstore.views.course_config_graders_page', name='settings_grading'), + url(r'^(?P[^/]+)/(?P[^/]+)/settings-details/(?P[^/]+)/section/(?P
        [^/]+).*$', + 'contentstore.views.course_settings_updates', name='course_settings'), + url(r'^(?P[^/]+)/(?P[^/]+)/settings-grading/(?P[^/]+)/(?P.*)$', + 'contentstore.views.course_grader_updates', name='course_settings'), + # This is the URL to initially render the course advanced settings. + url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)$', + 'contentstore.views.course_config_advanced_page', name='course_advanced_settings'), + # This is the URL used by BackBone for updating and re-fetching the model. + url(r'^(?P[^/]+)/(?P[^/]+)/settings-advanced/(?P[^/]+)/update.*$', + 'contentstore.views.course_advanced_updates', name='course_advanced_settings_updates'), - url(r'^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/gradeas.*$', 'contentstore.views.assignment_type_update', name='assignment_type_update'), + url(r'^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/gradeas.*$', + 'contentstore.views.assignment_type_update', name='assignment_type_update'), - url(r'^pages/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.static_pages', + url(r'^pages/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', + 'contentstore.views.static_pages', name='static_pages'), - url(r'^edit_static/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.edit_static', name='edit_static'), - url(r'^edit_tabs/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.edit_tabs', name='edit_tabs'), - url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$', 'contentstore.views.asset_index', name='asset_index'), + url(r'^edit_static/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', + 'contentstore.views.edit_static', name='edit_static'), + url(r'^edit_tabs/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', + 'contentstore.views.edit_tabs', name='edit_tabs'), + url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$', + 'contentstore.views.asset_index', name='asset_index'), # this is a generic method to return the data/metadata associated with a xmodule - url(r'^module_info/(?P.*)$', 'contentstore.views.module_info', name='module_info'), 
+ url(r'^module_info/(?P.*)$', + 'contentstore.views.module_info', name='module_info'), # temporary landing page for a course - url(r'^edge/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', 'contentstore.views.landing', name='landing'), + url(r'^edge/(?P[^/]+)/(?P[^/]+)/course/(?P[^/]+)$', + 'contentstore.views.landing', name='landing'), url(r'^not_found$', 'contentstore.views.not_found', name='not_found'), url(r'^server_error$', 'contentstore.views.server_error', name='server_error'), - url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$', 'contentstore.views.asset_index', name='asset_index'), + url(r'^(?P[^/]+)/(?P[^/]+)/assets/(?P[^/]+)$', + 'contentstore.views.asset_index', name='asset_index'), # temporary landing page for edge url(r'^edge$', 'contentstore.views.edge', name='edge'), @@ -76,13 +99,18 @@ urlpatterns = ('', # User creation and updating views urlpatterns += ( + url(r'^(?P[^/]+)/(?P[^/]+)/checklists/(?P[^/]+)$', 'contentstore.views.get_checklists', name='checklists'), + url(r'^(?P[^/]+)/(?P[^/]+)/checklists/(?P[^/]+)/update(/)?(?P.+)?.*$', + 'contentstore.views.update_checklist', name='checklists_updates'), + url(r'^howitworks$', 'contentstore.views.howitworks', name='howitworks'), url(r'^signup$', 'contentstore.views.signup', name='signup'), url(r'^create_account$', 'student.views.create_account'), url(r'^activate/(?P[^/]*)$', 'student.views.activate_account', name='activate'), # form page - url(r'^login$', 'contentstore.views.login_page', name='login'), + url(r'^login$', 'contentstore.views.old_login_redirect', name='old_login'), + url(r'^signin$', 'contentstore.views.login_page', name='login'), # ajax view that actually does the work url(r'^login_post$', 'student.views.login_user', name='login_post'), @@ -91,7 +119,13 @@ urlpatterns += ( ) if settings.ENABLE_JASMINE: - ## Jasmine + # # Jasmine urlpatterns = urlpatterns + (url(r'^_jasmine/', include('django_jasmine.urls')),) urlpatterns = patterns(*urlpatterns) + +# Custom error pages +handler404 = 
'contentstore.views.render_404' +handler500 = 'contentstore.views.render_500' + + diff --git a/cms/xmodule_namespace.py b/cms/xmodule_namespace.py new file mode 100644 index 0000000000..c9bb8f4c6e --- /dev/null +++ b/cms/xmodule_namespace.py @@ -0,0 +1,45 @@ +""" +Namespace defining common fields used by Studio for all blocks +""" + +import datetime + +from xblock.core import Namespace, Boolean, Scope, ModelType, String + + +class StringyBoolean(Boolean): + """ + Reads strings from JSON as booleans. + + If the string is 'true' (case insensitive), then return True, + otherwise False. + + JSON values that aren't strings are returned as is + """ + def from_json(self, value): + if isinstance(value, basestring): + return value.lower() == 'true' + return value + + +class DateTuple(ModelType): + """ + ModelType that stores datetime objects as time tuples + """ + def from_json(self, value): + return datetime.datetime(*value[0:6]) + + def to_json(self, value): + if value is None: + return None + + return list(value.timetuple()) + + +class CmsNamespace(Namespace): + """ + Namespace with fields common to all blocks in Studio + """ + published_date = DateTuple(help="Date when the module was published", scope=Scope.settings) + published_by = String(help="Id of the user who published this module", scope=Scope.settings) + empty = StringyBoolean(help="Whether this is an empty template", scope=Scope.settings, default=False) diff --git a/common/djangoapps/contentserver/middleware.py b/common/djangoapps/contentserver/middleware.py index c5e887801e..8e9e70046d 100644 --- a/common/djangoapps/contentserver/middleware.py +++ b/common/djangoapps/contentserver/middleware.py @@ -5,6 +5,7 @@ from django.http import HttpResponse, Http404, HttpResponseNotModified from xmodule.contentstore.django import contentstore from xmodule.contentstore.content import StaticContent, XASSET_LOCATION_TAG +from xmodule.modulestore import InvalidLocationError from cache_toolbox.core import get_cached_content, 
set_cached_content from xmodule.exceptions import NotFoundError @@ -13,7 +14,14 @@ class StaticContentServer(object): def process_request(self, request): # look to see if the request is prefixed with 'c4x' tag if request.path.startswith('/' + XASSET_LOCATION_TAG + '/'): - loc = StaticContent.get_location_from_path(request.path) + try: + loc = StaticContent.get_location_from_path(request.path) + except InvalidLocationError: + # return a 'Bad Request' to browser as we have a malformed Location + response = HttpResponse() + response.status_code = 400 + return response + # first look in our cache so we don't have to round-trip to the DB content = get_cached_content(loc) if content is None: diff --git a/common/djangoapps/course_groups/cohorts.py b/common/djangoapps/course_groups/cohorts.py index 155f82e0c7..7924012bfe 100644 --- a/common/djangoapps/course_groups/cohorts.py +++ b/common/djangoapps/course_groups/cohorts.py @@ -6,6 +6,7 @@ forums, and to the cohort admin views. from django.contrib.auth.models import User from django.http import Http404 import logging +import random from courseware import courses from student.models import get_user_by_username_or_email @@ -14,6 +15,24 @@ from .models import CourseUserGroup log = logging.getLogger(__name__) +# tl;dr: global state is bad. capa reseeds random every time a problem is loaded. Even +# if and when that's fixed, it's a good idea to have a local generator to avoid any other +# code that messes with the global random module. +_local_random = None + +def local_random(): + """ + Get the local random number generator. In a function so that we don't run + random.Random() at import time. + """ + # ironic, isn't it? 
+ global _local_random + + if _local_random is None: + _local_random = random.Random() + + return _local_random + def is_course_cohorted(course_id): """ Given a course id, return a boolean for whether or not the course is @@ -65,6 +84,22 @@ def is_commentable_cohorted(course_id, commentable_id): return ans +def get_cohorted_commentables(course_id): + """ + Given a course_id return a list of strings representing cohorted commentables + """ + + course = courses.get_course_by_id(course_id) + + if not course.is_cohorted: + # this is the easy case :) + ans = [] + else: + ans = course.cohorted_discussions + + return ans + + def get_cohort(user, course_id): """ Given a django User and a course_id, return the user's cohort in that @@ -96,9 +131,32 @@ def get_cohort(user, course_id): group_type=CourseUserGroup.COHORT, users__id=user.id) except CourseUserGroup.DoesNotExist: - # TODO: add auto-cohorting logic here once we know what that will be. + # Didn't find the group. We'll go on to create one if needed. 
+ pass + + if not course.auto_cohort: return None + choices = course.auto_cohort_groups + n = len(choices) + if n == 0: + # Nowhere to put user + log.warning("Course %s is auto-cohorted, but there are no" + " auto_cohort_groups specified", + course_id) + return None + + # Put user in a random group, creating it if needed + group_name = local_random().choice(choices) + + group, created = CourseUserGroup.objects.get_or_create( + course_id=course_id, + group_type=CourseUserGroup.COHORT, + name=group_name) + + user.course_groups.add(group) + return group + def get_course_cohorts(course_id): """ diff --git a/common/djangoapps/course_groups/tests/tests.py b/common/djangoapps/course_groups/tests/tests.py index b3ad928b39..94d52ff6df 100644 --- a/common/djangoapps/course_groups/tests/tests.py +++ b/common/djangoapps/course_groups/tests/tests.py @@ -6,7 +6,7 @@ from django.test.utils import override_settings from course_groups.models import CourseUserGroup from course_groups.cohorts import (get_cohort, get_course_cohorts, - is_commentable_cohorted) + is_commentable_cohorted, get_cohort_by_name) from xmodule.modulestore.django import modulestore, _MODULESTORES @@ -47,7 +47,10 @@ class TestCohorts(django.test.TestCase): @staticmethod def config_course_cohorts(course, discussions, - cohorted, cohorted_discussions=None): + cohorted, + cohorted_discussions=None, + auto_cohort=None, + auto_cohort_groups=None): """ Given a course with no discussion set up, add the discussions and set the cohort config appropriately. @@ -59,6 +62,9 @@ class TestCohorts(django.test.TestCase): cohorted: bool. cohorted_discussions: optional list of topic names. If specified, converts them to use the same ids as topic names. + auto_cohort: optional bool. + auto_cohort_groups: optional list of strings + (names of groups to put students into). Returns: Nothing -- modifies course in place. 
@@ -70,13 +76,19 @@ class TestCohorts(django.test.TestCase): "id": to_id(name)}) for name in discussions) - course.metadata["discussion_topics"] = topics + course.discussion_topics = topics d = {"cohorted": cohorted} if cohorted_discussions is not None: d["cohorted_discussions"] = [to_id(name) for name in cohorted_discussions] - course.metadata["cohort_config"] = d + + if auto_cohort is not None: + d["auto_cohort"] = auto_cohort + if auto_cohort_groups is not None: + d["auto_cohort_groups"] = auto_cohort_groups + + course.cohort_config = d def setUp(self): @@ -89,12 +101,9 @@ class TestCohorts(django.test.TestCase): def test_get_cohort(self): - # Need to fix this, but after we're testing on staging. (Looks like - # problem is that when get_cohort internally tries to look up the - # course.id, it fails, even though we loaded it through the modulestore. - - # Proper fix: give all tests a standard modulestore that uses the test - # dir. + """ + Make sure get_cohort() does the right thing when the course is cohorted + """ course = modulestore().get_course("edX/toy/2012_Fall") self.assertEqual(course.id, "edX/toy/2012_Fall") self.assertFalse(course.is_cohorted) @@ -122,6 +131,85 @@ class TestCohorts(django.test.TestCase): self.assertEquals(get_cohort(other_user, course.id), None, "other_user shouldn't have a cohort") + def test_auto_cohorting(self): + """ + Make sure get_cohort() does the right thing when the course is auto_cohorted + """ + course = modulestore().get_course("edX/toy/2012_Fall") + self.assertEqual(course.id, "edX/toy/2012_Fall") + self.assertFalse(course.is_cohorted) + + user1 = User.objects.create(username="test", email="a@b.com") + user2 = User.objects.create(username="test2", email="a2@b.com") + user3 = User.objects.create(username="test3", email="a3@b.com") + + cohort = CourseUserGroup.objects.create(name="TestCohort", + course_id=course.id, + group_type=CourseUserGroup.COHORT) + + # user1 manually added to a cohort + cohort.users.add(user1) + + # 
Make the course auto cohorted... + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=["AutoGroup"]) + + self.assertEquals(get_cohort(user1, course.id).id, cohort.id, + "user1 should stay put") + + self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup", + "user2 should be auto-cohorted") + + # Now make the group list empty + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=[]) + + self.assertEquals(get_cohort(user3, course.id), None, + "No groups->no auto-cohorting") + + # Now make it different + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=["OtherGroup"]) + + self.assertEquals(get_cohort(user3, course.id).name, "OtherGroup", + "New list->new group") + self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup", + "user2 should still be in originally placed cohort") + + + def test_auto_cohorting_randomization(self): + """ + Make sure get_cohort() randomizes properly. + """ + course = modulestore().get_course("edX/toy/2012_Fall") + self.assertEqual(course.id, "edX/toy/2012_Fall") + self.assertFalse(course.is_cohorted) + + groups = ["group_{0}".format(n) for n in range(5)] + self.config_course_cohorts(course, [], cohorted=True, + auto_cohort=True, + auto_cohort_groups=groups) + + # Assign 100 users to cohorts + for i in range(100): + user = User.objects.create(username="test_{0}".format(i), + email="a@b{0}.com".format(i)) + get_cohort(user, course.id) + + # Now make sure that the assignment was at least vaguely random: + # each cohort should have at least 1, and fewer than 50 students. 
+ # (with 5 groups, probability of 0 users in any group is about + # .8**100= 2.0e-10) + for cohort_name in groups: + cohort = get_cohort_by_name(course.id, cohort_name) + num_users = cohort.users.count() + self.assertGreater(num_users, 1) + self.assertLess(num_users, 50) + + def test_get_course_cohorts(self): course1_id = 'a/b/c' diff --git a/common/djangoapps/heartbeat/views.py b/common/djangoapps/heartbeat/views.py index 956504407b..d7c3a32192 100644 --- a/common/djangoapps/heartbeat/views.py +++ b/common/djangoapps/heartbeat/views.py @@ -2,8 +2,9 @@ import json from datetime import datetime from django.http import HttpResponse from xmodule.modulestore.django import modulestore +from dogapi import dog_stats_api - +@dog_stats_api.timed('edxapp.heartbeat') def heartbeat(request): """ Simple view that a loadbalancer can check to verify that the app is up diff --git a/common/djangoapps/mitxmako/makoloader.py b/common/djangoapps/mitxmako/makoloader.py index 29184299b6..d623e8bcff 100644 --- a/common/djangoapps/mitxmako/makoloader.py +++ b/common/djangoapps/mitxmako/makoloader.py @@ -9,6 +9,7 @@ from django.template.loaders.app_directories import Loader as AppDirectoriesLoad from mitxmako.template import Template import mitxmako.middleware +import tempdir log = logging.getLogger(__name__) @@ -30,7 +31,7 @@ class MakoLoader(object): if module_directory is None: log.warning("For more caching of mako templates, set the MAKO_MODULE_DIR in settings!") - module_directory = tempfile.mkdtemp() + module_directory = tempdir.mkdtemp_clean() self.module_directory = module_directory diff --git a/common/djangoapps/mitxmako/middleware.py b/common/djangoapps/mitxmako/middleware.py index 64cb2e5415..3f66f8cc48 100644 --- a/common/djangoapps/mitxmako/middleware.py +++ b/common/djangoapps/mitxmako/middleware.py @@ -13,7 +13,7 @@ # limitations under the License. 
from mako.lookup import TemplateLookup -import tempfile +import tempdir from django.template import RequestContext from django.conf import settings @@ -29,7 +29,7 @@ class MakoMiddleware(object): module_directory = getattr(settings, 'MAKO_MODULE_DIR', None) if module_directory is None: - module_directory = tempfile.mkdtemp() + module_directory = tempdir.mkdtemp_clean() for location in template_locations: lookup[location] = TemplateLookup(directories=template_locations[location], diff --git a/cms/djangoapps/__init__.py b/common/djangoapps/request_cache/__init__.py similarity index 100% rename from cms/djangoapps/__init__.py rename to common/djangoapps/request_cache/__init__.py diff --git a/common/djangoapps/request_cache/middleware.py b/common/djangoapps/request_cache/middleware.py new file mode 100644 index 0000000000..9d3dffdf27 --- /dev/null +++ b/common/djangoapps/request_cache/middleware.py @@ -0,0 +1,20 @@ +import threading + +_request_cache_threadlocal = threading.local() +_request_cache_threadlocal.data = {} + +class RequestCache(object): + @classmethod + def get_request_cache(cls): + return _request_cache_threadlocal + + def clear_request_cache(self): + _request_cache_threadlocal.data = {} + + def process_request(self, request): + self.clear_request_cache() + return None + + def process_response(self, request, response): + self.clear_request_cache() + return response \ No newline at end of file diff --git a/common/djangoapps/static_replace/__init__.py b/common/djangoapps/static_replace/__init__.py index fb1f48d143..b73a658c5f 100644 --- a/common/djangoapps/static_replace/__init__.py +++ b/common/djangoapps/static_replace/__init__.py @@ -84,12 +84,19 @@ def replace_static_urls(text, data_directory, course_namespace=None): if rest.endswith('?raw'): return original - # course_namespace is not None, then use studio style urls - if course_namespace is not None and not isinstance(modulestore(), XMLModuleStore): - url = 
StaticContent.convert_legacy_static_url(rest, course_namespace) # In debug mode, if we can find the url as is, - elif settings.DEBUG and finders.find(rest, True): + if settings.DEBUG and finders.find(rest, True): return original + # if we're running with a MongoBacked store course_namespace is not None, then use studio style urls + elif course_namespace is not None and not isinstance(modulestore(), XMLModuleStore): + # first look in the static file pipeline and see if we are trying to reference + # a piece of static content which is in the mitx repo (e.g. JS associated with an xmodule) + if staticfiles_storage.exists(rest): + url = staticfiles_storage.url(rest) + else: + # if not, then assume it's courseware specific content and then look in the + # Mongo-backed database + url = StaticContent.convert_legacy_static_url(rest, course_namespace) # Otherwise, look the file up in staticfiles_storage, and append the data directory if needed else: course_path = "/".join((data_directory, rest)) diff --git a/common/djangoapps/status/tests.py b/common/djangoapps/status/tests.py index 1695663ac5..bf60017036 100644 --- a/common/djangoapps/status/tests.py +++ b/common/djangoapps/status/tests.py @@ -4,7 +4,7 @@ import os from django.test.utils import override_settings from tempfile import NamedTemporaryFile -from status import get_site_status_msg +from .status import get_site_status_msg # Get a name where we can put test files TMP_FILE = NamedTemporaryFile(delete=False) diff --git a/common/djangoapps/student/management/commands/pearson_transfer.py b/common/djangoapps/student/management/commands/pearson_transfer.py index 5eded6484a..75716c7443 100644 --- a/common/djangoapps/student/management/commands/pearson_transfer.py +++ b/common/djangoapps/student/management/commands/pearson_transfer.py @@ -10,6 +10,7 @@ import paramiko import boto dog_http_api.api_key = settings.DATADOG_API +dog_stats_api.start(api_key=settings.DATADOG_API, statsd=True) class Command(BaseCommand): diff --git 
a/common/djangoapps/student/management/commands/tests/test_pearson.py b/common/djangoapps/student/management/commands/tests/test_pearson.py index 12969405de..65d628fba0 100644 --- a/common/djangoapps/student/management/commands/tests/test_pearson.py +++ b/common/djangoapps/student/management/commands/tests/test_pearson.py @@ -7,6 +7,7 @@ import logging import os from tempfile import mkdtemp import cStringIO +import shutil import sys from django.test import TestCase @@ -143,23 +144,18 @@ class PearsonTestCase(TestCase): ''' Base class for tests running Pearson-related commands ''' - import_dir = mkdtemp(prefix="import") - export_dir = mkdtemp(prefix="export") def assertErrorContains(self, error_message, expected): self.assertTrue(error_message.find(expected) >= 0, 'error message "{}" did not contain "{}"'.format(error_message, expected)) + def setUp(self): + self.import_dir = mkdtemp(prefix="import") + self.addCleanup(shutil.rmtree, self.import_dir) + self.export_dir = mkdtemp(prefix="export") + self.addCleanup(shutil.rmtree, self.export_dir) + def tearDown(self): - def delete_temp_dir(dirname): - if os.path.exists(dirname): - for filename in os.listdir(dirname): - os.remove(os.path.join(dirname, filename)) - os.rmdir(dirname) - - # clean up after any test data was dumped to temp directory - delete_temp_dir(self.import_dir) - delete_temp_dir(self.export_dir) - + pass # and clean up the database: # TestCenterUser.objects.all().delete() # TestCenterRegistration.objects.all().delete() diff --git a/common/djangoapps/student/models.py b/common/djangoapps/student/models.py index 54bdd77297..56b1293c2d 100644 --- a/common/djangoapps/student/models.py +++ b/common/djangoapps/student/models.py @@ -75,10 +75,15 @@ class UserProfile(models.Model): GENDER_CHOICES = (('m', 'Male'), ('f', 'Female'), ('o', 'Other')) gender = models.CharField(blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES) - LEVEL_OF_EDUCATION_CHOICES = (('p_se', 'Doctorate in science or 
engineering'), - ('p_oth', 'Doctorate in another field'), + + # [03/21/2013] removed these, but leaving comment since there'll still be + # p_se and p_oth in the existing data in db. + # ('p_se', 'Doctorate in science or engineering'), + # ('p_oth', 'Doctorate in another field'), + LEVEL_OF_EDUCATION_CHOICES = (('p', 'Doctorate'), ('m', "Master's or professional degree"), ('b', "Bachelor's degree"), + ('a', "Associate's degree"), ('hs', "Secondary/high school"), ('jhs', "Junior secondary/junior high/middle school"), ('el', "Elementary/primary school"), diff --git a/common/djangoapps/__init__.py b/common/djangoapps/student/tests/__init__.py similarity index 100% rename from common/djangoapps/__init__.py rename to common/djangoapps/student/tests/__init__.py diff --git a/cms/djangoapps/contentstore/tests/factories.py b/common/djangoapps/student/tests/factories.py similarity index 57% rename from cms/djangoapps/contentstore/tests/factories.py rename to common/djangoapps/student/tests/factories.py index d15610f11c..f74188725a 100644 --- a/cms/djangoapps/contentstore/tests/factories.py +++ b/common/djangoapps/student/tests/factories.py @@ -1,17 +1,26 @@ -from factory import Factory -from datetime import datetime -from uuid import uuid4 from student.models import (User, UserProfile, Registration, - CourseEnrollmentAllowed) + CourseEnrollmentAllowed, CourseEnrollment) from django.contrib.auth.models import Group +from datetime import datetime +from factory import Factory, SubFactory +from uuid import uuid4 + + +class GroupFactory(Factory): + FACTORY_FOR = Group + + name = 'staff_MITx/999/Robot_Super_Course' class UserProfileFactory(Factory): FACTORY_FOR = UserProfile user = None - name = 'Robot Studio' - courseware = 'course.xml' + name = 'Robot Test' + level_of_education = None + gender = 'm' + mailing_address = None + goals = 'World domination' class RegistrationFactory(Factory): @@ -25,21 +34,22 @@ class UserFactory(Factory): FACTORY_FOR = User username = 'robot' - 
email = 'robot@edx.org' + email = 'robot+test@edx.org' password = 'test' first_name = 'Robot' - last_name = 'Tester' + last_name = 'Test' is_staff = False is_active = True is_superuser = False - last_login = datetime.now() - date_joined = datetime.now() + last_login = datetime(2012, 1, 1) + date_joined = datetime(2011, 1, 1) -class GroupFactory(Factory): - FACTORY_FOR = Group +class CourseEnrollmentFactory(Factory): + FACTORY_FOR = CourseEnrollment - name = 'test_group' + user = SubFactory(UserFactory) + course_id = 'edX/toy/2012_Fall' class CourseEnrollmentAllowedFactory(Factory): diff --git a/common/djangoapps/student/tests.py b/common/djangoapps/student/tests/tests.py similarity index 97% rename from common/djangoapps/student/tests.py rename to common/djangoapps/student/tests/tests.py index 6a2d75e3d8..4638da44b2 100644 --- a/common/djangoapps/student/tests.py +++ b/common/djangoapps/student/tests/tests.py @@ -9,8 +9,8 @@ import logging from django.test import TestCase from mock import Mock -from .models import unique_id_for_user -from .views import process_survey_link, _cert_info +from student.models import unique_id_for_user +from student.views import process_survey_link, _cert_info COURSE_1 = 'edX/toy/2012_Fall' COURSE_2 = 'edx/full/6.002_Spring_2012' diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py index 4413ebfc0f..8267816e2c 100644 --- a/common/djangoapps/student/views.py +++ b/common/djangoapps/student/views.py @@ -44,9 +44,8 @@ from collections import namedtuple from courseware.courses import get_courses, sort_by_announcement from courseware.access import has_access -from courseware.models import StudentModuleCache from courseware.views import get_module_for_descriptor, jump_to -from courseware.module_render import get_instance_module +from courseware.model_data import ModelDataCache from statsd import statsd @@ -115,7 +114,7 @@ def get_date_for_press(publish_date): def press(request): json_articles = 
cache.get("student_press_json_articles") - if json_articles == None: + if json_articles is None: if hasattr(settings, 'RSS_URL'): content = urllib.urlopen(settings.PRESS_URL).read() json_articles = json.loads(content) @@ -301,7 +300,7 @@ def change_enrollment(request): action = request.POST.get("enrollment_action", "") course_id = request.POST.get("course_id", None) - if course_id == None: + if course_id is None: return HttpResponse(json.dumps({'success': False, 'error': 'There was an error receiving the course id.'})) @@ -312,13 +311,13 @@ def change_enrollment(request): course = course_from_id(course_id) except ItemNotFoundError: log.warning("User {0} tried to enroll in non-existent course {1}" - .format(user.username, enrollment.course_id)) + .format(user.username, course_id)) return {'success': False, 'error': 'The course requested does not exist.'} if not has_access(user, course, 'enroll'): return {'success': False, 'error': 'enrollment in {} not allowed at this time' - .format(course.display_name)} + .format(course.display_name_with_default)} org, course_num, run = course_id.split("/") statsd.increment("common.student.enrollment", @@ -326,7 +325,12 @@ def change_enrollment(request): "course:{0}".format(course_num), "run:{0}".format(run)]) - enrollment, created = CourseEnrollment.objects.get_or_create(user=user, course_id=course.id) + try: + enrollment, created = CourseEnrollment.objects.get_or_create(user=user, course_id=course.id) + except IntegrityError: + # If we've already created this enrollment in a separate transaction, + # then just continue + pass return {'success': True} elif action == "unenroll": @@ -370,14 +374,14 @@ def login_user(request, error=""): try: user = User.objects.get(email=email) except User.DoesNotExist: - log.warning("Login failed - Unknown user email: {0}".format(email)) + log.warning(u"Login failed - Unknown user email: {0}".format(email)) return HttpResponse(json.dumps({'success': False, 'value': 'Email or password is 
incorrect.'})) # TODO: User error message username = user.username user = authenticate(username=username, password=password) if user is None: - log.warning("Login failed - password for {0} is invalid".format(email)) + log.warning(u"Login failed - password for {0} is invalid".format(email)) return HttpResponse(json.dumps({'success': False, 'value': 'Email or password is incorrect.'})) @@ -385,7 +389,7 @@ def login_user(request, error=""): try: login(request, user) if request.POST.get('remember') == 'true': - request.session.set_expiry(None) # or change to 604800 for 7 days + request.session.set_expiry(604800) log.debug("Setting user session to never expire") else: request.session.set_expiry(0) @@ -393,7 +397,7 @@ def login_user(request, error=""): log.critical("Login failed - Could not create session. Is memcached running?") log.exception(e) - log.info("Login success - {0} ({1})".format(username, email)) + log.info(u"Login success - {0} ({1})".format(username, email)) try_change_enrollment(request) @@ -401,7 +405,7 @@ def login_user(request, error=""): return HttpResponse(json.dumps({'success': True})) - log.warning("Login failed - Account not active for user {0}, resending activation".format(username)) + log.warning(u"Login failed - Account not active for user {0}, resending activation".format(username)) reactivation_email_for_user(user) not_activated_msg = "This account has not been activated. 
We have " + \ @@ -554,7 +558,7 @@ def create_account(request, post_override=None): try: validate_slug(post_vars['username']) except ValidationError: - js['value'] = "Username should only consist of A-Z and 0-9.".format(field=a) + js['value'] = "Username should only consist of A-Z and 0-9, with no spaces.".format(field=a) js['field'] = 'username' return HttpResponse(json.dumps(js)) @@ -1071,14 +1075,14 @@ def accept_name_change(request): @csrf_exempt def test_center_login(request): - # errors are returned by navigating to the error_url, adding a query parameter named "code" + # errors are returned by navigating to the error_url, adding a query parameter named "code" # which contains the error code describing the exceptional condition. def makeErrorURL(error_url, error_code): log.error("generating error URL with error code {}".format(error_code)) return "{}?code={}".format(error_url, error_code); - + # get provided error URL, which will be used as a known prefix for returning error messages to the - # Pearson shell. + # Pearson shell. error_url = request.POST.get("errorURL") # TODO: check that the parameters have not been tampered with, by comparing the code provided by Pearson @@ -1089,12 +1093,12 @@ def test_center_login(request): # calculate SHA for query string # TODO: figure out how to get the original query string, so we can hash it and compare. - - + + if 'clientCandidateID' not in request.POST: return HttpResponseRedirect(makeErrorURL(error_url, "missingClientCandidateID")); client_candidate_id = request.POST.get("clientCandidateID") - + # TODO: check remaining parameters, and maybe at least log if they're not matching # expected values.... 
# registration_id = request.POST.get("registrationID") @@ -1108,12 +1112,12 @@ def test_center_login(request): return HttpResponseRedirect(makeErrorURL(error_url, "invalidClientCandidateID")); # find testcenter_registration that matches the provided exam code: - # Note that we could rely in future on either the registrationId or the exam code, - # or possibly both. But for now we know what to do with an ExamSeriesCode, + # Note that we could rely in future on either the registrationId or the exam code, + # or possibly both. But for now we know what to do with an ExamSeriesCode, # while we currently have no record of RegistrationID values at all. if 'vueExamSeriesCode' not in request.POST: - # we are not allowed to make up a new error code, according to Pearson, - # so instead of "missingExamSeriesCode", we use a valid one that is + # we are not allowed to make up a new error code, according to Pearson, + # so instead of "missingExamSeriesCode", we use a valid one that is # inaccurate but at least distinct. (Sigh.) log.error("missing exam series code for cand ID {}".format(client_candidate_id)) return HttpResponseRedirect(makeErrorURL(error_url, "missingPartnerID")); @@ -1127,11 +1131,11 @@ def test_center_login(request): if not registrations: log.error("not able to find exam registration for exam {} and cand ID {}".format(exam_series_code, client_candidate_id)) return HttpResponseRedirect(makeErrorURL(error_url, "noTestsAssigned")); - + # TODO: figure out what to do if there are more than one registrations.... # for now, just take the first... registration = registrations[0] - + course_id = registration.course_id course = course_from_id(course_id) # assume it will be found.... 
if not course: @@ -1149,19 +1153,19 @@ def test_center_login(request): if not timelimit_descriptor: log.error("cand {} on exam {} for course {}: descriptor not found for location {}".format(client_candidate_id, exam_series_code, course_id, location)) return HttpResponseRedirect(makeErrorURL(error_url, "missingClientProgram")); - - timelimit_module_cache = StudentModuleCache.cache_for_descriptor_descendents(course_id, testcenteruser.user, - timelimit_descriptor, depth=None) - timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor, + + timelimit_module_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, testcenteruser.user, + timelimit_descriptor, depth=None) + timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor, timelimit_module_cache, course_id, position=None) if not timelimit_module.category == 'timelimit': log.error("cand {} on exam {} for course {}: non-timelimit module at location {}".format(client_candidate_id, exam_series_code, course_id, location)) return HttpResponseRedirect(makeErrorURL(error_url, "missingClientProgram")); - + if timelimit_module and timelimit_module.has_ended: log.warning("cand {} on exam {} for course {}: test already over at {}".format(client_candidate_id, exam_series_code, course_id, timelimit_module.ending_at)) return HttpResponseRedirect(makeErrorURL(error_url, "allTestsTaken")); - + # check if we need to provide an accommodation: time_accommodation_mapping = {'ET12ET' : 'ADDHALFTIME', 'ET30MN' : 'ADD30MIN', @@ -1174,27 +1178,24 @@ def test_center_login(request): # special, hard-coded client ID used by Pearson shell for testing: if client_candidate_id == "edX003671291147": time_accommodation_code = 'TESTING' - + if time_accommodation_code: timelimit_module.accommodation_code = time_accommodation_code - instance_module = get_instance_module(course_id, testcenteruser.user, timelimit_module, timelimit_module_cache) - instance_module.state = 
timelimit_module.get_instance_state() - instance_module.save() log.info("cand {} on exam {} for course {}: receiving accommodation {}".format(client_candidate_id, exam_series_code, course_id, time_accommodation_code)) - + # UGLY HACK!!! - # Login assumes that authentication has occurred, and that there is a + # Login assumes that authentication has occurred, and that there is a # backend annotation on the user object, indicating which backend # against which the user was authenticated. We're authenticating here # against the registration entry, and assuming that the request given # this information is correct, we allow the user to be logged in # without a password. This could all be formalized in a backend object - # that does the above checking. + # that does the above checking. # TODO: (brian) create a backend class to do this. - # testcenteruser.user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__) - testcenteruser.user.backend = "%s.%s" % ("TestcenterAuthenticationModule", "TestcenterAuthenticationClass") + # testcenteruser.user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__) + testcenteruser.user.backend = "%s.%s" % ("TestcenterAuthenticationModule", "TestcenterAuthenticationClass") login(request, testcenteruser.user) - + # And start the test: return jump_to(request, course_id, location) @@ -1203,7 +1204,7 @@ def _get_news(top=None): "Return the n top news items on settings.RSS_URL" feed_data = cache.get("students_index_rss_feed_data") - if feed_data == None: + if feed_data is None: if hasattr(settings, 'RSS_URL'): feed_data = urllib.urlopen(settings.RSS_URL).read() else: diff --git a/lms/djangoapps/terrain/__init__.py b/common/djangoapps/terrain/__init__.py similarity index 100% rename from lms/djangoapps/terrain/__init__.py rename to common/djangoapps/terrain/__init__.py diff --git a/lms/djangoapps/terrain/browser.py b/common/djangoapps/terrain/browser.py similarity index 50% rename from 
lms/djangoapps/terrain/browser.py rename to common/djangoapps/terrain/browser.py index e1925bde0b..c8cc0c9e4b 100644 --- a/lms/djangoapps/terrain/browser.py +++ b/common/djangoapps/terrain/browser.py @@ -1,7 +1,11 @@ from lettuce import before, after, world from splinter.browser import Browser from logging import getLogger -import time + +# Let the LMS and CMS do their one-time setup +# For example, setting up mongo caches +from lms import one_time_startup +from cms import one_time_startup logger = getLogger(__name__) logger.info("Loading the lettuce acceptance testing terrain file...") @@ -11,20 +15,29 @@ from django.core.management import call_command @before.harvest def initial_setup(server): - # Launch firefox + ''' + Launch the browser once before executing the tests + ''' + # Launch the browser app (choose one of these below) world.browser = Browser('chrome') + # world.browser = Browser('phantomjs') + # world.browser = Browser('firefox') @before.each_scenario def reset_data(scenario): - # Clean out the django test database defined in the - # envs/acceptance.py file: mitx_all/db/test_mitx.db + ''' + Clean out the django test database defined in the + envs/acceptance.py file: mitx_all/db/test_mitx.db + ''' logger.debug("Flushing the test database...") call_command('flush', interactive=False) @after.all def teardown_browser(total): - # Quit firefox + ''' + Quit the browser after executing the tests + ''' world.browser.quit() pass diff --git a/common/djangoapps/terrain/course_helpers.py b/common/djangoapps/terrain/course_helpers.py new file mode 100644 index 0000000000..f0df456c80 --- /dev/null +++ b/common/djangoapps/terrain/course_helpers.py @@ -0,0 +1,140 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + +from lettuce import world, step +from .factories import * +from django.conf import settings +from django.http import HttpRequest +from django.contrib.auth.models import User +from django.contrib.auth import authenticate, login +from 
django.contrib.auth.middleware import AuthenticationMiddleware +from django.contrib.sessions.middleware import SessionMiddleware +from student.models import CourseEnrollment +from xmodule.modulestore.django import _MODULESTORES, modulestore +from xmodule.templates import update_templates +from bs4 import BeautifulSoup +import os.path +from urllib import quote_plus +from lettuce.django import django_url + + +@world.absorb +def create_user(uname): + + # If the user already exists, don't try to create it again + if len(User.objects.filter(username=uname)) > 0: + return + + portal_user = UserFactory.build(username=uname, email=uname + '@edx.org') + portal_user.set_password('test') + portal_user.save() + + registration = world.RegistrationFactory(user=portal_user) + registration.register(portal_user) + registration.activate() + + user_profile = world.UserProfileFactory(user=portal_user) + + +@world.absorb +def log_in(username, password): + ''' + Log the user in programatically + ''' + + # Authenticate the user + user = authenticate(username=username, password=password) + assert(user is not None and user.is_active) + + # Send a fake HttpRequest to log the user in + # We need to process the request using + # Session middleware and Authentication middleware + # to ensure that session state can be stored + request = HttpRequest() + SessionMiddleware().process_request(request) + AuthenticationMiddleware().process_request(request) + login(request, user) + + # Save the session + request.session.save() + + # Retrieve the sessionid and add it to the browser's cookies + cookie_dict = {settings.SESSION_COOKIE_NAME: request.session.session_key} + try: + world.browser.cookies.add(cookie_dict) + + # WebDriver has an issue where we cannot set cookies + # before we make a GET request, so if we get an error, + # we load the '/' page and try again + except: + world.browser.visit(django_url('/')) + world.browser.cookies.add(cookie_dict) + + +@world.absorb +def 
register_by_course_id(course_id, is_staff=False): + create_user('robot') + u = User.objects.get(username='robot') + if is_staff: + u.is_staff = True + u.save() + CourseEnrollment.objects.get_or_create(user=u, course_id=course_id) + + + +@world.absorb +def save_the_course_content(path='/tmp'): + html = world.browser.html.encode('ascii', 'ignore') + soup = BeautifulSoup(html) + + # get rid of the header, we only want to compare the body + soup.head.decompose() + + # for now, remove the data-id attributes, because they are + # causing mismatches between cms-master and master + for item in soup.find_all(attrs={'data-id': re.compile('.*')}): + del item['data-id'] + + # we also need to remove them from unrendered problems, + # where they are contained in the text of divs instead of + # in attributes of tags + # Be careful of whether or not it was the last attribute + # and needs a trailing space + for item in soup.find_all(text=re.compile(' data-id=".*?" ')): + s = unicode(item.string) + item.string.replace_with(re.sub(' data-id=".*?" ', ' ', s)) + + for item in soup.find_all(text=re.compile(' data-id=".*?"')): + s = unicode(item.string) + item.string.replace_with(re.sub(' data-id=".*?"', ' ', s)) + + # prettify the html so it will compare better, with + # each HTML tag on its own line + output = soup.prettify() + + # use string slicing to grab everything after 'courseware/' in the URL + u = world.browser.url + section_url = u[u.find('courseware/') + 11:] + + + if not os.path.exists(path): + os.makedirs(path) + + filename = '%s.html' % (quote_plus(section_url)) + f = open('%s/%s' % (path, filename), 'w') + f.write(output) + f.close + + +@world.absorb +def clear_courses(): + # Flush and initialize the module store + # It needs the templates because it creates new records + # by cloning from the template. 
+ # Note that if your test module gets in some weird state + # (though it shouldn't), do this manually + # from the bash shell to drop it: + # $ mongo test_xmodule --eval "db.dropDatabase()" + _MODULESTORES = {} + modulestore().collection.drop() + update_templates() diff --git a/common/djangoapps/terrain/factories.py b/common/djangoapps/terrain/factories.py new file mode 100644 index 0000000000..768c51b25e --- /dev/null +++ b/common/djangoapps/terrain/factories.py @@ -0,0 +1,64 @@ +''' +Factories are defined in other modules and absorbed here into the +lettuce world so that they can be used by both unit tests +and integration / BDD tests. +''' +import student.tests.factories as sf +import xmodule.modulestore.tests.factories as xf +from lettuce import world + + +@world.absorb +class UserFactory(sf.UserFactory): + """ + User account for lms / cms + """ + pass + + +@world.absorb +class UserProfileFactory(sf.UserProfileFactory): + """ + Demographics etc for the User + """ + pass + + +@world.absorb +class RegistrationFactory(sf.RegistrationFactory): + """ + Activation key for registering the user account + """ + pass + + +@world.absorb +class GroupFactory(sf.GroupFactory): + """ + Groups for user permissions for courses + """ + pass + + +@world.absorb +class CourseEnrollmentAllowedFactory(sf.CourseEnrollmentAllowed): + """ + Users allowed to enroll in the course outside of the usual window + """ + pass + + +@world.absorb +class CourseFactory(xf.CourseFactory): + """ + Courseware courses + """ + pass + + +@world.absorb +class ItemFactory(xf.ItemFactory): + """ + Everything included inside a course + """ + pass diff --git a/lms/djangoapps/portal/features/common.py b/common/djangoapps/terrain/steps.py similarity index 51% rename from lms/djangoapps/portal/features/common.py rename to common/djangoapps/terrain/steps.py index 8bfb548367..a8a32db173 100644 --- a/lms/djangoapps/portal/features/common.py +++ b/common/djangoapps/terrain/steps.py @@ -1,11 +1,11 @@ -from lettuce 
import world, step # , before, after -from factories import * -from django.core.management import call_command -from nose.tools import assert_equals, assert_in +#pylint: disable=C0111 +#pylint: disable=W0621 + +from lettuce import world, step +from .course_helpers import * +from .ui_helpers import * from lettuce.django import django_url -from django.conf import settings -from django.contrib.auth.models import User -from student.models import CourseEnrollment +from nose.tools import assert_equals, assert_in import time from logging import getLogger @@ -14,46 +14,93 @@ logger = getLogger(__name__) @step(u'I wait (?:for )?"(\d+)" seconds?$') def wait(step, seconds): - time.sleep(float(seconds)) + world.wait(seconds) + + +@step('I reload the page$') +def reload_the_page(step): + world.browser.reload() + + +@step('I press the browser back button$') +def browser_back(step): + world.browser.driver.back() @step('I (?:visit|access|open) the homepage$') def i_visit_the_homepage(step): - world.browser.visit(django_url('/')) - assert world.browser.is_element_present_by_css('header.global', 10) + world.visit('/') + assert world.is_css_present('header.global') @step(u'I (?:visit|access|open) the dashboard$') def i_visit_the_dashboard(step): - world.browser.visit(django_url('/dashboard')) - assert world.browser.is_element_present_by_css('section.container.dashboard', 5) - - -@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$') -def click_the_link_called(step, text): - world.browser.find_link_by_text(text).click() + world.visit('/dashboard') + assert world.is_css_present('section.container.dashboard') @step('I should be on the dashboard page$') def i_should_be_on_the_dashboard(step): - assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + assert world.is_css_present('section.container.dashboard') assert world.browser.title == 'Dashboard' @step(u'I (?:visit|access|open) the courses page$') def i_am_on_the_courses_page(step): - 
world.browser.visit(django_url('/courses')) - assert world.browser.is_element_present_by_css('section.courses') + world.visit('/courses') + assert world.is_css_present('section.courses') + + +@step(u'I press the "([^"]*)" button$') +def and_i_press_the_button(step, value): + button_css = 'input[value="%s"]' % value + world.css_click(button_css) + + +@step(u'I click the link with the text "([^"]*)"$') +def click_the_link_with_the_text_group1(step, linktext): + world.click_link(linktext) @step('I should see that the path is "([^"]*)"$') def i_should_see_that_the_path_is(step, path): - assert world.browser.url == django_url(path) + assert world.url_equals(path) @step(u'the page title should be "([^"]*)"$') def the_page_title_should_be(step, title): - assert world.browser.title == title + assert_equals(world.browser.title, title) + + +@step(u'the page title should contain "([^"]*)"$') +def the_page_title_should_contain(step, title): + assert(title in world.browser.title) + + +@step('I log in$') +def i_log_in(step): + world.log_in('robot', 'test') + + +@step('I am a logged in user$') +def i_am_logged_in_user(step): + world.create_user('robot') + world.log_in('robot', 'test') + + +@step('I am not logged in$') +def i_am_not_logged_in(step): + world.browser.cookies.delete() + + +@step('I am staff for course "([^"]*)"$') +def i_am_staff_for_course_by_id(step, course_id): + world.register_by_course_id(course_id, True) + + +@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$') +def click_the_link_called(step, text): + world.click_link(text) @step(r'should see that the url is "([^"]*)"$') @@ -68,13 +115,14 @@ def should_see_a_link_called(step, text): @step(r'should see "(.*)" (?:somewhere|anywhere) in (?:the|this) page') def should_see_in_the_page(step, text): - assert_in(text, world.browser.html) + assert_in(text, world.css_text('body')) @step('I am logged in$') def i_am_logged_in(step): world.create_user('robot') - world.log_in('robot@edx.org', 'test') + 
world.log_in('robot', 'test') + world.browser.visit(django_url('/')) @step('I am not logged in$') @@ -82,14 +130,6 @@ def i_am_not_logged_in(step): world.browser.cookies.delete() -@step(u'I am registered for a course$') -def i_am_registered_for_a_course(step): - world.create_user('robot') - u = User.objects.get(username='robot') - CourseEnrollment.objects.create(user=u, course_id='MITx/6.002x/2012_Fall') - world.log_in('robot@edx.org', 'test') - - @step(u'I am an edX user$') def i_am_an_edx_user(step): world.create_user('robot') diff --git a/common/djangoapps/terrain/ui_helpers.py b/common/djangoapps/terrain/ui_helpers.py new file mode 100644 index 0000000000..d4d99e17b5 --- /dev/null +++ b/common/djangoapps/terrain/ui_helpers.py @@ -0,0 +1,117 @@ +#pylint: disable=C0111 +#pylint: disable=W0621 + +from lettuce import world, step +import time +from urllib import quote_plus +from selenium.common.exceptions import WebDriverException +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import WebDriverWait +from lettuce.django import django_url + + +@world.absorb +def wait(seconds): + time.sleep(float(seconds)) + + +@world.absorb +def wait_for(func): + WebDriverWait(world.browser.driver, 5).until(func) + + +@world.absorb +def visit(url): + world.browser.visit(django_url(url)) + + +@world.absorb +def url_equals(url): + return world.browser.url == django_url(url) + + +@world.absorb +def is_css_present(css_selector): + return world.browser.is_element_present_by_css(css_selector, wait_time=4) + + +@world.absorb +def css_has_text(css_selector, text): + return world.css_text(css_selector) == text + + +@world.absorb +def css_find(css): + def is_visible(driver): + return EC.visibility_of_element_located((By.CSS_SELECTOR, css,)) + + world.browser.is_element_present_by_css(css, 5) + wait_for(is_visible) + return world.browser.find_by_css(css) + + +@world.absorb +def 
css_click(css_selector): + ''' + First try to use the regular click method, + but if clicking in the middle of an element + doesn't work it might be that it thinks some other + element is on top of it there so click in the upper left + ''' + try: + world.browser.find_by_css(css_selector).click() + + except WebDriverException: + # Occassionally, MathJax or other JavaScript can cover up + # an element temporarily. + # If this happens, wait a second, then try again + time.sleep(1) + world.browser.find_by_css(css_selector).click() + + +@world.absorb +def css_click_at(css, x=10, y=10): + ''' + A method to click at x,y coordinates of the element + rather than in the center of the element + ''' + e = css_find(css).first + e.action_chains.move_to_element_with_offset(e._element, x, y) + e.action_chains.click() + e.action_chains.perform() + + +@world.absorb +def css_fill(css_selector, text): + world.browser.find_by_css(css_selector).first.fill(text) + + +@world.absorb +def click_link(partial_text): + world.browser.find_link_by_partial_text(partial_text).first.click() + + +@world.absorb +def css_text(css_selector): + + # Wait for the css selector to appear + if world.is_css_present(css_selector): + return world.browser.find_by_css(css_selector).first.text + else: + return "" + + +@world.absorb +def css_visible(css_selector): + return world.browser.find_by_css(css_selector).visible + + +@world.absorb +def save_the_html(path='/tmp'): + u = world.browser.url + html = world.browser.html.encode('ascii', 'ignore') + filename = '%s.html' % quote_plus(u) + f = open('%s/%s' % (path, filename), 'w') + f.write(html) + f.close diff --git a/common/djangoapps/util/converters.py b/common/djangoapps/util/converters.py deleted file mode 100644 index ec2d29ecfa..0000000000 --- a/common/djangoapps/util/converters.py +++ /dev/null @@ -1,30 +0,0 @@ -import time -import datetime -import re -import calendar - - -def time_to_date(time_obj): - """ - Convert a time.time_struct to a true universal time 
(can pass to js Date constructor) - """ - # TODO change to using the isoformat() function on datetime. js date can parse those - return calendar.timegm(time_obj) * 1000 - - -def jsdate_to_time(field): - """ - Convert a universal time (iso format) or msec since epoch to a time obj - """ - if field is None: - return field - elif isinstance(field, basestring): - # ISO format but ignores time zone assuming it's Z. - d = datetime.datetime(*map(int, re.split('[^\d]', field)[:6])) # stop after seconds. Debatable - return d.utctimetuple() - elif isinstance(field, (int, long, float)): - return time.gmtime(field / 1000) - elif isinstance(field, time.struct_time): - return field - else: - raise ValueError("Couldn't convert %r to time" % field) diff --git a/common/djangoapps/xmodule_modifiers.py b/common/djangoapps/xmodule_modifiers.py index 7b19c27553..d398dfef0d 100644 --- a/common/djangoapps/xmodule_modifiers.py +++ b/common/djangoapps/xmodule_modifiers.py @@ -33,7 +33,7 @@ def wrap_xmodule(get_html, module, template, context=None): def _get_html(): context.update({ 'content': get_html(), - 'display_name': module.metadata.get('display_name') if module.metadata is not None else None, + 'display_name': module.display_name, 'class_': module.__class__.__name__, 'module_name': module.js_module_name }) @@ -108,42 +108,25 @@ def add_histogram(get_html, module, user): histogram = grade_histogram(module_id) render_histogram = len(histogram) > 0 - # TODO (ichuang): Remove after fall 2012 LMS migration done - if settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION'): - [filepath, filename] = module.definition.get('filename', ['', None]) - osfs = module.system.filestore - if filename is not None and osfs.exists(filename): - # if original, unmangled filename exists then use it (github - # doesn't like symlinks) - filepath = filename - data_dir = osfs.root_path.rsplit('/')[-1] - giturl = module.metadata.get('giturl', 'https://github.com/MITx') - edit_link = "%s/%s/tree/master/%s" % (giturl, 
data_dir, filepath) - else: - edit_link = False - # Need to define all the variables that are about to be used - giturl = "" - data_dir = "" - source_file = module.metadata.get('source_file', '') # source used to generate the problem XML, eg latex or word + source_file = module.lms.source_file # source used to generate the problem XML, eg latex or word # useful to indicate to staff if problem has been released or not # TODO (ichuang): use _has_access_descriptor.can_load in lms.courseware.access, instead of now>mstart comparison here now = time.gmtime() is_released = "unknown" - mstart = getattr(module.descriptor, 'start') + mstart = module.descriptor.lms.start + if mstart is not None: is_released = "Yes!" if (now > mstart) else "Not yet" - staff_context = {'definition': module.definition.get('data'), - 'metadata': json.dumps(module.metadata, indent=4), + staff_context = {'fields': [(field.name, getattr(module, field.name)) for field in module.fields], + 'lms_fields': [(field.name, getattr(module.lms, field.name)) for field in module.lms.fields], 'location': module.location, - 'xqa_key': module.metadata.get('xqa_key', ''), + 'xqa_key': module.lms.xqa_key, 'source_file': source_file, - 'source_url': '%s/%s/tree/master/%s' % (giturl, data_dir, source_file), 'category': str(module.__class__.__name__), # Template uses element_id in js function names, so can't allow dashes 'element_id': module.location.html_id().replace('-', '_'), - 'edit_link': edit_link, 'user': user, 'xqa_server': settings.MITX_FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'), 'histogram': json.dumps(histogram), diff --git a/common/lib/capa/capa/calc.py b/common/lib/capa/capa/calc.py index 0f062d17d5..c3fe6b656b 100644 --- a/common/lib/capa/capa/calc.py +++ b/common/lib/capa/capa/calc.py @@ -183,7 +183,7 @@ def evaluator(variables, functions, string, cs=False): # 0.33k or -17 number = (Optional(minus | plus) + inner_number - + Optional(CaselessLiteral("E") + 
Optional("-") + number_part) + + Optional(CaselessLiteral("E") + Optional((plus | minus)) + number_part) + Optional(number_suffix)) number = number.setParseAction(number_parse_action) # Convert to number diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py index 9b8bbd7288..6580114bcc 100644 --- a/common/lib/capa/capa/capa_problem.py +++ b/common/lib/capa/capa/capa_problem.py @@ -16,7 +16,6 @@ This is used by capa_module. from __future__ import division from datetime import datetime -import json import logging import math import numpy @@ -29,20 +28,21 @@ import sys from lxml import etree from xml.sax.saxutils import unescape +from copy import deepcopy import chem +import chem.miller import chem.chemcalc import chem.chemtools -import chem.miller import verifiers import verifiers.draganddrop import calc -from correctmap import CorrectMap +from .correctmap import CorrectMap import eia import inputtypes import customrender -from util import contextualize_text, convert_files_to_filenames +from .util import contextualize_text, convert_files_to_filenames import xqueue_interface # to be replaced with auto-registering @@ -77,7 +77,7 @@ global_context = {'random': random, # These should be removed from HTML output, including all subelements html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric"] -log = logging.getLogger('mitx.' 
+ __name__) +log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # main class for this module @@ -96,8 +96,13 @@ class LoncapaProblem(object): - problem_text (string): xml defining the problem - id (string): identifier for this problem; often a filename (no spaces) - - state (dict): student state - - seed (int): random number generator seed (int) + - seed (int): random number generator seed (int) + - state (dict): containing the following keys: + - 'seed' - (int) random number generator seed + - 'student_answers' - (dict) maps input id to the stored answer for that input + - 'correct_map' (CorrectMap) a map of each input to their 'correctness' + - 'done' - (bool) indicates whether or not this problem is considered done + - 'input_state' - (dict) maps input_id to a dictionary that holds the state for that input - system (ModuleSystem): ModuleSystem instance which provides OS, rendering, and user context @@ -107,21 +112,25 @@ class LoncapaProblem(object): self.do_reset() self.problem_id = id self.system = system - self.seed = seed + if self.system is None: + raise Exception() - if state: - if 'seed' in state: - self.seed = state['seed'] - if 'student_answers' in state: - self.student_answers = state['student_answers'] - if 'correct_map' in state: - self.correct_map.set_dict(state['correct_map']) - if 'done' in state: - self.done = state['done'] + state = state if state else {} - # TODO: Does this deplete the Linux entropy pool? Is this fast enough? - if not self.seed: + # Set seed according to the following priority: + # 1. Contained in problem's state + # 2. Passed into capa_problem via constructor + # 3. 
Assign from the OS's random number generator + self.seed = state.get('seed', seed) + if self.seed is None: self.seed = struct.unpack('i', os.urandom(4))[0] + self.student_answers = state.get('student_answers', {}) + if 'correct_map' in state: + self.correct_map.set_dict(state['correct_map']) + self.done = state.get('done', False) + self.input_state = state.get('input_state', {}) + + # Convert startouttext and endouttext to proper problem_text = re.sub("startouttext\s*/", "text", problem_text) @@ -146,6 +155,13 @@ class LoncapaProblem(object): if not self.student_answers: # True when student_answers is an empty dict self.set_initial_display() + # dictionary of InputType objects associated with this problem + # input_id string -> InputType object + self.inputs = {} + + self.extracted_tree = self._extract_html(self.tree) + + def do_reset(self): ''' Reset internal state to unfinished, with no answers @@ -178,6 +194,7 @@ class LoncapaProblem(object): return {'seed': self.seed, 'student_answers': self.student_answers, 'correct_map': self.correct_map.get_dict(), + 'input_state': self.input_state, 'done': self.done} def get_max_score(self): @@ -227,6 +244,20 @@ class LoncapaProblem(object): self.correct_map.set_dict(cmap.get_dict()) return cmap + def ungraded_response(self, xqueue_msg, queuekey): + ''' + Handle any responses from the xqueue that do not contain grades + Will try to pass the queue message to all inputtypes that can handle ungraded responses + + Does not return any value + ''' + # check against each inputtype + for the_input in self.inputs.values(): + # if the input type has an ungraded function, pass in the values + if hasattr(the_input, 'ungraded_response'): + the_input.ungraded_response(xqueue_msg, queuekey) + + def is_queued(self): ''' Returns True if any part of the problem has been submitted to an external queue @@ -324,7 +355,27 @@ class LoncapaProblem(object): ''' Main method called externally to get the HTML to be rendered for this capa Problem. 
''' - return contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context) + html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context) + return html + + + def handle_input_ajax(self, get): + ''' + InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data + + Also, parse out the dispatch from the get so that it can be passed onto the input type nicely + ''' + + # pull out the id + input_id = get['input_id'] + if self.inputs[input_id]: + dispatch = get['dispatch'] + return self.inputs[input_id].handle_ajax(dispatch, get) + else: + log.warning("Could not find matching input for id: %s" % input_id) + return {} + + # ======= Private Methods Below ======== @@ -458,6 +509,8 @@ class LoncapaProblem(object): finally: sys.path = original_path + + def _extract_html(self, problemtree): # private ''' Main (private) function which converts Problem XML tree to HTML. @@ -471,7 +524,7 @@ class LoncapaProblem(object): if (problemtree.tag == 'script' and problemtree.get('type') and 'javascript' in problemtree.get('type')): # leave javascript intact. 
- return problemtree + return deepcopy(problemtree) if problemtree.tag in html_problem_semantics: return @@ -484,8 +537,9 @@ class LoncapaProblem(object): msg = '' hint = '' hintmode = None + input_id = problemtree.get('id') if problemid in self.correct_map: - pid = problemtree.get('id') + pid = input_id status = self.correct_map.get_correctness(pid) msg = self.correct_map.get_msg(pid) hint = self.correct_map.get_hint(pid) @@ -494,23 +548,29 @@ class LoncapaProblem(object): value = "" if self.student_answers and problemid in self.student_answers: value = self.student_answers[problemid] - + + if input_id not in self.input_state: + self.input_state[input_id] = {} + # do the rendering - state = {'value': value, 'status': status, - 'id': problemtree.get('id'), + 'id': input_id, + 'input_state': self.input_state[input_id], 'feedback': {'message': msg, 'hint': hint, 'hintmode': hintmode, }} input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag) - the_input = input_type_cls(self.system, problemtree, state) - return the_input.get_html() + # save the input type so that we can make ajax calls on it if we need to + self.inputs[input_id] = input_type_cls(self.system, problemtree, state) + return self.inputs[input_id].get_html() # let each Response render itself if problemtree in self.responders: - return self.responders[problemtree].render_html(self._extract_html) + overall_msg = self.correct_map.get_overall_message() + return self.responders[problemtree].render_html(self._extract_html, + response_msg=overall_msg) # let each custom renderer render itself: if problemtree.tag in customrender.registry.registered_tags(): diff --git a/common/lib/capa/capa/checker.py b/common/lib/capa/capa/checker.py index f583a5ea7d..15358aac9e 100755 --- a/common/lib/capa/capa/checker.py +++ b/common/lib/capa/capa/checker.py @@ -12,8 +12,8 @@ from path import path from cStringIO import StringIO from collections import defaultdict -from calc import UndefinedVariable -from 
capa_problem import LoncapaProblem +from .calc import UndefinedVariable +from .capa_problem import LoncapaProblem from mako.lookup import TemplateLookup logging.basicConfig(format="%(levelname)s %(message)s") diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/capa/capa/chem/tests.py index 571526f915..f422fcf0d1 100644 --- a/common/lib/capa/capa/chem/tests.py +++ b/common/lib/capa/capa/chem/tests.py @@ -2,7 +2,7 @@ import codecs from fractions import Fraction import unittest -from chemcalc import (compare_chemical_expression, divide_chemical_expression, +from .chemcalc import (compare_chemical_expression, divide_chemical_expression, render_to_html, chemical_equations_equal) import miller @@ -277,7 +277,6 @@ class Test_Render_Equations(unittest.TestCase): def test_render9(self): s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-" - #import ipdb; ipdb.set_trace() out = render_to_html(s) correct = u'5[Ni(NH3)4]2++52SO42-' log(out + ' ------- ' + correct, 'html') diff --git a/common/lib/capa/capa/correctmap.py b/common/lib/capa/capa/correctmap.py index a78b10d07a..950cd199fc 100644 --- a/common/lib/capa/capa/correctmap.py +++ b/common/lib/capa/capa/correctmap.py @@ -27,6 +27,7 @@ class CorrectMap(object): self.cmap = dict() self.items = self.cmap.items self.keys = self.cmap.keys + self.overall_message = "" self.set(*args, **kwargs) def __getitem__(self, *args, **kwargs): @@ -46,7 +47,7 @@ class CorrectMap(object): queuestate=None, **kwargs): if answer_id is not None: - self.cmap[answer_id] = {'correctness': correctness, + self.cmap[str(answer_id)] = {'correctness': correctness, 'npoints': npoints, 'msg': msg, 'hint': hint, @@ -79,22 +80,23 @@ class CorrectMap(object): Special migration case: If correct_map is a one-level dict, then convert it to the new dict of dicts format. 
- ''' - if correct_map and not (type(correct_map[correct_map.keys()[0]]) == dict): - # empty current dict - self.__init__() - # create new dict entries + ''' + # empty current dict + self.__init__() + + # create new dict entries + if correct_map and not isinstance(correct_map.values()[0], dict): + # special migration for k in correct_map: - self.set(k, correct_map[k]) + self.set(k, correctness=correct_map[k]) else: - self.__init__() for k in correct_map: self.set(k, **correct_map[k]) def is_correct(self, answer_id): if answer_id in self.cmap: - return self.cmap[answer_id]['correctness'] == 'correct' + return self.cmap[answer_id]['correctness'] in ['correct', 'partially-correct'] return None def is_queued(self, answer_id): @@ -104,9 +106,13 @@ class CorrectMap(object): return self.is_queued(answer_id) and self.cmap[answer_id]['queuestate']['key'] == test_key def get_queuetime_str(self, answer_id): - return self.cmap[answer_id]['queuestate']['time'] + if self.cmap[answer_id]['queuestate']: + return self.cmap[answer_id]['queuestate']['time'] + else: + return None def get_npoints(self, answer_id): + """Return the number of points for an answer, used for partial credit.""" npoints = self.get_property(answer_id, 'npoints') if npoints is not None: return npoints @@ -153,3 +159,15 @@ class CorrectMap(object): if not isinstance(other_cmap, CorrectMap): raise Exception('CorrectMap.update called with invalid argument %s' % other_cmap) self.cmap.update(other_cmap.get_dict()) + self.set_overall_message(other_cmap.get_overall_message()) + + + def set_overall_message(self, message_str): + """ Set a message that applies to the question as a whole, + rather than to individual inputs. """ + self.overall_message = str(message_str) if message_str else "" + + def get_overall_message(self): + """ Retrieve a message that applies to the question as a whole. 
+ If no message is available, returns the empty string """ + return self.overall_message diff --git a/common/lib/capa/capa/customrender.py b/common/lib/capa/capa/customrender.py index a925a5970d..60d3ce578b 100644 --- a/common/lib/capa/capa/customrender.py +++ b/common/lib/capa/capa/customrender.py @@ -6,7 +6,7 @@ These tags do not have state, so they just get passed the system (for access to and the xml element. """ -from registry import TagRegistry +from .registry import TagRegistry import logging import re @@ -15,9 +15,9 @@ import json from lxml import etree import xml.sax.saxutils as saxutils -from registry import TagRegistry +from .registry import TagRegistry -log = logging.getLogger('mitx.' + __name__) +log = logging.getLogger(__name__) registry = TagRegistry() diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 951104501a..2febfbd5d2 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -37,18 +37,20 @@ graded status as'status' # makes sense, but a bunch of problems have markup that assumes block. Bigger TODO: figure out a # general css and layout strategy for capa, document it, then implement it. -from collections import namedtuple import json import logging from lxml import etree import re import shlex # for splitting quoted strings import sys -import os +import pyparsing -from registry import TagRegistry +from .registry import TagRegistry +from capa.chem import chemcalc +import xqueue_interface +from datetime import datetime -log = logging.getLogger('mitx.' 
+ __name__) +log = logging.getLogger(__name__) ######################################################################### @@ -95,7 +97,8 @@ class Attribute(object): """ val = element.get(self.name) if self.default == self._sentinel and val is None: - raise ValueError('Missing required attribute {0}.'.format(self.name)) + raise ValueError( + 'Missing required attribute {0}.'.format(self.name)) if val is None: # not required, so return default @@ -130,6 +133,8 @@ class InputTypeBase(object): * 'id' -- the id of this input, typically "{problem-location}_{response-num}_{input-num}" * 'status' (answered, unanswered, unsubmitted) + * 'input_state' -- dictionary containing any inputtype-specific state + that has been preserved * 'feedback' (dictionary containing keys for hints, errors, or other feedback from previous attempt. Specifically 'message', 'hint', 'hintmode'. If 'hintmode' is 'always', the hint is always displayed.) @@ -147,7 +152,8 @@ class InputTypeBase(object): self.id = state.get('id', xml.get('id')) if self.id is None: - raise ValueError("input id state is None. xml is {0}".format(etree.tostring(xml))) + raise ValueError("input id state is None. xml is {0}".format( + etree.tostring(xml))) self.value = state.get('value', '') @@ -155,6 +161,7 @@ class InputTypeBase(object): self.msg = feedback.get('message', '') self.hint = feedback.get('hint', '') self.hintmode = feedback.get('hintmode', None) + self.input_state = state.get('input_state', {}) # put hint above msg if it should be displayed if self.hintmode == 'always': @@ -167,14 +174,15 @@ class InputTypeBase(object): self.process_requirements() # Call subclass "constructor" -- means they don't have to worry about calling - # super().__init__, and are isolated from changes to the input constructor interface. + # super().__init__, and are isolated from changes to the input + # constructor interface. 
self.setup() except Exception as err: # Something went wrong: add xml to message, but keep the traceback - msg = "Error in xml '{x}': {err} ".format(x=etree.tostring(xml), err=str(err)) + msg = "Error in xml '{x}': {err} ".format( + x=etree.tostring(xml), err=str(err)) raise Exception, msg, sys.exc_info()[2] - @classmethod def get_attributes(cls): """ @@ -184,7 +192,6 @@ class InputTypeBase(object): """ return [] - def process_requirements(self): """ Subclasses can declare lists of required and optional attributes. This @@ -194,7 +201,8 @@ class InputTypeBase(object): Processes attributes, putting the results in the self.loaded_attributes dictionary. Also creates a set self.to_render, containing the names of attributes that should be included in the context by default. """ - # Use local dicts and sets so that if there are exceptions, we don't end up in a partially-initialized state. + # Use local dicts and sets so that if there are exceptions, we don't + # end up in a partially-initialized state. loaded = {} to_render = set() for a in self.get_attributes(): @@ -215,6 +223,18 @@ class InputTypeBase(object): """ pass + def handle_ajax(self, dispatch, get): + """ + InputTypes that need to handle specialized AJAX should override this. + + Input: + dispatch: a string that can be used to determine how to handle the data passed in + get: a dictionary containing the data that was sent with the ajax call + + Output: + a dictionary object that can be serialized into JSON. This will be sent back to the Javascript. 
+ """ + pass def _get_render_context(self): """ @@ -233,8 +253,9 @@ class InputTypeBase(object): 'value': self.value, 'status': self.status, 'msg': self.msg, - } - context.update((a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render) + } + context.update((a, v) for ( + a, v) in self.loaded_attributes.iteritems() if a in self.to_render) context.update(self._extra_context()) return context @@ -352,6 +373,11 @@ class ChoiceGroup(InputTypeBase): self.choices = self.extract_choices(self.xml) + @classmethod + def get_attributes(cls): + return [Attribute("show_correctness", "always"), + Attribute("submitted_message", "Answer received.")] + def _extra_context(self): return {'input_type': self.html_input_type, 'choices': self.choices, @@ -416,7 +442,6 @@ class JavascriptInput(InputTypeBase): Attribute('display_class', None), Attribute('display_file', None), ] - def setup(self): # Need to provide a value that JSON can parse if there is no # student-supplied value yet. @@ -439,7 +464,6 @@ class TextLine(InputTypeBase): template = "textline.html" tags = ['textline'] - @classmethod def get_attributes(cls): """ @@ -454,12 +478,12 @@ class TextLine(InputTypeBase): # Attributes below used in setup(), not rendered directly. 
Attribute('math', None, render=False), - # TODO: 'dojs' flag is temporary, for backwards compatibility with 8.02x + # TODO: 'dojs' flag is temporary, for backwards compatibility with + # 8.02x Attribute('dojs', None, render=False), Attribute('preprocessorClassName', None, render=False), Attribute('preprocessorSrc', None, render=False), - ] - + ] def setup(self): self.do_math = bool(self.loaded_attributes['math'] or @@ -470,12 +494,12 @@ class TextLine(InputTypeBase): self.preprocessor = None if self.do_math: # Preprocessor to insert between raw input and Mathjax - self.preprocessor = {'class_name': self.loaded_attributes['preprocessorClassName'], - 'script_src': self.loaded_attributes['preprocessorSrc']} + self.preprocessor = { + 'class_name': self.loaded_attributes['preprocessorClassName'], + 'script_src': self.loaded_attributes['preprocessorSrc']} if None in self.preprocessor.values(): self.preprocessor = None - def _extra_context(self): return {'do_math': self.do_math, 'preprocessor': self.preprocessor, } @@ -519,7 +543,8 @@ class FileSubmission(InputTypeBase): """ # Check if problem has been queued self.queue_len = 0 - # Flag indicating that the problem has been queued, 'msg' is length of queue + # Flag indicating that the problem has been queued, 'msg' is length of + # queue if self.status == 'incomplete': self.status = 'queued' self.queue_len = self.msg @@ -527,7 +552,6 @@ class FileSubmission(InputTypeBase): def _extra_context(self): return {'queue_len': self.queue_len, } - return context registry.register(FileSubmission) @@ -542,8 +566,9 @@ class CodeInput(InputTypeBase): template = "codeinput.html" tags = ['codeinput', - 'textbox', # Another (older) name--at some point we may want to make it use a - # non-codemirror editor. + 'textbox', + # Another (older) name--at some point we may want to make it use a + # non-codemirror editor. 
] # pulled out for testing @@ -566,22 +591,29 @@ class CodeInput(InputTypeBase): Attribute('tabsize', 4, transform=int), ] - def setup(self): + def setup_code_response_rendering(self): """ Implement special logic: handle queueing state, and default input. """ - # if no student input yet, then use the default input given by the problem - if not self.value: - self.value = self.xml.text + # if no student input yet, then use the default input given by the + # problem + if not self.value and self.xml.text: + self.value = self.xml.text.strip() # Check if problem has been queued self.queue_len = 0 - # Flag indicating that the problem has been queued, 'msg' is length of queue + # Flag indicating that the problem has been queued, 'msg' is length of + # queue if self.status == 'incomplete': self.status = 'queued' self.queue_len = self.msg self.msg = self.submitted_msg + + def setup(self): + ''' setup this input type ''' + self.setup_code_response_rendering() + def _extra_context(self): """Defined queue_len, add it """ return {'queue_len': self.queue_len, } @@ -590,8 +622,164 @@ registry.register(CodeInput) #----------------------------------------------------------------------------- + + +class MatlabInput(CodeInput): + ''' + InputType for handling Matlab code input + + TODO: API_KEY will go away once we have a way to specify it per-course + Example: + + Initial Text + + %api_key=API_KEY + + + ''' + template = "matlabinput.html" + tags = ['matlabinput'] + + plot_submitted_msg = ("Submitted. 
As soon as a response is returned, " + "this message will be replaced by that feedback.") + + def setup(self): + ''' + Handle matlab-specific parsing + ''' + self.setup_code_response_rendering() + + xml = self.xml + self.plot_payload = xml.findtext('./plot_payload') + + # Check if problem has been queued + self.queuename = 'matlab' + self.queue_msg = '' + if 'queue_msg' in self.input_state and self.status in ['queued','incomplete', 'unsubmitted']: + self.queue_msg = self.input_state['queue_msg'] + if 'queued' in self.input_state and self.input_state['queuestate'] is not None: + self.status = 'queued' + self.queue_len = 1 + self.msg = self.plot_submitted_msg + + + def handle_ajax(self, dispatch, get): + ''' + Handle AJAX calls directed to this input + + Args: + - dispatch (str) - indicates how we want this ajax call to be handled + - get (dict) - dictionary of key-value pairs that contain useful data + Returns: + + ''' + + if dispatch == 'plot': + return self._plot_data(get) + return {} + + def ungraded_response(self, queue_msg, queuekey): + ''' + Handle the response from the XQueue + Stores the response in the input_state so it can be rendered later + + Args: + - queue_msg (str) - message returned from the queue. The message to be rendered + - queuekey (str) - a key passed to the queue. 
Will be matched up to verify that this is the response we're waiting for + + Returns: + nothing + ''' + # check the queuekey against the saved queuekey + if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued' + and self.input_state['queuekey'] == queuekey): + msg = self._parse_data(queue_msg) + # save the queue message so that it can be rendered later + self.input_state['queue_msg'] = msg + self.input_state['queuestate'] = None + self.input_state['queuekey'] = None + + def _extra_context(self): + ''' Set up additional context variables''' + extra_context = { + 'queue_len': self.queue_len, + 'queue_msg': self.queue_msg + } + return extra_context + + def _parse_data(self, queue_msg): + ''' + Parses the message out of the queue message + Args: + queue_msg (str) - a JSON encoded string + Returns: + returns the value for the the key 'msg' in queue_msg + ''' + try: + result = json.loads(queue_msg) + except (TypeError, ValueError): + log.error("External message should be a JSON serialized dict." 
+ " Received queue_msg = %s" % queue_msg) + raise + msg = result['msg'] + return msg + + + def _plot_data(self, get): + ''' + AJAX handler for the plot button + Args: + get (dict) - should have key 'submission' which contains the student submission + Returns: + dict - 'success' - whether or not we successfully queued this submission + - 'message' - message to be rendered in case of error + ''' + # only send data if xqueue exists + if self.system.xqueue is None: + return {'success': False, 'message': 'Cannot connect to the queue'} + + # pull relevant info out of get + response = get['submission'] + + # construct xqueue headers + qinterface = self.system.xqueue['interface'] + qtime = datetime.strftime(datetime.utcnow(), xqueue_interface.dateformat) + callback_url = self.system.xqueue['construct_callback']('ungraded_response') + anonymous_student_id = self.system.anonymous_student_id + queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime + + anonymous_student_id + + self.id) + xheader = xqueue_interface.make_xheader( + lms_callback_url = callback_url, + lms_key = queuekey, + queue_name = self.queuename) + + # save the input state + self.input_state['queuekey'] = queuekey + self.input_state['queuestate'] = 'queued' + + + # construct xqueue body + student_info = {'anonymous_student_id': anonymous_student_id, + 'submission_time': qtime} + contents = {'grader_payload': self.plot_payload, + 'student_info': json.dumps(student_info), + 'student_response': response} + + (error, msg) = qinterface.send_to_queue(header=xheader, + body = json.dumps(contents)) + + return {'success': error == 0, 'message': msg} + + +registry.register(MatlabInput) + + +#----------------------------------------------------------------------------- + class Schematic(InputTypeBase): """ + InputType for the schematic editor """ template = "schematicinput.html" @@ -610,7 +798,6 @@ class Schematic(InputTypeBase): Attribute('initial_value', None), Attribute('submit_analyses', None), ] - 
return context registry.register(Schematic) @@ -640,12 +827,12 @@ class ImageInput(InputTypeBase): Attribute('height'), Attribute('width'), ] - def setup(self): """ if value is of the form [x,y] then parse it and send along coordinates of previous answer """ - m = re.match('\[([0-9]+),([0-9]+)]', self.value.strip().replace(' ', '')) + m = re.match('\[([0-9]+),([0-9]+)]', + self.value.strip().replace(' ', '')) if m: # Note: we subtract 15 to compensate for the size of the dot on the screen. # (is a 30x30 image--lms/static/green-pointer.png). @@ -653,7 +840,6 @@ class ImageInput(InputTypeBase): else: (self.gx, self.gy) = (0, 0) - def _extra_context(self): return {'gx': self.gx, @@ -710,7 +896,7 @@ class VseprInput(InputTypeBase): registry.register(VseprInput) -#-------------------------------------------------------------------------------- +#------------------------------------------------------------------------- class ChemicalEquationInput(InputTypeBase): @@ -740,6 +926,46 @@ class ChemicalEquationInput(InputTypeBase): """ return {'previewer': '/static/js/capa/chemical_equation_preview.js', } + def handle_ajax(self, dispatch, get): + ''' + Since we only have chemcalc preview this input, check to see if it + matches the corresponding dispatch and send it through if it does + ''' + if dispatch == 'preview_chemcalc': + return self.preview_chemcalc(get) + return {} + + def preview_chemcalc(self, get): + """ + Render an html preview of a chemical formula or equation. get should + contain a key 'formula' and value 'some formula string'. + + Returns a json dictionary: + { + 'preview' : 'the-preview-html' or '' + 'error' : 'the-error' or '' + } + """ + + result = {'preview': '', + 'error': ''} + formula = get['formula'] + if formula is None: + result['error'] = "No formula specified." 
+ return result + + try: + result['preview'] = chemcalc.render_to_html(formula) + except pyparsing.ParseException as p: + result['error'] = "Couldn't parse formula: {0}".format(p) + except Exception: + # this is unexpected, so log + log.warning( + "Error while previewing chemical formula", exc_info=True) + result['error'] = "Error while rendering preview" + + return result + registry.register(ChemicalEquationInput) #----------------------------------------------------------------------------- @@ -784,25 +1010,29 @@ class DragAndDropInput(InputTypeBase): 'can_reuse': ""} tag_attrs['target'] = {'id': Attribute._sentinel, - 'x': Attribute._sentinel, - 'y': Attribute._sentinel, - 'w': Attribute._sentinel, - 'h': Attribute._sentinel} + 'x': Attribute._sentinel, + 'y': Attribute._sentinel, + 'w': Attribute._sentinel, + 'h': Attribute._sentinel} dic = dict() for attr_name in tag_attrs[tag_type].keys(): dic[attr_name] = Attribute(attr_name, - default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag) + default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag) if tag_type == 'draggable' and not self.no_labels: dic['label'] = dic['label'] or dic['id'] + if tag_type == 'draggable': + dic['target_fields'] = [parse(target, 'target') for target in + tag.iterchildren('target')] + return dic # add labels to images?: self.no_labels = Attribute('no_labels', - default="False").parse_from_xml(self.xml) + default="False").parse_from_xml(self.xml) to_js = dict() @@ -811,16 +1041,16 @@ class DragAndDropInput(InputTypeBase): # outline places on image where to drag adn drop to_js['target_outline'] = Attribute('target_outline', - default="False").parse_from_xml(self.xml) + default="False").parse_from_xml(self.xml) # one draggable per target? 
to_js['one_per_target'] = Attribute('one_per_target', - default="True").parse_from_xml(self.xml) + default="True").parse_from_xml(self.xml) # list of draggables to_js['draggables'] = [parse(draggable, 'draggable') for draggable in - self.xml.iterchildren('draggable')] + self.xml.iterchildren('draggable')] # list of targets to_js['targets'] = [parse(target, 'target') for target in - self.xml.iterchildren('target')] + self.xml.iterchildren('target')] # custom background color for labels: label_bg_color = Attribute('label_bg_color', @@ -833,7 +1063,7 @@ class DragAndDropInput(InputTypeBase): registry.register(DragAndDropInput) -#-------------------------------------------------------------------------------------------------------------------- +#------------------------------------------------------------------------- class EditAMoleculeInput(InputTypeBase): @@ -871,6 +1101,7 @@ registry.register(EditAMoleculeInput) #----------------------------------------------------------------------------- + class DesignProtein2dInput(InputTypeBase): """ An input type for design of a protein in 2D. Integrates with the Protex java applet. @@ -906,36 +1137,148 @@ registry.register(DesignProtein2dInput) #----------------------------------------------------------------------------- + class EditAGeneInput(InputTypeBase): """ An input type for editing a gene. Integrates with the genex java applet. - + Example: - + """ - + template = "editageneinput.html" tags = ['editageneinput'] - + @classmethod def get_attributes(cls): """ - Note: width, hight, and dna_sequencee are required. - """ + Note: width, height, and dna_sequencee are required. 
+ """ return [Attribute('width'), Attribute('height'), - Attribute('dna_sequence') + Attribute('dna_sequence'), + Attribute('genex_problem_number') ] - + def _extra_context(self): """ """ context = { 'applet_loader': '/static/js/capa/edit-a-gene.js', } - + return context registry.register(EditAGeneInput) +#--------------------------------------------------------------------- + + +class AnnotationInput(InputTypeBase): + """ + Input type for annotations: students can enter some notes or other text + (currently ungraded), and then choose from a set of tags/optoins, which are graded. + + Example: + + + Annotation Exercise + + They are the ones who, at the public assembly, had put savage derangement [ate] into my thinking + [phrenes] |89 on that day when I myself deprived Achilles of his honorific portion [geras] + + Agamemnon says that ate or 'derangement' was the cause of his actions: why could Zeus say the same thing? + Type a commentary below: + Select one tag: + + + + + + + + # TODO: allow ordering to be randomized + """ + + template = "annotationinput.html" + tags = ['annotationinput'] + + def setup(self): + xml = self.xml + + self.debug = False # set to True to display extra debug info with input + self.return_to_annotation = True # return only works in conjunction with annotatable xmodule + + self.title = xml.findtext('./title', 'Annotation Exercise') + self.text = xml.findtext('./text') + self.comment = xml.findtext('./comment') + self.comment_prompt = xml.findtext( + './comment_prompt', 'Type a commentary below:') + self.tag_prompt = xml.findtext('./tag_prompt', 'Select one tag:') + self.options = self._find_options() + + # Need to provide a value that JSON can parse if there is no + # student-supplied value yet. + if self.value == '': + self.value = 'null' + + self._validate_options() + + def _find_options(self): + ''' Returns an array of dicts where each dict represents an option. 
''' + elements = self.xml.findall('./options/option') + return [{ + 'id': index, + 'description': option.text, + 'choice': option.get('choice') + } for (index, option) in enumerate(elements)] + + def _validate_options(self): + ''' Raises a ValueError if the choice attribute is missing or invalid. ''' + valid_choices = ('correct', 'partially-correct', 'incorrect') + for option in self.options: + choice = option['choice'] + if choice is None: + raise ValueError('Missing required choice attribute.') + elif choice not in valid_choices: + raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format( + choice, ', '.join(valid_choices))) + + def _unpack(self, json_value): + ''' Unpacks the json input state into a dict. ''' + d = json.loads(json_value) + if type(d) != dict: + d = {} + + comment_value = d.get('comment', '') + if not isinstance(comment_value, basestring): + comment_value = '' + + options_value = d.get('options', []) + if not isinstance(options_value, list): + options_value = [] + + return { + 'options_value': options_value, + 'has_options_value': len(options_value) > 0, # for convenience + 'comment_value': comment_value, + } + + def _extra_context(self): + extra_context = { + 'title': self.title, + 'text': self.text, + 'comment': self.comment, + 'comment_prompt': self.comment_prompt, + 'tag_prompt': self.tag_prompt, + 'options': self.options, + 'return_to_annotation': self.return_to_annotation, + 'debug': self.debug + } + + extra_context.update(self._unpack(self.value)) + + return extra_context + +registry.register(AnnotationInput) diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index a1a4e6b65e..5b1b46d858 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -17,6 +17,7 @@ import logging import numbers import numpy import os +import sys import random import re import requests @@ -28,15 +29,15 @@ from collections import namedtuple from shapely.geometry 
import Point, MultiPoint # specific library imports -from calc import evaluator, UndefinedVariable -from correctmap import CorrectMap +from .calc import evaluator, UndefinedVariable +from .correctmap import CorrectMap from datetime import datetime -from util import * +from .util import * from lxml import etree from lxml.html.soupparser import fromstring as fromstring_bs # uses Beautiful Soup!!! FIXME? import xqueue_interface -log = logging.getLogger('mitx.' + __name__) +log = logging.getLogger(__name__) #----------------------------------------------------------------------------- @@ -52,12 +53,17 @@ class LoncapaProblemError(Exception): class ResponseError(Exception): ''' - Error for failure in processing a response + Error for failure in processing a response, including + exceptions that occur when executing a custom script. ''' pass class StudentInputError(Exception): + ''' + Error for an invalid student input. + For example, submitting a string when the problem expects a number + ''' pass #----------------------------------------------------------------------------- @@ -128,21 +134,25 @@ class LoncapaResponse(object): for abox in inputfields: if abox.tag not in self.allowed_inputfields: - msg = "%s: cannot have input field %s" % (unicode(self), abox.tag) - msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '') + msg = "%s: cannot have input field %s" % ( + unicode(self), abox.tag) + msg += "\nSee XML source line %s" % getattr( + xml, 'sourceline', '') raise LoncapaProblemError(msg) if self.max_inputfields and len(inputfields) > self.max_inputfields: msg = "%s: cannot have more than %s input fields" % ( unicode(self), self.max_inputfields) - msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '') + msg += "\nSee XML source line %s" % getattr( + xml, 'sourceline', '') raise LoncapaProblemError(msg) for prop in self.required_attributes: if not xml.get(prop): msg = "Error in problem specification: %s missing required attribute %s" % ( 
unicode(self), prop) - msg += "\nSee XML source line %s" % getattr(xml, 'sourceline', '') + msg += "\nSee XML source line %s" % getattr( + xml, 'sourceline', '') raise LoncapaProblemError(msg) # ordered list of answer_id values for this response @@ -163,7 +173,8 @@ class LoncapaResponse(object): for entry in self.inputfields: answer = entry.get('correct_answer') if answer: - self.default_answer_map[entry.get('id')] = contextualize_text(answer, self.context) + self.default_answer_map[entry.get( + 'id')] = contextualize_text(answer, self.context) if hasattr(self, 'setup_response'): self.setup_response() @@ -174,13 +185,14 @@ class LoncapaResponse(object): ''' return sum(self.maxpoints.values()) - def render_html(self, renderer): + def render_html(self, renderer, response_msg=''): ''' Return XHTML Element tree representation of this Response. Arguments: - renderer : procedure which produces HTML given an ElementTree + - response_msg: a message displayed at the end of the Response ''' # render ourself as a + our content tree = etree.Element('span') @@ -195,6 +207,11 @@ class LoncapaResponse(object): if item_xhtml is not None: tree.append(item_xhtml) tree.tail = self.xml.tail + + # Add a
        for the message at the end of the response + if response_msg: + tree.append(self._render_response_msg_html(response_msg)) + return tree def evaluate_answers(self, student_answers, old_cmap): @@ -205,7 +222,8 @@ class LoncapaResponse(object): Returns the new CorrectMap, with (correctness,msg,hint,hintmode) for each answer_id. ''' new_cmap = self.get_score(student_answers) - self.get_hints(convert_files_to_filenames(student_answers), new_cmap, old_cmap) + self.get_hints(convert_files_to_filenames( + student_answers), new_cmap, old_cmap) # log.debug('new_cmap = %s' % new_cmap) return new_cmap @@ -225,26 +243,27 @@ class LoncapaResponse(object): # hint specified by function? hintfn = hintgroup.get('hintfn') if hintfn: - ''' - Hint is determined by a function defined in the ''' - snippets = [{'snippet': """ + snippets = [{'snippet': r"""
        Suppose that \(I(t)\) rises from \(0\) to \(I_S\) at a time \(t_0 \neq 0\) @@ -862,7 +920,7 @@ class CustomResponse(LoncapaResponse): correct[0] ='incorrect'
        """}, - {'snippet': """ +
        diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py index 89cb5a5ee9..72d82c683b 100644 --- a/common/lib/capa/capa/tests/__init__.py +++ b/common/lib/capa/capa/tests/__init__.py @@ -2,7 +2,7 @@ import fs import fs.osfs import os -from mock import Mock +from mock import Mock, MagicMock import xml.sax.saxutils as saxutils @@ -16,6 +16,11 @@ def tst_render_template(template, context): """ return '
        {0}
        '.format(saxutils.escape(repr(context))) +def calledback_url(dispatch = 'score_update'): + return dispatch + +xqueue_interface = MagicMock() +xqueue_interface.send_to_queue.return_value = (0, 'Success!') test_system = Mock( ajax_url='courses/course_id/modx/a_location', @@ -26,7 +31,7 @@ test_system = Mock( user=Mock(), filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")), debug=True, - xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10}, + xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10}, node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), anonymous_student_id='student' ) diff --git a/common/lib/capa/capa/tests/response_xml_factory.py b/common/lib/capa/capa/tests/response_xml_factory.py new file mode 100644 index 0000000000..aa401b70cd --- /dev/null +++ b/common/lib/capa/capa/tests/response_xml_factory.py @@ -0,0 +1,707 @@ +from lxml import etree +from abc import ABCMeta, abstractmethod + + +class ResponseXMLFactory(object): + """ Abstract base class for capa response XML factories. + Subclasses override create_response_element and + create_input_element to produce XML of particular response types""" + + __metaclass__ = ABCMeta + + @abstractmethod + def create_response_element(self, **kwargs): + """ Subclasses override to return an etree element + representing the capa response XML + (e.g. ). + + The tree should NOT contain any input elements + (such as ) as these will be added later.""" + return None + + @abstractmethod + def create_input_element(self, **kwargs): + """ Subclasses override this to return an etree element + representing the capa input XML (such as )""" + return None + + def build_xml(self, **kwargs): + """ Construct an XML string for a capa response + based on **kwargs. + + **kwargs is a dictionary that will be passed + to create_response_element() and create_input_element(). 
+ See the subclasses below for other keyword arguments + you can specify. + + For all response types, **kwargs can contain: + + *question_text*: The text of the question to display, + wrapped in

        tags. + + *explanation_text*: The detailed explanation that will + be shown if the user answers incorrectly. + + *script*: The embedded Python script (a string) + + *num_responses*: The number of responses to create [DEFAULT: 1] + + *num_inputs*: The number of input elements + to create [DEFAULT: 1] + + Returns a string representation of the XML tree. + """ + + # Retrieve keyward arguments + question_text = kwargs.get('question_text', '') + explanation_text = kwargs.get('explanation_text', '') + script = kwargs.get('script', None) + num_responses = kwargs.get('num_responses', 1) + num_inputs = kwargs.get('num_inputs', 1) + + # The root is + root = etree.Element("problem") + + # Add a script if there is one + if script: + script_element = etree.SubElement(root, "script") + script_element.set("type", "loncapa/python") + script_element.text = str(script) + + # The problem has a child

        with question text + question = etree.SubElement(root, "p") + question.text = question_text + + # Add the response(s) + for i in range(0, int(num_responses)): + response_element = self.create_response_element(**kwargs) + root.append(response_element) + + # Add input elements + for j in range(0, int(num_inputs)): + input_element = self.create_input_element(**kwargs) + if not (None == input_element): + response_element.append(input_element) + + # The problem has an explanation of the solution + if explanation_text: + explanation = etree.SubElement(root, "solution") + explanation_div = etree.SubElement(explanation, "div") + explanation_div.set("class", "detailed-solution") + explanation_div.text = explanation_text + + return etree.tostring(root) + + @staticmethod + def textline_input_xml(**kwargs): + """ Create a XML element + + Uses **kwargs: + + *math_display*: If True, then includes a MathJax display of user input + + *size*: An integer representing the width of the text line + """ + math_display = kwargs.get('math_display', False) + size = kwargs.get('size', None) + + input_element = etree.Element('textline') + + if math_display: + input_element.set('math', '1') + + if size: + input_element.set('size', str(size)) + + return input_element + + @staticmethod + def choicegroup_input_xml(**kwargs): + """ Create a XML element + + Uses **kwargs: + + *choice_type*: Can be "checkbox", "radio", or "multiple" + + *choices*: List of True/False values indicating whether + a particular choice is correct or not. + Users must choose *all* correct options in order + to be marked correct. + DEFAULT: [True] + + *choice_names": List of strings identifying the choices. 
+ If specified, you must ensure that + len(choice_names) == len(choices) + """ + # Names of group elements + group_element_names = {'checkbox': 'checkboxgroup', + 'radio': 'radiogroup', + 'multiple': 'choicegroup'} + + # Retrieve **kwargs + choices = kwargs.get('choices', [True]) + choice_type = kwargs.get('choice_type', 'multiple') + choice_names = kwargs.get('choice_names', [None] * len(choices)) + + # Create the , , or element + assert(choice_type in group_element_names) + group_element = etree.Element(group_element_names[choice_type]) + + # Create the elements + for (correct_val, name) in zip(choices, choice_names): + choice_element = etree.SubElement(group_element, "choice") + choice_element.set("correct", "true" if correct_val else "false") + + # Add a name identifying the choice, if one exists + # For simplicity, we use the same string as both the + # name attribute and the text of the element + if name: + choice_element.text = str(name) + choice_element.set("name", str(name)) + + return group_element + + +class NumericalResponseXMLFactory(ResponseXMLFactory): + """ Factory for producing XML trees """ + + def create_response_element(self, **kwargs): + """ Create a XML element. + Uses **kwarg keys: + + *answer*: The correct answer (e.g. "5") + + *tolerance*: The tolerance within which a response + is considered correct. Can be a decimal (e.g. "0.01") + or percentage (e.g. 
"2%") + """ + + answer = kwargs.get('answer', None) + tolerance = kwargs.get('tolerance', None) + + response_element = etree.Element('numericalresponse') + + if answer: + response_element.set('answer', str(answer)) + + if tolerance: + responseparam_element = etree.SubElement(response_element, 'responseparam') + responseparam_element.set('type', 'tolerance') + responseparam_element.set('default', str(tolerance)) + + return response_element + + def create_input_element(self, **kwargs): + return ResponseXMLFactory.textline_input_xml(**kwargs) + + +class CustomResponseXMLFactory(ResponseXMLFactory): + """ Factory for producing XML trees """ + + def create_response_element(self, **kwargs): + """ Create a XML element. + + Uses **kwargs: + + *cfn*: the Python code to run. Can be inline code, + or the name of a function defined in earlier - - -

        Hints can be provided to students, based on the last response given, as well as the history of responses given. Here is an example of a hint produced by a Formula Response problem.

        - -

        -What is the equation of the line which passess through ($x1,$y1) and -($x2,$y2)?

        - -

        The correct answer is $answer. A common error is to invert the equation for the slope. Enter -$wrongans to see a hint.

        - - - - - - y = - - - - - You have inverted the slope in the question. - - - - - diff --git a/common/lib/capa/capa/tests/test_files/imageresponse.xml b/common/lib/capa/capa/tests/test_files/imageresponse.xml deleted file mode 100644 index 41c9f01218..0000000000 --- a/common/lib/capa/capa/tests/test_files/imageresponse.xml +++ /dev/null @@ -1,40 +0,0 @@ - -

        -Two skiers are on frictionless black diamond ski slopes. -Hello

        - - - -Click on the image where the top skier will stop momentarily if the top skier starts from rest. - -Click on the image where the lower skier will stop momentarily if the lower skier starts from rest. - -Click on either of the two positions as discussed previously. - -Click on either of the two positions as discussed previously. - -Click on either of the two positions as discussed previously. - -

        Use conservation of energy.

        -
        -
        - - - - - - - -Click on either of the two positions as discussed previously. - -Click on either of the two positions as discussed previously. - - -Click on either of the two positions as discussed previously. - -

        Use conservation of energy.

        -
        -
        - - -
        diff --git a/common/lib/capa/capa/tests/test_files/javascriptresponse.xml b/common/lib/capa/capa/tests/test_files/javascriptresponse.xml deleted file mode 100644 index 439866e62c..0000000000 --- a/common/lib/capa/capa/tests/test_files/javascriptresponse.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js b/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js deleted file mode 100644 index 6670c6a09a..0000000000 --- a/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js +++ /dev/null @@ -1,50 +0,0 @@ -// Generated by CoffeeScript 1.3.3 -(function() { - var MinimaxProblemDisplay, root, - __hasProp = {}.hasOwnProperty, - __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }; - - MinimaxProblemDisplay = (function(_super) { - - __extends(MinimaxProblemDisplay, _super); - - function MinimaxProblemDisplay(state, submission, evaluation, container, submissionField, parameters) { - this.state = state; - this.submission = submission; - this.evaluation = evaluation; - this.container = container; - this.submissionField = submissionField; - this.parameters = parameters != null ? 
parameters : {}; - MinimaxProblemDisplay.__super__.constructor.call(this, this.state, this.submission, this.evaluation, this.container, this.submissionField, this.parameters); - } - - MinimaxProblemDisplay.prototype.render = function() {}; - - MinimaxProblemDisplay.prototype.createSubmission = function() { - var id, value, _ref, _results; - this.newSubmission = {}; - if (this.submission != null) { - _ref = this.submission; - _results = []; - for (id in _ref) { - value = _ref[id]; - _results.push(this.newSubmission[id] = value); - } - return _results; - } - }; - - MinimaxProblemDisplay.prototype.getCurrentSubmission = function() { - return this.newSubmission; - }; - - return MinimaxProblemDisplay; - - })(XProblemDisplay); - - root = typeof exports !== "undefined" && exports !== null ? exports : this; - - root.TestProblemDisplay = TestProblemDisplay; - -}).call(this); -; diff --git a/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js b/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js deleted file mode 100644 index 6670c6a09a..0000000000 --- a/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js +++ /dev/null @@ -1,50 +0,0 @@ -// Generated by CoffeeScript 1.3.3 -(function() { - var MinimaxProblemDisplay, root, - __hasProp = {}.hasOwnProperty, - __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }; - - MinimaxProblemDisplay = (function(_super) { - - __extends(MinimaxProblemDisplay, _super); - - function MinimaxProblemDisplay(state, submission, evaluation, container, submissionField, parameters) { - this.state = state; - this.submission = submission; - this.evaluation = evaluation; - this.container = container; - this.submissionField = submissionField; - this.parameters = 
parameters != null ? parameters : {}; - MinimaxProblemDisplay.__super__.constructor.call(this, this.state, this.submission, this.evaluation, this.container, this.submissionField, this.parameters); - } - - MinimaxProblemDisplay.prototype.render = function() {}; - - MinimaxProblemDisplay.prototype.createSubmission = function() { - var id, value, _ref, _results; - this.newSubmission = {}; - if (this.submission != null) { - _ref = this.submission; - _results = []; - for (id in _ref) { - value = _ref[id]; - _results.push(this.newSubmission[id] = value); - } - return _results; - } - }; - - MinimaxProblemDisplay.prototype.getCurrentSubmission = function() { - return this.newSubmission; - }; - - return MinimaxProblemDisplay; - - })(XProblemDisplay); - - root = typeof exports !== "undefined" && exports !== null ? exports : this; - - root.TestProblemDisplay = TestProblemDisplay; - -}).call(this); -; diff --git a/common/lib/capa/capa/tests/test_files/multi_bare.xml b/common/lib/capa/capa/tests/test_files/multi_bare.xml deleted file mode 100644 index 20bc8f853d..0000000000 --- a/common/lib/capa/capa/tests/test_files/multi_bare.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - This is foil One. - - - This is foil Two. - - - This is foil Three. - - - This is foil Four. - - - This is foil Five. - - - - diff --git a/common/lib/capa/capa/tests/test_files/multichoice.xml b/common/lib/capa/capa/tests/test_files/multichoice.xml deleted file mode 100644 index 60bf02ec59..0000000000 --- a/common/lib/capa/capa/tests/test_files/multichoice.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - This is foil One. - - - This is foil Two. - - - This is foil Three. - - - This is foil Four. - - - This is foil Five. - - - - diff --git a/common/lib/capa/capa/tests/test_files/optionresponse.xml b/common/lib/capa/capa/tests/test_files/optionresponse.xml deleted file mode 100644 index 99a17e8fac..0000000000 --- a/common/lib/capa/capa/tests/test_files/optionresponse.xml +++ /dev/null @@ -1,63 +0,0 @@ - - -

        -Why do bicycles benefit from having larger wheels when going up a bump as shown in the picture?
        -Assume that for both bicycles:
        -1.) The tires have equal air pressure.
        -2.) The bicycles never leave the contact with the bump.
        -3.) The bicycles have the same mass. The bicycle tires (regardless of size) have the same mass.
        -

        -
        - -
          -
        • - -

          The bicycles with larger wheels have more time to go over the bump. This decreases the magnitude of the force needed to lift the bicycle.

          -
          - - -
        • -
        • - -

          The bicycles with larger wheels always have a smaller vertical displacement regardless of speed.

          -
          - - -
        • -
        • - -

          The bicycles with larger wheels experience a force backward with less magnitude for the same amount of time.

          -
          - - -
        • -
        • - -

          The bicycles with larger wheels experience a force backward with less magnitude for a greater amount of time.

          -
          - - -
        • -
        • - -

          The bicycles with larger wheels have more kinetic energy turned into gravitational potential energy.

          -
          - - -
        • -
        • - -

          The bicycles with larger wheels have more rotational kinetic energy, so the horizontal velocity of the biker changes less.

          -
          - - -
        • -
        - - -
        -
        -
        -
        -
        -
        diff --git a/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml deleted file mode 100644 index 86efdf0f18..0000000000 --- a/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml +++ /dev/null @@ -1,25 +0,0 @@ - -

        Example: String Response Problem

        -
        -
        - - Which US state has Lansing as its capital? - - - - - - - - - The state capital of Wisconsin is Madison. - - - The state capital of Minnesota is St. Paul. - - - The state you are looking for is also known as the 'Great Lakes State' - - - -
        diff --git a/common/lib/capa/capa/tests/test_files/symbolicresponse.xml b/common/lib/capa/capa/tests/test_files/symbolicresponse.xml deleted file mode 100644 index 4dc2bc9d7b..0000000000 --- a/common/lib/capa/capa/tests/test_files/symbolicresponse.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -

        Example: Symbolic Math Response Problem

        - -

        -A symbolic math response problem presents one or more symbolic math -input fields for input. Correctness of input is evaluated based on -the symbolic properties of the expression entered. The student enters -text, but sees a proper symbolic rendition of the entered formula, in -real time, next to the input box. -

        - -

        This is a correct answer which may be entered below:

        -

        cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]

        - - - Compute [mathjax] U = \exp\left( i \theta \left[ \begin{matrix} 0 & 1 \\ 1 & 0 \end{matrix} \right] \right) [/mathjax] - and give the resulting \(2 \times 2\) matrix.
        - Your input should be typed in as a list of lists, eg [[1,2],[3,4]].
        - [mathjax]U=[/mathjax] - - -
        -
        - -
        -
        diff --git a/common/lib/capa/capa/tests/test_files/truefalse.xml b/common/lib/capa/capa/tests/test_files/truefalse.xml deleted file mode 100644 index 60018f7a2d..0000000000 --- a/common/lib/capa/capa/tests/test_files/truefalse.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - This is foil One. - - - This is foil Two. - - - This is foil Three. - - - This is foil Four. - - - This is foil Five. - - - - diff --git a/common/lib/capa/capa/tests/test_html_render.py b/common/lib/capa/capa/tests/test_html_render.py new file mode 100644 index 0000000000..e99308587e --- /dev/null +++ b/common/lib/capa/capa/tests/test_html_render.py @@ -0,0 +1,233 @@ +import unittest +from lxml import etree +import os +import textwrap +import json + +import mock + +from capa.capa_problem import LoncapaProblem +from .response_xml_factory import StringResponseXMLFactory, CustomResponseXMLFactory +from . import test_system + +class CapaHtmlRenderTest(unittest.TestCase): + + def test_blank_problem(self): + """ + It's important that blank problems don't break, since that's + what you start with in studio. 
+ """ + xml_str = " " + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + # expect that we made it here without blowing up + + def test_include_html(self): + # Create a test file to include + self._create_test_file('test_include.xml', + 'Test include') + + # Generate some XML with an + xml_str = textwrap.dedent(""" + + + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + # Expect that the include file was embedded in the problem + test_element = rendered_html.find("test") + self.assertEqual(test_element.tag, "test") + self.assertEqual(test_element.text, "Test include") + + + + + def test_process_outtext(self): + # Generate some XML with and + xml_str = textwrap.dedent(""" + + Test text + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + # Expect that the and + # were converted to tags + span_element = rendered_html.find('span') + self.assertEqual(span_element.text, 'Test text') + + def test_render_script(self): + # Generate some XML with a + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + # Expect that the script element has been removed from the rendered HTML + script_element = rendered_html.find('script') + self.assertEqual(None, script_element) + + def test_render_javascript(self): + # Generate some XML with a + + """) + + # Create the problem + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Render the HTML + rendered_html = etree.XML(problem.get_html()) + + + # expect the javascript is still present in the rendered html + self.assertTrue("" in etree.tostring(rendered_html)) + + + def 
test_render_response_xml(self): + # Generate some XML for a string response + kwargs = {'question_text': "Test question", + 'explanation_text': "Test explanation", + 'answer': 'Test answer', + 'hints': [('test prompt', 'test_hint', 'test hint text')]} + xml_str = StringResponseXMLFactory().build_xml(**kwargs) + + # Mock out the template renderer + test_system.render_template = mock.Mock() + test_system.render_template.return_value = "
        Input Template Render
        " + + # Create the problem and render the HTML + problem = LoncapaProblem(xml_str, '1', system=test_system) + rendered_html = etree.XML(problem.get_html()) + + # Expect problem has been turned into a
        + self.assertEqual(rendered_html.tag, "div") + + # Expect question text is in a

        child + question_element = rendered_html.find("p") + self.assertEqual(question_element.text, "Test question") + + # Expect that the response has been turned into a + response_element = rendered_html.find("span") + self.assertEqual(response_element.tag, "span") + + # Expect that the response + # that contains a

        for the textline + textline_element = response_element.find("div") + self.assertEqual(textline_element.text, 'Input Template Render') + + # Expect a child
        for the solution + # with the rendered template + solution_element = rendered_html.find("div") + self.assertEqual(solution_element.text, 'Input Template Render') + + # Expect that the template renderer was called with the correct + # arguments, once for the textline input and once for + # the solution + expected_textline_context = {'status': 'unsubmitted', + 'value': '', + 'preprocessor': None, + 'msg': '', + 'inline': False, + 'hidden': False, + 'do_math': False, + 'id': '1_2_1', + 'size': None} + + expected_solution_context = {'id': '1_solution_1'} + + expected_calls = [mock.call('textline.html', expected_textline_context), + mock.call('solutionspan.html', expected_solution_context), + mock.call('textline.html', expected_textline_context), + mock.call('solutionspan.html', expected_solution_context)] + + self.assertEqual(test_system.render_template.call_args_list, + expected_calls) + + + def test_render_response_with_overall_msg(self): + # CustomResponse script that sets an overall_message + script=textwrap.dedent(""" + def check_func(*args): + msg = '

        Test message 1

        Test message 2

        ' + return {'overall_message': msg, + 'input_list': [ {'ok': True, 'msg': '' } ] } + """) + + # Generate some XML for a CustomResponse + kwargs = {'script':script, 'cfn': 'check_func'} + xml_str = CustomResponseXMLFactory().build_xml(**kwargs) + + # Create the problem and render the html + problem = LoncapaProblem(xml_str, '1', system=test_system) + + # Grade the problem + correctmap = problem.grade_answers({'1_2_1': 'test'}) + + # Render the html + rendered_html = etree.XML(problem.get_html()) + + + # Expect that there is a
        within the response
        + # with css class response_message + msg_div_element = rendered_html.find(".//div[@class='response_message']") + self.assertEqual(msg_div_element.tag, "div") + self.assertEqual(msg_div_element.get('class'), "response_message") + + # Expect that the
        contains our message (as part of the XML tree) + msg_p_elements = msg_div_element.findall('p') + self.assertEqual(msg_p_elements[0].tag, "p") + self.assertEqual(msg_p_elements[0].text, "Test message 1") + + self.assertEqual(msg_p_elements[1].tag, "p") + self.assertEqual(msg_p_elements[1].text, "Test message 2") + + + def test_substitute_python_vars(self): + # Generate some XML with Python variables defined in a script + # and used later as attributes + xml_str = textwrap.dedent(""" + + + + + """) + + # Create the problem and render the HTML + problem = LoncapaProblem(xml_str, '1', system=test_system) + rendered_html = etree.XML(problem.get_html()) + + # Expect that the variable $test has been replaced with its value + span_element = rendered_html.find('span') + self.assertEqual(span_element.get('attr'), "TEST") + + def _create_test_file(self, path, content_str): + test_fp = test_system.filestore.open(path, "w") + test_fp.write(content_str) + test_fp.close() + + self.addCleanup(lambda: os.remove(test_fp.name)) diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 4a5ea5c429..250cedd549 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -23,6 +23,7 @@ import xml.sax.saxutils as saxutils from . import test_system from capa import inputtypes +from mock import ANY # just a handy shortcut lookup_tag = inputtypes.registry.get_class_for_tag @@ -102,6 +103,8 @@ class ChoiceGroupTest(unittest.TestCase): 'choices': [('foil1', 'This is foil One.'), ('foil2', 'This is foil Two.'), ('foil3', 'This is foil Three.'), ], + 'show_correctness': 'always', + 'submitted_message': 'Answer received.', 'name_array_suffix': expected_suffix, # what is this for?? 
} @@ -298,6 +301,98 @@ class CodeInputTest(unittest.TestCase): self.assertEqual(context, expected) +class MatlabTest(unittest.TestCase): + ''' + Test Matlab input types + ''' + def setUp(self): + self.rows = '10' + self.cols = '80' + self.tabsize = '4' + self.mode = "" + self.payload = "payload" + self.linenumbers = 'true' + self.xml = """ + + {payload} + + """.format(r = self.rows, + c = self.cols, + tabsize = self.tabsize, + m = self.mode, + payload = self.payload, + ln = self.linenumbers) + elt = etree.fromstring(self.xml) + state = {'value': 'print "good evening"', + 'status': 'incomplete', + 'feedback': {'message': '3'}, } + + self.input_class = lookup_tag('matlabinput') + self.the_input = self.input_class(test_system, elt, state) + + + def test_rendering(self): + context = self.the_input._get_render_context() + + expected = {'id': 'prob_1_2', + 'value': 'print "good evening"', + 'status': 'queued', + 'msg': self.input_class.submitted_msg, + 'mode': self.mode, + 'rows': self.rows, + 'cols': self.cols, + 'queue_msg': '', + 'linenumbers': 'true', + 'hidden': '', + 'tabsize': int(self.tabsize), + 'queue_len': '3', + } + + self.assertEqual(context, expected) + + + def test_rendering_with_state(self): + state = {'value': 'print "good evening"', + 'status': 'incomplete', + 'input_state': {'queue_msg': 'message'}, + 'feedback': {'message': '3'}, } + elt = etree.fromstring(self.xml) + + input_class = lookup_tag('matlabinput') + the_input = self.input_class(test_system, elt, state) + context = the_input._get_render_context() + + expected = {'id': 'prob_1_2', + 'value': 'print "good evening"', + 'status': 'queued', + 'msg': self.input_class.submitted_msg, + 'mode': self.mode, + 'rows': self.rows, + 'cols': self.cols, + 'queue_msg': 'message', + 'linenumbers': 'true', + 'hidden': '', + 'tabsize': int(self.tabsize), + 'queue_len': '3', + } + + self.assertEqual(context, expected) + + def test_plot_data(self): + get = {'submission': 'x = 1234;'} + response = 
self.the_input.handle_ajax("plot", get) + + test_system.xqueue['interface'].send_to_queue.assert_called_with(header=ANY, body=ANY) + + self.assertTrue(response['success']) + self.assertTrue(self.the_input.input_state['queuekey'] is not None) + self.assertEqual(self.the_input.input_state['queuestate'], 'queued') + + + class SchematicTest(unittest.TestCase): ''' @@ -482,27 +577,43 @@ class ChemicalEquationTest(unittest.TestCase): ''' Check that chemical equation inputs work. ''' - - def test_rendering(self): - size = "42" - xml_str = """""".format(size=size) + def setUp(self): + self.size = "42" + xml_str = """""".format(size=self.size) element = etree.fromstring(xml_str) state = {'value': 'H2OYeah', } - the_input = lookup_tag('chemicalequationinput')(test_system, element, state) + self.the_input = lookup_tag('chemicalequationinput')(test_system, element, state) - context = the_input._get_render_context() + + def test_rendering(self): + ''' Verify that the render context matches the expected render context''' + context = self.the_input._get_render_context() expected = {'id': 'prob_1_2', 'value': 'H2OYeah', 'status': 'unanswered', 'msg': '', - 'size': size, + 'size': self.size, 'previewer': '/static/js/capa/chemical_equation_preview.js', } self.assertEqual(context, expected) + + def test_chemcalc_ajax_sucess(self): + ''' Verify that using the correct dispatch and valid data produces a valid response''' + + data = {'formula': "H"} + response = self.the_input.handle_ajax("preview_chemcalc", data) + + self.assertTrue('preview' in response) + self.assertNotEqual(response['preview'], '') + self.assertEqual(response['error'], "") + + + + class DragAndDropTest(unittest.TestCase): ''' @@ -539,14 +650,14 @@ class DragAndDropTest(unittest.TestCase): "target_outline": "false", "base_image": "/static/images/about_1.png", "draggables": [ -{"can_reuse": "", "label": "Label 1", "id": "1", "icon": ""}, -{"can_reuse": "", "label": "cc", "id": "name_with_icon", "icon": 
"/static/images/cc.jpg", }, -{"can_reuse": "", "label": "arrow-left", "id": "with_icon", "icon": "/static/images/arrow-left.png", "can_reuse": ""}, -{"can_reuse": "", "label": "Label2", "id": "5", "icon": "", "can_reuse": ""}, -{"can_reuse": "", "label": "Mute", "id": "2", "icon": "/static/images/mute.png", "can_reuse": ""}, -{"can_reuse": "", "label": "spinner", "id": "name_label_icon3", "icon": "/static/images/spinner.gif", "can_reuse": ""}, -{"can_reuse": "", "label": "Star", "id": "name4", "icon": "/static/images/volume.png", "can_reuse": ""}, -{"can_reuse": "", "label": "Label3", "id": "7", "icon": "", "can_reuse": ""}], +{"can_reuse": "", "label": "Label 1", "id": "1", "icon": "", "target_fields": []}, +{"can_reuse": "", "label": "cc", "id": "name_with_icon", "icon": "/static/images/cc.jpg", "target_fields": []}, +{"can_reuse": "", "label": "arrow-left", "id": "with_icon", "icon": "/static/images/arrow-left.png", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Label2", "id": "5", "icon": "", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Mute", "id": "2", "icon": "/static/images/mute.png", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "spinner", "id": "name_label_icon3", "icon": "/static/images/spinner.gif", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Star", "id": "name4", "icon": "/static/images/volume.png", "can_reuse": "", "target_fields": []}, +{"can_reuse": "", "label": "Label3", "id": "7", "icon": "", "can_reuse": "", "target_fields": []}], "one_per_target": "True", "targets": [ {"y": "90", "x": "210", "id": "t1", "w": "90", "h": "90"}, @@ -570,3 +681,65 @@ class DragAndDropTest(unittest.TestCase): context.pop('drag_and_drop_json') expected.pop('drag_and_drop_json') self.assertEqual(context, expected) + + +class AnnotationInputTest(unittest.TestCase): + ''' + Make sure option inputs work + ''' + def test_rendering(self): + xml_str = ''' + + foo + bar + my comment + 
type a commentary + select a tag + + + + + + +''' + element = etree.fromstring(xml_str) + + value = {"comment": "blah blah", "options": [1]} + json_value = json.dumps(value) + state = { + 'value': json_value, + 'id': 'annotation_input', + 'status': 'answered' + } + + tag = 'annotationinput' + + the_input = lookup_tag(tag)(test_system, element, state) + + context = the_input._get_render_context() + + expected = { + 'id': 'annotation_input', + 'value': value, + 'status': 'answered', + 'msg': '', + 'title': 'foo', + 'text': 'bar', + 'comment': 'my comment', + 'comment_prompt': 'type a commentary', + 'tag_prompt': 'select a tag', + 'options': [ + {'id': 0, 'description': 'x', 'choice': 'correct'}, + {'id': 1, 'description': 'y', 'choice': 'incorrect'}, + {'id': 2, 'description': 'z', 'choice': 'partially-correct'} + ], + 'value': json_value, + 'options_value': value['options'], + 'has_options_value': len(value['options']) > 0, + 'comment_value': value['comment'], + 'debug': False, + 'return_to_annotation': True + } + + self.maxDiff = None + self.assertDictEqual(context, expected) diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py index 18da338b91..bf64d3cc69 100644 --- a/common/lib/capa/capa/tests/test_responsetypes.py +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -8,101 +8,176 @@ import json from nose.plugins.skip import SkipTest import os import unittest +import textwrap from . 
import test_system import capa.capa_problem as lcp +from capa.responsetypes import LoncapaProblemError, \ + StudentInputError, ResponseError from capa.correctmap import CorrectMap from capa.util import convert_files_to_filenames from capa.xqueue_interface import dateformat -class MultiChoiceTest(unittest.TestCase): - def test_MC_grade(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_foil3'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_foil2'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') +class ResponseTest(unittest.TestCase): + """ Base class for tests of capa responses.""" - def test_MC_bare_grades(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_1'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + xml_factory_class = None - def test_TF_grade(self): - truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" - test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': ['choice_foil1']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} - 
self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + def setUp(self): + if self.xml_factory_class: + self.xml_factory = self.xml_factory_class() + + def build_problem(self, **kwargs): + xml = self.xml_factory.build_xml(**kwargs) + return lcp.LoncapaProblem(xml, '1', system=test_system) + + def assert_grade(self, problem, submission, expected_correctness): + input_dict = {'1_2_1': submission} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness) + + def assert_answer_format(self, problem): + answers = problem.get_question_answers() + self.assertTrue(answers['1_2_1'] is not None) + + def assert_multiple_grade(self, problem, correct_answers, incorrect_answers): + for input_str in correct_answers: + result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') + self.assertEqual(result, 'correct', + msg="%s should be marked correct" % str(input_str)) + + for input_str in incorrect_answers: + result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') + self.assertEqual(result, 'incorrect', + msg="%s should be marked incorrect" % str(input_str)) -class ImageResponseTest(unittest.TestCase): - def test_ir_grade(self): - imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" - test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system) - # testing regions only - correct_answers = { - #regions - '1_2_1': '(490,11)-(556,98)', - '1_2_2': '(242,202)-(296,276)', - '1_2_3': '(490,11)-(556,98);(242,202)-(296,276)', - '1_2_4': 
'(490,11)-(556,98);(242,202)-(296,276)', - '1_2_5': '(490,11)-(556,98);(242,202)-(296,276)', - #testing regions and rectanges - '1_3_1': 'rectangle="(490,11)-(556,98)" \ - regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_2': 'rectangle="(490,11)-(556,98)" \ - regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_3': 'regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_4': 'regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', - '1_3_5': 'regions="[[[10,10], [20,10], [20, 30]]]"', - '1_3_6': 'regions="[[10,10], [30,30], [15, 15]]"', - '1_3_7': 'regions="[[10,10], [30,30], [10, 30], [30, 10]]"', - } - test_answers = { - '1_2_1': '[500,20]', - '1_2_2': '[250,300]', - '1_2_3': '[500,20]', - '1_2_4': '[250,250]', - '1_2_5': '[10,10]', +class MultiChoiceResponseTest(ResponseTest): + from response_xml_factory import MultipleChoiceResponseXMLFactory + xml_factory_class = MultipleChoiceResponseXMLFactory - '1_3_1': '[500,20]', - '1_3_2': '[15,15]', - '1_3_3': '[500,20]', - '1_3_4': '[115,115]', - '1_3_5': '[15,15]', - '1_3_6': '[20,20]', - '1_3_7': '[20,15]', - } + def test_multiple_choice_grade(self): + problem = self.build_problem(choices=[False, True, False]) - # regions - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_3'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_4'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_5'), 'incorrect') + # Ensure that we get the expected grades + self.assert_grade(problem, 'choice_0', 'incorrect') + self.assert_grade(problem, 'choice_1', 'correct') + self.assert_grade(problem, 'choice_2', 'incorrect') - # regions 
and rectangles - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_2'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_3'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_4'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_5'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_6'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_7'), 'correct') + def test_named_multiple_choice_grade(self): + problem = self.build_problem(choices=[False, True, False], + choice_names=["foil_1", "foil_2", "foil_3"]) + + # Ensure that we get the expected grades + self.assert_grade(problem, 'choice_foil_1', 'incorrect') + self.assert_grade(problem, 'choice_foil_2', 'correct') + self.assert_grade(problem, 'choice_foil_3', 'incorrect') + + +class TrueFalseResponseTest(ResponseTest): + from response_xml_factory import TrueFalseResponseXMLFactory + xml_factory_class = TrueFalseResponseXMLFactory + + def test_true_false_grade(self): + problem = self.build_problem(choices=[False, True, True]) + + # Check the results + # Mark correct if and only if ALL (and only) correct choices selected + self.assert_grade(problem, 'choice_0', 'incorrect') + self.assert_grade(problem, 'choice_1', 'incorrect') + self.assert_grade(problem, 'choice_2', 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2'], 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect') + self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct') + + # Invalid choices should be marked incorrect (we have no choice 3) + self.assert_grade(problem, 'choice_3', 'incorrect') + 
self.assert_grade(problem, 'not_a_choice', 'incorrect') + + def test_named_true_false_grade(self): + problem = self.build_problem(choices=[False, True, True], + choice_names=['foil_1', 'foil_2', 'foil_3']) + + # Check the results + # Mark correct if and only if ALL (and only) correct chocies selected + self.assert_grade(problem, 'choice_foil_1', 'incorrect') + self.assert_grade(problem, 'choice_foil_2', 'incorrect') + self.assert_grade(problem, 'choice_foil_3', 'incorrect') + self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2', 'choice_foil_3'], 'incorrect') + self.assert_grade(problem, ['choice_foil_1', 'choice_foil_3'], 'incorrect') + self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2'], 'incorrect') + self.assert_grade(problem, ['choice_foil_2', 'choice_foil_3'], 'correct') + + # Invalid choices should be marked incorrect + self.assert_grade(problem, 'choice_foil_4', 'incorrect') + self.assert_grade(problem, 'not_a_choice', 'incorrect') + + +class ImageResponseTest(ResponseTest): + from response_xml_factory import ImageResponseXMLFactory + xml_factory_class = ImageResponseXMLFactory + + def test_rectangle_grade(self): + # Define a rectangle with corners (10,10) and (20,20) + problem = self.build_problem(rectangle="(10,10)-(20,20)") + + # Anything inside the rectangle (and along the borders) is correct + # Everything else is incorrect + correct_inputs = ["[12,19]", "[10,10]", "[20,20]", + "[10,15]", "[20,15]", "[15,10]", "[15,20]"] + incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_multiple_rectangles_grade(self): + # Define two rectangles + rectangle_str = "(10,10)-(20,20);(100,100)-(200,200)" + + # Expect that only points inside the rectangles are marked correct + problem = self.build_problem(rectangle=rectangle_str) + correct_inputs = ["[12,19]", "[120, 130]"] + incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]", + "[50,55]", "[300, 14]", 
"[120, 400]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_region_grade(self): + # Define a triangular region with corners (0,0), (5,10), and (0, 10) + region_str = "[ [1,1], [5,10], [0,10] ]" + + # Expect that only points inside the triangle are marked correct + problem = self.build_problem(regions=region_str) + correct_inputs = ["[2,4]", "[1,3]"] + incorrect_inputs = ["[0,0]", "[3,5]", "[5,15]", "[30, 12]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_multiple_regions_grade(self): + # Define multiple regions that the user can select + region_str = "[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]" + + # Expect that only points inside the regions are marked correct + problem = self.build_problem(regions=region_str) + correct_inputs = ["[15,12]", "[110,112]"] + incorrect_inputs = ["[0,0]", "[600,300]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_region_and_rectangle_grade(self): + rectangle_str = "(100,100)-(200,200)" + region_str = "[[10,10], [20,10], [20, 30]]" + + # Expect that only points inside the rectangle or region are marked correct + problem = self.build_problem(regions=region_str, rectangle=rectangle_str) + correct_inputs = ["[13,12]", "[110,112]"] + incorrect_inputs = ["[0,0]", "[600,300]"] + self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs) + + def test_show_answer(self): + rectangle_str = "(100,100)-(200,200)" + region_str = "[[10,10], [20,10], [20, 30]]" + + problem = self.build_problem(regions=region_str, rectangle=rectangle_str) + self.assert_answer_format(problem) class SymbolicResponseTest(unittest.TestCase): @@ -112,143 +187,246 @@ class SymbolicResponseTest(unittest.TestCase): test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system) correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', '1_2_1_dynamath': ''' - - - - cos - - ( 
- θ - ) - - - - - [ - - - - 1 - - - 0 - - - - - 0 - - - 1 - - - - ] - - + - i - - - sin - - ( - θ - ) - - - - - [ - - - - 0 - - - 1 - - - - - 1 - - - 0 - - - - ] - - - -''', + + + + cos + + ( + θ + ) + + + + + [ + + + + 1 + + + 0 + + + + + 0 + + + 1 + + + + ] + + + + i + + + sin + + ( + θ + ) + + + + + [ + + + + 0 + + + 1 + + + + + 1 + + + 0 + + + + ] + + + + ''', } wrong_answers = {'1_2_1': '2', '1_2_1_dynamath': ''' - - 2 - -''', - } + + 2 + + ''', + } self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') -class OptionResponseTest(unittest.TestCase): - ''' - Run this with +class OptionResponseTest(ResponseTest): + from response_xml_factory import OptionResponseXMLFactory + xml_factory_class = OptionResponseXMLFactory - python manage.py test courseware.OptionResponseTest - ''' - def test_or_grade(self): - optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml" - test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'True', - '1_2_2': 'False'} - test_answers = {'1_2_1': 'True', - '1_2_2': 'True', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + def test_grade(self): + problem = self.build_problem(options=["first", "second", "third"], + correct_option="second") + + # Assert that we get the expected grades + self.assert_grade(problem, "first", "incorrect") + self.assert_grade(problem, "second", "correct") + self.assert_grade(problem, "third", "incorrect") + + # Options not in the list should be marked incorrect + self.assert_grade(problem, "invalid_option", "incorrect") -class FormulaResponseWithHintTest(unittest.TestCase): - ''' - Test Formula response problem with a hint - This problem also uses 
calc. - ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': '2.5*x-5.0'} - test_answers = {'1_2_1': '0.4*x-5.0'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) +class FormulaResponseTest(ResponseTest): + from response_xml_factory import FormulaResponseXMLFactory + xml_factory_class = FormulaResponseXMLFactory + + def test_grade(self): + # Sample variables x and y in the range [-10, 10] + sample_dict = {'x': (-10, 10), 'y': (-10, 10)} + + # The expected solution is numerically equivalent to x+2y + problem = self.build_problem(sample_dict=sample_dict, + num_samples=10, + tolerance=0.01, + answer="x+2*y") + + # Expect an equivalent formula to be marked correct + # 2x - x + y + y = x + 2y + input_formula = "2*x - x + y + y" + self.assert_grade(problem, input_formula, "correct") + + # Expect an incorrect formula to be marked incorrect + # x + y != x + 2y + input_formula = "x + y" + self.assert_grade(problem, input_formula, "incorrect") + + def test_hint(self): + # Sample variables x and y in the range [-10, 10] + sample_dict = {'x': (-10, 10), 'y': (-10, 10)} + + # Give a hint if the user leaves off the coefficient + # or leaves out x + hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'), + ('2*y', 'missing_x', 'Try including the variable x')] + + # The expected solution is numerically equivalent to x+2y + problem = self.build_problem(sample_dict=sample_dict, + num_samples=10, + tolerance=0.01, + answer="x+2*y", + hints=hints) + + # Expect to receive a hint if we add an extra y + input_dict = {'1_2_1': "x + 2*y + y"} + correct_map = 
problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + 'Check the coefficient of y') + + # Expect to receive a hint if we leave out x + input_dict = {'1_2_1': "2*y"} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + 'Try including the variable x') + + def test_script(self): + # Calculate the answer using a script + script = "calculated_ans = 'x+x'" + + # Sample x in the range [-10,10] + sample_dict = {'x': (-10, 10)} + + # The expected solution is numerically equivalent to 2*x + problem = self.build_problem(sample_dict=sample_dict, + num_samples=10, + tolerance=0.01, + answer="$calculated_ans", + script=script) + + # Expect that the inputs are graded correctly + self.assert_grade(problem, '2*x', 'correct') + self.assert_grade(problem, '3*x', 'incorrect') -class StringResponseWithHintTest(unittest.TestCase): - ''' - Test String response problem with a hint - ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'Michigan'} - test_answers = {'1_2_1': 'Minnesota'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('St. 
Paul' in cmap.get_hint('1_2_1')) +class StringResponseTest(ResponseTest): + from response_xml_factory import StringResponseXMLFactory + xml_factory_class = StringResponseXMLFactory + + def test_case_sensitive(self): + problem = self.build_problem(answer="Second", case_sensitive=True) + + # Exact string should be correct + self.assert_grade(problem, "Second", "correct") + + # Other strings and the lowercase version of the string are incorrect + self.assert_grade(problem, "Other String", "incorrect") + self.assert_grade(problem, "second", "incorrect") + + def test_case_insensitive(self): + problem = self.build_problem(answer="Second", case_sensitive=False) + + # Both versions of the string should be allowed, regardless + # of capitalization + self.assert_grade(problem, "Second", "correct") + self.assert_grade(problem, "second", "correct") + + # Other strings are not allowed + self.assert_grade(problem, "Other String", "incorrect") + + def test_hints(self): + hints = [("wisconsin", "wisc", "The state capital of Wisconsin is Madison"), + ("minnesota", "minn", "The state capital of Minnesota is St. Paul")] + + problem = self.build_problem(answer="Michigan", + case_sensitive=False, + hints=hints) + + # We should get a hint for Wisconsin + input_dict = {'1_2_1': 'Wisconsin'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + "The state capital of Wisconsin is Madison") + + # We should get a hint for Minnesota + input_dict = {'1_2_1': 'Minnesota'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), + "The state capital of Minnesota is St. 
Paul") + + # We should NOT get a hint for Michigan (the correct answer) + input_dict = {'1_2_1': 'Michigan'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), "") + + # We should NOT get a hint for any other string + input_dict = {'1_2_1': 'California'} + correct_map = problem.grade_answers(input_dict) + self.assertEquals(correct_map.get_hint('1_2_1'), "") -class CodeResponseTest(unittest.TestCase): - ''' - Test CodeResponse - TODO: Add tests for external grader messages - ''' +class CodeResponseTest(ResponseTest): + from response_xml_factory import CodeResponseXMLFactory + xml_factory_class = CodeResponseXMLFactory + + def setUp(self): + super(CodeResponseTest, self).setUp() + + grader_payload = json.dumps({"grader": "ps04/grade_square.py"}) + self.problem = self.build_problem(initial_display="def square(x):", + answer_display="answer", + grader_payload=grader_payload, + num_responses=2) + @staticmethod def make_queuestate(key, time): timestr = datetime.strftime(time, dateformat) @@ -258,171 +436,610 @@ class CodeResponseTest(unittest.TestCase): """ Simple test of whether LoncapaProblem knows when it's been queued """ - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) - answer_ids = sorted(test_lcp.get_question_answers()) + answer_ids = sorted(self.problem.get_question_answers()) - # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) + # CodeResponse requires internal CorrectMap state. 
Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + self.problem.correct_map.update(cmap) - self.assertEquals(test_lcp.is_queued(), False) + self.assertEquals(self.problem.is_queued(), False) - # Now we queue the LCP - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) - cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.is_queued(), True) + # Now we queue the LCP + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) + cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + self.problem.correct_map.update(cmap) + self.assertEquals(self.problem.is_queued(), True) def test_update_score(self): ''' Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + answer_ids = sorted(self.problem.get_question_answers()) - answer_ids = sorted(test_lcp.get_question_answers()) + # CodeResponse requires internal CorrectMap state. Build it now in the queued state + old_cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now()) + old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state - old_cmap = CorrectMap() + # Message format common to external graders + grader_msg = 'MESSAGE' # Must be valid XML + correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg}) + incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg}) + + xserver_msgs = {'correct': correct_score_msg, + 'incorrect': incorrect_score_msg, } + + # Incorrect queuekey, state should not be updated + for correctness in ['correct', 'incorrect']: + self.problem.correct_map = CorrectMap() + self.problem.correct_map.update(old_cmap) # Deep copy + + self.problem.update_score(xserver_msgs[correctness], queuekey=0) + self.assertEquals(self.problem.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison + + for answer_id in answer_ids: + self.assertTrue(self.problem.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered + + # Correct queuekey, state should be updated + for correctness in ['correct', 'incorrect']: for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now()) - old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + self.problem.correct_map = CorrectMap() + self.problem.correct_map.update(old_cmap) - # Message format common to external graders - grader_msg = 'MESSAGE' # Must be valid XML - correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg}) - incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg}) + new_cmap = CorrectMap() + new_cmap.update(old_cmap) + npoints = 1 if correctness == 'correct' else 0 + new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) - xserver_msgs = {'correct': correct_score_msg, - 'incorrect': incorrect_score_msg, } - - # Incorrect queuekey, state should not be updated - for correctness in ['correct', 'incorrect']: - 
test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) # Deep copy - - test_lcp.update_score(xserver_msgs[correctness], queuekey=0) - self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison - - for answer_id in answer_ids: - self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered - - # Correct queuekey, state should be updated - for correctness in ['correct', 'incorrect']: - for i, answer_id in enumerate(answer_ids): - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) - - new_cmap = CorrectMap() - new_cmap.update(old_cmap) - npoints = 1 if correctness == 'correct' else 0 - new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) - - test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) - self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) - - for j, test_id in enumerate(answer_ids): - if j == i: - self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered - else: - self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered + self.problem.update_score(xserver_msgs[correctness], queuekey=1000 + i) + self.assertEquals(self.problem.correct_map.get_dict(), new_cmap.get_dict()) + for j, test_id in enumerate(answer_ids): + if j == i: + self.assertFalse(self.problem.correct_map.is_queued(test_id)) # Should be dequeued, message delivered + else: + self.assertTrue(self.problem.correct_map.is_queued(test_id)) # Should be queued, message undelivered def test_recentmost_queuetime(self): ''' Test whether the LoncapaProblem knows about the time of queue requests ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + answer_ids = 
sorted(self.problem.get_question_answers()) - answer_ids = sorted(test_lcp.get_question_answers()) + # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + self.problem.correct_map.update(cmap) - # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) + self.assertEquals(self.problem.get_recentmost_queuetime(), None) - self.assertEquals(test_lcp.get_recentmost_queuetime(), None) + # CodeResponse requires internal CorrectMap state. Build it now in the queued state + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + latest_timestamp = datetime.now() + queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp) + cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) + self.problem.correct_map.update(cmap) - # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - latest_timestamp = datetime.now() - queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp) - cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) - test_lcp.correct_map.update(cmap) + # Queue state only tracks up to second + latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) - # Queue state only tracks up to second - latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) + self.assertEquals(self.problem.get_recentmost_queuetime(), latest_timestamp) - self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) - - def test_convert_files_to_filenames(self): - ''' - Test whether file objects are converted to filenames without altering other structures - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as fp: - answers_with_file = {'1_2_1': 'String-based answer', - '1_3_1': ['answer1', 'answer2', 'answer3'], - '1_4_1': [fp, fp]} - answers_converted = convert_files_to_filenames(answers_with_file) - self.assertEquals(answers_converted['1_2_1'], 'String-based answer') - self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) - self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) + def test_convert_files_to_filenames(self): + ''' + Test whether file objects are converted to filenames without altering other structures + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/filename_convert_test.txt") + with open(problem_file) as fp: + answers_with_file = {'1_2_1': 'String-based answer', + '1_3_1': ['answer1', 'answer2', 'answer3'], + '1_4_1': [fp, fp]} + answers_converted = convert_files_to_filenames(answers_with_file) + self.assertEquals(answers_converted['1_2_1'], 'String-based answer') + 
self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) + self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) -class ChoiceResponseTest(unittest.TestCase): +class ChoiceResponseTest(ResponseTest): + from response_xml_factory import ChoiceResponseXMLFactory + xml_factory_class = ChoiceResponseXMLFactory - def test_cr_rb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + def test_radio_group_grade(self): + problem = self.build_problem(choice_type='radio', + choices=[False, True, False]) - def test_cr_cb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3'], - '1_4_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - '1_4_1': ['choice_2', 'choice_3'], - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') + # Check that we get the expected results + self.assert_grade(problem, 'choice_0', 'incorrect') + self.assert_grade(problem, 'choice_1', 'correct') + self.assert_grade(problem, 'choice_2', 'incorrect') + + # No choice 3 exists --> mark incorrect + self.assert_grade(problem, 'choice_3', 
'incorrect') + + def test_checkbox_group_grade(self): + problem = self.build_problem(choice_type='checkbox', + choices=[False, True, True]) + + # Check that we get the expected results + # (correct if and only if BOTH correct choices chosen) + self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct') + self.assert_grade(problem, 'choice_1', 'incorrect') + self.assert_grade(problem, 'choice_2', 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect') + self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect') + + # No choice 3 exists --> mark incorrect + self.assert_grade(problem, 'choice_3', 'incorrect') -class JavascriptResponseTest(unittest.TestCase): +class JavascriptResponseTest(ResponseTest): + from response_xml_factory import JavascriptResponseXMLFactory + xml_factory_class = JavascriptResponseXMLFactory - def test_jr_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" + def test_grade(self): + # Compile coffee files into javascript used by the response coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" os.system("coffee -c %s" % (coffee_file_path)) - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': json.dumps({0: 4})} - incorrect_answers = {'1_2_1': json.dumps({0: 5})} - self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + problem = self.build_problem(generator_src="test_problem_generator.js", + grader_src="test_problem_grader.js", + display_class="TestProblemDisplay", + display_src="test_problem_display.js", + param_dict={'value': '4'}) + + # Test that we get graded correctly + self.assert_grade(problem, json.dumps({0: 4}), "correct") + self.assert_grade(problem, json.dumps({0: 5}), "incorrect") + + +class NumericalResponseTest(ResponseTest): + from 
response_xml_factory import NumericalResponseXMLFactory + xml_factory_class = NumericalResponseXMLFactory + + def test_grade_exact(self): + problem = self.build_problem(question_text="What is 2 + 2?", + explanation="The answer is 4", + answer=4) + correct_responses = ["4", "4.0", "4.00"] + incorrect_responses = ["", "3.9", "4.1", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_grade_decimal_tolerance(self): + problem = self.build_problem(question_text="What is 2 + 2 approximately?", + explanation="The answer is 4", + answer=4, + tolerance=0.1) + correct_responses = ["4.0", "4.00", "4.09", "3.91"] + incorrect_responses = ["", "4.11", "3.89", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_grade_percent_tolerance(self): + problem = self.build_problem(question_text="What is 2 + 2 approximately?", + explanation="The answer is 4", + answer=4, + tolerance="10%") + correct_responses = ["4.0", "4.3", "3.7", "4.30", "3.70"] + incorrect_responses = ["", "4.5", "3.5", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_grade_with_script(self): + script_text = "computed_response = math.sqrt(4)" + problem = self.build_problem(question_text="What is sqrt(4)?", + explanation="The answer is 2", + answer="$computed_response", + script=script_text) + correct_responses = ["2", "2.0"] + incorrect_responses = ["", "2.01", "1.99", "0"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + def test_grade_with_script_and_tolerance(self): + script_text = "computed_response = math.sqrt(4)" + problem = self.build_problem(question_text="What is sqrt(4)?", + explanation="The answer is 2", + answer="$computed_response", + tolerance="0.1", + script=script_text) + correct_responses = ["2", "2.0", "2.05", "1.95"] + incorrect_responses = ["", "2.11", "1.89", "0"] + self.assert_multiple_grade(problem, correct_responses, 
incorrect_responses) + + def test_exponential_answer(self): + problem = self.build_problem(question_text="What 5 * 10?", + explanation="The answer is 50", + answer="5e+1") + correct_responses = ["50", "50.0", "5e1", "5e+1", "50e0", "500e-1"] + incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"] + self.assert_multiple_grade(problem, correct_responses, incorrect_responses) + + +class CustomResponseTest(ResponseTest): + from response_xml_factory import CustomResponseXMLFactory + xml_factory_class = CustomResponseXMLFactory + + def test_inline_code(self): + + # For inline code, we directly modify global context variables + # 'answers' is a list of answers provided to us + # 'correct' is a list we fill in with True/False + # 'expect' is given to us (if provided in the XML) + inline_script = """correct[0] = 'correct' if (answers['1_2_1'] == expect) else 'incorrect'""" + problem = self.build_problem(answer=inline_script, expect="42") + + # Check results + self.assert_grade(problem, '42', 'correct') + self.assert_grade(problem, '0', 'incorrect') + + def test_inline_message(self): + + # Inline code can update the global messages list + # to pass messages to the CorrectMap for a particular input + # The code can also set the global overall_message (str) + # to pass a message that applies to the whole response + inline_script = textwrap.dedent(""" + messages[0] = "Test Message" + overall_message = "Overall message" + """) + problem = self.build_problem(answer=inline_script) + + input_dict = {'1_2_1': '0'} + correctmap = problem.grade_answers(input_dict) + + # Check that the message for the particular input was received + input_msg = correctmap.get_msg('1_2_1') + self.assertEqual(input_msg, "Test Message") + + # Check that the overall message (for the whole response) was received + overall_msg = correctmap.get_overall_message() + self.assertEqual(overall_msg, "Overall message") + + def test_function_code_single_input(self): + + # For function code, we pass in these 
arguments: + # + # 'expect' is the expect attribute of the + # + # 'answer_given' is the answer the student gave (if there is just one input) + # or an ordered list of answers (if there are multiple inputs) + # + # + # The function should return a dict of the form + # { 'ok': BOOL, 'msg': STRING } + # + script = textwrap.dedent(""" + def check_func(expect, answer_given): + return {'ok': answer_given == expect, 'msg': 'Message text'} + """) + + problem = self.build_problem(script=script, cfn="check_func", expect="42") + + # Correct answer + input_dict = {'1_2_1': '42'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + msg = correct_map.get_msg('1_2_1') + + self.assertEqual(correctness, 'correct') + self.assertEqual(msg, "Message text") + + # Incorrect answer + input_dict = {'1_2_1': '0'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + msg = correct_map.get_msg('1_2_1') + + self.assertEqual(correctness, 'incorrect') + self.assertEqual(msg, "Message text") + + def test_function_code_multiple_input_no_msg(self): + + # Check functions also have the option of returning + # a single boolean value + # If true, mark all the inputs correct + # If false, mark all the inputs incorrect + script = textwrap.dedent(""" + def check_func(expect, answer_given): + return (answer_given[0] == expect and + answer_given[1] == expect) + """) + + problem = self.build_problem(script=script, cfn="check_func", + expect="42", num_inputs=2) + + # Correct answer -- expect both inputs marked correct + input_dict = {'1_2_1': '42', '1_2_2': '42'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + self.assertEqual(correctness, 'correct') + + correctness = correct_map.get_correctness('1_2_2') + self.assertEqual(correctness, 'correct') + + # One answer incorrect -- expect both inputs marked incorrect + input_dict = {'1_2_1': '0', 
'1_2_2': '42'} + correct_map = problem.grade_answers(input_dict) + + correctness = correct_map.get_correctness('1_2_1') + self.assertEqual(correctness, 'incorrect') + + correctness = correct_map.get_correctness('1_2_2') + self.assertEqual(correctness, 'incorrect') + + def test_function_code_multiple_inputs(self): + + # If the has multiple inputs associated with it, + # the check function can return a dict of the form: + # + # {'overall_message': STRING, + # 'input_list': [{'ok': BOOL, 'msg': STRING}, ...] } + # + # 'overall_message' is displayed at the end of the response + # + # 'input_list' contains dictionaries representing the correctness + # and message for each input. + script = textwrap.dedent(""" + def check_func(expect, answer_given): + check1 = (int(answer_given[0]) == 1) + check2 = (int(answer_given[1]) == 2) + check3 = (int(answer_given[2]) == 3) + return {'overall_message': 'Overall message', + 'input_list': [ + {'ok': check1, 'msg': 'Feedback 1'}, + {'ok': check2, 'msg': 'Feedback 2'}, + {'ok': check3, 'msg': 'Feedback 3'} ] } + """) + + problem = self.build_problem(script=script, + cfn="check_func", num_inputs=3) + + # Grade the inputs (one input incorrect) + input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'} + correct_map = problem.grade_answers(input_dict) + + # Expect that we receive the overall message (for the whole response) + self.assertEqual(correct_map.get_overall_message(), "Overall message") + + # Expect that the inputs were graded individually + self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect') + self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct') + self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct') + + # Expect that we received messages for each individual input + self.assertEqual(correct_map.get_msg('1_2_1'), 'Feedback 1') + self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2') + self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3') + + def 
test_multiple_inputs_return_one_status(self): + # When given multiple inputs, the 'answer_given' argument + # to the check_func() is a list of inputs + # + # The sample script below marks the problem as correct + # if and only if it receives answer_given=[1,2,3] + # (or string values ['1','2','3']) + # + # Since we return a dict describing the status of one input, + # we expect that the same 'ok' value is applied to each + # of the inputs. + script = textwrap.dedent(""" + def check_func(expect, answer_given): + check1 = (int(answer_given[0]) == 1) + check2 = (int(answer_given[1]) == 2) + check3 = (int(answer_given[2]) == 3) + return {'ok': (check1 and check2 and check3), + 'msg': 'Message text'} + """) + + problem = self.build_problem(script=script, + cfn="check_func", num_inputs=3) + + # Grade the inputs (one input incorrect) + input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'} + correct_map = problem.grade_answers(input_dict) + + # Everything marked incorrect + self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect') + self.assertEqual(correct_map.get_correctness('1_2_2'), 'incorrect') + self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect') + + # Grade the inputs (everything correct) + input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'} + correct_map = problem.grade_answers(input_dict) + + # Everything marked incorrect + self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct') + self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct') + self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct') + + # Message is interpreted as an "overall message" + self.assertEqual(correct_map.get_overall_message(), 'Message text') + + def test_script_exception_function(self): + + # Construct a script that will raise an exception + script = textwrap.dedent(""" + def check_func(expect, answer_given): + raise Exception("Test") + """) + + problem = self.build_problem(script=script, cfn="check_func") + + # Expect that an 
exception gets raised when we check the answer + with self.assertRaises(ResponseError): + problem.grade_answers({'1_2_1': '42'}) + + def test_script_exception_inline(self): + + # Construct a script that will raise an exception + script = 'raise Exception("Test")' + problem = self.build_problem(answer=script) + + # Expect that an exception gets raised when we check the answer + with self.assertRaises(ResponseError): + problem.grade_answers({'1_2_1': '42'}) + + def test_invalid_dict_exception(self): + + # Construct a script that passes back an invalid dict format + script = textwrap.dedent(""" + def check_func(expect, answer_given): + return {'invalid': 'test'} + """) + + problem = self.build_problem(script=script, cfn="check_func") + + # Expect that an exception gets raised when we check the answer + with self.assertRaises(ResponseError): + problem.grade_answers({'1_2_1': '42'}) + + + def test_module_imports_inline(self): + ''' + Check that the correct modules are available to custom + response scripts + ''' + + for module_name in ['random', 'numpy', 'math', 'scipy', + 'calc', 'eia', 'chemcalc', 'chemtools', + 'miller', 'draganddrop']: + + # Create a script that checks that the name is defined + # If the name is not defined, then the script + # will raise an exception + script = textwrap.dedent(''' + correct[0] = 'correct' + assert('%s' in globals())''' % module_name) + + # Create the problem + problem = self.build_problem(answer=script) + + # Expect that we can grade an answer without + # getting an exception + try: + problem.grade_answers({'1_2_1': '42'}) + + except ResponseError: + self.fail("Could not use name '%s' in custom response" + % module_name) + + def test_module_imports_function(self): + ''' + Check that the correct modules are available to custom + response scripts + ''' + + for module_name in ['random', 'numpy', 'math', 'scipy', + 'calc', 'eia', 'chemcalc', 'chemtools', + 'miller', 'draganddrop']: + + # Create a script that checks that the name is 
defined + # If the name is not defined, then the script + # will raise an exception + script = textwrap.dedent(''' + def check_func(expect, answer_given): + assert('%s' in globals()) + return True''' % module_name) + + # Create the problem + problem = self.build_problem(script=script, cfn="check_func") + + # Expect that we can grade an answer without + # getting an exception + try: + problem.grade_answers({'1_2_1': '42'}) + + except ResponseError: + self.fail("Could not use name '%s' in custom response" + % module_name) + + +class SchematicResponseTest(ResponseTest): + from response_xml_factory import SchematicResponseXMLFactory + xml_factory_class = SchematicResponseXMLFactory + + def test_grade(self): + + # Most of the schematic-specific work is handled elsewhere + # (in client-side JavaScript) + # The is responsible only for executing the + # Python code in with *submission* (list) + # in the global context. + + # To test that the context is set up correctly, + # we create a script that sets *correct* to true + # if and only if we find the *submission* (list) + script = "correct = ['correct' if 'test' in submission[0] else 'incorrect']" + problem = self.build_problem(answer=script) + + # The actual dictionary would contain schematic information + # sent from the JavaScript simulation + submission_dict = {'test': 'test'} + input_dict = {'1_2_1': json.dumps(submission_dict)} + correct_map = problem.grade_answers(input_dict) + + # Expect that the problem is graded as true + # (That is, our script verifies that the context + # is what we expect) + self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct') + + def test_script_exception(self): + + # Construct a script that will raise an exception + script = "raise Exception('test')" + problem = self.build_problem(answer=script) + + # Expect that an exception gets raised when we check the answer + with self.assertRaises(ResponseError): + submission_dict = {'test': 'test'} + input_dict = {'1_2_1': 
json.dumps(submission_dict)} + problem.grade_answers(input_dict) + + +class AnnotationResponseTest(ResponseTest): + from response_xml_factory import AnnotationResponseXMLFactory + xml_factory_class = AnnotationResponseXMLFactory + + def test_grade(self): + (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect') + + answer_id = '1_2_1' + options = (('x', correct), ('y', partially), ('z', incorrect)) + make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids})} + + tests = [ + {'correctness': correct, 'points': 2, 'answers': make_answer([0])}, + {'correctness': partially, 'points': 1, 'answers': make_answer([1])}, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])}, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])}, + {'correctness': incorrect, 'points': 0, 'answers': make_answer([])}, + {'correctness': incorrect, 'points': 0, 'answers': make_answer('')}, + {'correctness': incorrect, 'points': 0, 'answers': make_answer(None)}, + {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}}, + ] + + for (index, test) in enumerate(tests): + expected_correctness = test['correctness'] + expected_points = test['points'] + answers = test['answers'] + + problem = self.build_problem(options=options) + correct_map = problem.grade_answers(answers) + actual_correctness = correct_map.get_correctness(answer_id) + actual_points = correct_map.get_npoints(answer_id) + + self.assertEqual(expected_correctness, actual_correctness, + msg="%s should be marked %s" % (answer_id, expected_correctness)) + self.assertEqual(expected_points, actual_points, + msg="%s should have %d points" % (answer_id, expected_points)) diff --git a/common/lib/capa/capa/util.py b/common/lib/capa/capa/util.py index a0f25c4947..9f3e8bd3a0 100644 --- a/common/lib/capa/capa/util.py +++ b/common/lib/capa/capa/util.py @@ -1,4 +1,4 @@ -from calc import evaluator, UndefinedVariable +from .calc import evaluator, 
UndefinedVariable #----------------------------------------------------------------------------- # diff --git a/common/lib/capa/capa/verifiers/draganddrop.py b/common/lib/capa/capa/verifiers/draganddrop.py index eb91208923..cdfa163f33 100644 --- a/common/lib/capa/capa/verifiers/draganddrop.py +++ b/common/lib/capa/capa/verifiers/draganddrop.py @@ -27,6 +27,49 @@ values are (x,y) coordinates of centers of dragged images. import json +def flat_user_answer(user_answer): + """ + Convert nested `user_answer` to flat format. + + {'up': {'first': {'p': 'p_l'}}} + + to + + {'up': 'p_l[p][first]'} + """ + + def parse_user_answer(answer): + key = answer.keys()[0] + value = answer.values()[0] + if isinstance(value, dict): + + # Make complex value: + # Example: + # Create like 'p_l[p][first]' from {'first': {'p': 'p_l'} + complex_value_list = [] + v_value = value + while isinstance(v_value, dict): + v_key = v_value.keys()[0] + v_value = v_value.values()[0] + complex_value_list.append(v_key) + + complex_value = '{0}'.format(v_value) + for i in reversed(complex_value_list): + complex_value = '{0}[{1}]'.format(complex_value, i) + + res = {key: complex_value} + return res + else: + return answer + + result = [] + for answer in user_answer: + parse_answer = parse_user_answer(answer) + result.append(parse_answer) + + return result + + class PositionsCompare(list): """ Class for comparing positions. @@ -111,42 +154,41 @@ class DragAndDrop(object): Returns: bool. 
''' for draggable in self.excess_draggables: - if not self.excess_draggables[draggable]: + if self.excess_draggables[draggable]: return False # user answer has more draggables than correct answer # Number of draggables in user_groups may be differ that in # correct_groups, that is incorrect, except special case with 'number' - for groupname, draggable_ids in self.correct_groups.items(): - + for index, draggable_ids in enumerate(self.correct_groups): # 'number' rule special case # for reusable draggables we may get in self.user_groups # {'1': [u'2', u'2', u'2'], '0': [u'1', u'1'], '2': [u'3']} # if '+number' is in rule - do not remove duplicates and strip # '+number' from rule - current_rule = self.correct_positions[groupname].keys()[0] + current_rule = self.correct_positions[index].keys()[0] if 'number' in current_rule: - rule_values = self.correct_positions[groupname][current_rule] + rule_values = self.correct_positions[index][current_rule] # clean rule, do not do clean duplicate items - self.correct_positions[groupname].pop(current_rule, None) + self.correct_positions[index].pop(current_rule, None) parsed_rule = current_rule.replace('+', '').replace('number', '') - self.correct_positions[groupname][parsed_rule] = rule_values + self.correct_positions[index][parsed_rule] = rule_values else: # remove dublicates - self.user_groups[groupname] = list(set(self.user_groups[groupname])) + self.user_groups[index] = list(set(self.user_groups[index])) - if sorted(draggable_ids) != sorted(self.user_groups[groupname]): + if sorted(draggable_ids) != sorted(self.user_groups[index]): return False # Check that in every group, for rule of that group, user positions of # every element are equal with correct positions - for groupname in self.correct_groups: + for index, _ in enumerate(self.correct_groups): rules_executed = 0 for rule in ('exact', 'anyof', 'unordered_equal'): # every group has only one rule - if self.correct_positions[groupname].get(rule, None): + if 
self.correct_positions[index].get(rule, None): rules_executed += 1 if not self.compare_positions( - self.correct_positions[groupname][rule], - self.user_positions[groupname]['user'], flag=rule): + self.correct_positions[index][rule], + self.user_positions[index]['user'], flag=rule): return False if not rules_executed: # no correct rules for current group # probably xml content mistake - wrong rules names @@ -248,7 +290,7 @@ class DragAndDrop(object): correct_answer = {'name4': 't1', 'name_with_icon': 't1', '5': 't2', - '7':'t2'} + '7': 't2'} It is draggable_name: dragable_position mapping. @@ -284,48 +326,56 @@ class DragAndDrop(object): Args: user_answer: json - correct_answer: dict or list + correct_answer: dict or list """ - self.correct_groups = dict() # correct groups from xml - self.correct_positions = dict() # correct positions for comparing - self.user_groups = dict() # will be populated from user answer - self.user_positions = dict() # will be populated from user answer + self.correct_groups = [] # Correct groups from xml. + self.correct_positions = [] # Correct positions for comparing. + self.user_groups = [] # Will be populated from user answer. + self.user_positions = [] # Will be populated from user answer. - # convert from dict answer format to list format + # Convert from dict answer format to list format. if isinstance(correct_answer, dict): tmp = [] for key, value in correct_answer.items(): - tmp_dict = {'draggables': [], 'targets': [], 'rule': 'exact'} - tmp_dict['draggables'].append(key) - tmp_dict['targets'].append(value) - tmp.append(tmp_dict) + tmp.append({ + 'draggables': [key], + 'targets': [value], + 'rule': 'exact'}) correct_answer = tmp + # Convert string `user_answer` to object. user_answer = json.loads(user_answer) - # check if we have draggables that are not in correct answer: - self.excess_draggables = {} + # This dictionary will hold a key for each draggable the user placed on + # the image. 
The value is True if that draggable is not mentioned in any + # correct_answer entries. If the draggable is mentioned in at least one + # correct_answer entry, the value is False. + # default to consider every user answer excess until proven otherwise. + self.excess_draggables = dict((users_draggable.keys()[0],True) + for users_draggable in user_answer) - # create identical data structures from user answer and correct answer - for i in xrange(0, len(correct_answer)): - groupname = str(i) - self.correct_groups[groupname] = correct_answer[i]['draggables'] - self.correct_positions[groupname] = {correct_answer[i]['rule']: - correct_answer[i]['targets']} - self.user_groups[groupname] = [] - self.user_positions[groupname] = {'user': []} - for draggable_dict in user_answer['draggables']: - # draggable_dict is 1-to-1 {draggable_name: position} + # Convert nested `user_answer` to flat format. + user_answer = flat_user_answer(user_answer) + + # Create identical data structures from user answer and correct answer. + for answer in correct_answer: + user_groups_data = [] + user_positions_data = [] + for draggable_dict in user_answer: + # Draggable_dict is 1-to-1 {draggable_name: position}. 
draggable_name = draggable_dict.keys()[0] - if draggable_name in self.correct_groups[groupname]: - self.user_groups[groupname].append(draggable_name) - self.user_positions[groupname]['user'].append( + if draggable_name in answer['draggables']: + user_groups_data.append(draggable_name) + user_positions_data.append( draggable_dict[draggable_name]) - self.excess_draggables[draggable_name] = True - else: - self.excess_draggables[draggable_name] = \ - self.excess_draggables.get(draggable_name, False) + # proved that this is not excess + self.excess_draggables[draggable_name] = False + + self.correct_groups.append(answer['draggables']) + self.correct_positions.append({answer['rule']: answer['targets']}) + self.user_groups.append(user_groups_data) + self.user_positions.append({'user': user_positions_data}) def grade(user_input, correct_answer): diff --git a/common/lib/capa/capa/verifiers/tests_draganddrop.py b/common/lib/capa/capa/verifiers/tests_draganddrop.py index 9b1b15ce0c..75a194cc6d 100644 --- a/common/lib/capa/capa/verifiers/tests_draganddrop.py +++ b/common/lib/capa/capa/verifiers/tests_draganddrop.py @@ -1,7 +1,8 @@ import unittest import draganddrop -from draganddrop import PositionsCompare +from .draganddrop import PositionsCompare +import json class Test_PositionsCompare(unittest.TestCase): @@ -40,78 +41,314 @@ class Test_PositionsCompare(unittest.TestCase): class Test_DragAndDrop_Grade(unittest.TestCase): - def test_targets_true(self): - user_input = '{"draggables": [{"1": "t1"}, \ - {"name_with_icon": "t2"}]}' - correct_answer = {'1': 't1', 'name_with_icon': 't2'} + def test_targets_are_draggable_1(self): + user_input = json.dumps([ + {'p': 'p_l'}, + {'up': {'first': {'p': 'p_l'}}} + ]) + + correct_answer = [ + { + 'draggables': ['p'], + 'targets': [ + 'p_l', 'p_r' + ], + 'rule': 'anyof' + }, + { + 'draggables': ['up'], + 'targets': [ + 'p_l[p][first]' + ], + 'rule': 'anyof' + } + ] self.assertTrue(draganddrop.grade(user_input, correct_answer)) + def 
test_targets_are_draggable_2(self): + user_input = json.dumps([ + {'p': 'p_l'}, + {'p': 'p_r'}, + {'s': 's_l'}, + {'s': 's_r'}, + {'up': {'1': {'p': 'p_l'}}}, + {'up': {'3': {'p': 'p_l'}}}, + {'up': {'1': {'p': 'p_r'}}}, + {'up': {'3': {'p': 'p_r'}}}, + {'up_and_down': {'1': {'s': 's_l'}}}, + {'up_and_down': {'1': {'s': 's_r'}}} + ]) + + correct_answer = [ + { + 'draggables': ['p'], + 'targets': ['p_l', 'p_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['s'], + 'targets': ['s_l', 's_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': [ + 's_l[s][1]', 's_r[s][1]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': [ + 'p_l[p][1]', 'p_l[p][3]', 'p_r[p][1]', 'p_r[p][3]' + ], + 'rule': 'unordered_equal' + } + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_are_draggable_2_manual_parsing(self): + user_input = json.dumps([ + {'up': 'p_l[p][1]'}, + {'p': 'p_l'}, + {'up': 'p_l[p][3]'}, + {'up': 'p_r[p][1]'}, + {'p': 'p_r'}, + {'up': 'p_r[p][3]'}, + {'up_and_down': 's_l[s][1]'}, + {'s': 's_l'}, + {'up_and_down': 's_r[s][1]'}, + {'s': 's_r'} + ]) + + correct_answer = [ + { + 'draggables': ['p'], + 'targets': ['p_l', 'p_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['s'], + 'targets': ['s_l', 's_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': [ + 's_l[s][1]', 's_r[s][1]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': [ + 'p_l[p][1]', 'p_l[p][3]', 'p_r[p][1]', 'p_r[p][3]' + ], + 'rule': 'unordered_equal' + } + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_are_draggable_3_nested(self): + user_input = json.dumps([ + {'molecule': 'left_side_tagret'}, + {'molecule': 'right_side_tagret'}, + {'p': {'p_target': {'molecule': 'left_side_tagret'}}}, + {'p': {'p_target': {'molecule': 'right_side_tagret'}}}, + {'s': {'s_target': {'molecule': 
'left_side_tagret'}}}, + {'s': {'s_target': {'molecule': 'right_side_tagret'}}}, + {'up': {'1': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}}, + {'up': {'3': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}}, + {'up': {'1': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}}, + {'up': {'3': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}}, + {'up_and_down': {'1': {'s': {'s_target': {'molecule': 'left_side_tagret'}}}}}, + {'up_and_down': {'1': {'s': {'s_target': {'molecule': 'right_side_tagret'}}}}} + ]) + + correct_answer = [ + { + 'draggables': ['molecule'], + 'targets': ['left_side_tagret', 'right_side_tagret'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['p'], + 'targets': [ + 'left_side_tagret[molecule][p_target]', + 'right_side_tagret[molecule][p_target]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['s'], + 'targets': [ + 'left_side_tagret[molecule][s_target]', + 'right_side_tagret[molecule][s_target]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': [ + 'left_side_tagret[molecule][s_target][s][1]', + 'right_side_tagret[molecule][s_target][s][1]' + ], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': [ + 'left_side_tagret[molecule][p_target][p][1]', + 'left_side_tagret[molecule][p_target][p][3]', + 'right_side_tagret[molecule][p_target][p][1]', + 'right_side_tagret[molecule][p_target][p][3]' + ], + 'rule': 'unordered_equal' + } + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_are_draggable_4_real_example(self): + user_input = json.dumps([ + {'single_draggable': 's_l'}, + {'single_draggable': 's_r'}, + {'single_draggable': 'p_sigma'}, + {'single_draggable': 'p_sigma*'}, + {'single_draggable': 's_sigma'}, + {'single_draggable': 's_sigma*'}, + {'double_draggable': 'p_pi*'}, + {'double_draggable': 'p_pi'}, + {'triple_draggable': 'p_l'}, + {'triple_draggable': 'p_r'}, + {'up': {'1': {'triple_draggable': 
'p_l'}}}, + {'up': {'2': {'triple_draggable': 'p_l'}}}, + {'up': {'2': {'triple_draggable': 'p_r'}}}, + {'up': {'3': {'triple_draggable': 'p_r'}}}, + {'up_and_down': {'1': {'single_draggable': 's_l'}}}, + {'up_and_down': {'1': {'single_draggable': 's_r'}}}, + {'up_and_down': {'1': {'single_draggable': 's_sigma'}}}, + {'up_and_down': {'1': {'single_draggable': 's_sigma*'}}}, + {'up_and_down': {'1': {'double_draggable': 'p_pi'}}}, + {'up_and_down': {'2': {'double_draggable': 'p_pi'}}} + ]) + + # 10 targets: + # s_l, s_r, p_l, p_r, s_sigma, s_sigma*, p_pi, p_sigma, p_pi*, p_sigma* + # + # 3 draggable objects, which have targets (internal target ids - 1, 2, 3): + # single_draggable, double_draggable, triple_draggable + # + # 2 draggable objects: + # up, up_and_down + correct_answer = [ + { + 'draggables': ['triple_draggable'], + 'targets': ['p_l', 'p_r'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['double_draggable'], + 'targets': ['p_pi', 'p_pi*'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['single_draggable'], + 'targets': ['s_l', 's_r', 's_sigma', 's_sigma*', 'p_sigma', 'p_sigma*'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up'], + 'targets': ['p_l[triple_draggable][1]', 'p_l[triple_draggable][2]', + 'p_r[triple_draggable][2]', 'p_r[triple_draggable][3]'], + 'rule': 'unordered_equal' + }, + { + 'draggables': ['up_and_down'], + 'targets': ['s_l[single_draggable][1]', 's_r[single_draggable][1]', + 's_sigma[single_draggable][1]', 's_sigma*[single_draggable][1]', + 'p_pi[double_draggable][1]', 'p_pi[double_draggable][2]'], + 'rule': 'unordered_equal' + }, + + ] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_true(self): + user_input = '[{"1": "t1"}, \ + {"name_with_icon": "t2"}]' + correct_answer = {'1': 't1', 'name_with_icon': 't2'} + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_expect_no_actions_wrong(self): + user_input = '[{"1": "t1"}, \ + {"name_with_icon": 
"t2"}]' + correct_answer = [] + self.assertFalse(draganddrop.grade(user_input, correct_answer)) + + def test_expect_no_actions_right(self): + user_input = '[]' + correct_answer = [] + self.assertTrue(draganddrop.grade(user_input, correct_answer)) + + def test_targets_false(self): - user_input = '{"draggables": [{"1": "t1"}, \ - {"name_with_icon": "t2"}]}' - correct_answer = {'1': 't3', 'name_with_icon': 't2'} + user_input = '[{"1": "t1"}, \ + {"name_with_icon": "t2"}]' + correct_answer = {'1': 't3', 'name_with_icon': 't2'} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_multiple_images_per_target_true(self): - user_input = '{\ - "draggables": [{"1": "t1"}, {"name_with_icon": "t2"}, \ - {"2": "t1"}]}' - correct_answer = {'1': 't1', 'name_with_icon': 't2', + user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \ + {"2": "t1"}]' + correct_answer = {'1': 't1', 'name_with_icon': 't2', '2': 't1'} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_multiple_images_per_target_false(self): - user_input = '{\ - "draggables": [{"1": "t1"}, {"name_with_icon": "t2"}, \ - {"2": "t1"}]}' - correct_answer = {'1': 't2', 'name_with_icon': 't2', + user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \ + {"2": "t1"}]' + correct_answer = {'1': 't2', 'name_with_icon': 't2', '2': 't1'} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_targets_and_positions(self): - user_input = '{"draggables": [{"1": [10,10]}, \ - {"name_with_icon": [[10,10],4]}]}' + user_input = '[{"1": [10,10]}, \ + {"name_with_icon": [[10,10],4]}]' correct_answer = {'1': [10, 10], 'name_with_icon': [[10, 10], 4]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_position_and_targets(self): - user_input = '{"draggables": [{"1": "t1"}, {"name_with_icon": "t2"}]}' + user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]' correct_answer = {'1': 't1', 'name_with_icon': 't2'} self.assertTrue(draganddrop.grade(user_input, 
correct_answer)) def test_positions_exact(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [10, 10], 'name_with_icon': [20, 20]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_positions_false(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [25, 25], 'name_with_icon': [20, 20]} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_positions_true_in_radius(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [14, 14], 'name_with_icon': [20, 20]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_positions_true_in_manual_radius(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [[40, 10], 30], 'name_with_icon': [20, 20]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_positions_false_in_manual_radius(self): - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]} self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_correct_answer_not_has_key_from_user_answer(self): - user_input = '{"draggables": [{"1": "t1"}, \ - {"name_with_icon": "t2"}]}' + user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]' correct_answer = {'3': 't3', 'name_with_icon': 't2'} self.assertFalse(draganddrop.grade(user_input, correct_answer)) @@ -119,20 +356,20 @@ class Test_DragAndDrop_Grade(unittest.TestCase): """Draggables 
can be places anywhere on base image. Place grass in the middle of the image and ant in the right upper corner.""" - user_input = '{"draggables": \ - [{"ant":[610.5,57.449951171875]},{"grass":[322.5,199.449951171875]}]}' + user_input = '[{"ant":[610.5,57.449951171875]},\ + {"grass":[322.5,199.449951171875]}]' correct_answer = {'grass': [[300, 200], 200], 'ant': [[500, 0], 200]} self.assertTrue(draganddrop.grade(user_input, correct_answer)) def test_lcao_correct(self): """Describe carbon molecule in LCAO-MO""" - user_input = '{"draggables":[{"1":"s_left"}, \ + user_input = '[{"1":"s_left"}, \ {"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \ {"8":"p_left_2"},{"10":"p_right_1"},{"9":"p_right_2"}, \ {"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \ {"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \ - {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]}' + {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]' correct_answer = [{ 'draggables': ['1', '2', '3', '4', '5', '6'], @@ -166,12 +403,12 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_lcao_extra_element_incorrect(self): """Describe carbon molecule in LCAO-MO""" - user_input = '{"draggables":[{"1":"s_left"}, \ + user_input = '[{"1":"s_left"}, \ {"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \ {"8":"p_left_2"},{"17":"p_left_3"},{"10":"p_right_1"},{"9":"p_right_2"}, \ {"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \ {"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \ - {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]}' + {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]' correct_answer = [{ 'draggables': ['1', '2', '3', '4', '5', '6'], @@ -205,9 +442,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_draggable_no_mupliples(self): """Test reusable draggables (no mupltiple draggables per target)""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ 
{"2":"target2"},{"1":"target3"},{"2":"target4"},{"2":"target5"}, \ - {"3":"target6"}]}' + {"3":"target6"}]' correct_answer = [ { 'draggables': ['1'], @@ -228,9 +465,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_draggable_with_mupliples(self): """Test reusable draggables with mupltiple draggables per target""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ {"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \ - {"3":"target6"}]}' + {"3":"target6"}]' correct_answer = [ { 'draggables': ['1'], @@ -251,10 +488,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_many_draggable_with_mupliples(self): """Test reusable draggables with mupltiple draggables per target""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ {"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \ {"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \ - {"5": "target5"}, {"6": "target2"}]}' + {"5": "target5"}, {"6": "target2"}]' correct_answer = [ { 'draggables': ['1', '4'], @@ -280,12 +517,12 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_reuse_many_draggable_with_mupliples_wrong(self): """Test reusable draggables with mupltiple draggables per target""" - user_input = '{"draggables":[{"1":"target1"}, \ + user_input = '[{"1":"target1"}, \ {"2":"target2"},{"1":"target1"}, \ {"2":"target3"}, \ {"2":"target4"}, \ {"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \ - {"5": "target5"}, {"6": "target2"}]}' + {"5": "target5"}, {"6": "target2"}]' correct_answer = [ { 'draggables': ['1', '4'], @@ -311,10 +548,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_false(self): """Test reusable draggables (no mupltiple draggables per target)""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \ {"c":"target6"}, 
{"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target1"}]}' + {"a":"target1"}]' correct_answer = [ { 'draggables': ['a'], @@ -335,10 +572,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_(self): """Test reusable draggables (no mupltiple draggables per target)""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \ {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target10"}]}' + {"a":"target10"}]' correct_answer = [ { 'draggables': ['a'], @@ -359,10 +596,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_multiple(self): """Test reusable draggables (mupltiple draggables per target)""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"b":"target5"}, \ {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target1"}]}' + {"a":"target1"}]' correct_answer = [ { 'draggables': ['a', 'a', 'a'], @@ -383,10 +620,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_multiple_false(self): """Test reusable draggables (mupltiple draggables per target)""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \ {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \ - {"a":"target1"}]}' + {"a":"target1"}]' correct_answer = [ { 'draggables': ['a', 'a', 'a'], @@ -407,10 +644,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_reused(self): """Test a b c in 10 labels reused""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"b":"target5"}, \ {"c":"target6"}, {"b":"target8"},{"c":"target9"}, \ - {"a":"target10"}]}' + 
{"a":"target10"}]' correct_answer = [ { 'draggables': ['a', 'a'], @@ -431,10 +668,10 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_label_10_targets_with_a_b_c_reused_false(self): """Test a b c in 10 labels reused false""" - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"},{"b":"target5"}, {"a":"target8"},\ {"c":"target6"}, {"b":"target8"},{"c":"target9"}, \ - {"a":"target10"}]}' + {"a":"target10"}]' correct_answer = [ { 'draggables': ['a', 'a'], @@ -455,9 +692,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_mixed_reuse_and_not_reuse(self): """Test reusable draggables """ - user_input = '{"draggables":[{"a":"target1"}, \ + user_input = '[{"a":"target1"}, \ {"b":"target2"},{"c":"target3"}, {"a":"target4"},\ - {"a":"target5"}]}' + {"a":"target5"}]' correct_answer = [ { 'draggables': ['a', 'b'], @@ -473,8 +710,8 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_mixed_reuse_and_not_reuse_number(self): """Test reusable draggables with number """ - user_input = '{"draggables":[{"a":"target1"}, \ - {"b":"target2"},{"c":"target3"}, {"a":"target4"}]}' + user_input = '[{"a":"target1"}, \ + {"b":"target2"},{"c":"target3"}, {"a":"target4"}]' correct_answer = [ { 'draggables': ['a', 'a', 'b'], @@ -490,8 +727,8 @@ class Test_DragAndDrop_Grade(unittest.TestCase): def test_mixed_reuse_and_not_reuse_number_false(self): """Test reusable draggables with numbers, but wrong""" - user_input = '{"draggables":[{"a":"target1"}, \ - {"b":"target2"},{"c":"target3"}, {"a":"target4"}, {"a":"target10"}]}' + user_input = '[{"a":"target1"}, \ + {"b":"target2"},{"c":"target3"}, {"a":"target4"}, {"a":"target10"}]' correct_answer = [ { 'draggables': ['a', 'a', 'b'], @@ -506,9 +743,9 @@ class Test_DragAndDrop_Grade(unittest.TestCase): self.assertFalse(draganddrop.grade(user_input, correct_answer)) def test_alternative_correct_answer(self): - user_input = '{"draggables":[{"name_with_icon":"t1"},\ 
+ user_input = '[{"name_with_icon":"t1"},\ {"name_with_icon":"t1"},{"name_with_icon":"t1"},{"name4":"t1"}, \ - {"name4":"t1"}]}' + {"name4":"t1"}]' correct_answer = [ {'draggables': ['name4'], 'targets': ['t1', 't1'], 'rule': 'exact'}, {'draggables': ['name_with_icon'], 'targets': ['t1', 't1', 't1'], @@ -521,14 +758,13 @@ class Test_DragAndDrop_Populate(unittest.TestCase): def test_1(self): correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]} - user_input = '{"draggables": \ - [{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}' + user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]' dnd = draganddrop.DragAndDrop(correct_answer, user_input) - correct_groups = {'1': ['name_with_icon'], '0': ['1']} - correct_positions = {'1': {'exact': [[20, 20]]}, '0': {'exact': [[[40, 10], 29]]}} - user_groups = {'1': [u'name_with_icon'], '0': [u'1']} - user_positions = {'1': {'user': [[20, 20]]}, '0': {'user': [[10, 10]]}} + correct_groups = [['1'], ['name_with_icon']] + correct_positions = [{'exact': [[[40, 10], 29]]}, {'exact': [[20, 20]]}] + user_groups = [['1'], ['name_with_icon']] + user_positions = [{'user': [[10, 10]]}, {'user': [[20, 20]]}] self.assertEqual(correct_groups, dnd.correct_groups) self.assertEqual(correct_positions, dnd.correct_positions) @@ -539,49 +775,49 @@ class Test_DragAndDrop_Populate(unittest.TestCase): class Test_DraAndDrop_Compare_Positions(unittest.TestCase): def test_1(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]], user=[[2, 3], [1, 1]], flag='anyof')) def test_2a(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]], user=[[2, 3], [1, 1]], flag='exact')) def test_2b(self): - dnd = draganddrop.DragAndDrop({'1': 
't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=[[1, 1], [2, 3]], user=[[2, 13], [1, 1]], flag='exact')) def test_3(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=["a", "b"], user=["a", "b", "c"], flag='anyof')) def test_4(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"], user=["a", "b"], flag='anyof')) def test_5(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=["a", "b", "c"], user=["a", "c", "b"], flag='exact')) def test_6(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"], user=["a", "c", "b"], flag='anyof')) def test_7(self): - dnd = draganddrop.DragAndDrop({'1': 't1'}, '{"draggables": [{"1": "t1"}]}') + dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]') self.assertFalse(dnd.compare_positions(correct=["a", "b", "b"], user=["a", "c", "b"], flag='anyof')) diff --git a/common/lib/capa/capa/xqueue_interface.py b/common/lib/capa/capa/xqueue_interface.py index 8dbe2c84aa..5cf2488af0 100644 --- a/common/lib/capa/capa/xqueue_interface.py +++ b/common/lib/capa/capa/xqueue_interface.py @@ -7,7 +7,7 @@ import logging import requests -log = logging.getLogger('mitx.' 
+ __name__) +log = logging.getLogger(__name__) dateformat = '%Y%m%d%H%M%S' diff --git a/common/lib/capa/setup.py b/common/lib/capa/setup.py index 15b3015930..d9c813f55c 100644 --- a/common/lib/capa/setup.py +++ b/common/lib/capa/setup.py @@ -4,5 +4,5 @@ setup( name="capa", version="0.1", packages=find_packages(exclude=["tests"]), - install_requires=['distribute', 'pyparsing'], + install_requires=['distribute==0.6.30', 'pyparsing==1.5.6'], ) diff --git a/common/lib/tempdir.py b/common/lib/tempdir.py new file mode 100644 index 0000000000..0acd92ba33 --- /dev/null +++ b/common/lib/tempdir.py @@ -0,0 +1,17 @@ +"""Make temporary directories nicely.""" + +import atexit +import os.path +import shutil +import tempfile + +def mkdtemp_clean(suffix="", prefix="tmp", dir=None): + """Just like mkdtemp, but the directory will be deleted when the process ends.""" + the_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir) + atexit.register(cleanup_tempdir, the_dir) + return the_dir + +def cleanup_tempdir(the_dir): + """Called on process exit to remove a temp directory.""" + if os.path.exists(the_dir): + shutil.rmtree(the_dir) diff --git a/common/lib/xmodule/setup.py b/common/lib/xmodule/setup.py index ec369420cd..85d42690b9 100644 --- a/common/lib/xmodule/setup.py +++ b/common/lib/xmodule/setup.py @@ -28,6 +28,7 @@ setup( "image = xmodule.backcompat_module:TranslateCustomTagDescriptor", "error = xmodule.error_module:ErrorDescriptor", "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor", + "poll_question = xmodule.poll_module:PollDescriptor", "problem = xmodule.capa_module:CapaDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor", "randomize = xmodule.randomize_module:RandomizeDescriptor", @@ -45,7 +46,9 @@ setup( "static_tab = xmodule.html_module:StaticTabDescriptor", "custom_tag_template = xmodule.raw_module:RawDescriptor", "about = xmodule.html_module:AboutDescriptor", + "wrapper = xmodule.wrapper_module:WrapperDescriptor", 
"graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor", + "annotatable = xmodule.annotatable_module:AnnotatableDescriptor", "foldit = xmodule.foldit_module:FolditDescriptor", ] } diff --git a/common/lib/xmodule/xmodule/abtest_module.py b/common/lib/xmodule/xmodule/abtest_module.py index 537d864127..0e1c66df8e 100644 --- a/common/lib/xmodule/xmodule/abtest_module.py +++ b/common/lib/xmodule/xmodule/abtest_module.py @@ -1,4 +1,3 @@ -import json import random import logging from lxml import etree @@ -7,6 +6,7 @@ from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor from xmodule.xml_module import XmlDescriptor from xmodule.exceptions import InvalidDefinitionError +from xblock.core import String, Scope, Object, BlockScope DEFAULT = "_DEFAULT_GROUP" @@ -31,29 +31,42 @@ def group_from_value(groups, v): return g -class ABTestModule(XModule): +class ABTestFields(object): + group_portions = Object(help="What proportions of students should go in each group", default={DEFAULT: 1}, scope=Scope.content) + group_assignments = Object(help="What group this user belongs to", scope=Scope.student_preferences, default={}) + group_content = Object(help="What content to display to each group", scope=Scope.content, default={DEFAULT: []}) + experiment = String(help="Experiment that this A/B test belongs to", scope=Scope.content) + has_children = True + + +class ABTestModule(ABTestFields, XModule): """ Implements an A/B test with an aribtrary number of competing groups """ - def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, instance_state, shared_state, **kwargs) - - if shared_state is None: + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + if self.group is None: self.group = group_from_value( - self.definition['data']['group_portions'].items(), + self.group_portions.items(), 
random.uniform(0, 1) ) - else: - shared_state = json.loads(shared_state) - self.group = shared_state['group'] - def get_shared_state(self): - return json.dumps({'group': self.group}) + @property + def group(self): + return self.group_assignments.get(self.experiment) + + @group.setter + def group(self, value): + self.group_assignments[self.experiment] = value + + @group.deleter + def group(self): + del self.group_assignments[self.experiment] def get_child_descriptors(self): - active_locations = set(self.definition['data']['group_content'][self.group]) + active_locations = set(self.group_content[self.group]) return [desc for desc in self.descriptor.get_children() if desc.location.url() in active_locations] def displayable_items(self): @@ -64,43 +77,11 @@ class ABTestModule(XModule): # TODO (cpennington): Use Groups should be a first class object, rather than being # managed by ABTests -class ABTestDescriptor(RawDescriptor, XmlDescriptor): +class ABTestDescriptor(ABTestFields, RawDescriptor, XmlDescriptor): module_class = ABTestModule template_dir_name = "abtest" - def __init__(self, system, definition=None, **kwargs): - """ - definition is a dictionary with the following layout: - {'data': { - 'experiment': 'the name of the experiment', - 'group_portions': { - 'group_a': 0.1, - 'group_b': 0.2 - }, - 'group_contents': { - 'group_a': [ - 'url://for/content/module/1', - 'url://for/content/module/2', - ], - 'group_b': [ - 'url://for/content/module/3', - ], - DEFAULT: [ - 'url://for/default/content/1' - ] - } - }, - 'children': [ - 'url://for/content/module/1', - 'url://for/content/module/2', - 'url://for/content/module/3', - 'url://for/default/content/1', - ]} - """ - kwargs['shared_state_key'] = definition['data']['experiment'] - RawDescriptor.__init__(self, system, definition, **kwargs) - @classmethod def definition_from_xml(cls, xml_object, system): """ @@ -118,19 +99,16 @@ class ABTestDescriptor(RawDescriptor, XmlDescriptor): "ABTests must specify an experiment. 
Not found in:\n{xml}" .format(xml=etree.tostring(xml_object, pretty_print=True))) - definition = { - 'data': { - 'experiment': experiment, - 'group_portions': {}, - 'group_content': {DEFAULT: []}, - }, - 'children': []} + group_portions = {} + group_content = {} + children = [] + for group in xml_object: if group.tag == 'default': name = DEFAULT else: name = group.get('name') - definition['data']['group_portions'][name] = float(group.get('portion', 0)) + group_portions[name] = float(group.get('portion', 0)) child_content_urls = [] for child in group: @@ -140,29 +118,33 @@ class ABTestDescriptor(RawDescriptor, XmlDescriptor): log.exception("Unable to load child when parsing ABTest. Continuing...") continue - definition['data']['group_content'][name] = child_content_urls - definition['children'].extend(child_content_urls) + group_content[name] = child_content_urls + children.extend(child_content_urls) default_portion = 1 - sum( - portion for (name, portion) in definition['data']['group_portions'].items()) + portion for (name, portion) in group_portions.items() + ) if default_portion < 0: raise InvalidDefinitionError("ABTest portions must add up to less than or equal to 1") - definition['data']['group_portions'][DEFAULT] = default_portion - definition['children'].sort() + group_portions[DEFAULT] = default_portion + children.sort() - return definition + return { + 'group_portions': group_portions, + 'group_content': group_content, + }, children def definition_to_xml(self, resource_fs): xml_object = etree.Element('abtest') - xml_object.set('experiment', self.definition['data']['experiment']) - for name, group in self.definition['data']['group_content'].items(): + xml_object.set('experiment', self.experiment) + for name, group in self.group_content.items(): if name == DEFAULT: group_elem = etree.SubElement(xml_object, 'default') else: group_elem = etree.SubElement(xml_object, 'group', attrib={ - 'portion': str(self.definition['data']['group_portions'][name]), + 
'portion': str(self.group_portions[name]), 'name': name, }) @@ -172,6 +154,5 @@ class ABTestDescriptor(RawDescriptor, XmlDescriptor): return xml_object - def has_dynamic_children(self): return True diff --git a/common/lib/xmodule/xmodule/annotatable_module.py b/common/lib/xmodule/xmodule/annotatable_module.py new file mode 100644 index 0000000000..db2aa13cb7 --- /dev/null +++ b/common/lib/xmodule/xmodule/annotatable_module.py @@ -0,0 +1,135 @@ +import logging + +from lxml import etree +from pkg_resources import resource_string, resource_listdir + +from xmodule.x_module import XModule +from xmodule.raw_module import RawDescriptor +from xmodule.contentstore.content import StaticContent +from xblock.core import Scope, String + +log = logging.getLogger(__name__) + + +class AnnotatableFields(object): + data = String(help="XML data for the annotation", scope=Scope.content) + + +class AnnotatableModule(AnnotatableFields, XModule): + js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'), + resource_string(__name__, 'js/src/collapsible.coffee'), + resource_string(__name__, 'js/src/html/display.coffee'), + resource_string(__name__, 'js/src/annotatable/display.coffee')], + 'js': [] + } + js_module_name = "Annotatable" + css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]} + icon_class = 'annotatable' + + + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + + xmltree = etree.fromstring(self.data) + + self.instructions = self._extract_instructions(xmltree) + self.content = etree.tostring(xmltree, encoding='unicode') + self.element_id = self.location.html_id() + self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green'] + + def _get_annotation_class_attr(self, index, el): + """ Returns a dict with the CSS class attribute to set on the annotation + and an XML key to delete from the element. 
+ """ + + attr = {} + cls = ['annotatable-span', 'highlight'] + highlight_key = 'highlight' + color = el.get(highlight_key) + + if color is not None: + if color in self.highlight_colors: + cls.append('highlight-'+color) + attr['_delete'] = highlight_key + attr['value'] = ' '.join(cls) + + return { 'class' : attr } + + def _get_annotation_data_attr(self, index, el): + """ Returns a dict in which the keys are the HTML data attributes + to set on the annotation element. Each data attribute has a + corresponding 'value' and (optional) '_delete' key to specify + an XML attribute to delete. + """ + + data_attrs = {} + attrs_map = { + 'body': 'data-comment-body', + 'title': 'data-comment-title', + 'problem': 'data-problem-id' + } + + for xml_key in attrs_map.keys(): + if xml_key in el.attrib: + value = el.get(xml_key, '') + html_key = attrs_map[xml_key] + data_attrs[html_key] = { 'value': value, '_delete': xml_key } + + return data_attrs + + def _render_annotation(self, index, el): + """ Renders an annotation element for HTML output. """ + attr = {} + attr.update(self._get_annotation_class_attr(index, el)) + attr.update(self._get_annotation_data_attr(index, el)) + + el.tag = 'span' + + for key in attr.keys(): + el.set(key, attr[key]['value']) + if '_delete' in attr[key] and attr[key]['_delete'] is not None: + delete_key = attr[key]['_delete'] + del el.attrib[delete_key] + + + def _render_content(self): + """ Renders annotatable content with annotation spans and returns HTML. """ + xmltree = etree.fromstring(self.content) + xmltree.tag = 'div' + if 'display_name' in xmltree.attrib: + del xmltree.attrib['display_name'] + + index = 0 + for el in xmltree.findall('.//annotation'): + self._render_annotation(index, el) + index += 1 + + return etree.tostring(xmltree, encoding='unicode') + + def _extract_instructions(self, xmltree): + """ Removes from the xmltree and returns them as a string, otherwise None. 
""" + instructions = xmltree.find('instructions') + if instructions is not None: + instructions.tag = 'div' + xmltree.remove(instructions) + return etree.tostring(instructions, encoding='unicode') + return None + + def get_html(self): + """ Renders parameters to template. """ + context = { + 'display_name': self.display_name_with_default, + 'element_id': self.element_id, + 'instructions_html': self.instructions, + 'content_html': self._render_content() + } + + return self.system.render_template('annotatable.html', context) + + +class AnnotatableDescriptor(AnnotatableFields, RawDescriptor): + module_class = AnnotatableModule + stores_state = True + template_dir_name = "annotatable" + mako_template = "widgets/raw-edit.html" + diff --git a/common/lib/xmodule/xmodule/backcompat_module.py b/common/lib/xmodule/xmodule/backcompat_module.py index 40ffd46d1c..9e7b132e9e 100644 --- a/common/lib/xmodule/xmodule/backcompat_module.py +++ b/common/lib/xmodule/xmodule/backcompat_module.py @@ -1,7 +1,7 @@ """ These modules exist to translate old format XML into newer, semantic forms """ -from x_module import XModuleDescriptor +from .x_module import XModuleDescriptor from lxml import etree from functools import wraps import logging diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index d806ec7913..7ca38ea30a 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -6,25 +6,46 @@ import hashlib import json import logging import traceback -import re import sys -from datetime import timedelta from lxml import etree from pkg_resources import resource_string from capa.capa_problem import LoncapaProblem -from capa.responsetypes import StudentInputError +from capa.responsetypes import StudentInputError, \ + ResponseError, LoncapaProblemError from capa.util import convert_files_to_filenames -from progress import Progress +from .progress import Progress from xmodule.x_module import XModule 
from xmodule.raw_module import RawDescriptor -from xmodule.exceptions import NotFoundError +from xmodule.exceptions import NotFoundError, ProcessingError +from xblock.core import Integer, Scope, BlockScope, ModelType, String, Boolean, Object, Float +from .fields import Timedelta log = logging.getLogger("mitx.courseware") -#----------------------------------------------------------------------------- -TIMEDELTA_REGEX = re.compile(r'^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) second(?:s)?)?$') + +class StringyInteger(Integer): + """ + A model type that converts from strings to integers when reading from json + """ + def from_json(self, value): + try: + return int(value) + except: + return None + + +class StringyFloat(Float): + """ + A model type that converts from string to floats when reading from json + """ + def from_json(self, value): + try: + return float(value) + except: + return None + # Generated this many different variants of problems with rerandomize=per_student NUM_RANDOMIZATION_BINS = 20 @@ -45,41 +66,15 @@ def randomization_bin(seed, problem_id): return int(h.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS -def only_one(lst, default="", process=lambda x: x): - """ - If lst is empty, returns default +class Randomization(String): + def from_json(self, value): + if value in ("", "true"): + return "always" + elif value == "false": + return "per_student" + return value - If lst has a single element, applies process to that element and returns it. - - Otherwise, raises an exception. 
- """ - if len(lst) == 0: - return default - elif len(lst) == 1: - return process(lst[0]) - else: - raise Exception('Malformed XML: expected at most one element in list.') - - -def parse_timedelta(time_str): - """ - time_str: A string with the following components: - day[s] (optional) - hour[s] (optional) - minute[s] (optional) - second[s] (optional) - - Returns a datetime.timedelta parsed from the string - """ - parts = TIMEDELTA_REGEX.match(time_str) - if not parts: - return - parts = parts.groupdict() - time_params = {} - for (name, param) in parts.iteritems(): - if param: - time_params[name] = int(param) - return timedelta(**time_params) + to_json = from_json class ComplexEncoder(json.JSONEncoder): @@ -89,13 +84,33 @@ class ComplexEncoder(json.JSONEncoder): return json.JSONEncoder.default(self, obj) -class CapaModule(XModule): +class CapaFields(object): + attempts = StringyInteger(help="Number of attempts taken by the student on this problem", default=0, scope=Scope.student_state) + max_attempts = StringyInteger(help="Maximum number of attempts that a student is allowed", scope=Scope.settings) + due = String(help="Date that this problem is due by", scope=Scope.settings) + graceperiod = Timedelta(help="Amount of time after the due date that submissions will be accepted", scope=Scope.settings) + showanswer = String(help="When to show the problem answer to the student", scope=Scope.settings, default="closed") + force_save_button = Boolean(help="Whether to force the save button to appear on the page", scope=Scope.settings, default=False) + rerandomize = Randomization(help="When to rerandomize the problem", default="always", scope=Scope.settings) + data = String(help="XML data for the problem", scope=Scope.content) + correct_map = Object(help="Dictionary with the correctness of current student answers", scope=Scope.student_state, default={}) + input_state = Object(help="Dictionary for maintaining the state of inputtypes", scope=Scope.student_state) + student_answers 
= Object(help="Dictionary with the current student responses", scope=Scope.student_state) + done = Boolean(help="Whether the student has answered the problem", scope=Scope.student_state) + display_name = String(help="Display name for this module", scope=Scope.settings) + seed = StringyInteger(help="Random seed for this student", scope=Scope.student_state) + weight = StringyFloat(help="How much to weight this problem by", scope=Scope.settings) + markdown = String(help="Markdown source of this module", scope=Scope.settings) + + +class CapaModule(CapaFields, XModule): ''' An XModule implementing LonCapa format problems, implemented by way of capa.capa_problem.LoncapaProblem ''' icon_class = 'problem' + js = {'coffee': [resource_string(__name__, 'js/src/capa/display.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/javascript_loader.coffee'), @@ -107,61 +122,25 @@ class CapaModule(XModule): js_module_name = "Problem" css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]} - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, instance_state, - shared_state, **kwargs) + def __init__(self, system, location, descriptor, model_data): + XModule.__init__(self, system, location, descriptor, model_data) - self.attempts = 0 - self.max_attempts = None - - dom2 = etree.fromstring(definition['data']) - - display_due_date_string = self.metadata.get('due', None) - if display_due_date_string is not None: - self.display_due_date = dateutil.parser.parse(display_due_date_string) - #log.debug("Parsed " + display_due_date_string + - # " to " + str(self.display_due_date)) + if self.due: + due_date = dateutil.parser.parse(self.due) else: - self.display_due_date = None + due_date = None - grace_period_string = self.metadata.get('graceperiod', None) - if grace_period_string is not None and 
self.display_due_date: - self.grace_period = parse_timedelta(grace_period_string) - self.close_date = self.display_due_date + self.grace_period - #log.debug("Then parsed " + grace_period_string + - # " to closing date" + str(self.close_date)) + if self.graceperiod is not None and due_date: + self.close_date = due_date + self.graceperiod else: - self.grace_period = None - self.close_date = self.display_due_date + self.close_date = due_date - max_attempts = self.metadata.get('attempts', None) - if max_attempts: - self.max_attempts = int(max_attempts) - else: - self.max_attempts = None - - self.show_answer = self.metadata.get('showanswer', 'closed') - - self.force_save_button = self.metadata.get('force_save_button', 'false') - - if self.show_answer == "": - self.show_answer = "closed" - - if instance_state is not None: - instance_state = json.loads(instance_state) - if instance_state is not None and 'attempts' in instance_state: - self.attempts = instance_state['attempts'] - - self.name = only_one(dom2.xpath('/problem/@name')) - - if self.rerandomize == 'never': - self.seed = 1 - elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'): - # see comment on randomization_bin - self.seed = randomization_bin(system.seed, self.location.url) - else: - self.seed = None + if self.seed is None: + if self.rerandomize == 'never': + self.seed = 1 + elif self.rerandomize == "per_student" and hasattr(self.system, 'seed'): + # see comment on randomization_bin + self.seed = randomization_bin(system.seed, self.location.url) # Need the problem location in openendedresponse to send out. 
Adding # it to the system here seems like the least clunky way to get it @@ -171,8 +150,17 @@ class CapaModule(XModule): try: # TODO (vshnayder): move as much as possible of this work and error # checking to descriptor load time - self.lcp = LoncapaProblem(self.definition['data'], self.location.html_id(), - instance_state, seed=self.seed, system=self.system) + self.lcp = self.new_lcp(self.get_state_for_lcp()) + + # At this point, we need to persist the randomization seed + # so that when the problem is re-loaded (to check/view/save) + # it stays the same. + # However, we do not want to write to the database + # every time the module is loaded. + # So we set the seed ONLY when there is not one set already + if self.seed is None: + self.seed = self.lcp.seed + except Exception as err: msg = 'cannot create LoncapaProblem {loc}: {err}'.format( loc=self.location.url(), err=err) @@ -189,35 +177,40 @@ class CapaModule(XModule): problem_text = ('' 'Problem %s has an error:%s' % (self.location.url(), msg)) - self.lcp = LoncapaProblem( - problem_text, self.location.html_id(), - instance_state, seed=self.seed, system=self.system) + self.lcp = self.new_lcp(self.get_state_for_lcp(), text=problem_text) else: # add extra info and raise raise Exception(msg), None, sys.exc_info()[2] - @property - def rerandomize(self): - """ - Property accessor that returns self.metadata['rerandomize'] in a - canonical form - """ - rerandomize = self.metadata.get('rerandomize', 'always') - if rerandomize in ("", "always", "true"): - return "always" - elif rerandomize in ("false", "per_student"): - return "per_student" - elif rerandomize == "never": - return "never" - elif rerandomize == "onreset": - return "onreset" - else: - raise Exception("Invalid rerandomize attribute " + rerandomize) + self.set_state_from_lcp() - def get_instance_state(self): - state = self.lcp.get_state() - state['attempts'] = self.attempts - return json.dumps(state) + def new_lcp(self, state, text=None): + if text is None: + 
text = self.data + + return LoncapaProblem( + problem_text=text, + id=self.location.html_id(), + state=state, + system=self.system, + ) + + def get_state_for_lcp(self): + return { + 'done': self.done, + 'correct_map': self.correct_map, + 'student_answers': self.student_answers, + 'input_state': self.input_state, + 'seed': self.seed, + } + + def set_state_from_lcp(self): + lcp_state = self.lcp.get_state() + self.done = lcp_state['done'] + self.correct_map = lcp_state['correct_map'] + self.input_state = lcp_state['input_state'] + self.student_answers = lcp_state['student_answers'] + self.seed = lcp_state['seed'] def get_score(self): return self.lcp.get_score() @@ -234,7 +227,7 @@ class CapaModule(XModule): if total > 0: try: return Progress(score, total) - except Exception as err: + except Exception: log.exception("Got bad progress") return None return None @@ -247,117 +240,191 @@ class CapaModule(XModule): 'progress': Progress.to_js_status_str(self.get_progress()) }) + def check_button_name(self): + """ + Determine the name for the "check" button. + Usually it is just "Check", but if this is the student's + final attempt, change the name to "Final Check" + """ + if self.max_attempts is not None: + final_check = (self.attempts >= self.max_attempts - 1) + else: + final_check = False + + return "Final Check" if final_check else "Check" + + def should_show_check_button(self): + """ + Return True/False to indicate whether to show the "Check" button. + """ + submitted_without_reset = (self.is_completed() and self.rerandomize == "always") + + # If the problem is closed (past due / too many attempts) + # then we do NOT show the "check" button + # Also, do not show the "check" button if we're waiting + # for the user to reset a randomized problem + if self.closed() or submitted_without_reset: + return False + else: + return True + + def should_show_reset_button(self): + """ + Return True/False to indicate whether to show the "Reset" button. 
+ """ + is_survey_question = (self.max_attempts == 0) + + if self.rerandomize in ["always", "onreset"]: + + # If the problem is closed (and not a survey question with max_attempts==0), + # then do NOT show the reset button. + # If the problem hasn't been submitted yet, then do NOT show + # the reset button. + if (self.closed() and not is_survey_question) or not self.is_completed(): + return False + else: + return True + # Only randomized problems need a "reset" button + else: + return False + + def should_show_save_button(self): + """ + Return True/False to indicate whether to show the "Save" button. + """ + + # If the user has forced the save button to display, + # then show it as long as the problem is not closed + # (past due / too many attempts) + if self.force_save_button == "true": + return not self.closed() + else: + is_survey_question = (self.max_attempts == 0) + needs_reset = self.is_completed() and self.rerandomize == "always" + + # If the student has unlimited attempts, and their answers + # are not randomized, then we do not need a save button + # because they can use the "Check" button without consequences. + # + # The consequences we want to avoid are: + # * Using up an attempt (if max_attempts is set) + # * Changing the current problem, and no longer being + # able to view it (if rerandomize is "always") + # + # In those cases. the if statement below is false, + # and the save button can still be displayed. + # + if self.max_attempts is None and self.rerandomize != "always": + return False + + # If the problem is closed (and not a survey question with max_attempts==0), + # then do NOT show the save button + # If we're waiting for the user to reset a randomized problem + # then do NOT show the save button + elif (self.closed() and not is_survey_question) or needs_reset: + return False + else: + return True + + def handle_problem_html_error(self, err): + """ + Change our problem to a dummy problem containing + a warning message to display to users. 
+ + Returns the HTML to show to users + + *err* is the Exception encountered while rendering the problem HTML. + """ + log.exception(err) + + # TODO (vshnayder): another switch on DEBUG. + if self.system.DEBUG: + msg = ( + '[courseware.capa.capa_module] ' + 'Failed to generate HTML for problem %s' % + (self.location.url())) + msg += '

        Error:

        %s

        ' % str(err).replace('<', '<') + msg += '

        %s

        ' % traceback.format_exc().replace('<', '<') + html = msg + + # We're in non-debug mode, and possibly even in production. We want + # to avoid bricking of problem as much as possible + else: + # We're in non-debug mode, and possibly even in production. We want + # to avoid bricking of problem as much as possible + + # Presumably, student submission has corrupted LoncapaProblem HTML. + # First, pull down all student answers + student_answers = self.lcp.student_answers + answer_ids = student_answers.keys() + + # Some inputtypes, such as dynamath, have additional "hidden" state that + # is not exposed to the student. Keep those hidden + # TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id + hidden_state_keywords = ['dynamath'] + for answer_id in answer_ids: + for hidden_state_keyword in hidden_state_keywords: + if answer_id.find(hidden_state_keyword) >= 0: + student_answers.pop(answer_id) + + # Next, generate a fresh LoncapaProblem + self.lcp = self.new_lcp(None) + self.set_state_from_lcp() + + # Prepend a scary warning to the student + warning = '
        '\ + '

        Warning: The problem has been reset to its initial state!

        '\ + 'The problem\'s state was corrupted by an invalid submission. ' \ + 'The submission consisted of:'\ + '
          ' + for student_answer in student_answers.values(): + if student_answer != '': + warning += '
        • ' + cgi.escape(student_answer) + '
        • ' + warning += '
        '\ + 'If this error persists, please contact the course staff.'\ + '
        ' + + html = warning + try: + html += self.lcp.get_html() + except Exception: # Couldn't do it. Give up + log.exception("Unable to generate html from LoncapaProblem") + raise + + return html + + def get_problem_html(self, encapsulate=True): '''Return html for the problem. Adds check, reset, save buttons as necessary based on the problem config and state.''' try: html = self.lcp.get_html() + + # If we cannot construct the problem HTML, + # then generate an error message instead. except Exception, err: - log.exception(err) + html = self.handle_problem_html_error(err) - # TODO (vshnayder): another switch on DEBUG. - if self.system.DEBUG: - msg = ( - '[courseware.capa.capa_module] ' - 'Failed to generate HTML for problem %s' % - (self.location.url())) - msg += '

        Error:

        %s

        ' % str(err).replace('<', '<') - msg += '

        %s

        ' % traceback.format_exc().replace('<', '<') - html = msg - else: - # We're in non-debug mode, and possibly even in production. We want - # to avoid bricking of problem as much as possible - # Presumably, student submission has corrupted LoncapaProblem HTML. - # First, pull down all student answers - student_answers = self.lcp.student_answers - answer_ids = student_answers.keys() - - # Some inputtypes, such as dynamath, have additional "hidden" state that - # is not exposed to the student. Keep those hidden - # TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id - hidden_state_keywords = ['dynamath'] - for answer_id in answer_ids: - for hidden_state_keyword in hidden_state_keywords: - if answer_id.find(hidden_state_keyword) >= 0: - student_answers.pop(answer_id) - - # Next, generate a fresh LoncapaProblem - self.lcp = LoncapaProblem(self.definition['data'], self.location.html_id(), - state=None, # Tabula rasa - seed=self.seed, system=self.system) - - # Prepend a scary warning to the student - warning = '
        '\ - '

        Warning: The problem has been reset to its initial state!

        '\ - 'The problem\'s state was corrupted by an invalid submission. ' \ - 'The submission consisted of:'\ - '
          ' - for student_answer in student_answers.values(): - if student_answer != '': - warning += '
        • ' + cgi.escape(student_answer) + '
        • ' - warning += '
        '\ - 'If this error persists, please contact the course staff.'\ - '
        ' - - html = warning - try: - html += self.lcp.get_html() - except Exception, err: # Couldn't do it. Give up - log.exception(err) - raise - - content = {'name': self.display_name, - 'html': html, - 'weight': self.descriptor.weight, - } - - # We using strings as truthy values, because the terminology of the - # check button is context-specific. - - # Put a "Check" button if unlimited attempts or still some left - if self.max_attempts is None or self.attempts < self.max_attempts - 1: - check_button = "Check" + # The convention is to pass the name of the check button + # if we want to show a check button, and False otherwise + # This works because non-empty strings evaluate to True + if self.should_show_check_button(): + check_button = self.check_button_name() else: - # Will be final check so let user know that - check_button = "Final Check" - - reset_button = True - save_button = True - - # If we're after deadline, or user has exhausted attempts, - # question is read-only. - if self.closed(): check_button = False - reset_button = False - save_button = False - # User submitted a problem, and hasn't reset. We don't want - # more submissions. - if self.lcp.done and self.rerandomize == "always": - check_button = False - save_button = False - - # Only show the reset button if pressing it will show different values - if self.rerandomize not in ["always", "onreset"]: - reset_button = False - - # User hasn't submitted an answer yet -- we don't want resets - if not self.lcp.done: - reset_button = False - - # We may not need a "save" button if infinite number of attempts and - # non-randomized. The problem author can force it. It's a bit weird for - # randomization to control this; should perhaps be cleaned up. 
- if (self.force_save_button == "false") and (self.max_attempts is None and self.rerandomize != "always"): - save_button = False + content = {'name': self.display_name_with_default, + 'html': html, + 'weight': self.weight, + } context = {'problem': content, 'id': self.id, 'check_button': check_button, - 'reset_button': reset_button, - 'save_button': save_button, + 'reset_button': self.should_show_reset_button(), + 'save_button': self.should_show_save_button(), 'answer_available': self.answer_available(), 'ajax_url': self.system.ajax_url, 'attempts_used': self.attempts, @@ -390,13 +457,22 @@ class CapaModule(XModule): 'problem_save': self.save_problem, 'problem_show': self.get_answer, 'score_update': self.update_score, + 'input_ajax': self.handle_input_ajax, + 'ungraded_response': self.handle_ungraded_response } if dispatch not in handlers: return 'Error' before = self.get_progress() - d = handlers[dispatch](get) + + try: + d = handlers[dispatch](get) + + except Exception as err: + _, _, traceback_obj = sys.exc_info() + raise ProcessingError, err.message, traceback_obj + after = self.get_progress() d.update({ 'progress_changed': after != before, @@ -413,7 +489,7 @@ class CapaModule(XModule): def closed(self): ''' Is the student still allowed to submit answers? ''' - if self.attempts == self.max_attempts: + if self.max_attempts is not None and self.attempts >= self.max_attempts: return True if self.is_past_due(): return True @@ -429,29 +505,37 @@ class CapaModule(XModule): # used by conditional module return self.attempts > 0 + def is_correct(self): + """True if full points""" + d = self.get_score() + return d['score'] == d['total'] + def answer_available(self): ''' Is the user allowed to see an answer? 
''' - if self.show_answer == '': + if self.showanswer == '': return False - elif self.show_answer == "never": + elif self.showanswer == "never": return False elif self.system.user_is_staff: # This is after the 'never' check because admins can see the answer # unless the problem explicitly prevents it return True - elif self.show_answer == 'attempted': + elif self.showanswer == 'attempted': return self.attempts > 0 - elif self.show_answer == 'answered': + elif self.showanswer == 'answered': # NOTE: this is slightly different from 'attempted' -- resetting the problems # makes lcp.done False, but leaves attempts unchanged. return self.lcp.done - elif self.show_answer == 'closed': + elif self.showanswer == 'closed': return self.closed() - elif self.show_answer == 'past_due': + elif self.showanswer == 'finished': + return self.closed() or self.is_correct() + + elif self.showanswer == 'past_due': return self.is_past_due() - elif self.show_answer == 'always': + elif self.showanswer == 'always': return True return False @@ -470,9 +554,48 @@ class CapaModule(XModule): queuekey = get['queuekey'] score_msg = get['xqueue_body'] self.lcp.update_score(score_msg, queuekey) + self.set_state_from_lcp() + self.publish_grade() return dict() # No AJAX return is needed + def handle_ungraded_response(self, get): + ''' + Delivers a response from the XQueue to the capa problem + + The score of the problem will not be updated + + Args: + - get (dict) must contain keys: + queuekey - a key specific to this response + xqueue_body - the body of the response + Returns: + empty dictionary + + No ajax return is needed, so an empty dict is returned + ''' + queuekey = get['queuekey'] + score_msg = get['xqueue_body'] + # pass along the xqueue message to the problem + self.lcp.ungraded_response(score_msg, queuekey) + self.set_state_from_lcp() + return dict() + + def handle_input_ajax(self, get): + ''' + Handle ajax calls meant for a particular input in the problem + + Args: + - get (dict) - data that 
should be passed to the input + Returns: + - dict containing the response from the input + ''' + response = self.lcp.handle_input_ajax(get) + # save any state changes that may occur + self.set_state_from_lcp() + return response + + def get_answer(self, get): ''' For the "show answer" button. @@ -481,13 +604,14 @@ class CapaModule(XModule): ''' event_info = dict() event_info['problem_id'] = self.location.url() - self.system.track_function('show_answer', event_info) + self.system.track_function('showanswer', event_info) if not self.answer_available(): raise NotFoundError('Answer is not available') else: answers = self.lcp.get_question_answers() + self.set_state_from_lcp() - # answers (eg ) may have embedded images + # answers (eg ) may have embedded images # but be careful, some problems are using non-string answer dicts new_answers = dict() for answer_id in answers: @@ -513,30 +637,80 @@ class CapaModule(XModule): @staticmethod def make_dict_of_responses(get): '''Make dictionary of student responses (aka "answers") - get is POST dictionary. + get is POST dictionary (Djano QueryDict). + + The *get* dict has keys of the form 'x_y', which are mapped + to key 'y' in the returned dict. For example, + 'input_1_2_3' would be mapped to '1_2_3' in the returned dict. + + Some inputs always expect a list in the returned dict + (e.g. checkbox inputs). The convention is that + keys in the *get* dict that end with '[]' will always + have list values in the returned dict. + For example, if the *get* dict contains {'input_1[]': 'test' } + then the output dict would contain {'1': ['test'] } + (the value is a list). + + Raises an exception if: + + A key in the *get* dictionary does not contain >= 1 underscores + (e.g. "input" is invalid; "input_1" is valid) + + Two keys end up with the same name in the returned dict. + (e.g. 'input_1' and 'input_1[]', which both get mapped + to 'input_1' in the returned dict) ''' answers = dict() + for key in get: # e.g. 
input_resistor_1 ==> resistor_1 _, _, name = key.partition('_') - # This allows for answers which require more than one value for - # the same form input (e.g. checkbox inputs). The convention is that - # if the name ends with '[]' (which looks like an array), then the - # answer will be an array. - if not name.endswith('[]'): - answers[name] = get[key] + # If key has no underscores, then partition + # will return (key, '', '') + # We detect this and raise an error + if not name: + raise ValueError("%s must contain at least one underscore" % str(key)) + else: - name = name[:-2] - answers[name] = get.getlist(key) + # This allows for answers which require more than one value for + # the same form input (e.g. checkbox inputs). The convention is that + # if the name ends with '[]' (which looks like an array), then the + # answer will be an array. + is_list_key = name.endswith('[]') + name = name[:-2] if is_list_key else name + + if is_list_key: + val = get.getlist(key) + else: + val = get[key] + + # If the name already exists, then we don't want + # to override it. Raise an error instead + if name in answers: + raise ValueError("Key %s already exists in answers dict" % str(name)) + else: + answers[name] = val return answers + def publish_grade(self): + """ + Publishes the student's current grade to the system as an event + """ + score = self.lcp.get_score() + self.system.publish({ + 'event_name': 'grade', + 'value': score['score'], + 'max_value': score['total'], + }) + + def check_problem(self, get): ''' Checks whether answers to a problem are correct, and returns a map of correct/incorrect answers: - {'success' : bool, + {'success' : 'correct' | 'incorrect' | AJAX alert msg string, 'contents' : html} ''' event_info = dict() @@ -545,7 +719,6 @@ class CapaModule(XModule): answers = self.make_dict_of_responses(get) event_info['answers'] = convert_files_to_filenames(answers) - # Too late. 
Cannot submit if self.closed(): event_info['failure'] = 'closed' @@ -553,7 +726,7 @@ class CapaModule(XModule): raise NotFoundError('Problem is closed') # Problem submitted. Student should reset before checking again - if self.lcp.done and self.rerandomize == "always": + if self.done and self.rerandomize == "always": event_info['failure'] = 'unreset' self.system.track_function('save_problem_check_fail', event_info) raise NotFoundError('Problem must be reset before it can be checked again') @@ -565,26 +738,42 @@ class CapaModule(XModule): waittime_between_requests = self.system.xqueue['waittime'] if (current_time - prev_submit_time).total_seconds() < waittime_between_requests: msg = 'You must wait at least %d seconds between submissions' % waittime_between_requests - return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback + return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback try: - old_state = self.lcp.get_state() - lcp_id = self.lcp.problem_id correct_map = self.lcp.grade_answers(answers) - except StudentInputError as inst: - log.exception("StudentInputError in capa_module:problem_check") - return {'success': inst.message} + self.set_state_from_lcp() + + except (StudentInputError, ResponseError, LoncapaProblemError) as inst: + log.warning("StudentInputError in capa_module:problem_check", + exc_info=True) + + # If the user is a staff member, include + # the full exception, including traceback, + # in the response + if self.system.user_is_staff: + msg = "Staff debug info: %s" % traceback.format_exc() + + # Otherwise, display just an error message, + # without a stack trace + else: + msg = "Error: %s" % str(inst.message) + + return {'success': msg} + except Exception, err: if self.system.DEBUG: msg = "Error checking problem: " + str(err) msg += '\nTraceback:\n' + traceback.format_exc() return {'success': msg} - log.exception("Error in capa_module problem checking") - raise Exception("error in capa_module") + raise 
self.attempts = self.attempts + 1 self.lcp.done = True + self.set_state_from_lcp() + self.publish_grade() + # success = correct if ALL questions in this problem are correct success = 'correct' for answer_id in correct_map: @@ -595,11 +784,11 @@ class CapaModule(XModule): # 'success' will always be incorrect event_info['correct_map'] = correct_map.get_dict() event_info['success'] = success - event_info['attempts'] = self.attempts + event_info['attempts'] = self.attempts self.system.track_function('save_problem_check', event_info) - if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback - self.system.psychometrics_handler(self.get_instance_state()) + if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback + self.system.psychometrics_handler(self.get_instance_state()) # render problem into HTML html = self.get_problem_html(encapsulate=False) @@ -622,31 +811,41 @@ class CapaModule(XModule): event_info['answers'] = answers # Too late. Cannot submit - if self.closed(): + if self.closed() and not self.max_attempts == 0: event_info['failure'] = 'closed' self.system.track_function('save_problem_fail', event_info) return {'success': False, - 'error': "Problem is closed"} + 'msg': "Problem is closed"} # Problem submitted. Student should reset before saving # again. - if self.lcp.done and self.rerandomize == "always": + if self.done and self.rerandomize == "always": event_info['failure'] = 'done' self.system.track_function('save_problem_fail', event_info) return {'success': False, - 'error': "Problem needs to be reset prior to save."} + 'msg': "Problem needs to be reset prior to save"} self.lcp.student_answers = answers - # TODO: should this be save_problem_fail? Looks like success to me... 
- self.system.track_function('save_problem_fail', event_info) - return {'success': True} + self.set_state_from_lcp() + + self.system.track_function('save_problem_success', event_info) + msg = "Your answers have been saved" + if not self.max_attempts == 0: + msg += " but not graded. Hit 'Check' to grade them." + return {'success': True, + 'msg': msg} def reset_problem(self, get): ''' Changes problem state to unfinished -- removes student answers, and causes problem to rerender itself. - Returns problem html as { 'html' : html-string }. + Returns a dictionary of the form: + {'success': True/False, + 'html': Problem HTML string } + + If an error occurs, the dictionary will also have an + 'error' key containing an error message. ''' event_info = dict() event_info['old_state'] = self.lcp.get_state() @@ -658,29 +857,33 @@ class CapaModule(XModule): return {'success': False, 'error': "Problem is closed"} - if not self.lcp.done: + if not self.done: event_info['failure'] = 'not_done' self.system.track_function('reset_problem_fail', event_info) return {'success': False, 'error': "Refresh the page and make an attempt before resetting."} - self.lcp.do_reset() if self.rerandomize in ["always", "onreset"]: # reset random number generator seed (note the self.lcp.get_state() # in next line) - self.lcp.seed = None + seed = None + else: + seed = self.lcp.seed - self.lcp = LoncapaProblem(self.definition['data'], - self.location.html_id(), self.lcp.get_state(), - system=self.system) + # Generate a new problem with either the previous seed or a new seed + self.lcp = self.new_lcp({'seed': seed}) + + # Pull in the new problem seed + self.set_state_from_lcp() event_info['new_state'] = self.lcp.get_state() self.system.track_function('reset_problem', event_info) - return {'html': self.get_problem_html(encapsulate=False)} + return {'success': True, + 'html': self.get_problem_html(encapsulate=False)} -class CapaDescriptor(RawDescriptor): +class CapaDescriptor(CapaFields, RawDescriptor): """ 
Module implementing problems in the LON-CAPA format, as implemented by capa.capa_problem @@ -701,9 +904,15 @@ class CapaDescriptor(RawDescriptor): # actually use type and points? metadata_attributes = RawDescriptor.metadata_attributes + ('type', 'points') + # The capa format specifies that what we call max_attempts in the code + # is the attribute `attempts`. This will do that conversion + metadata_translations = dict(RawDescriptor.metadata_translations) + metadata_translations['attempts'] = 'max_attempts' + def get_context(self): _context = RawDescriptor.get_context(self) - _context.update({'markdown': self.metadata.get('markdown', '')}) + _context.update({'markdown': self.markdown, + 'enable_markdown': self.markdown is not None}) return _context @property @@ -711,10 +920,11 @@ class CapaDescriptor(RawDescriptor): """Remove metadata from the editable fields since it has its own editor""" subset = super(CapaDescriptor, self).editable_metadata_fields if 'markdown' in subset: - subset.remove('markdown') + del subset['markdown'] + if 'empty' in subset: + del subset['empty'] return subset - # VS[compat] # TODO (cpennington): Delete this method once all fall 2012 course are being # edited in the cms @@ -724,12 +934,3 @@ class CapaDescriptor(RawDescriptor): 'problems/' + path[8:], path[8:], ] - - def __init__(self, *args, **kwargs): - super(CapaDescriptor, self).__init__(*args, **kwargs) - - weight_string = self.metadata.get('weight', None) - if weight_string: - self.weight = float(weight_string) - else: - self.weight = None diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py index 2da15a4086..f45ad39e35 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_module.py +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -1,37 +1,78 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree -from lxml.html 
import rewrite_links -from path import path -import os -import sys from pkg_resources import resource_string -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children +from xmodule.raw_module import RawDescriptor from .x_module import XModule -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location -from combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor +from xblock.core import Integer, Scope, BlockScope, ModelType, String, Boolean, Object, List +from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor +from collections import namedtuple +from xmodule.open_ended_grading_classes.xblock_field_types import StringyFloat log = logging.getLogger("mitx.courseware") +V1_SETTINGS_ATTRIBUTES = ["display_name", "attempts", "is_graded", "accept_file_upload", + "skip_spelling_checks", "due", "graceperiod"] -VERSION_TUPLES = ( - ('1', CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module), -) +V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state", + "student_attempts", "ready_to_reset"] + +V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES + +VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes']) +VERSION_TUPLES = { + 1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES, + V1_STUDENT_ATTRIBUTES), +} DEFAULT_VERSION = 1 -DEFAULT_VERSION = str(DEFAULT_VERSION) -class CombinedOpenEndedModule(XModule): + +class VersionInteger(Integer): + """ + A model type that converts from strings to integers when reading from json. + Also does error checking to see if version is correct or not. 
+ """ + + def from_json(self, value): + try: + value = int(value) + if value not in VERSION_TUPLES: + version_error_string = "Could not find version {0}, using version {1} instead" + log.error(version_error_string.format(value, DEFAULT_VERSION)) + value = DEFAULT_VERSION + except: + value = DEFAULT_VERSION + return value + + +class CombinedOpenEndedFields(object): + display_name = String(help="Display name for this module", default="Open Ended Grading", scope=Scope.settings) + current_task_number = Integer(help="Current task that the student is on.", default=0, scope=Scope.student_state) + task_states = List(help="List of state dictionaries of each task within this module.", scope=Scope.student_state) + state = String(help="Which step within the current task that the student is on.", default="initial", + scope=Scope.student_state) + student_attempts = Integer(help="Number of attempts taken by the student on this problem", default=0, + scope=Scope.student_state) + ready_to_reset = Boolean(help="If the problem is ready to be reset or not.", default=False, + scope=Scope.student_state) + attempts = Integer(help="Maximum number of attempts that a student is allowed.", default=1, scope=Scope.settings) + is_graded = Boolean(help="Whether or not the problem is graded.", default=False, scope=Scope.settings) + accept_file_upload = Boolean(help="Whether or not the problem accepts file uploads.", default=False, + scope=Scope.settings) + skip_spelling_checks = Boolean(help="Whether or not to skip initial spelling checks.", default=True, + scope=Scope.settings) + due = String(help="Date that this problem is due by", default=None, scope=Scope.settings) + graceperiod = String(help="Amount of time after the due date that submissions will be accepted", default=None, + scope=Scope.settings) + version = VersionInteger(help="Current version number", default=DEFAULT_VERSION, scope=Scope.settings) + data = String(help="XML data for the problem", scope=Scope.content) + weight = 
StringyFloat(help="How much to weight this problem by", scope=Scope.settings) + + +class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule): """ This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). It transitions between problems, and support arbitrary ordering. @@ -62,6 +103,8 @@ class CombinedOpenEndedModule(XModule): INTERMEDIATE_DONE = 'intermediate_done' DONE = 'done' + icon_class = 'problem' + js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/javascript_loader.coffee'), @@ -70,16 +113,13 @@ class CombinedOpenEndedModule(XModule): css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - + def __init__(self, system, location, descriptor, model_data): + XModule.__init__(self, system, location, descriptor, model_data) """ Definition file should have one or many task blocks, a rubric block, and a prompt block: Sample file: - + Blah blah rubric. @@ -113,45 +153,37 @@ class CombinedOpenEndedModule(XModule): self.system = system self.system.set('location', location) - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} + if self.task_states is None: + self.task_states = [] - self.version = self.metadata.get('version', DEFAULT_VERSION) - if not isinstance(self.version, basestring): - try: - self.version = str(self.version) - except: - log.error("Version {0} is not correct. 
Going with version {1}".format(self.version, DEFAULT_VERSION)) - self.version = DEFAULT_VERSION + version_tuple = VERSION_TUPLES[self.version] - versions = [i[0] for i in VERSION_TUPLES] - descriptors = [i[1] for i in VERSION_TUPLES] - modules = [i[2] for i in VERSION_TUPLES] + self.student_attributes = version_tuple.student_attributes + self.settings_attributes = version_tuple.settings_attributes - try: - version_index = versions.index(self.version) - except: - log.error("Version {0} is not correct. Going with version {1}".format(self.version, DEFAULT_VERSION)) - self.version = DEFAULT_VERSION - version_index = versions.index(self.version) + attributes = self.student_attributes + self.settings_attributes static_data = { - 'rewrite_content_links' : self.rewrite_content_links, + 'rewrite_content_links': self.rewrite_content_links, } - - self.child_descriptor = descriptors[version_index](self.system) - self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['xml_string']), self.system) - self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor, - instance_state = json.dumps(instance_state), metadata = self.metadata, static_data= static_data) + instance_state = {k: getattr(self, k) for k in attributes} + self.child_descriptor = version_tuple.descriptor(self.system) + self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system) + self.child_module = version_tuple.module(self.system, location, self.child_definition, self.child_descriptor, + instance_state=instance_state, static_data=static_data, + attributes=attributes) + self.save_instance_data() def get_html(self): - return self.child_module.get_html() + self.save_instance_data() + return_value = self.child_module.get_html() + return return_value def handle_ajax(self, dispatch, get): - return self.child_module.handle_ajax(dispatch, get) + self.save_instance_data() + 
return_value = self.child_module.handle_ajax(dispatch, get) + self.save_instance_data() + return return_value def get_instance_state(self): return self.child_module.get_instance_state() @@ -169,16 +201,18 @@ class CombinedOpenEndedModule(XModule): def due_date(self): return self.child_module.due_date - @property - def display_name(self): - return self.child_module.display_name + def save_instance_data(self): + for attribute in self.student_attributes: + child_attr = getattr(self.child_module, attribute) + if child_attr != getattr(self, attribute): + setattr(self, attribute, getattr(self.child_module, attribute)) -class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor): +class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor): """ Module for adding combined open ended questions """ - mako_template = "widgets/html-edit.html" + mako_template = "widgets/raw-edit.html" module_class = CombinedOpenEndedModule filename_extension = "xml" @@ -186,35 +220,3 @@ class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "combinedopenended" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" - - @classmethod - def definition_from_xml(cls, xml_object, system): - """ - Pull out the individual tasks, the rubric, and the prompt, and parse - - Returns: - { - 'rubric': 'some-html', - 'prompt': 'some-html', - 'task_xml': dictionary of xml strings, - } - """ - - return {'xml_string' : etree.tostring(xml_object), 'metadata' : xml_object.attrib} - - - def definition_to_xml(self, resource_fs): - '''Return an xml element representing this definition.''' - elt = etree.Element('combinedopenended') - - def add_child(k): - child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) - child_node = etree.fromstring(child_str) - elt.append(child_node) - - for child in ['task']: - add_child(child) - - return elt \ No newline at end of file diff --git 
a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py deleted file mode 100644 index 689103a86a..0000000000 --- a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py +++ /dev/null @@ -1,169 +0,0 @@ -import logging -from lxml import etree - -log = logging.getLogger(__name__) - - -class RubricParsingError(Exception): - def __init__(self, msg): - self.msg = msg - - -class CombinedOpenEndedRubric(object): - - def __init__ (self, system, view_only = False): - self.has_score = False - self.view_only = view_only - self.system = system - - def render_rubric(self, rubric_xml): - ''' - render_rubric: takes in an xml string and outputs the corresponding - html for that xml, given the type of rubric we're generating - Input: - rubric_xml: an string that has not been parsed into xml that - represents this particular rubric - Output: - html: the html that corresponds to the xml given - ''' - success = False - try: - rubric_categories = self.extract_categories(rubric_xml) - max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) - max_score = max(max_scores) - html = self.system.render_template('open_ended_rubric.html', - {'categories': rubric_categories, - 'has_score': self.has_score, - 'view_only': self.view_only, - 'max_score': max_score}) - success = True - except: - error_message = "[render_rubric] Could not parse the rubric with xml: {0}".format(rubric_xml) - log.error(error_message) - raise RubricParsingError(error_message) - return success, html - - def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score): - success, rubric_feedback = self.render_rubric(rubric_string) - if not success: - error_message = "Could not parse rubric : {0} for location {1}".format(rubric_string, location.url()) - log.error(error_message) - raise RubricParsingError(error_message) - - rubric_categories = self.extract_categories(rubric_string) - total = 0 - for category 
in rubric_categories: - total = total + len(category['options']) - 1 - if len(category['options']) > (max_score_allowed + 1): - error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}".format( - len(category['options']), max_score_allowed) - log.error(error_message) - raise RubricParsingError(error_message) - - if total != max_score: - error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}".format( - max_score, location, total) - log.error(error_msg) - raise RubricParsingError(error_msg) - - def extract_categories(self, element): - ''' - Contstruct a list of categories such that the structure looks like: - [ { category: "Category 1 Name", - options: [{text: "Option 1 Name", points: 0}, {text:"Option 2 Name", points: 5}] - }, - { category: "Category 2 Name", - options: [{text: "Option 1 Name", points: 0}, - {text: "Option 2 Name", points: 1}, - {text: "Option 3 Name", points: 2]}] - - ''' - if isinstance(element, basestring): - element = etree.fromstring(element) - categories = [] - for category in element: - if category.tag != 'category': - raise RubricParsingError("[extract_categories] Expected a tag: got {0} instead".format(category.tag)) - else: - categories.append(self.extract_category(category)) - return categories - - - def extract_category(self, category): - ''' - construct an individual category - {category: "Category 1 Name", - options: [{text: "Option 1 text", points: 1}, - {text: "Option 2 text", points: 2}]} - - all sorting and auto-point generation occurs in this function - ''' - descriptionxml = category[0] - optionsxml = category[1:] - scorexml = category[1] - score = None - if scorexml.tag == 'score': - score_text = scorexml.text - optionsxml = category[2:] - score = int(score_text) - self.has_score = True - # if we are missing the score tag and we are expecting one - elif self.has_score: - raise RubricParsingError("[extract_category] Category {0} is missing 
a score".format(descriptionxml.text)) - - - # parse description - if descriptionxml.tag != 'description': - raise RubricParsingError("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag)) - - description = descriptionxml.text - - cur_points = 0 - options = [] - autonumbering = True - # parse options - for option in optionsxml: - if option.tag != 'option': - raise RubricParsingError("[extract_category]: expected option tag, got {0} instead".format(option.tag)) - else: - pointstr = option.get("points") - if pointstr: - autonumbering = False - # try to parse this into an int - try: - points = int(pointstr) - except ValueError: - raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead".format(pointstr)) - elif autonumbering: - # use the generated one if we're in the right mode - points = cur_points - cur_points = cur_points + 1 - else: - raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.") - - selected = score == points - optiontext = option.text - options.append({'text': option.text, 'points': points, 'selected': selected}) - - # sort and check for duplicates - options = sorted(options, key=lambda option: option['points']) - CombinedOpenEndedRubric.validate_options(options) - - return {'description': description, 'options': options} - - - @staticmethod - def validate_options(options): - ''' - Validates a set of options. 
This can and should be extended to filter out other bad edge cases - ''' - if len(options) == 0: - raise RubricParsingError("[extract_category]: no options associated with this category") - if len(options) == 1: - return - prev = options[0]['points'] - for option in options[1:]: - if prev == option['points']: - raise RubricParsingError("[extract_category]: found duplicate point values between two different options") - else: - prev = option['points'] diff --git a/common/lib/xmodule/xmodule/conditional_module.py b/common/lib/xmodule/xmodule/conditional_module.py index 787d355c4a..b3e0e0e06b 100644 --- a/common/lib/xmodule/xmodule/conditional_module.py +++ b/common/lib/xmodule/xmodule/conditional_module.py @@ -1,126 +1,160 @@ +"""Conditional module is the xmodule, which you can use for disabling +some xmodules by conditions. +""" + import json import logging +from lxml import etree +from pkg_resources import resource_string from xmodule.x_module import XModule from xmodule.modulestore import Location from xmodule.seq_module import SequenceDescriptor +from xblock.core import String, Scope, List +from xmodule.modulestore.exceptions import ItemNotFoundError -from pkg_resources import resource_string log = logging.getLogger('mitx.' + __name__) -class ConditionalModule(XModule): - ''' +class ConditionalFields(object): + show_tag_list = List(help="Poll answers", scope=Scope.content) + + +class ConditionalModule(ConditionalFields, XModule): + """ Blocks child module from showing unless certain conditions are met. 
Example: - + + - - + tag attributes: + sources - location id of required modules, separated by ';' - ''' + completed - map to `is_completed` module method + attempted - map to `is_attempted` module method + poll_answer - map to `poll_answer` module attribute + voted - map to `voted` module attribute - js = {'coffee': [resource_string(__name__, 'js/src/conditional/display.coffee'), + tag attributes: + sources - location id of required modules, separated by ';' + + You can add you own rules for tag, like + "completed", "attempted" etc. To do that yo must extend + `ConditionalModule.conditions_map` variable and add pair: + my_attr: my_property/my_method + + After that you can use it: + + ... + + + And my_property/my_method will be called for required modules. + + """ + + js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'), + resource_string(__name__, 'js/src/conditional/display.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), - resource_string(__name__, 'js/src/javascript_loader.coffee'), + ]} js_module_name = "Conditional" css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]} + # Map + # key: + # value: + conditions_map = { + 'poll_answer': 'poll_answer', # poll_question attr + 'completed': 'is_completed', # capa_problem attr + 'attempted': 'is_attempted', # capa_problem attr + 'voted': 'voted' # poll_question attr + } - def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, **kwargs): - """ - In addition to the normal XModule init, provide: - - self.condition = string describing condition required - - """ - XModule.__init__(self, system, location, definition, descriptor, instance_state, shared_state, **kwargs) - self.contents = None - self.condition = self.metadata.get('condition', '') - self._get_required_modules() - children = self.get_display_items() - if children: - self.icon_class = children[0].get_icon_class() - #log.debug('conditional module required=%s' % 
self.required_modules_list) - - def _get_required_modules(self): - self.required_modules = [] - for descriptor in self.descriptor.get_required_module_descriptors(): - module = self.system.get_module(descriptor) - self.required_modules.append(module) - #log.debug('required_modules=%s' % (self.required_modules)) + def _get_condition(self): + # Get first valid condition. + for xml_attr, attr_name in self.conditions_map.iteritems(): + xml_value = self.descriptor.xml_attributes.get(xml_attr) + if xml_value: + return xml_value, attr_name + raise Exception('Error in conditional module: unknown condition "%s"' + % xml_attr) def is_condition_satisfied(self): - self._get_required_modules() + self.required_modules = [self.system.get_module(descriptor) for + descriptor in self.descriptor.get_required_module_descriptors()] - if self.condition == 'require_completed': - # all required modules must be completed, as determined by - # the modules .is_completed() method - for module in self.required_modules: - #log.debug('in is_condition_satisfied; student_answers=%s' % module.lcp.student_answers) - #log.debug('in is_condition_satisfied; instance_state=%s' % module.instance_state) - if not hasattr(module, 'is_completed'): - raise Exception('Error in conditional module: required module %s has no .is_completed() method' % module) - if not module.is_completed(): - log.debug('conditional module: %s not completed' % module) - return False - else: - log.debug('conditional module: %s IS completed' % module) - return True - elif self.condition == 'require_attempted': - # all required modules must be attempted, as determined by - # the modules .is_attempted() method - for module in self.required_modules: - if not hasattr(module, 'is_attempted'): - raise Exception('Error in conditional module: required module %s has no .is_attempted() method' % module) - if not module.is_attempted(): - log.debug('conditional module: %s not attempted' % module) - return False - else: - log.debug('conditional 
module: %s IS attempted' % module) - return True - else: - raise Exception('Error in conditional module: unknown condition "%s"' % self.condition) + xml_value, attr_name = self._get_condition() - return True + if xml_value and self.required_modules: + for module in self.required_modules: + if not hasattr(module, attr_name): + raise Exception('Error in conditional module: \ + required module {module} has no {module_attr}'.format( + module=module, module_attr=attr_name)) + + attr = getattr(module, attr_name) + if callable(attr): + attr = attr() + + if xml_value != str(attr): + break + else: + return True + return False def get_html(self): - self.is_condition_satisfied() + # Calculate html ids of dependencies + self.required_html_ids = [descriptor.location.html_id() for + descriptor in self.descriptor.get_required_module_descriptors()] + return self.system.render_template('conditional_ajax.html', { 'element_id': self.location.html_id(), 'id': self.id, 'ajax_url': self.system.ajax_url, + 'depends': ';'.join(self.required_html_ids) }) def handle_ajax(self, dispatch, post): - ''' - This is called by courseware.module_render, to handle an AJAX call. - ''' - #log.debug('conditional_module handle_ajax: dispatch=%s' % dispatch) - + """This is called by courseware.moduleodule_render, to handle + an AJAX call. 
+ """ if not self.is_condition_satisfied(): - context = {'module': self} - html = self.system.render_template('conditional_module.html', context) - return json.dumps({'html': html}) + message = self.descriptor.xml_attributes.get('message') + context = {'module': self, + 'message': message} + html = self.system.render_template('conditional_module.html', + context) + return json.dumps({'html': [html], 'message': bool(message)}) - if self.contents is None: - self.contents = [child.get_html() for child in self.get_display_items()] - - # for now, just deal with one child - html = self.contents[0] + html = [child.get_html() for child in self.get_display_items()] return json.dumps({'html': html}) + def get_icon_class(self): + new_class = 'other' + if self.is_condition_satisfied(): + # HACK: This shouldn't be hard-coded to two types + # OBSOLETE: This obsoletes 'type' + class_priority = ['video', 'problem'] + + child_classes = [self.system.get_module(child_descriptor).get_icon_class() + for child_descriptor in self.descriptor.get_children()] + for c in class_priority: + if c in child_classes: + new_class = c + return new_class + + +class ConditionalDescriptor(ConditionalFields, SequenceDescriptor): + """Descriptor for conditional xmodule.""" + _tag_name = 'conditional' -class ConditionalDescriptor(SequenceDescriptor): module_class = ConditionalModule filename_extension = "xml" @@ -128,26 +162,68 @@ class ConditionalDescriptor(SequenceDescriptor): stores_state = True has_score = False - def __init__(self, *args, **kwargs): - super(ConditionalDescriptor, self).__init__(*args, **kwargs) - required_module_list = [tuple(x.split('/', 1)) for x in self.metadata.get('required', '').split('&')] - self.required_module_locations = [] - for rm in required_module_list: - try: - (tag, name) = rm - except Exception as err: - msg = "Specification of required module in conditional is broken: %s" % self.metadata.get('required') - log.warning(msg) - self.system.error_tracker(msg) - continue 
- loc = self.location.dict() - loc['category'] = tag - loc['name'] = name - self.required_module_locations.append(Location(loc)) - log.debug('ConditionalDescriptor required_module_locations=%s' % self.required_module_locations) + @staticmethod + def parse_sources(xml_element, system, return_descriptor=False): + """Parse xml_element 'sources' attr and: + if return_descriptor=True - return list of descriptors + if return_descriptor=False - return list of locations + """ + result = [] + sources = xml_element.get('sources') + if sources: + locations = [location.strip() for location in sources.split(';')] + for location in locations: + if Location.is_valid(location): # Check valid location url. + try: + if return_descriptor: + descriptor = system.load_item(location) + result.append(descriptor) + else: + result.append(location) + except ItemNotFoundError: + msg = "Invalid module by location." + log.exception(msg) + system.error_tracker(msg) + return result def get_required_module_descriptors(self): - """Returns a list of XModuleDescritpor instances upon which this module depends, but are - not children of this module""" - return [self.system.load_item(loc) for loc in self.required_module_locations] + """Returns a list of XModuleDescritpor instances upon + which this module depends. + """ + return ConditionalDescriptor.parse_sources( + self.xml_attributes, self.system, True) + + @classmethod + def definition_from_xml(cls, xml_object, system): + children = [] + show_tag_list = [] + for child in xml_object: + if child.tag == 'show': + location = ConditionalDescriptor.parse_sources( + child, system) + children.extend(location) + show_tag_list.extend(location) + else: + try: + descriptor = system.process_xml(etree.tostring(child)) + module_url = descriptor.location.url() + children.append(module_url) + except: + msg = "Unable to load child when parsing Conditional." 
+ log.exception(msg) + system.error_tracker(msg) + return {'show_tag_list': show_tag_list}, children + + def definition_to_xml(self, resource_fs): + xml_object = etree.Element(self._tag_name) + for child in self.get_children(): + location = str(child.location) + if location in self.show_tag_list: + show_str = '<{tag_name} sources="{sources}" />'.format( + tag_name='show', sources=location) + xml_object.append(etree.fromstring(show_str)) + else: + xml_object.append( + etree.fromstring(child.export_to_xml(resource_fs))) + return xml_object diff --git a/common/lib/xmodule/xmodule/contentstore/content.py b/common/lib/xmodule/xmodule/contentstore/content.py index be33401bc8..9dc4b1367b 100644 --- a/common/lib/xmodule/xmodule/contentstore/content.py +++ b/common/lib/xmodule/xmodule/contentstore/content.py @@ -35,7 +35,8 @@ class StaticContent(object): @staticmethod def compute_location(org, course, name, revision=None, is_thumbnail=False): name = name.replace('/', '_') - return Location([XASSET_LOCATION_TAG, org, course, 'asset' if not is_thumbnail else 'thumbnail', Location.clean(name), revision]) + return Location([XASSET_LOCATION_TAG, org, course, 'asset' if not is_thumbnail else 'thumbnail', + Location.clean_keeping_underscores(name), revision]) def get_id(self): return StaticContent.get_id_from_location(self.location) diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py index 2c69c449ba..6f3b8e94c9 100644 --- a/common/lib/xmodule/xmodule/course_module.py +++ b/common/lib/xmodule/xmodule/course_module.py @@ -1,125 +1,228 @@ import logging from cStringIO import StringIO -from math import exp, erf +from math import exp from lxml import etree from path import path # NOTE (THK): Only used for detecting presence of syllabus import requests import time from datetime import datetime +import dateutil.parser + from xmodule.modulestore import Location from xmodule.seq_module import SequenceDescriptor, SequenceModule -from 
xmodule.timeparse import parse_time, stringify_time +from xmodule.timeparse import parse_time from xmodule.util.decorators import lazyproperty from xmodule.graders import grader_from_conf -from datetime import datetime import json -import logging -import requests -import time -import copy + +from xblock.core import Scope, List, String, Object, Boolean +from .fields import Date log = logging.getLogger(__name__) +class StringOrDate(Date): + def from_json(self, value): + """ + Parse an optional metadata key containing a time or a string: + if present, assume it's a string if it doesn't parse. + """ + try: + result = super(StringOrDate, self).from_json(value) + except ValueError: + return value + if result is None: + return value + else: + return result + + def to_json(self, value): + """ + Convert a time struct or string to a string. + """ + try: + result = super(StringOrDate, self).to_json(value) + except: + return value + if result is None: + return value + else: + return result + + edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False, remove_comments=True, remove_blank_text=True) _cached_toc = {} -class CourseDescriptor(SequenceDescriptor): - module_class = SequenceModule +class Textbook(object): + def __init__(self, title, book_url): + self.title = title + self.book_url = book_url + self.start_page = int(self.table_of_contents[0].attrib['page']) - template_dir_name = 'course' + # The last page should be the last element in the table of contents, + # but it may be nested. 
So recurse all the way down the last element + last_el = self.table_of_contents[-1] + while last_el.getchildren(): + last_el = last_el[-1] - class Textbook: - def __init__(self, title, book_url): - self.title = title - self.book_url = book_url - self.table_of_contents = self._get_toc_from_s3() - self.start_page = int(self.table_of_contents[0].attrib['page']) + self.end_page = int(last_el.attrib['page']) - # The last page should be the last element in the table of contents, - # but it may be nested. So recurse all the way down the last element - last_el = self.table_of_contents[-1] - while last_el.getchildren(): - last_el = last_el[-1] + @lazyproperty + def table_of_contents(self): + """ + Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url - self.end_page = int(last_el.attrib['page']) + Returns XML tree representation of the table of contents + """ + toc_url = self.book_url + 'toc.xml' - @property - def table_of_contents(self): - return self.table_of_contents + # cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores) + # course modules have a very short lifespan and are constantly being created and torn down. + # Since this module in the __init__() method does a synchronous call to AWS to get the TOC + # this is causing a big performance problem. So let's be a bit smarter about this and cache + # each fetch and store in-mem for 10 minutes. + # NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and + # rewrite to use the traditional Django in-memory cache. 
+ try: + # see if we already fetched this + if toc_url in _cached_toc: + (table_of_contents, timestamp) = _cached_toc[toc_url] + age = datetime.now() - timestamp + # expire every 10 minutes + if age.seconds < 600: + return table_of_contents + except Exception as err: + pass - def _get_toc_from_s3(self): - """ - Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url + # Get the table of contents from S3 + log.info("Retrieving textbook table of contents from %s" % toc_url) + try: + r = requests.get(toc_url) + except Exception as err: + msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url) + log.error(msg) + raise Exception(msg) - Returns XML tree representation of the table of contents - """ - toc_url = self.book_url + 'toc.xml' + # TOC is XML. Parse it + try: + table_of_contents = etree.fromstring(r.text) + except Exception as err: + msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url) + log.error(msg) + raise Exception(msg) - # cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores) - # course modules have a very short lifespan and are constantly being created and torn down. - # Since this module in the __init__() method does a synchronous call to AWS to get the TOC - # this is causing a big performance problem. So let's be a bit smarter about this and cache - # each fetch and store in-mem for 10 minutes. - # NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and - # rewrite to use the traditional Django in-memory cache. 
+ return table_of_contents + + +class TextbookList(List): + def from_json(self, values): + textbooks = [] + for title, book_url in values: try: - # see if we already fetched this - if toc_url in _cached_toc: - (table_of_contents, timestamp) = _cached_toc[toc_url] - age = datetime.now() - timestamp - # expire every 10 minutes - if age.seconds < 600: - return table_of_contents - except Exception as err: - pass - - # Get the table of contents from S3 - log.info("Retrieving textbook table of contents from %s" % toc_url) - try: - r = requests.get(toc_url) - except Exception as err: - msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url) - log.error(msg) - raise Exception(msg) - - # TOC is XML. Parse it - try: - table_of_contents = etree.fromstring(r.text) - _cached_toc[toc_url] = (table_of_contents, datetime.now()) - except Exception as err: - msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url) - log.error(msg) - raise Exception(msg) - - return table_of_contents - - def __init__(self, system, definition=None, **kwargs): - super(CourseDescriptor, self).__init__(system, definition, **kwargs) - self.textbooks = [] - for title, book_url in self.definition['data']['textbooks']: - try: - self.textbooks.append(self.Textbook(title, book_url)) + textbooks.append(Textbook(title, book_url)) except: # If we can't get to S3 (e.g. on a train with no internet), don't break # the rest of the courseware. 
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url)) continue - self.wiki_slug = self.definition['data']['wiki_slug'] or self.location.course + return textbooks + + def to_json(self, values): + json_data = [] + for val in values: + if isinstance(val, Textbook): + json_data.append((val.title, val.book_url)) + elif isinstance(val, tuple): + json_data.append(val) + else: + continue + return json_data + + +class CourseFields(object): + textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course", scope=Scope.content) + wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content) + enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings) + enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings) + start = Date(help="Start time when this module is visible", scope=Scope.settings) + end = Date(help="Date that this class ends", scope=Scope.settings) + advertised_start = String(help="Date that this course is advertised to start", scope=Scope.settings) + grading_policy = Object(help="Grading policy definition for this class", scope=Scope.content) + show_calculator = Boolean(help="Whether to show the calculator in this course", default=False, scope=Scope.settings) + display_name = String(help="Display name for this module", scope=Scope.settings) + tabs = List(help="List of tabs to enable in this course", scope=Scope.settings) + end_of_course_survey_url = String(help="Url for the end-of-course survey", scope=Scope.settings) + discussion_blackouts = List(help="List of pairs of start/end dates for discussion blackouts", scope=Scope.settings) + discussion_topics = Object( + help="Map of topics names to ids", + scope=Scope.settings, + computed_default=lambda c: {'General': {'id': c.location.html_id()}}, + ) + testcenter_info = Object(help="Dictionary of Test Center info", scope=Scope.settings) + announcement = 
Date(help="Date this course is announced", scope=Scope.settings) + cohort_config = Object(help="Dictionary defining cohort configuration", scope=Scope.settings) + is_new = Boolean(help="Whether this course should be flagged as new", scope=Scope.settings) + no_grade = Boolean(help="True if this course isn't graded", default=False, scope=Scope.settings) + disable_progress_graph = Boolean(help="True if this course shouldn't display the progress graph", default=False, scope=Scope.settings) + pdf_textbooks = List(help="List of dictionaries containing pdf_textbook configuration", scope=Scope.settings) + html_textbooks = List(help="List of dictionaries containing html_textbook configuration", scope=Scope.settings) + remote_gradebook = Object(scope=Scope.settings) + allow_anonymous = Boolean(scope=Scope.settings, default=True) + allow_anonymous_to_peers = Boolean(scope=Scope.settings, default=False) + advanced_modules = List(help="Beta modules used in your course", scope=Scope.settings) + has_children = True + checklists = List(scope=Scope.settings) + info_sidebar_name = String(scope=Scope.settings, default='Course Handouts') + + # An extra property is used rather than the wiki_slug/number because + # there are courses that change the number for different runs. This allows + # courses to share the same css_class across runs even if they have + # different numbers. + # + # TODO get rid of this as soon as possible or potentially build in a robust + # way to add in course-specific styling. There needs to be a discussion + # about the right way to do this, but arjun will address this ASAP. Also + # note that the courseware template needs to change when this is removed. + css_class = String(help="DO NOT USE THIS", scope=Scope.settings) + + # TODO: This is a quick kludge to allow CS50 (and other courses) to + # specify their own discussion forums as external links by specifying a + # "discussion_link" in their policy JSON file. 
This should later get + # folded in with Syllabus, Course Info, and additional Custom tabs in a + # more sensible framework later. + discussion_link = String(help="DO NOT USE THIS", scope=Scope.settings) + + # TODO: same as above, intended to let internal CS50 hide the progress tab + # until we get grade integration set up. + # Explicit comparison to True because we always want to return a bool. + hide_progress_tab = Boolean(help="DO NOT USE THIS", scope=Scope.settings) + + +class CourseDescriptor(CourseFields, SequenceDescriptor): + module_class = SequenceModule + + template_dir_name = 'course' + + + def __init__(self, *args, **kwargs): + super(CourseDescriptor, self).__init__(*args, **kwargs) + + if self.wiki_slug is None: + self.wiki_slug = self.location.course msg = None if self.start is None: msg = "Course loaded without a valid start date. id = %s" % self.id # hack it -- start in 1970 - self.metadata['start'] = stringify_time(time.gmtime(0)) + self.start = time.gmtime(0) log.critical(msg) - system.error_tracker(msg) + self.system.error_tracker(msg) # NOTE: relies on the modulestore to call set_grading_policy() right after # init. 
(Modulestore is in charge of figuring out where to load the policy from) @@ -127,10 +230,11 @@ class CourseDescriptor(SequenceDescriptor): # NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically # disable the syllabus content for courses that do not provide a syllabus self.syllabus_present = self.system.resources_fs.exists(path('syllabus')) - self.set_grading_policy(self.definition['data'].get('grading_policy', None)) + self._grading_policy = {} + self.set_grading_policy(self.grading_policy) self.test_center_exams = [] - test_center_info = self.metadata.get('testcenter_info') + test_center_info = self.testcenter_info if test_center_info is not None: for exam_name in test_center_info: try: @@ -143,11 +247,11 @@ class CourseDescriptor(SequenceDescriptor): log.error(msg) continue - def defaut_grading_policy(self): + def default_grading_policy(self): """ Return a dict which is a copy of the default grading policy """ - default = {"GRADER": [ + return {"GRADER": [ { "type": "Homework", "min_count": 12, @@ -179,7 +283,6 @@ class CourseDescriptor(SequenceDescriptor): "GRADE_CUTOFFS": { "Pass": 0.5 }} - return copy.deepcopy(default) def set_grading_policy(self, course_policy): """ @@ -190,17 +293,15 @@ class CourseDescriptor(SequenceDescriptor): course_policy = {} # Load the global settings as a dictionary - grading_policy = self.defaut_grading_policy() + grading_policy = self.default_grading_policy() # Override any global settings with the course settings grading_policy.update(course_policy) # Here is where we should parse any configurations, so that we can fail early - grading_policy['RAW_GRADER'] = grading_policy['GRADER'] # used for cms access - grading_policy['GRADER'] = grader_from_conf(grading_policy['GRADER']) - self._grading_policy = grading_policy - - + # Use setters so that side effecting to .definitions works + self.raw_grader = grading_policy['GRADER'] # used for cms access + self.grade_cutoffs = grading_policy['GRADE_CUTOFFS'] 
@classmethod def read_grading_policy(cls, paths, system): @@ -223,7 +324,6 @@ class CourseDescriptor(SequenceDescriptor): return policy_str - @classmethod def from_xml(cls, xml_data, system, org=None, course=None): instance = super(CourseDescriptor, cls).from_xml(xml_data, system, org, course) @@ -247,18 +347,17 @@ class CourseDescriptor(SequenceDescriptor): policy = json.loads(cls.read_grading_policy(paths, system)) except ValueError: system.error_tracker("Unable to decode grading policy as json") - policy = None + policy = {} # cdodge: import the grading policy information that is on disk and put into the # descriptor 'definition' bucket as a dictionary so that it is persisted in the DB - instance.definition['data']['grading_policy'] = policy + instance.grading_policy = policy # now set the current instance. set_grading_policy() will apply some inheritance rules instance.set_grading_policy(policy) return instance - @classmethod def definition_from_xml(cls, xml_object, system): textbooks = [] @@ -266,19 +365,19 @@ class CourseDescriptor(SequenceDescriptor): textbooks.append((textbook.get('title'), textbook.get('book_url'))) xml_object.remove(textbook) - #Load the wiki tag if it exists + # Load the wiki tag if it exists wiki_slug = None wiki_tag = xml_object.find("wiki") if wiki_tag is not None: wiki_slug = wiki_tag.attrib.get("slug", default=None) xml_object.remove(wiki_tag) - definition = super(CourseDescriptor, cls).definition_from_xml(xml_object, system) + definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system) - definition.setdefault('data', {})['textbooks'] = textbooks - definition['data']['wiki_slug'] = wiki_slug + definition['textbooks'] = textbooks + definition['wiki_slug'] = wiki_slug - return definition + return definition, children def has_ended(self): """ @@ -293,33 +392,9 @@ class CourseDescriptor(SequenceDescriptor): def has_started(self): return time.gmtime() > self.start - @property - def end(self): - return 
self._try_parse_time("end") - @end.setter - def end(self, value): - if isinstance(value, time.struct_time): - self.metadata['end'] = stringify_time(value) - @property - def enrollment_start(self): - return self._try_parse_time("enrollment_start") - - @enrollment_start.setter - def enrollment_start(self, value): - if isinstance(value, time.struct_time): - self.metadata['enrollment_start'] = stringify_time(value) - @property - def enrollment_end(self): - return self._try_parse_time("enrollment_end") - - @enrollment_end.setter - def enrollment_end(self, value): - if isinstance(value, time.struct_time): - self.metadata['enrollment_end'] = stringify_time(value) - @property def grader(self): - return self._grading_policy['GRADER'] + return grader_from_conf(self.raw_grader) @property def raw_grader(self): @@ -329,7 +404,7 @@ class CourseDescriptor(SequenceDescriptor): def raw_grader(self, value): # NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf self._grading_policy['RAW_GRADER'] = value - self.definition['data'].setdefault('grading_policy', {})['GRADER'] = value + self.grading_policy['GRADER'] = value @property def grade_cutoffs(self): @@ -338,45 +413,58 @@ class CourseDescriptor(SequenceDescriptor): @grade_cutoffs.setter def grade_cutoffs(self, value): self._grading_policy['GRADE_CUTOFFS'] = value - self.definition['data'].setdefault('grading_policy', {})['GRADE_CUTOFFS'] = value + + # XBlock fields don't update after mutation + policy = self.grading_policy + policy['GRADE_CUTOFFS'] = value + self.grading_policy = policy @property def lowest_passing_grade(self): return min(self._grading_policy['GRADE_CUTOFFS'].values()) - @property - def tabs(self): - """ - Return the tabs config, as a python object, or None if not specified. 
- """ - return self.metadata.get('tabs') - - @tabs.setter - def tabs(self, value): - self.metadata['tabs'] = value - - @property - def show_calculator(self): - return self.metadata.get("show_calculator", None) == "Yes" - @property def is_cohorted(self): """ Return whether the course is cohorted. """ - config = self.metadata.get("cohort_config") + config = self.cohort_config if config is None: return False return bool(config.get("cohorted")) + @property + def auto_cohort(self): + """ + Return whether the course is auto-cohorted. + """ + if not self.is_cohorted: + return False + + return bool(self.cohort_config.get( + "auto_cohort", False)) + + @property + def auto_cohort_groups(self): + """ + Return the list of groups to put students into. Returns [] if not + specified. Returns specified list even if is_cohorted and/or auto_cohort are + false. + """ + if self.cohort_config is None: + return [] + else: + return self.cohort_config.get("auto_cohort_groups", []) + + @property def top_level_discussion_topic_ids(self): """ Return list of topic ids defined in course policy. """ - topics = self.metadata.get("discussion_topics", {}) + topics = self.discussion_topics return [d["id"] for d in topics.values()] @@ -387,7 +475,7 @@ class CourseDescriptor(SequenceDescriptor): the empty set. Note that all inline discussions are automatically cohorted based on the course's is_cohorted setting. """ - config = self.metadata.get("cohort_config") + config = self.cohort_config if config is None: return set() @@ -396,13 +484,13 @@ class CourseDescriptor(SequenceDescriptor): @property - def is_new(self): + def is_newish(self): """ - Returns if the course has been flagged as new in the metadata. If + Returns if the course has been flagged as new. If there is no flag, return a heuristic value considering the announcement and the start dates. 
""" - flag = self.metadata.get('is_new', None) + flag = self.is_new if flag is None: # Use a heuristic if the course has not been flagged announcement, start, now = self._sorting_dates() @@ -422,8 +510,8 @@ class CourseDescriptor(SequenceDescriptor): @property def sorting_score(self): """ - Returns a number that can be used to sort the courses according - the how "new"" they are. The "newness"" score is computed using a + Returns a tuple that can be used to sort the courses according + the how "new" they are. The "newness" score is computed using a heuristic that takes into account the announcement and (advertized) start dates of the course if available. @@ -448,12 +536,15 @@ class CourseDescriptor(SequenceDescriptor): def to_datetime(timestamp): return datetime(*timestamp[:6]) - def get_date(field): - timetuple = self._try_parse_time(field) - return to_datetime(timetuple) if timetuple else None + announcement = self.announcement + if announcement is not None: + announcement = to_datetime(announcement) + + try: + start = dateutil.parser.parse(self.advertised_start) + except (ValueError, AttributeError): + start = to_datetime(self.start) - announcement = get_date('announcement') - start = get_date('advertised_start') or to_datetime(self.start) now = to_datetime(time.gmtime()) return announcement, start, now @@ -478,7 +569,7 @@ class CourseDescriptor(SequenceDescriptor): all_descriptors - This contains a list of all xmodules that can effect grading a student. This is used to efficiently fetch - all the xmodule state for a StudentModuleCache without walking + all the xmodule state for a ModelDataCache without walking the descriptor tree again. 
@@ -496,14 +587,14 @@ class CourseDescriptor(SequenceDescriptor): for c in self.get_children(): sections = [] for s in c.get_children(): - if s.metadata.get('graded', False): + if s.lms.graded: xmoduledescriptors = list(yield_descriptor_descendents(s)) xmoduledescriptors.append(s) # The xmoduledescriptors included here are only the ones that have scores. section_description = {'section_descriptor': s, 'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)} - section_format = s.metadata.get('format', "") + section_format = s.lms.format if s.lms.format is not None else '' graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description] all_descriptors.extend(xmoduledescriptors) @@ -544,58 +635,32 @@ class CourseDescriptor(SequenceDescriptor): @property def start_date_text(self): - parsed_advertised_start = self._try_parse_time('advertised_start') + def try_parse_iso_8601(text): + try: + result = datetime.strptime(text, "%Y-%m-%dT%H:%M") + result = result.strftime("%b %d, %Y") + except ValueError: + result = text.title() - # If the advertised start isn't a real date string, we assume it's free - # form text... 
- if parsed_advertised_start is None and \ - ('advertised_start' in self.metadata): - return self.metadata['advertised_start'] + return result - displayed_start = parsed_advertised_start or self.start - - # If we have neither an advertised start or a real start, just return TBD - if not displayed_start: - return "TBD" - - return time.strftime("%b %d, %Y", displayed_start) + if isinstance(self.advertised_start, basestring): + return try_parse_iso_8601(self.advertised_start) + elif self.advertised_start is None and self.start is None: + return 'TBD' + else: + return time.strftime("%b %d, %Y", self.advertised_start or self.start) @property def end_date_text(self): return time.strftime("%b %d, %Y", self.end) - # An extra property is used rather than the wiki_slug/number because - # there are courses that change the number for different runs. This allows - # courses to share the same css_class across runs even if they have - # different numbers. - # - # TODO get rid of this as soon as possible or potentially build in a robust - # way to add in course-specific styling. There needs to be a discussion - # about the right way to do this, but arjun will address this ASAP. Also - # note that the courseware template needs to change when this is removed. - @property - def css_class(self): - return self.metadata.get('css_class', '') - - @property - def info_sidebar_name(self): - return self.metadata.get('info_sidebar_name', 'Course Handouts') - - @property - def discussion_link(self): - """TODO: This is a quick kludge to allow CS50 (and other courses) to - specify their own discussion forums as external links by specifying a - "discussion_link" in their policy JSON file. 
This should later get - folded in with Syllabus, Course Info, and additional Custom tabs in a - more sensible framework later.""" - return self.metadata.get('discussion_link', None) - @property def forum_posts_allowed(self): try: blackout_periods = [(parse_time(start), parse_time(end)) for start, end - in self.metadata.get('discussion_blackouts', [])] + in self.discussion_blackouts] now = time.gmtime() for start, end in blackout_periods: if start <= now <= end: @@ -605,23 +670,6 @@ class CourseDescriptor(SequenceDescriptor): return True - @property - def hide_progress_tab(self): - """TODO: same as above, intended to let internal CS50 hide the progress tab - until we get grade integration set up.""" - # Explicit comparison to True because we always want to return a bool. - return self.metadata.get('hide_progress_tab') == True - - @property - def end_of_course_survey_url(self): - """ - Pull from policy. Once we have our own survey module set up, can change this to point to an automatically - created survey for each class. - - Returns None if no url specified. - """ - return self.metadata.get('end_of_course_survey_url') - class TestCenterExam(object): def __init__(self, course_id, exam_name, exam_info): self.course_id = course_id @@ -636,7 +684,7 @@ class CourseDescriptor(SequenceDescriptor): # *end* of the same day, not the same time. It's going to be used as the # end of the exam overall, so we don't want the exam to disappear too soon. # It's also used optionally as the registration end date, so time matters there too. 
- self.last_eligible_appointment_date = self._try_parse_time('Last_Eligible_Appointment_Date') # or self.first_eligible_appointment_date + self.last_eligible_appointment_date = self._try_parse_time('Last_Eligible_Appointment_Date') # or self.first_eligible_appointment_date if self.last_eligible_appointment_date is None: raise ValueError("Last appointment date must be specified") self.registration_start_date = self._try_parse_time('Registration_Start_Date') or time.gmtime(0) @@ -707,10 +755,6 @@ class CourseDescriptor(SequenceDescriptor): def get_test_center_exam(self, exam_series_code): exams = [exam for exam in self.test_center_exams if exam.exam_series_code == exam_series_code] return exams[0] if len(exams) == 1 else None - - @property - def title(self): - return self.display_name @property def number(self): diff --git a/common/lib/xmodule/xmodule/css/annotatable/display.scss b/common/lib/xmodule/xmodule/css/annotatable/display.scss new file mode 100644 index 0000000000..308b379ec1 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/annotatable/display.scss @@ -0,0 +1,169 @@ +$border-color: #C8C8C8; +$body-font-size: em(14); + +.annotatable-header { + margin-bottom: .5em; + .annotatable-title { + font-size: em(22); + text-transform: uppercase; + padding: 2px 4px; + } +} + +.annotatable-section { + position: relative; + padding: .5em 1em; + border: 1px solid $border-color; + border-radius: .5em; + margin-bottom: .5em; + + &.shaded { background-color: #EDEDED; } + + .annotatable-section-title { + font-weight: bold; + a { font-weight: normal; } + } + .annotatable-section-body { + border-top: 1px solid $border-color; + margin-top: .5em; + padding-top: .5em; + @include clearfix; + } + + ul.instructions-template { + list-style: disc; + margin-left: 4em; + b { font-weight: bold; } + i { font-style: italic; } + code { + display: inline; + white-space: pre; + font-family: Courier New, monospace; + } + } +} + +.annotatable-toggle { + position: absolute; + right: 0; + 
margin: 2px 1em 2px 0; + &.expanded:after { content: " \2191" } + &.collapsed:after { content: " \2193" } +} + +.annotatable-span { + display: inline; + cursor: pointer; + + @each $highlight in ( + (yellow rgba(255,255,10,0.3) rgba(255,255,10,0.9)), + (red rgba(178,19,16,0.3) rgba(178,19,16,0.9)), + (orange rgba(255,165,0,0.3) rgba(255,165,0,0.9)), + (green rgba(25,255,132,0.3) rgba(25,255,132,0.9)), + (blue rgba(35,163,255,0.3) rgba(35,163,255,0.9)), + (purple rgba(115,9,178,0.3) rgba(115,9,178,0.9))) { + + $marker: nth($highlight,1); + $color: nth($highlight,2); + $selected_color: nth($highlight,3); + + @if $marker == yellow { + &.highlight { + background-color: $color; + &.selected { background-color: $selected_color; } + } + } + &.highlight-#{$marker} { + background-color: $color; + &.selected { background-color: $selected_color; } + } + } + + &.hide { + cursor: none; + background-color: inherit; + .annotatable-icon { + display: none; + } + } + + .annotatable-comment { + display: none; + } +} + +.ui-tooltip.qtip.ui-tooltip { + font-size: $body-font-size; + border: 1px solid #333; + border-radius: 1em; + background-color: rgba(0,0,0,.85); + color: #fff; + -webkit-font-smoothing: antialiased; + + .ui-tooltip-titlebar { + font-size: em(16); + color: inherit; + background-color: transparent; + padding: 5px 10px; + border: none; + .ui-tooltip-title { + padding: 5px 0px; + border-bottom: 2px solid #333; + font-weight: bold; + } + .ui-tooltip-icon { + right: 10px; + background: #333; + } + .ui-state-hover { + color: inherit; + border: 1px solid #ccc; + } + } + .ui-tooltip-content { + color: inherit; + font-size: em(14); + text-align: left; + font-weight: 400; + padding: 0 10px 10px 10px; + background-color: transparent; + } + p { + color: inherit; + line-height: normal; + } +} + +.ui-tooltip.qtip.ui-tooltip-annotatable { + max-width: 375px; + .ui-tooltip-content { + padding: 0 10px; + .annotatable-comment { + display: block; + margin: 0px 0px 10px 0; + max-height: 
225px; + overflow: auto; + } + .annotatable-reply { + display: block; + border-top: 2px solid #333; + padding: 5px 0; + margin: 0; + text-align: center; + } + } + &:after { + content: ''; + display: inline-block; + position: absolute; + bottom: -20px; + left: 50%; + height: 0; + width: 0; + margin-left: -5px; + border: 10px solid transparent; + border-top-color: rgba(0, 0, 0, .85); + } +} + + diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index d40bdb556e..ab23bc1b48 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -40,8 +40,16 @@ section.problem { @include clearfix; label.choicegroup_correct{ - text:after{ + &:after{ content: url('../images/correct-icon.png'); + margin-left:15px + } + } + + label.choicegroup_incorrect{ + &:after{ + content: url('../images/incorrect-icon.png'); + margin-left:15px; } } @@ -52,6 +60,7 @@ section.problem { .indicator_container { float: left; width: 25px; + height: 1px; margin-right: 15px; } @@ -69,7 +78,7 @@ section.problem { } text { - display: block; + display: inline; margin-left: 25px; } } @@ -227,6 +236,15 @@ section.problem { background: url('../images/correct-icon.png') center center no-repeat; height: 20px; position: relative; + top: 3px; + width: 25px; + } + + &.partially-correct { + @include inline-block(); + background: url('../images/partially-correct-icon.png') center center no-repeat; + height: 20px; + position: relative; top: 6px; width: 25px; } @@ -237,7 +255,7 @@ section.problem { height: 20px; width: 20px; position: relative; - top: 6px; + top: 3px; } } @@ -802,4 +820,91 @@ section.problem { display: none; } } + + .annotation-input { + $yellow: rgba(255,255,10,0.3); + + border: 1px solid #ccc; + border-radius: 1em; + margin: 0 0 1em 0; + + .annotation-header { + font-weight: bold; + border-bottom: 1px solid #ccc; + padding: .5em 1em; + } + .annotation-body { padding: .5em 1em; } + 
a.annotation-return { + float: right; + font: inherit; + font-weight: normal; + } + a.annotation-return:after { content: " \2191" } + + .block, ul.tags { + margin: .5em 0; + padding: 0; + } + .block-highlight { + padding: .5em; + color: #333; + font-style: normal; + background-color: $yellow; + border: 1px solid darken($yellow, 10%); + } + .block-comment { font-style: italic; } + + ul.tags { + display: block; + list-style-type: none; + margin-left: 1em; + li { + display: block; + margin: 1em 0 0 0; + position: relative; + .tag { + display: inline-block; + cursor: pointer; + border: 1px solid rgb(102,102,102); + margin-left: 40px; + &.selected { + background-color: $yellow; + } + } + .tag-status { + position: absolute; + left: 0; + } + .tag-status, .tag { padding: .25em .5em; } + } + } + textarea.comment { + $num-lines-to-show: 5; + $line-height: 1.4em; + $padding: .2em; + width: 100%; + padding: $padding (2 * $padding); + line-height: $line-height; + height: ($num-lines-to-show * $line-height) + (2*$padding) - (($line-height - 1)/2); + } + .answer-annotation { display: block; margin: 0; } + + /* for debugging the input value field. 
enable the debug flag on the inputtype */ + .debug-value { + color: #fff; + padding: 1em; + margin: 1em 0; + background-color: #999; + border: 1px solid #000; + input[type="text"] { width: 100%; } + pre { background-color: #CCC; color: #000; } + &:before { + display: block; + content: "debug input value"; + text-transform: uppercase; + font-weight: bold; + font-size: 1.5em; + } + } + } } diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss index 8d921f828b..20700ab092 100644 --- a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss +++ b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss @@ -24,14 +24,11 @@ section.combined-open-ended { @include clearfix; .status-container { - float:right; - width:40%; + padding-bottom: 5px; } .item-container { - float:left; - width: 53%; - padding-bottom: 50px; + padding-bottom: 10px; } .result-container @@ -46,14 +43,26 @@ section.combined-open-ended { } } +section.legend-container { + .legenditem { + background-color : #d4d4d4; + font-size: .9em; + padding: 2px; + display: inline; + width: 20%; + } + margin-bottom: 5px; +} + section.combined-open-ended-status { .statusitem { - background-color: #FAFAFA; color: #2C2C2C; - font-family: monospace; - font-size: 1em; - padding: 10px; + background-color : #d4d4d4; + font-size: .9em; + padding: 2px; + display: inline; + width: 20%; .show-results { margin-top: .3em; text-align:right; @@ -61,12 +70,12 @@ section.combined-open-ended-status { .show-results-button { font: 1em monospace; } - } + } .statusitem-current { - background-color: #d4d4d4; + background-color: #B2B2B2; color: #222; - } + } span { &.unanswered { @@ -98,8 +107,29 @@ section.combined-open-ended-status { } } -div.result-container { +div.combined-rubric-container { + ul.rubric-list{ + list-style-type: none; + padding:0; + margin:0; + li { + &.rubric-list-item{ + margin-bottom: 2px; + padding: 0px; + } + } + } + 
span.rubric-category { + font-size: .9em; + } + padding-bottom: 5px; + padding-top: 10px; +} + +div.result-container { + padding-top: 10px; + padding-bottom: 5px; .evaluation { p { @@ -113,9 +143,8 @@ div.result-container { } .evaluation-response { - margin-bottom: 10px; + margin-bottom: 2px; header { - text-align: right; a { font-size: .85em; } @@ -198,20 +227,6 @@ div.result-container { } } - .result-correct { - background: url('../images/correct-icon.png') left 20px no-repeat; - .result-actual-output { - color: #090; - } - } - - .result-incorrect { - background: url('../images/incorrect-icon.png') left 20px no-repeat; - .result-actual-output { - color: #B00; - } - } - .markup-text{ margin: 5px; padding: 20px 0px 15px 50px; @@ -229,6 +244,16 @@ div.result-container { } } } + .rubric-result-container { + .rubric-result { + font-size: .9em; + padding: 2px; + display: inline-table; + } + padding: 2px; + margin: 0px; + display : inline; + } } @@ -404,7 +429,7 @@ section.open-ended-child { div.short-form-response { background: #F6F6F6; border: 1px solid #ddd; - margin-bottom: 20px; + margin-bottom: 0px; overflow-y: auto; height: 200px; @include clearfix; @@ -478,6 +503,18 @@ section.open-ended-child { margin-left: .75rem; } + ul.rubric-list{ + list-style-type: none; + padding:0; + margin:0; + li { + &.rubric-list-item{ + margin-bottom: 0px; + padding: 0px; + } + } + } + ol { list-style: decimal outside none; margin-bottom: lh(); @@ -503,9 +540,8 @@ section.open-ended-child { } li { - line-height: 1.4em; - margin-bottom: lh(.5); - + margin-bottom: 0px; + padding: 0px; &:last-child { margin-bottom: 0; } diff --git a/common/lib/xmodule/xmodule/css/foldit/leaderboard.scss b/common/lib/xmodule/xmodule/css/foldit/leaderboard.scss new file mode 100644 index 0000000000..5342c985c2 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/foldit/leaderboard.scss @@ -0,0 +1,20 @@ +$leaderboard: #F4F4F4; + +section.foldit { + div.folditchallenge { + table { + border: 1px solid 
lighten($leaderboard, 10%); + border-collapse: collapse; + margin-top: 20px; + } + th { + background: $leaderboard; + color: darken($leaderboard, 25%); + } + td { + background: lighten($leaderboard, 3%); + border-bottom: 1px solid #fff; + padding: 8px; + } + } +} diff --git a/common/lib/xmodule/xmodule/css/html/display.scss b/common/lib/xmodule/xmodule/css/html/display.scss index 956923c6d0..93138ac5a9 100644 --- a/common/lib/xmodule/xmodule/css/html/display.scss +++ b/common/lib/xmodule/xmodule/css/html/display.scss @@ -49,10 +49,18 @@ p { em, i { font-style: italic; + + span { + font-style: italic; + } } strong, b { font-weight: bold; + + span { + font-weight: bold; + } } p + p, ul + p, ol + p { diff --git a/common/lib/xmodule/xmodule/css/poll/display.scss b/common/lib/xmodule/xmodule/css/poll/display.scss new file mode 100644 index 0000000000..82c018a3a0 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/poll/display.scss @@ -0,0 +1,222 @@ +section.poll_question { + @media print { + display: block; + width: auto; + padding: 0; + + canvas, img { + page-break-inside: avoid; + } + } + + .inline { + display: inline; + } + + h3 { + margin-top: 0; + margin-bottom: 15px; + color: #fe57a1; + font-size: 1.9em; + + &.problem-header { + section.staff { + margin-top: 30px; + font-size: 80%; + } + } + + @media print { + display: block; + width: auto; + border-right: 0; + } + } + + p { + text-align: justify; + font-weight: bold; + } + + .poll_answer { + margin-bottom: 20px; + + &.short { + clear: both; + } + + .question { + height: auto; + clear: both; + min-height: 30px; + + &.short { + clear: none; + width: 30%; + display: inline; + float: left; + } + + .button { + -webkit-appearance: none; + -webkit-background-clip: padding-box; + -webkit-border-image: none; + -webkit-box-align: center; + -webkit-box-shadow: rgb(255, 255, 255) 0px 1px 0px 0px inset; + -webkit-font-smoothing: antialiased; + -webkit-rtl-ordering: logical; + -webkit-user-select: text; + -webkit-writing-mode: 
horizontal-tb; + background-clip: padding-box; + background-color: rgb(238, 238, 238); + background-image: -webkit-linear-gradient(top, rgb(238, 238, 238), rgb(210, 210, 210)); + border-bottom-color: rgb(202, 202, 202); + border-bottom-left-radius: 3px; + border-bottom-right-radius: 3px; + border-bottom-style: solid; + border-bottom-width: 1px; + border-left-color: rgb(202, 202, 202); + border-left-style: solid; + border-left-width: 1px; + border-right-color: rgb(202, 202, 202); + border-right-style: solid; + border-right-width: 1px; + border-top-color: rgb(202, 202, 202); + border-top-left-radius: 3px; + border-top-right-radius: 3px; + border-top-style: solid; + border-top-width: 1px; + box-shadow: rgb(255, 255, 255) 0px 1px 0px 0px inset; + box-sizing: border-box; + color: rgb(51, 51, 51); + cursor: pointer; + + /* display: inline-block; */ + display: inline; + float: left; + + font-family: 'Open Sans', Verdana, Geneva, sans-serif; + font-size: 13px; + font-style: normal; + font-variant: normal; + font-weight: bold; + + letter-spacing: normal; + line-height: 25.59375px; + margin-bottom: 15px; + margin: 0px; + padding: 0px; + text-align: center; + text-decoration: none; + text-indent: 0px; + text-shadow: rgb(248, 248, 248) 0px 1px 0px; + text-transform: none; + vertical-align: top; + white-space: pre-line; + + width: 25px; + height: 25px; + + word-spacing: 0px; + writing-mode: lr-tb; + } + .button.answered { + -webkit-box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + background-color: rgb(29, 157, 217); + background-image: -webkit-linear-gradient(top, rgb(29, 157, 217), rgb(14, 124, 176)); + border-bottom-color: rgb(13, 114, 162); + border-left-color: rgb(13, 114, 162); + border-right-color: rgb(13, 114, 162); + border-top-color: rgb(13, 114, 162); + box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + color: rgb(255, 255, 255); + text-shadow: rgb(7, 103, 148) 0px 1px 0px; + background-image: none; + } + + .text { + display: inline; + float: left; + width: 
80%; + text-align: left; + min-height: 30px; + margin-left: 20px; + height: auto; + margin-bottom: 20px; + cursor: pointer; + + &.short { + width: 100px; + } + } + } + + .stats { + min-height: 40px; + margin-top: 20px; + clear: both; + + &.short { + margin-top: 0; + clear: none; + display: inline; + float: right; + width: 70%; + } + + .bar { + width: 75%; + height: 20px; + border: 1px solid black; + display: inline; + float: left; + margin-right: 10px; + + &.short { + width: 65%; + height: 20px; + margin-top: 3px; + } + + .percent { + background-color: gray; + width: 0px; + height: 20px; + + &.short { } + } + } + + .number { + width: 80px; + display: inline; + float: right; + height: 28px; + text-align: right; + + &.short { + width: 120px; + height: auto; + } + } + } + } + + .poll_answer.answered { + -webkit-box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + background-color: rgb(29, 157, 217); + background-image: -webkit-linear-gradient(top, rgb(29, 157, 217), rgb(14, 124, 176)); + border-bottom-color: rgb(13, 114, 162); + border-left-color: rgb(13, 114, 162); + border-right-color: rgb(13, 114, 162); + border-top-color: rgb(13, 114, 162); + box-shadow: rgb(97, 184, 225) 0px 1px 0px 0px inset; + color: rgb(255, 255, 255); + text-shadow: rgb(7, 103, 148) 0px 1px 0px; + } + + .button.reset-button { + clear: both; + float: right; + } +} diff --git a/lms/djangoapps/portal/__init__.py b/common/lib/xmodule/xmodule/css/wrapper/display.scss similarity index 100% rename from lms/djangoapps/portal/__init__.py rename to common/lib/xmodule/xmodule/css/wrapper/display.scss diff --git a/common/lib/xmodule/xmodule/discussion_module.py b/common/lib/xmodule/xmodule/discussion_module.py index 6ddfcbe6c0..7725a88e77 100644 --- a/common/lib/xmodule/xmodule/discussion_module.py +++ b/common/lib/xmodule/xmodule/discussion_module.py @@ -3,35 +3,38 @@ from pkg_resources import resource_string, resource_listdir from xmodule.x_module import XModule from xmodule.raw_module import 
RawDescriptor - -import json +from xblock.core import String, Scope -class DiscussionModule(XModule): +class DiscussionFields(object): + discussion_id = String(scope=Scope.settings) + discussion_category = String(scope=Scope.settings) + discussion_target = String(scope=Scope.settings) + sort_key = String(scope=Scope.settings) + + +class DiscussionModule(DiscussionFields, XModule): js = {'coffee': [resource_string(__name__, 'js/src/time.coffee'), resource_string(__name__, 'js/src/discussion/display.coffee')] } js_module_name = "InlineDiscussion" + + def get_html(self): context = { 'discussion_id': self.discussion_id, } return self.system.render_template('discussion/_discussion_module.html', context) - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - if isinstance(instance_state, str): - instance_state = json.loads(instance_state) - xml_data = etree.fromstring(definition['data']) - self.discussion_id = xml_data.attrib['id'] - self.title = xml_data.attrib['for'] - self.discussion_category = xml_data.attrib['discussion_category'] - - -class DiscussionDescriptor(RawDescriptor): +class DiscussionDescriptor(DiscussionFields, RawDescriptor): module_class = DiscussionModule template_dir_name = "discussion" + + # The discussion XML format uses `id` and `for` attributes, + # but these would overload other module attributes, so we prefix them + # for actual use in the code + metadata_translations = dict(RawDescriptor.metadata_translations) + metadata_translations['id'] = 'discussion_id' + metadata_translations['for'] = 'discussion_target' diff --git a/common/lib/xmodule/xmodule/editing_module.py b/common/lib/xmodule/xmodule/editing_module.py index e025179b63..b93727a96b 100644 --- a/common/lib/xmodule/xmodule/editing_module.py +++ b/common/lib/xmodule/xmodule/editing_module.py @@ -1,11 +1,16 @@ from 
pkg_resources import resource_string from xmodule.mako_module import MakoModuleDescriptor +from xblock.core import Scope, String import logging log = logging.getLogger(__name__) -class EditingDescriptor(MakoModuleDescriptor): +class EditingFields(object): + data = String(scope=Scope.content, default='') + + +class EditingDescriptor(EditingFields, MakoModuleDescriptor): """ Module that provides a raw editing view of its data and children. It does not perform any validation on its definition---just passes it along to the browser. @@ -20,7 +25,7 @@ class EditingDescriptor(MakoModuleDescriptor): def get_context(self): _context = MakoModuleDescriptor.get_context(self) # Add our specific template information (the raw data body) - _context.update({'data': self.definition.get('data', '')}) + _context.update({'data': self.data}) return _context diff --git a/common/lib/xmodule/xmodule/error_module.py b/common/lib/xmodule/xmodule/error_module.py index 2df47e05e6..d2135302da 100644 --- a/common/lib/xmodule/xmodule/error_module.py +++ b/common/lib/xmodule/xmodule/error_module.py @@ -8,6 +8,7 @@ from xmodule.x_module import XModule from xmodule.editing_module import JSONEditingDescriptor from xmodule.errortracker import exc_info_to_str from xmodule.modulestore import Location +from xblock.core import String, Scope log = logging.getLogger(__name__) @@ -20,7 +21,14 @@ log = logging.getLogger(__name__) # decides whether to create a staff or not-staff module. -class ErrorModule(XModule): +class ErrorFields(object): + contents = String(scope=Scope.content) + error_msg = String(scope=Scope.content) + display_name = String(scope=Scope.settings) + + +class ErrorModule(ErrorFields, XModule): + def get_html(self): '''Show an error to staff. TODO (vshnayder): proper style, divs, etc. 
@@ -28,12 +36,12 @@ class ErrorModule(XModule): # staff get to see all the details return self.system.render_template('module-error.html', { 'staff_access': True, - 'data': self.definition['data']['contents'], - 'error': self.definition['data']['error_msg'], + 'data': self.contents, + 'error': self.error_msg, }) -class NonStaffErrorModule(XModule): +class NonStaffErrorModule(ErrorFields, XModule): def get_html(self): '''Show an error to a student. TODO (vshnayder): proper style, divs, etc. @@ -46,7 +54,7 @@ class NonStaffErrorModule(XModule): }) -class ErrorDescriptor(JSONEditingDescriptor): +class ErrorDescriptor(ErrorFields, JSONEditingDescriptor): """ Module that provides a raw editing view of broken xml. """ @@ -66,26 +74,22 @@ class ErrorDescriptor(JSONEditingDescriptor): name=hashlib.sha1(contents).hexdigest() ) - definition = { - 'data': { - 'error_msg': str(error_msg), - 'contents': contents, - } - } - # real metadata stays in the content, but add a display name - metadata = {'display_name': 'Error: ' + location.name} + model_data = { + 'error_msg': str(error_msg), + 'contents': contents, + 'display_name': 'Error: ' + location.name + } return ErrorDescriptor( system, - definition, - location=location, - metadata=metadata + location, + model_data, ) def get_context(self): return { 'module': self, - 'data': self.definition['data']['contents'], + 'data': self.contents, } @classmethod @@ -101,10 +105,7 @@ class ErrorDescriptor(JSONEditingDescriptor): def from_descriptor(cls, descriptor, error_msg='Error not available'): return cls._construct( descriptor.system, - json.dumps({ - 'definition': descriptor.definition, - 'metadata': descriptor.metadata, - }, indent=4), + descriptor._model_data, error_msg, location=descriptor.location, ) @@ -148,14 +149,14 @@ class ErrorDescriptor(JSONEditingDescriptor): files, etc. That would just get re-wrapped on import. 
''' try: - xml = etree.fromstring(self.definition['data']['contents']) + xml = etree.fromstring(self.contents) return etree.tostring(xml, encoding='unicode') except etree.XMLSyntaxError: # still not valid. root = etree.Element('error') - root.text = self.definition['data']['contents'] + root.text = self.contents err_node = etree.SubElement(root, 'error_msg') - err_node.text = self.definition['data']['error_msg'] + err_node.text = self.error_msg return etree.tostring(root, encoding='unicode') diff --git a/common/lib/xmodule/xmodule/exceptions.py b/common/lib/xmodule/xmodule/exceptions.py index 3db5ceccde..d38fbb12bb 100644 --- a/common/lib/xmodule/xmodule/exceptions.py +++ b/common/lib/xmodule/xmodule/exceptions.py @@ -1,6 +1,12 @@ class InvalidDefinitionError(Exception): pass - class NotFoundError(Exception): pass + +class ProcessingError(Exception): + ''' + An error occurred while processing a request to the XModule. + For example: if an exception occurs while checking a capa problem. + ''' + pass diff --git a/common/lib/xmodule/xmodule/fields.py b/common/lib/xmodule/xmodule/fields.py new file mode 100644 index 0000000000..ea857933fc --- /dev/null +++ b/common/lib/xmodule/xmodule/fields.py @@ -0,0 +1,81 @@ +import time +import logging +import re + +from datetime import timedelta +from xblock.core import ModelType +import datetime +import dateutil.parser + +log = logging.getLogger(__name__) + + +class Date(ModelType): + ''' + Date fields know how to parse and produce json (iso) compatible formats. + ''' + def from_json(self, field): + """ + Parse an optional metadata key containing a time: if present, complain + if it doesn't parse. + Return None if not present or invalid. 
+ """ + if field is None: + return field + elif field is "": + return None + elif isinstance(field, basestring): + d = dateutil.parser.parse(field) + return d.utctimetuple() + elif isinstance(field, (int, long, float)): + return time.gmtime(field / 1000) + elif isinstance(field, time.struct_time): + return field + else: + msg = "Field {0} has bad value '{1}'".format( + self._name, field) + log.warning(msg) + return None + + def to_json(self, value): + """ + Convert a time struct to a string + """ + if value is None: + return None + if isinstance(value, time.struct_time): + # struct_times are always utc + return time.strftime('%Y-%m-%dT%H:%M:%SZ', value) + elif isinstance(value, datetime.datetime): + return value.isoformat() + 'Z' + + +TIMEDELTA_REGEX = re.compile(r'^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) second(?:s)?)?$') +class Timedelta(ModelType): + def from_json(self, time_str): + """ + time_str: A string with the following components: + day[s] (optional) + hour[s] (optional) + minute[s] (optional) + second[s] (optional) + + Returns a datetime.timedelta parsed from the string + """ + parts = TIMEDELTA_REGEX.match(time_str) + if not parts: + return + parts = parts.groupdict() + time_params = {} + for (name, param) in parts.iteritems(): + if param: + time_params[name] = int(param) + return timedelta(**time_params) + + def to_json(self, value): + values = [] + for attr in ('days', 'hours', 'minutes', 'seconds'): + cur_value = getattr(value, attr, 0) + if cur_value > 0: + values.append("%d %s" % (cur_value, attr)) + return ' '.join(values) diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py index ea16fee7f1..884f9e2df2 100644 --- a/common/lib/xmodule/xmodule/foldit_module.py +++ b/common/lib/xmodule/xmodule/foldit_module.py @@ -7,31 +7,46 @@ from pkg_resources import resource_string from xmodule.editing_module import EditingDescriptor from xmodule.x_module import 
XModule from xmodule.xml_module import XmlDescriptor +from xblock.core import Scope, Integer, String log = logging.getLogger(__name__) -class FolditModule(XModule): - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - # ooh look--I'm lazy, so hardcoding the 7.00x required level. - # If we need it generalized, can pull from the xml later - self.required_level = 4 - self.required_sublevel = 5 +class FolditFields(object): + # default to what Spring_7012x uses + required_level = Integer(default=4, scope=Scope.settings) + required_sublevel = Integer(default=5, scope=Scope.settings) + due = String(help="Date that this problem is due by", scope=Scope.settings, default='') + + show_basic_score = String(scope=Scope.settings, default='false') + show_leaderboard = String(scope=Scope.settings, default='false') + + +class FolditModule(FolditFields, XModule): + + css = {'scss': [resource_string(__name__, 'css/foldit/leaderboard.scss')]} + + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + """ + + Example: + + """ def parse_due_date(): """ Pull out the date, or None """ - s = self.metadata.get("due") + s = self.due if s: return parser.parse(s) else: return None - self.due_str = self.metadata.get("due", "None") - self.due = parse_due_date() + self.due_time = parse_due_date() def is_complete(self): """ @@ -46,7 +61,7 @@ class FolditModule(XModule): self.system.anonymous_student_id, self.required_level, self.required_sublevel, - self.due) + self.due_time) return complete def completed_puzzles(self): @@ -66,6 +81,17 @@ class FolditModule(XModule): PuzzleComplete.completed_puzzles(self.system.anonymous_student_id), key=lambda d: (d['set'], d['subset'])) + def puzzle_leaders(self, n=10): + """ + Returns a list of n pairs (user, score) corresponding to the top + scores; the pairs are 
in descending order of score. + """ + from foldit.models import Score + + leaders = [(e['username'], e['score']) for e in Score.get_tops_n(10)] + leaders.sort(key=lambda x: -x[1]) + + return leaders def get_html(self): """ @@ -75,15 +101,48 @@ class FolditModule(XModule): self.required_level, self.required_sublevel) + showbasic = (self.show_basic_score.lower() == "true") + showleader = (self.show_leaderboard.lower() == "true") + context = { - 'due': self.due_str, + 'due': self.due, 'success': self.is_complete(), 'goal_level': goal_level, 'completed': self.completed_puzzles(), + 'top_scores': self.puzzle_leaders(), + 'show_basic': showbasic, + 'show_leader': showleader, + 'folditbasic': self.get_basicpuzzles_html(), + 'folditchallenge': self.get_challenge_html() } return self.system.render_template('foldit.html', context) + def get_basicpuzzles_html(self): + """ + Render html for the basic puzzle section. + """ + goal_level = '{0}-{1}'.format( + self.required_level, + self.required_sublevel) + + context = { + 'due': self.due, + 'success': self.is_complete(), + 'goal_level': goal_level, + 'completed': self.completed_puzzles(), + } + return self.system.render_template('folditbasic.html', context) + + def get_challenge_html(self): + """ + Render html for challenge (i.e., the leaderboard) + """ + + context = { + 'top_scores': self.puzzle_leaders()} + + return self.system.render_template('folditchallenge.html', context) def get_score(self): """ @@ -97,9 +156,10 @@ class FolditModule(XModule): return 1 -class FolditDescriptor(XmlDescriptor, EditingDescriptor): + +class FolditDescriptor(FolditFields, XmlDescriptor, EditingDescriptor): """ - Module for adding open ended response questions to courses + Module for adding Foldit problems to courses """ mako_template = "widgets/html-edit.html" module_class = FolditModule @@ -118,7 +178,8 @@ class FolditDescriptor(XmlDescriptor, EditingDescriptor): @classmethod def definition_from_xml(cls, xml_object, system): - """ - For now, 
don't need anything from the xml - """ - return {} + return ({}, []) + + def definition_to_xml(self): + xml_object = etree.Element('foldit') + return xml_object diff --git a/common/lib/xmodule/xmodule/graders.py b/common/lib/xmodule/xmodule/graders.py index 35318f4f1e..862da791c0 100644 --- a/common/lib/xmodule/xmodule/graders.py +++ b/common/lib/xmodule/xmodule/graders.py @@ -45,8 +45,9 @@ def invalid_args(func, argdict): Given a function and a dictionary of arguments, returns a set of arguments from argdict that aren't accepted by func """ - args, varargs, keywords, defaults = inspect.getargspec(func) - if keywords: return set() # All accepted + args, _, keywords, _ = inspect.getargspec(func) + if keywords: + return set() # All accepted return set(argdict) - set(args) @@ -119,7 +120,7 @@ class CourseGrader(object): that has the matching section format. The grader outputs a dictionary with the following keys: - - percent: Contaisn a float value, which is the final percentage score for the student. + - percent: Contains a float value, which is the final percentage score for the student. - section_breakdown: This is a list of dictionaries which provide details on sections that were graded. These are used for display in a graph or chart. The format for a section_breakdown dictionary is explained below. @@ -150,6 +151,7 @@ class CourseGrader(object): @abc.abstractmethod def grade(self, grade_sheet, generate_random_scores=False): + '''Given a grade sheet, return a dict containing grading information''' raise NotImplementedError @@ -158,7 +160,10 @@ class WeightedSubsectionsGrader(CourseGrader): This grader takes a list of tuples containing (grader, category_name, weight) and computes a final grade by totalling the contribution of each sub grader and multiplying it by the given weight. 
For example, the sections may be - [ (homeworkGrader, "Homework", 0.15), (labGrader, "Labs", 0.15), (midtermGrader, "Midterm", 0.30), (finalGrader, "Final", 0.40) ] + + [ (homeworkGrader, "Homework", 0.15), (labGrader, "Labs", 0.15), (midtermGrader, "Midterm", 0.30), + (finalGrader, "Final", 0.40) ] + All items in section_breakdown for each subgrader will be combined. A grade_breakdown will be composed using the score from each grader. @@ -177,12 +182,12 @@ class WeightedSubsectionsGrader(CourseGrader): for subgrader, category, weight in self.sections: subgrade_result = subgrader.grade(grade_sheet, generate_random_scores) - weightedPercent = subgrade_result['percent'] * weight - section_detail = "{0} = {1:.1%} of a possible {2:.0%}".format(category, weightedPercent, weight) + weighted_percent = subgrade_result['percent'] * weight + section_detail = "{0} = {1:.1%} of a possible {2:.0%}".format(category, weighted_percent, weight) - total_percent += weightedPercent + total_percent += weighted_percent section_breakdown += subgrade_result['section_breakdown'] - grade_breakdown.append({'percent': weightedPercent, 'detail': section_detail, 'category': category}) + grade_breakdown.append({'percent': weighted_percent, 'detail': section_detail, 'category': category}) return {'percent': total_percent, 'section_breakdown': section_breakdown, @@ -203,32 +208,33 @@ class SingleSectionGrader(CourseGrader): self.category = category or name def grade(self, grade_sheet, generate_random_scores=False): - foundScore = None + found_score = None if self.type in grade_sheet: for score in grade_sheet[self.type]: if score.section == self.name: - foundScore = score + found_score = score break - if foundScore or generate_random_scores: + if found_score or generate_random_scores: if generate_random_scores: # for debugging! 
earned = random.randint(2, 15) possible = random.randint(earned, 15) else: # We found the score - earned = foundScore.earned - possible = foundScore.possible + earned = found_score.earned + possible = found_score.possible percent = earned / float(possible) detail = "{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(name=self.name, - percent=percent, - earned=float(earned), - possible=float(possible)) + percent=percent, + earned=float(earned), + possible=float(possible)) else: percent = 0.0 detail = "{name} - 0% (?/?)".format(name=self.name) - breakdown = [{'percent': percent, 'label': self.short_label, 'detail': detail, 'category': self.category, 'prominent': True}] + breakdown = [{'percent': percent, 'label': self.short_label, + 'detail': detail, 'category': self.category, 'prominent': True}] return {'percent': percent, 'section_breakdown': breakdown, @@ -250,6 +256,13 @@ class AssignmentFormatGrader(CourseGrader): show_only_average is to suppress the display of each assignment in this grader and instead only show the total score of this grader in the breakdown. + hide_average is to suppress the display of the total score in this grader and instead + only show each assignment in this grader in the breakdown. + + If there is only a single assignment in this grader, then it acts like a SingleSectionGrader + and returns only one entry for the grader. Since the assignment and the total are the same, + the total is returned but is not labeled as an average. + category should be presentable to the user, but may not appear. When the grade breakdown is displayed, scores from the same category will be similar (for example, by color). 
@@ -263,7 +276,8 @@ class AssignmentFormatGrader(CourseGrader): min_count = 2 would produce the labels "Assignment 3", "Assignment 4" """ - def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False, hide_average=False, starting_index=1): + def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, + show_only_average=False, hide_average=False, starting_index=1): self.type = type self.min_count = min_count self.drop_count = drop_count @@ -275,7 +289,8 @@ class AssignmentFormatGrader(CourseGrader): self.hide_average = hide_average def grade(self, grade_sheet, generate_random_scores=False): - def totalWithDrops(breakdown, drop_count): + def total_with_drops(breakdown, drop_count): + '''calculates total score for a section while dropping lowest scores''' #create an array of tuples with (index, mark), sorted by mark['percent'] descending sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent']) # A list of the indices of the dropped scores @@ -308,33 +323,50 @@ class AssignmentFormatGrader(CourseGrader): section_name = scores[i].section percentage = earned / float(possible) - summary = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(index=i + self.starting_index, - section_type=self.section_type, - name=section_name, - percent=percentage, - earned=float(earned), - possible=float(possible)) + summary_format = "{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})" + summary = summary_format.format(index=i + self.starting_index, + section_type=self.section_type, + name=section_name, + percent=percentage, + earned=float(earned), + possible=float(possible)) else: percentage = 0 - summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index=i + self.starting_index, section_type=self.section_type) + summary = "{section_type} {index} Unreleased - 0% (?/?)".format(index=i + 
self.starting_index, + section_type=self.section_type) - short_label = "{short_label} {index:02d}".format(index=i + self.starting_index, short_label=self.short_label) + short_label = "{short_label} {index:02d}".format(index=i + self.starting_index, + short_label=self.short_label) - breakdown.append({'percent': percentage, 'label': short_label, 'detail': summary, 'category': self.category}) + breakdown.append({'percent': percentage, 'label': short_label, + 'detail': summary, 'category': self.category}) - total_percent, dropped_indices = totalWithDrops(breakdown, self.drop_count) + total_percent, dropped_indices = total_with_drops(breakdown, self.drop_count) for dropped_index in dropped_indices: - breakdown[dropped_index]['mark'] = {'detail': "The lowest {drop_count} {section_type} scores are dropped.".format(drop_count=self.drop_count, section_type=self.section_type)} + breakdown[dropped_index]['mark'] = {'detail': "The lowest {drop_count} {section_type} scores are dropped." + .format(drop_count=self.drop_count, section_type=self.section_type)} - total_detail = "{section_type} Average = {percent:.0%}".format(percent=total_percent, section_type=self.section_type) - total_label = "{short_label} Avg".format(short_label=self.short_label) + if len(breakdown) == 1: + # if there is only one entry in a section, suppress the existing individual entry and the average, + # and just display a single entry for the section. That way it acts automatically like a + # SingleSectionGrader. 
+ total_detail = "{section_type} = {percent:.0%}".format(percent=total_percent, + section_type=self.section_type) + total_label = "{short_label}".format(short_label=self.short_label) + breakdown = [{'percent': total_percent, 'label': total_label, + 'detail': total_detail, 'category': self.category, 'prominent': True}, ] + else: + total_detail = "{section_type} Average = {percent:.0%}".format(percent=total_percent, + section_type=self.section_type) + total_label = "{short_label} Avg".format(short_label=self.short_label) - if self.show_only_average: - breakdown = [] + if self.show_only_average: + breakdown = [] - if not self.hide_average: - breakdown.append({'percent': total_percent, 'label': total_label, 'detail': total_detail, 'category': self.category, 'prominent': True}) + if not self.hide_average: + breakdown.append({'percent': total_percent, 'label': total_label, + 'detail': total_detail, 'category': self.category, 'prominent': True}) return {'percent': total_percent, 'section_breakdown': breakdown, diff --git a/common/lib/xmodule/xmodule/gst_module.py b/common/lib/xmodule/xmodule/gst_module.py index ef1be96c84..00e8cf1f10 100644 --- a/common/lib/xmodule/xmodule/gst_module.py +++ b/common/lib/xmodule/xmodule/gst_module.py @@ -14,12 +14,18 @@ from xmodule.xml_module import XmlDescriptor from xmodule.x_module import XModule from xmodule.stringify import stringify_children from pkg_resources import resource_string +from xblock.core import String, Scope log = logging.getLogger(__name__) -class GraphicalSliderToolModule(XModule): +class GraphicalSliderToolFields(object): + render = String(scope=Scope.content) + configuration = String(scope=Scope.content) + + +class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule): ''' Graphical-Slider-Tool Module ''' @@ -43,15 +49,6 @@ class GraphicalSliderToolModule(XModule): } js_module_name = "GraphicalSliderTool" - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, 
**kwargs): - """ - For XML file format please look at documentation. TODO - receive - information where to store XML documentation. - """ - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - def get_html(self): """ Renders parameters to template. """ @@ -60,14 +57,14 @@ class GraphicalSliderToolModule(XModule): self.html_class = self.location.category self.configuration_json = self.build_configuration_json() params = { - 'gst_html': self.substitute_controls(self.definition['render']), + 'gst_html': self.substitute_controls(self.render), 'element_id': self.html_id, 'element_class': self.html_class, 'configuration_json': self.configuration_json } - self.content = self.system.render_template( + content = self.system.render_template( 'graphical_slider_tool.html', params) - return self.content + return content def substitute_controls(self, html_string): """ Substitutes control elements (slider, textbox and plot) in @@ -139,10 +136,10 @@ class GraphicalSliderToolModule(XModule): # added for interface compatibility with xmltodict.parse # class added for javascript's part purposes return json.dumps(xmltodict.parse('' + self.definition['configuration'] + '')) + '">' + self.configuration + '')) -class GraphicalSliderToolDescriptor(MakoModuleDescriptor, XmlDescriptor): +class GraphicalSliderToolDescriptor(GraphicalSliderToolFields, MakoModuleDescriptor, XmlDescriptor): module_class = GraphicalSliderToolModule template_dir_name = 'graphical_slider_tool' @@ -177,14 +174,14 @@ class GraphicalSliderToolDescriptor(MakoModuleDescriptor, XmlDescriptor): return { 'render': parse('render'), 'configuration': parse('configuration') - } + }, [] def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' xml_object = etree.Element('graphical_slider_tool') def add_child(k): - child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_str = '<{tag}>{body}'.format(tag=k, 
body=getattr(self, k)) child_node = etree.fromstring(child_str) xml_object.append(child_node) diff --git a/common/lib/xmodule/xmodule/html_module.py b/common/lib/xmodule/xmodule/html_module.py index af1ce0ad80..e9cec32e3e 100644 --- a/common/lib/xmodule/xmodule/html_module.py +++ b/common/lib/xmodule/xmodule/html_module.py @@ -7,10 +7,9 @@ from lxml import etree from path import path from pkg_resources import resource_string -from xmodule.contentstore.content import XASSET_SRCREF_PREFIX, StaticContent +from xblock.core import Scope, String from xmodule.editing_module import EditingDescriptor from xmodule.html_checker import check_html -from xmodule.modulestore import Location from xmodule.stringify import stringify_children from xmodule.x_module import XModule from xmodule.xml_module import XmlDescriptor, name_to_pathname @@ -18,7 +17,11 @@ from xmodule.xml_module import XmlDescriptor, name_to_pathname log = logging.getLogger("mitx.courseware") -class HtmlModule(XModule): +class HtmlFields(object): + data = String(help="Html contents to display for this module", scope=Scope.content) + + +class HtmlModule(HtmlFields, XModule): js = {'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/html/display.coffee') @@ -28,17 +31,10 @@ class HtmlModule(XModule): css = {'scss': [resource_string(__name__, 'css/html/display.scss')]} def get_html(self): - return self.html - - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - self.html = self.definition['data'] + return self.data - -class HtmlDescriptor(XmlDescriptor, EditingDescriptor): +class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor): """ Module for putting raw html in a course """ @@ -91,7 +87,7 @@ class 
HtmlDescriptor(XmlDescriptor, EditingDescriptor): if filename is None: definition_xml = copy.deepcopy(xml_object) cls.clean_metadata_from_xml(definition_xml) - return {'data': stringify_children(definition_xml)} + return {'data': stringify_children(definition_xml)}, [] else: # html is special. cls.filename_extension is 'xml', but # if 'filename' is in the definition, that means to load @@ -105,8 +101,6 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): filepath = "{base}/{name}.html".format(base=base, name=filename) #log.debug("looking for html file for {0} at {1}".format(location, filepath)) - - # VS[compat] # TODO (cpennington): If the file doesn't exist at the right path, # give the class a chance to fix it up. The file will be written out @@ -135,7 +129,7 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): # for Fall 2012 LMS migration: keep filename (and unmangled filename) definition['filename'] = [filepath, filename] - return definition + return definition, [] except (ResourceNotFoundError) as err: msg = 'Unable to load file contents at path {0}: {1} '.format( @@ -151,19 +145,18 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): string to filename.html. ''' try: - return etree.fromstring(self.definition['data']) + return etree.fromstring(self.data) except etree.XMLSyntaxError: pass # Not proper format. 
Write html to file, return an empty tag pathname = name_to_pathname(self.url_name) - pathdir = path(pathname).dirname() filepath = u'{category}/{pathname}.html'.format(category=self.category, pathname=pathname) resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True) with resource_fs.open(filepath, 'w') as file: - file.write(self.definition['data'].encode('utf-8')) + file.write(self.data.encode('utf-8')) # write out the relative name relname = path(pathname).basename() @@ -172,6 +165,16 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): elt.set("filename", relname) return elt + @property + def editable_metadata_fields(self): + """Remove any metadata from the editable fields which have their own editor or shouldn't be edited by user.""" + subset = super(HtmlDescriptor, self).editable_metadata_fields + + if 'empty' in subset: + del subset['empty'] + + return subset + class AboutDescriptor(HtmlDescriptor): """ diff --git a/common/lib/xmodule/xmodule/js/fixtures/annotatable.html b/common/lib/xmodule/xmodule/js/fixtures/annotatable.html new file mode 100644 index 0000000000..61020d95e8 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/fixtures/annotatable.html @@ -0,0 +1,35 @@ +
        +
        +
        +
        First Annotation Exercise
        +
        +
        +
        + Instructions + Collapse Instructions +
        +
        +

        The main goal of this exercise is to start practicing the art of slow reading.

        +
        +
        +
        +
        + Guided Discussion + Hide Annotations +
        +
        +
        + |87 No, those who are really responsible are Zeus and Fate [Moira] and the Fury [Erinys] who roams in the mist.
        + |88 They are the ones who
        + |100 He [= Zeus], making a formal declaration [eukhesthai], spoke up at a meeting of all the gods and said:
        + |101 “hear me, all gods and all goddesses,
        + |113 but he swore a great oath. + And right then and there
        +
        +
        +
        + +
        Return to Annotation
        +
        Return to Annotation
        +
        Return to Annotation
        + diff --git a/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee new file mode 100644 index 0000000000..3adb028f97 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/spec/annotatable/display_spec.coffee @@ -0,0 +1,9 @@ +describe 'Annotatable', -> + beforeEach -> + loadFixtures 'annotatable.html' + describe 'constructor', -> + el = $('.xmodule_display.xmodule_AnnotatableModule') + beforeEach -> + @annotatable = new Annotatable(el) + it 'works', -> + expect(1).toBe(1) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee b/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee index 9b8062d60d..5161e658e7 100644 --- a/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee +++ b/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee @@ -119,13 +119,13 @@ describe 'MarkdownEditingDescriptor', ->

        The answer is correct if it is within a specified numerical tolerance of the expected answer.

        Enter the numerical value of Pi:

        - +

        Enter the approximate value of 502*9:

        - + @@ -147,6 +147,20 @@ describe 'MarkdownEditingDescriptor', ->
        + """) + it 'will convert 0 as a numerical response (instead of string response)', -> + data = MarkdownEditingDescriptor.markdownToXml(""" + Enter 0 with a tolerance: + = 0 +- .02 + """) + expect(data).toEqual(""" +

        Enter 0 with a tolerance:

        + + + + + +
        """) it 'converts multiple choice to xml', -> data = MarkdownEditingDescriptor.markdownToXml("""A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets. diff --git a/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee new file mode 100644 index 0000000000..2ad49ae6d7 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/annotatable/display.coffee @@ -0,0 +1,197 @@ +class @Annotatable + _debug: false + + # selectors for the annotatable xmodule + toggleAnnotationsSelector: '.annotatable-toggle-annotations' + toggleInstructionsSelector: '.annotatable-toggle-instructions' + instructionsSelector: '.annotatable-instructions' + sectionSelector: '.annotatable-section' + spanSelector: '.annotatable-span' + replySelector: '.annotatable-reply' + + # these selectors are for responding to events from the annotation capa problem type + problemXModuleSelector: '.xmodule_CapaModule' + problemSelector: 'section.problem' + problemInputSelector: 'section.problem .annotation-input' + problemReturnSelector: 'section.problem .annotation-return' + + constructor: (el) -> + console.log 'loaded Annotatable' if @_debug + @el = el + @$el = $(el) + @init() + + $: (selector) -> + $(selector, @el) + + init: () -> + @initEvents() + @initTips() + + initEvents: () -> + # Initialize toggle handlers for the instructions and annotations sections + [@annotationsHidden, @instructionsHidden] = [false, false] + @$(@toggleAnnotationsSelector).bind 'click', @onClickToggleAnnotations + @$(@toggleInstructionsSelector).bind 'click', @onClickToggleInstructions + + # Initialize handler for 'reply to annotation' events that scroll to + # the associated problem. The reply buttons are part of the tooltip + # content. 
It's important that the tooltips be configured to render + # as descendants of the annotation module and *not* the document.body. + @$el.delegate @replySelector, 'click', @onClickReply + + # Initialize handler for 'return to annotation' events triggered from problems. + # 1) There are annotationinput capa problems rendered on the page + # 2) Each one has an embedded return link (see annotation capa problem template). + # Since the capa problem injects HTML content via AJAX, the best we can do is + # is let the click events bubble up to the body and handle them there. + $('body').delegate @problemReturnSelector, 'click', @onClickReturn + + initTips: () -> + # tooltips are used to display annotations for highlighted text spans + @$(@spanSelector).each (index, el) => + $(el).qtip(@getSpanTipOptions el) + + getSpanTipOptions: (el) -> + content: + title: + text: @makeTipTitle(el) + text: @makeTipContent(el) + position: + my: 'bottom center' # of tooltip + at: 'top center' # of target + target: $(el) # where the tooltip was triggered (i.e. 
the annotation span) + container: @$el + adjust: + y: -5 + show: + event: 'click mouseenter' + solo: true + hide: + event: 'click mouseleave' + delay: 500, + fixed: true # don't hide the tooltip if it is moused over + style: + classes: 'ui-tooltip-annotatable' + events: + show: @onShowTip + + onClickToggleAnnotations: (e) => @toggleAnnotations() + + onClickToggleInstructions: (e) => @toggleInstructions() + + onClickReply: (e) => @replyTo(e.currentTarget) + + onClickReturn: (e) => @returnFrom(e.currentTarget) + + onShowTip: (event, api) => + event.preventDefault() if @annotationsHidden + + getSpanForProblemReturn: (el) -> + problem_id = $(@problemReturnSelector).index(el) + @$(@spanSelector).filter("[data-problem-id='#{problem_id}']") + + getProblem: (el) -> + problem_id = @getProblemId(el) + $(@problemSelector).has(@problemInputSelector).eq(problem_id) + + getProblemId: (el) -> + $(el).data('problem-id') + + toggleAnnotations: () -> + hide = (@annotationsHidden = not @annotationsHidden) + @toggleAnnotationButtonText hide + @toggleSpans hide + @toggleTips hide + + toggleTips: (hide) -> + visible = @findVisibleTips() + @hideTips visible + + toggleAnnotationButtonText: (hide) -> + buttonText = (if hide then 'Show' else 'Hide')+' Annotations' + @$(@toggleAnnotationsSelector).text(buttonText) + + toggleInstructions: () -> + hide = (@instructionsHidden = not @instructionsHidden) + @toggleInstructionsButton hide + @toggleInstructionsText hide + + toggleInstructionsButton: (hide) -> + txt = (if hide then 'Expand' else 'Collapse')+' Instructions' + cls = (if hide then ['expanded', 'collapsed'] else ['collapsed','expanded']) + @$(@toggleInstructionsSelector).text(txt).removeClass(cls[0]).addClass(cls[1]) + + toggleInstructionsText: (hide) -> + slideMethod = (if hide then 'slideUp' else 'slideDown') + @$(@instructionsSelector)[slideMethod]() + + toggleSpans: (hide) -> + @$(@spanSelector).toggleClass 'hide', hide, 250 + + replyTo: (buttonEl) -> + offset = -20 + el = 
@getProblem buttonEl + if el.length > 0 + @scrollTo(el, @afterScrollToProblem, offset) + else + console.log('problem not found. event: ', e) if @_debug + + returnFrom: (buttonEl) -> + offset = -200 + el = @getSpanForProblemReturn buttonEl + if el.length > 0 + @scrollTo(el, @afterScrollToSpan, offset) + else + console.log('span not found. event:', e) if @_debug + + scrollTo: (el, after, offset = -20) -> + $('html,body').scrollTo(el, { + duration: 500 + onAfter: @_once => after?.call this, el + offset: offset + }) if $(el).length > 0 + + afterScrollToProblem: (problem_el) -> + problem_el.effect 'highlight', {}, 500 + + afterScrollToSpan: (span_el) -> + span_el.addClass 'selected', 400, 'swing', -> + span_el.removeClass 'selected', 400, 'swing' + + makeTipContent: (el) -> + (api) => + text = $(el).data('comment-body') + comment = @createComment(text) + problem_id = @getProblemId(el) + reply = @createReplyLink(problem_id) + $(comment).add(reply) + + makeTipTitle: (el) -> + (api) => + title = $(el).data('comment-title') + (if title then title else 'Commentary') + + createComment: (text) -> + $("
        #{text}
        ") + + createReplyLink: (problem_id) -> + $("Reply to Annotation") + + findVisibleTips: () -> + visible = [] + @$(@spanSelector).each (index, el) -> + api = $(el).qtip('api') + tip = $(api?.elements.tooltip) + if tip.is(':visible') + visible.push el + visible + + hideTips: (elements) -> + $(elements).qtip('hide') + + _once: (fn) -> + done = false + return => + fn.call this unless done + done = true diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee index 57ff85298c..70704ab247 100644 --- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee @@ -41,6 +41,11 @@ class @Problem @el.attr progress: response.progress_status @el.trigger('progressChanged') + forceUpdate: (response) => + @el.attr progress: response.progress_status + @el.trigger('progressChanged') + + queueing: => @queued_items = @$(".xqueue") @num_queued_items = @queued_items.length @@ -71,11 +76,30 @@ class @Problem @num_queued_items = @new_queued_items.length if @num_queued_items == 0 + @forceUpdate response delete window.queuePollerID else # TODO: Some logic to dynamically adjust polling rate based on queuelen window.queuePollerID = window.setTimeout(@poll, 1000) + + # Use this if you want to make an ajax call on the input type object + # static method so you don't have to instantiate a Problem in order to use it + # Input: + # url: the AJAX url of the problem + # input_id: the input_id of the input you would like to make the call on + # NOTE: the id is the ${id} part of "input_${id}" during rendering + # If this function is passed the entire prefixed id, the backend may have trouble + # finding the correct input + # dispatch: string that indicates how this data should be handled by the inputtype + # callback: the function that will be called once the AJAX call has been completed. 
+ # It will be passed a response object + @inputAjax: (url, input_id, dispatch, data, callback) -> + data['dispatch'] = dispatch + data['input_id'] = input_id + $.postWithPrefix "#{url}/input_ajax", data, callback + + render: (content) -> if content @el.html(content) @@ -262,9 +286,8 @@ class @Problem save: => Logger.log 'problem_save', @answers $.postWithPrefix "#{@url}/problem_save", @answers, (response) => - if response.success - saveMessage = "Your answers have been saved but not graded. Hit 'Check' to grade them." - @gentle_alert saveMessage + saveMessage = response.msg + @gentle_alert saveMessage @updateProgress response refreshMath: (event, element) => diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee index ae63171ed4..c749d65b45 100644 --- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee @@ -1,14 +1,69 @@ class @Rubric constructor: () -> + @initialize: (location) -> + $('.rubric').data("location", location) + $('input[class="score-selection"]').change @tracking_callback + # set up the hotkeys + $(window).unbind('keydown', @keypress_callback) + $(window).keydown @keypress_callback + # display the 'current' carat + @categories = $('.rubric-category') + @category = $(@categories.first()) + @category.prepend('> ') + @category_index = 0 + + + @keypress_callback: (event) => + # don't try to do this when user is typing in a text input + if $(event.target).is('input, textarea') + return + # for when we select via top row + if event.which >= 48 and event.which <= 57 + selected = event.which - 48 + # for when we select via numpad + else if event.which >= 96 and event.which <= 105 + selected = event.which - 96 + # we don't want to do anything since we haven't pressed a number + else + return + + # if we actually have a current category (not past the end) + if(@category_index <= 
@categories.length) + # find the valid selections for this category + inputs = $("input[name='score-selection-#{@category_index}']") + max_score = inputs.length - 1 + + if selected > max_score or selected < 0 + return + inputs.filter("input[value=#{selected}]").click() + + # move to the next category + old_category_text = @category.html().substring(5) + @category.html(old_category_text) + @category_index++ + @category = $(@categories[@category_index]) + @category.prepend('> ') + + @tracking_callback: (event) -> + target_selection = $(event.target).val() + # chop off the beginning of the name so that we can get the number of the category + category = $(event.target).data("category") + location = $('.rubric').data('location') + # probably want the original problem location as well + + data = {location: location, selection: target_selection, category: category} + Logger.log 'rubric_select', data + + # finds the scores for each rubric category @get_score_list: () => # find the number of categories: - num_categories = $('table.rubric tr').length + num_categories = $('.rubric-category').length score_lst = [] # get the score for each one - for i in [0..(num_categories-2)] + for i in [0..(num_categories-1)] score = $("input[name='score-selection-#{i}']:checked").val() score_lst.push(score) @@ -23,9 +78,8 @@ class @Rubric @check_complete: () -> # check to see whether or not any categories have not been scored - num_categories = $('table.rubric tr').length - # -2 because we want to skip the header - for i in [0..(num_categories-2)] + num_categories = $('.rubric-category').length + for i in [0..(num_categories-1)] score = $("input[name='score-selection-#{i}']:checked").val() if score == undefined return false @@ -35,6 +89,7 @@ class @CombinedOpenEnded constructor: (element) -> @element=element @reinitialize(element) + $(window).keydown @keydown_handler reinitialize: (element) -> @wrapper=$(element).find('section.xmodule_CombinedOpenEndedModule') @@ -46,28 +101,39 @@ class 
@CombinedOpenEnded @task_count = @el.data('task-count') @task_number = @el.data('task-number') @accept_file_upload = @el.data('accept-file-upload') + @location = @el.data('location') + # set up handlers for click tracking + Rubric.initialize(@location) @allow_reset = @el.data('allow_reset') @reset_button = @$('.reset-button') @reset_button.click @reset @next_problem_button = @$('.next-step-button') @next_problem_button.click @next_problem + @status_container = @$('.status-elements') @show_results_button=@$('.show-results-button') @show_results_button.click @show_results + @question_header = @$('.question-header') + @question_header.click @collapse_question + # valid states: 'initial', 'assessing', 'post_assessment', 'done' Collapsible.setCollapsibles(@el) @submit_evaluation_button = $('.submit-evaluation-button') @submit_evaluation_button.click @message_post @results_container = $('.result-container') + @combined_rubric_container = $('.combined-rubric-container') + + @legend_container= $('.legend-container') + @show_legend_current() # Where to put the rubric once we load it @el = $(element).find('section.open-ended-child') @errors_area = @$('.error') @answer_area = @$('textarea.answer') - + @prompt_container = @$('.prompt') @rubric_wrapper = @$('.rubric-wrapper') @hint_wrapper = @$('.hint-wrapper') @message_wrapper = @$('.message-wrapper') @@ -82,11 +148,22 @@ class @CombinedOpenEnded @can_upload_files = false @open_ended_child= @$('.open-ended-child') + @out_of_sync_message = 'The problem state got out of sync. Try reloading the page.' + + if @task_number>1 + @prompt_hide() + else if @task_number==1 and @child_state!='initial' + @prompt_hide() + @find_assessment_elements() @find_hint_elements() @rebind() + if @task_number>1 + @show_combined_rubric_current() + @show_results_current() + # locally scoped jquery. 
$: (selector) -> $(selector, @el) @@ -100,9 +177,12 @@ class @CombinedOpenEnded @submit_evaluation_button = $('.submit-evaluation-button') @submit_evaluation_button.click @message_post Collapsible.setCollapsibles(@results_container) + # make sure we still have click tracking + $('.evaluation-response a').click @log_feedback_click + $('input[name="evaluation-score"]').change @log_feedback_selection show_results: (event) => - status_item = $(event.target).parent().parent() + status_item = $(event.target).parent() status_number = status_item.data('status-number') data = {'task_number' : status_number} $.postWithPrefix "#{@ajax_url}/get_results", data, (response) => @@ -115,8 +195,28 @@ class @CombinedOpenEnded else @gentle_alert response.error + show_combined_rubric_current: () => + data = {} + $.postWithPrefix "#{@ajax_url}/get_combined_rubric", data, (response) => + if response.success + @combined_rubric_container.after(response.html).remove() + @combined_rubric_container= $('div.combined_rubric_container') + + show_status_current: () => + data = {} + $.postWithPrefix "#{@ajax_url}/get_status", data, (response) => + if response.success + @status_container.after(response.html).remove() + @status_container= $('.status-elements') + + show_legend_current: () => + data = {} + $.postWithPrefix "#{@ajax_url}/get_legend", data, (response) => + if response.success + @legend_container.after(response.html).remove() + @legend_container= $('.legend-container') + message_post: (event)=> - Logger.log 'message_post', @answers external_grader_message=$(event.target).parent().parent().parent() evaluation_scoring = $(event.target).parent() @@ -145,6 +245,7 @@ class @CombinedOpenEnded $('section.evaluation').slideToggle() @message_wrapper.html(response.message_html) + $.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings) @@ -156,6 +257,11 @@ class @CombinedOpenEnded @next_problem_button.hide() @hide_file_upload() @hint_area.attr('disabled', false) + if @task_number>1 or 
@child_state!='initial' + @show_status_current() + + if @task_number==1 and @child_state=='assessing' + @prompt_hide() if @child_state == 'done' @rubric_wrapper.hide() if @child_type=="openended" @@ -241,6 +347,7 @@ class @CombinedOpenEnded if response.success @rubric_wrapper.html(response.rubric_html) @rubric_wrapper.show() + Rubric.initialize(@location) @answer_area.html(response.student_response) @child_state = 'assessing' @find_assessment_elements() @@ -251,13 +358,19 @@ class @CombinedOpenEnded $.ajaxWithPrefix("#{@ajax_url}/save_answer",settings) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) + + keydown_handler: (e) => + # only do anything when the key pressed is the 'enter' key + if e.which == 13 && @child_state == 'assessing' && Rubric.check_complete() + @save_assessment(e) save_assessment: (event) => event.preventDefault() if @child_state == 'assessing' && Rubric.check_complete() checked_assessment = Rubric.get_total_score() - data = {'assessment' : checked_assessment} + score_list = Rubric.get_score_list() + data = {'assessment' : checked_assessment, 'score_list' : score_list} $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => if response.success @child_state = response.state @@ -267,13 +380,12 @@ class @CombinedOpenEnded @find_hint_elements() else if @child_state == 'done' @rubric_wrapper.hide() - @message_wrapper.html(response.message_html) @rebind() else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) save_hint: (event) => event.preventDefault() @@ -288,7 +400,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. 
Try reloading the page.') + @errors_area.html(@out_of_sync_message) skip_post_assessment: => if @child_state == 'post_assessment' @@ -300,7 +412,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) reset: (event) => event.preventDefault() @@ -320,7 +432,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) next_problem: => if @child_state == 'done' @@ -343,7 +455,7 @@ class @CombinedOpenEnded else @errors_area.html(response.error) else - @errors_area.html('Problem state got out of sync. Try reloading the page.') + @errors_area.html(@out_of_sync_message) gentle_alert: (msg) => if @el.find('.open-ended-alert').length @@ -362,18 +474,18 @@ class @CombinedOpenEnded $.postWithPrefix "#{@ajax_url}/check_for_score", (response) => if response.state == "done" or response.state=="post_assessment" delete window.queuePollerID - location.reload() + @reload() else window.queuePollerID = window.setTimeout(@poll, 10000) setup_file_upload: => - if window.File and window.FileReader and window.FileList and window.Blob - if @accept_file_upload == "True" - @can_upload_files = true - @file_upload_area.html('') - @file_upload_area.show() - else - @gentle_alert 'File uploads are required for this question, but are not supported in this browser. Try the newest version of google chrome. Alternatively, if you have uploaded the image to the web, you can paste a link to it into the answer box.' + if @accept_file_upload == "True" + if window.File and window.FileReader and window.FileList and window.Blob + @can_upload_files = true + @file_upload_area.html('') + @file_upload_area.show() + else + @gentle_alert 'File uploads are required for this question, but are not supported in this browser. 
Try the newest version of google chrome. Alternatively, if you have uploaded the image to the web, you can paste a link to it into the answer box.' hide_file_upload: => if @accept_file_upload == "True" @@ -390,3 +502,40 @@ class @CombinedOpenEnded # wrap this so that it can be mocked reload: -> location.reload() + + collapse_question: () => + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + if @question_header.text() == "(Hide)" + new_text = "(Show)" + Logger.log 'oe_hide_question', {location: @location} + else + Logger.log 'oe_show_question', {location: @location} + new_text = "(Hide)" + @question_header.text(new_text) + + prompt_show: () => + if @prompt_container.is(":hidden")==true + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + @question_header.text("(Hide)") + + prompt_hide: () => + if @prompt_container.is(":visible")==true + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + @question_header.text("(Show)") + + log_feedback_click: (event) -> + link_text = $(event.target).html() + if link_text == 'See full feedback' + Logger.log 'oe_show_full_feedback', {} + else if link_text == 'Respond to Feedback' + Logger.log 'oe_show_respond_to_feedback', {} + else + generated_event_type = link_text.toLowerCase().replace(" ","_") + Logger.log "oe_" + generated_event_type, {} + + log_feedback_selection: (event) -> + target_selection = $(event.target).val() + Logger.log 'oe_feedback_response_selected', {value: target_selection} diff --git a/common/lib/xmodule/xmodule/js/src/conditional/display.coffee b/common/lib/xmodule/xmodule/js/src/conditional/display.coffee index 33dcb29079..857424c1dc 100644 --- a/common/lib/xmodule/xmodule/js/src/conditional/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/conditional/display.coffee @@ -1,26 +1,35 @@ class @Conditional - constructor: (element) -> + constructor: (element, callerElId) -> @el = $(element).find('.conditional-wrapper') - @id = 
@el.data('problem-id') - @element_id = @el.attr('id') + + @callerElId = callerElId + + if callerElId isnt undefined + dependencies = @el.data('depends') + if (typeof dependencies is 'string') and (dependencies.length > 0) and (dependencies.indexOf(callerElId) is -1) + return + @url = @el.data('url') - @render() + @render(element) - $: (selector) -> - $(selector, @el) - - updateProgress: (response) => - if response.progress_changed - @el.attr progress: response.progress_status - @el.trigger('progressChanged') - - render: (content) -> - if content - @el.html(content) - XModule.loadModules(@el) - else + render: (element) -> $.postWithPrefix "#{@url}/conditional_get", (response) => - @el.html(response.html) - XModule.loadModules(@el) + @el.html '' + @el.append(i) for i in response.html + parentEl = $(element).parent() + parentId = parentEl.attr 'id' + + if response.message is false + if parentId.indexOf('vert') is 0 + parentEl.hide() + else + $(element).hide() + else + if parentId.indexOf('vert') is 0 + parentEl.show() + else + $(element).show() + + XModule.loadModules @el diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee index deeb82900b..4bdb4bdf05 100644 --- a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee +++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee @@ -175,17 +175,23 @@ class @PeerGradingProblem @prompt_container = $('.prompt-container') @rubric_container = $('.rubric-container') @flag_student_container = $('.flag-student-container') + @answer_unknown_container = $('.answer-unknown-container') @calibration_panel = $('.calibration-panel') @grading_panel = $('.grading-panel') @content_panel = $('.content-panel') @grading_message = $('.grading-message') @grading_message.hide() + @question_header = $('.question-header') + @question_header.click @collapse_question @grading_wrapper 
=$('.grading-wrapper') @calibration_feedback_panel = $('.calibration-feedback') @interstitial_page = $('.interstitial-page') @interstitial_page.hide() + @calibration_interstitial_page = $('.calibration-interstitial-page') + @calibration_interstitial_page.hide() + @error_container = $('.error-container') @submission_key_input = $("input[name='submission-key']") @@ -201,7 +207,13 @@ class @PeerGradingProblem @action_button = $('.action-button') @calibration_feedback_button = $('.calibration-feedback-button') @interstitial_page_button = $('.interstitial-page-button') + @calibration_interstitial_page_button = $('.calibration-interstitial-page-button') @flag_student_checkbox = $('.flag-checkbox') + @answer_unknown_checkbox = $('.answer-unknown-checkbox') + + $(window).keydown @keydown_handler + + @collapse_question() Collapsible.setCollapsibles(@content_panel) @@ -210,12 +222,21 @@ class @PeerGradingProblem @calibration_feedback_button.click => @calibration_feedback_panel.hide() @grading_wrapper.show() + @gentle_alert "Calibration essay saved. Fetched the next essay." 
@is_calibrated_check() @interstitial_page_button.click => @interstitial_page.hide() @is_calibrated_check() + @calibration_interstitial_page_button.click => + @calibration_interstitial_page.hide() + @is_calibrated_check() + + @calibration_feedback_button.hide() + @calibration_feedback_panel.hide() + @error_container.hide() + @is_calibrated_check() @@ -243,6 +264,7 @@ class @PeerGradingProblem submission_key: @submission_key_input.val() feedback: @feedback_area.val() submission_flagged: @flag_student_checkbox.is(':checked') + answer_unknown: @answer_unknown_checkbox.is(':checked') return data @@ -273,6 +295,9 @@ class @PeerGradingProblem else if response.calibrated and @calibration == true @calibration = false @render_interstitial_page() + else if not response.calibrated and @calibration==null + @calibration=true + @render_calibration_interstitial_page() else @calibration = true @fetch_calibration_essay() @@ -296,7 +321,7 @@ class @PeerGradingProblem if response.success @is_calibrated_check() @grading_message.fadeIn() - @grading_message.html("

        Grade sent successfully.

        ") + @grading_message.html("

        Successfully saved your feedback. Fetched the next essay.

        ") else if response.error @render_error(response.error) @@ -308,9 +333,18 @@ class @PeerGradingProblem # check to see whether or not any categories have not been scored if Rubric.check_complete() # show button if we have scores for all categories + @grading_message.hide() @show_submit_button() @grade = Rubric.get_total_score() + keydown_handler: (event) => + if event.which == 13 && @submit_button.is(':visible') + if @calibration + @submit_calibration_essay() + else + @submit_grade() + + ########## @@ -323,7 +357,7 @@ class @PeerGradingProblem if response.success # load in all the data - @submission_container.html("

        Training Essay

        ") + @submission_container.html("") @render_submission_data(response) # TODO: indicate that we're in calibration mode @calibration_panel.addClass('current-state') @@ -337,6 +371,9 @@ class @PeerGradingProblem @calibration_panel.find('.grading-text').hide() @grading_panel.find('.grading-text').hide() @flag_student_container.hide() + @answer_unknown_container.hide() + + @feedback_area.val("") @submit_button.unbind('click') @submit_button.click @submit_calibration_essay @@ -350,7 +387,7 @@ class @PeerGradingProblem render_submission: (response) => if response.success @submit_button.hide() - @submission_container.html("

        Submitted Essay

        ") + @submission_container.html("") @render_submission_data(response) @calibration_panel.removeClass('current-state') @@ -364,6 +401,8 @@ class @PeerGradingProblem @calibration_panel.find('.grading-text').show() @grading_panel.find('.grading-text').show() @flag_student_container.show() + @answer_unknown_container.show() + @feedback_area.val("") @submit_button.unbind('click') @submit_button.click @submit_grade @@ -395,6 +434,7 @@ class @PeerGradingProblem @submit_button.hide() @action_button.hide() @calibration_feedback_panel.hide() + Rubric.initialize(@location) render_calibration_feedback: (response) => @@ -408,18 +448,25 @@ class @PeerGradingProblem actual_score = parseInt(response.actual_score) if score == actual_score - calibration_wrapper.append("

        Congratulations! Your score matches the actual score!

        ") + calibration_wrapper.append("

        Your score matches the actual score!

        ") else - calibration_wrapper.append("

        Please try to understand the grading critera better to be more accurate next time.

        ") + calibration_wrapper.append("

        You may want to review the rubric again.

        ") # disable score selection and submission from the grading interface $("input[name='score-selection']").attr('disabled', true) @submit_button.hide() + @calibration_feedback_button.show() render_interstitial_page: () => @content_panel.hide() + @grading_message.hide() @interstitial_page.show() + render_calibration_interstitial_page: () => + @content_panel.hide() + @action_button.hide() + @calibration_interstitial_page.show() + render_error: (error_message) => @error_container.show() @calibration_feedback_panel.hide() @@ -433,3 +480,18 @@ class @PeerGradingProblem setup_score_selection: (max_score) => # And now hook up an event handler again $("input[class='score-selection']").change @graded_callback + + gentle_alert: (msg) => + @grading_message.fadeIn() + @grading_message.html("

        " + msg + "

        ") + + collapse_question: () => + @prompt_container.slideToggle() + @prompt_container.toggleClass('open') + if @question_header.text() == "(Hide)" + Logger.log 'peer_grading_hide_question', {location: @location} + new_text = "(Show)" + else + Logger.log 'peer_grading_show_question', {location: @location} + new_text = "(Hide)" + @question_header.text(new_text) diff --git a/common/lib/xmodule/xmodule/js/src/poll/logme.js b/common/lib/xmodule/xmodule/js/src/poll/logme.js new file mode 100644 index 0000000000..c045757044 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/poll/logme.js @@ -0,0 +1,54 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('logme', [], function () { + var debugMode; + + // debugMode can be one of the following: + // + // true - All messages passed to logme will be written to the internal + // browser console. + // false - Suppress all output to the internal browser console. + // + // Obviously, if anywhere there is a direct console.log() call, we can't do + // anything about it. That's why use logme() - it will allow to turn off + // the output of debug information with a single change to a variable. + debugMode = true; + + return logme; + + /* + * function: logme + * + * A helper function that provides logging facilities. We don't want + * to call console.log() directly, because sometimes it is not supported + * by the browser. Also when everything is routed through this function. + * the logging output can be easily turned off. + * + * logme() supports multiple parameters. Each parameter will be passed to + * console.log() function separately. 
+ * + */ + function logme() { + var i; + + if ( + (typeof debugMode === 'undefined') || + (debugMode !== true) || + (typeof window.console === 'undefined') + ) { + return; + } + + for (i = 0; i < arguments.length; i++) { + window.console.log(arguments[i]); + } + } // End-of: function logme +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/poll/poll.js b/common/lib/xmodule/xmodule/js/src/poll/poll.js new file mode 100644 index 0000000000..a2ccbc7c03 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/poll/poll.js @@ -0,0 +1,5 @@ +window.Poll = function (el) { + RequireJS.require(['PollMain'], function (PollMain) { + new PollMain(el); + }); +}; diff --git a/common/lib/xmodule/xmodule/js/src/poll/poll_main.js b/common/lib/xmodule/xmodule/js/src/poll/poll_main.js new file mode 100644 index 0000000000..74f2a488d7 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/poll/poll_main.js @@ -0,0 +1,323 @@ +(function (requirejs, require, define) { +define('PollMain', ['logme'], function (logme) { + +PollMain.prototype = { + +'showAnswerGraph': function (poll_answers, total) { + var _this, totalValue; + + totalValue = parseFloat(total); + if (isFinite(totalValue) === false) { + return; + } + + _this = this; + + $.each(poll_answers, function (index, value) { + var numValue, percentValue; + + numValue = parseFloat(value); + if (isFinite(numValue) === false) { + return; + } + + percentValue = (numValue / totalValue) * 100.0; + + _this.answersObj[index].statsEl.show(); + _this.answersObj[index].numberEl.html('' + value + ' (' + percentValue.toFixed(1) + '%)'); + _this.answersObj[index].percentEl.css({ + 
'width': '' + percentValue.toFixed(1) + '%' + }); + }); +}, + +'submitAnswer': function (answer, answerObj) { + var _this; + + // Make sure that the user can answer a question only once. + if (this.questionAnswered === true) { + return; + } + this.questionAnswered = true; + + _this = this; + + console.log('submit answer'); + + answerObj.buttonEl.addClass('answered'); + + // Send the data to the server as an AJAX request. Attach a callback that will + // be fired on server's response. + $.postWithPrefix( + _this.ajax_url + '/' + answer, {}, + function (response) { + console.log('success! response = '); + console.log(response); + + _this.showAnswerGraph(response.poll_answers, response.total); + + if (_this.canReset === true) { + _this.resetButton.show(); + } + + // Initialize Conditional constructors. + if (_this.wrapperSectionEl !== null) { + $(_this.wrapperSectionEl).find('.xmodule_ConditionalModule').each(function (index, value) { + new window.Conditional(value, _this.id.replace(/^poll_/, '')); + }); + } + } + ); + +}, // End-of: 'submitAnswer': function (answer, answerEl) { + + +'submitReset': function () { + var _this; + + _this = this; + + console.log('submit reset'); + + // Send the data to the server as an AJAX request. Attach a callback that will + // be fired on server's response. + $.postWithPrefix( + this.ajax_url + '/' + 'reset_poll', + {}, + function (response) { + console.log('success! response = '); + console.log(response); + + if ( + (response.hasOwnProperty('status') !== true) || + (typeof response.status !== 'string') || + (response.status.toLowerCase() !== 'success')) { + return; + } + + _this.questionAnswered = false; + _this.questionEl.find('.button.answered').removeClass('answered'); + _this.questionEl.find('.stats').hide(); + _this.resetButton.hide(); + + // Initialize Conditional constructors. We will specify the third parameter as 'true' + // notifying the constructor that this is a reset operation. 
+ if (_this.wrapperSectionEl !== null) { + $(_this.wrapperSectionEl).find('.xmodule_ConditionalModule').each(function (index, value) { + new window.Conditional(value, _this.id.replace(/^poll_/, '')); + }); + } + } + ); +}, // End-of: 'submitAnswer': function (answer, answerEl) { + +'postInit': function () { + var _this; + + // Access this object inside inner functions. + _this = this; + + if ( + (this.jsonConfig.poll_answer.length > 0) && + (this.jsonConfig.answers.hasOwnProperty(this.jsonConfig.poll_answer) === false) + ) { + this.questionEl.append( + '

        Error!

        ' + + '

        XML data format changed. List of answers was modified, but poll data was not updated.

        ' + ); + + return; + } + + // Get the DOM id of the question. + this.id = this.questionEl.attr('id'); + + // Get the URL to which we will post the users answer to the question. + this.ajax_url = this.questionEl.data('ajax-url'); + + this.questionHtmlMarkup = $('
        ').html(this.jsonConfig.question).text(); + this.questionEl.append(this.questionHtmlMarkup); + + // When the user selects and answer, we will set this flag to true. + this.questionAnswered = false; + + this.answersObj = {}; + this.shortVersion = true; + + $.each(this.jsonConfig.answers, function (index, value) { + if (value.length >= 18) { + _this.shortVersion = false; + } + }); + + $.each(this.jsonConfig.answers, function (index, value) { + var answer; + + answer = {}; + + _this.answersObj[index] = answer; + + answer.el = $('
        '); + + answer.questionEl = $('
        '); + answer.buttonEl = $('
        '); + answer.textEl = $('
        '); + answer.questionEl.append(answer.buttonEl); + answer.questionEl.append(answer.textEl); + + answer.el.append(answer.questionEl); + + answer.statsEl = $('
        '); + answer.barEl = $('
        '); + answer.percentEl = $('
        '); + answer.barEl.append(answer.percentEl); + answer.numberEl = $('
        '); + answer.statsEl.append(answer.barEl); + answer.statsEl.append(answer.numberEl); + + answer.statsEl.hide(); + + answer.el.append(answer.statsEl); + + answer.textEl.html(value); + + if (_this.shortVersion === true) { + $.each(answer, function (index, value) { + if (value instanceof jQuery) { + value.addClass('short'); + } + }); + } + + answer.el.appendTo(_this.questionEl); + + answer.textEl.on('click', function () { + _this.submitAnswer(index, answer); + }); + + answer.buttonEl.on('click', function () { + _this.submitAnswer(index, answer); + }); + + if (index === _this.jsonConfig.poll_answer) { + answer.buttonEl.addClass('answered'); + _this.questionAnswered = true; + } + }); + + console.log(this.jsonConfig.reset); + + if ((typeof this.jsonConfig.reset === 'string') && (this.jsonConfig.reset.toLowerCase() === 'true')) { + this.canReset = true; + + this.resetButton = $('
        Change your vote
        '); + + if (this.questionAnswered === false) { + this.resetButton.hide(); + } + + this.resetButton.appendTo(this.questionEl); + + this.resetButton.on('click', function () { + _this.submitReset(); + }); + } else { + this.canReset = false; + } + + // If it turns out that the user already answered the question, show the answers graph. + if (this.questionAnswered === true) { + this.showAnswerGraph(this.jsonConfig.poll_answers, this.jsonConfig.total); + } +} // End-of: 'postInit': function () { +}; // End-of: PollMain.prototype = { + +return PollMain; + +function PollMain(el) { + var _this; + + this.questionEl = $(el).find('.poll_question'); + if (this.questionEl.length !== 1) { + // We require one question DOM element. + logme('ERROR: PollMain constructor requires one question DOM element.'); + + return; + } + + // Just a safety precussion. If we run this code more than once, multiple 'click' callback handlers will be + // attached to the same DOM elements. We don't want this to happen. + if (this.questionEl.attr('poll_main_processed') === 'true') { + logme( + 'ERROR: PolMain JS constructor was called on a DOM element that has already been processed once.' + ); + + return; + } + + // This element was not processed earlier. + // Make sure that next time we will not process this element a second time. + this.questionEl.attr('poll_main_processed', 'true'); + + // Access this object inside inner functions. + _this = this; + + // DOM element which contains the current poll along with any conditionals. By default we assume that such + // element is not present. We will try to find it. 
+ this.wrapperSectionEl = null; + + (function (tempEl, c1) { + while (tempEl.tagName.toLowerCase() !== 'body') { + tempEl = $(tempEl).parent()[0]; + c1 += 1; + + if ( + (tempEl.tagName.toLowerCase() === 'section') && + ($(tempEl).hasClass('xmodule_WrapperModule') === true) + ) { + _this.wrapperSectionEl = tempEl; + + break; + } else if (c1 > 50) { + // In case something breaks, and we enter an endless loop, a sane + // limit for loop iterations. + + break; + } + } + }($(el)[0], 0)); + + try { + this.jsonConfig = JSON.parse(this.questionEl.children('.poll_question_div').html()); + + $.postWithPrefix( + '' + this.questionEl.data('ajax-url') + '/' + 'get_state', {}, + function (response) { + _this.jsonConfig.poll_answer = response.poll_answer; + _this.jsonConfig.total = response.total; + + $.each(response.poll_answers, function (index, value) { + _this.jsonConfig.poll_answers[index] = value; + }); + + _this.questionEl.children('.poll_question_div').html(JSON.stringify(_this.jsonConfig)); + + _this.postInit(); + } + ); + + return; + } catch (err) { + logme( + 'ERROR: Invalid JSON config for poll ID "' + this.id + '".', + 'Error messsage: "' + err.message + '".' 
+ ); + + return; + } +} // End-of: function PollMain(el) { + +}); // End-of: define('PollMain', ['logme'], function (logme) { + +// End-of: (function (requirejs, require, define) { +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); diff --git a/common/lib/xmodule/xmodule/js/src/problem/edit.coffee b/common/lib/xmodule/xmodule/js/src/problem/edit.coffee index 2bfe483a7f..b723f230e9 100644 --- a/common/lib/xmodule/xmodule/js/src/problem/edit.coffee +++ b/common/lib/xmodule/xmodule/js/src/problem/edit.coffee @@ -231,13 +231,14 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor // replace string and numerical xml = xml.replace(/^\=\s*(.*?$)/gm, function(match, p) { var string; - var params = /(.*?)\+\-\s*(.*?$)/.exec(p); - if(parseFloat(p)) { + var floatValue = parseFloat(p); + if(!isNaN(floatValue)) { + var params = /(.*?)\+\-\s*(.*?$)/.exec(p); if(params) { - string = '\n'; + string = '\n'; string += ' \n'; } else { - string = '\n'; + string = '\n'; } string += ' \n'; string += '\n\n'; diff --git a/common/lib/xmodule/xmodule/js/src/sequence/display.coffee b/common/lib/xmodule/xmodule/js/src/sequence/display.coffee index 793e7f4f3c..0e4c9788ba 100644 --- a/common/lib/xmodule/xmodule/js/src/sequence/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/sequence/display.coffee @@ -56,7 +56,7 @@ class @Sequence element.removeClass('progress-none') .removeClass('progress-some') .removeClass('progress-done') - + switch progress when 'none' then element.addClass('progress-none') when 'in_progress' then element.addClass('progress-some') @@ -65,6 +65,11 @@ class @Sequence toggleArrows: => @$('.sequence-nav-buttons a').unbind('click') + if @contents.length == 0 + @$('.sequence-nav-buttons .prev a').addClass('disabled') + @$('.sequence-nav-buttons .next a').addClass('disabled') + return + if @position == 1 @$('.sequence-nav-buttons .prev a').addClass('disabled') else @@ -105,8 +110,8 @@ class @Sequence if (1 <= new_position) and (new_position <= 
@num_contents) Logger.log "seq_goto", old: @position, new: new_position, id: @id - - # On Sequence chage, destroy any existing polling thread + + # On Sequence chage, destroy any existing polling thread # for queued submissions, see ../capa/display.coffee if window.queuePollerID window.clearTimeout(window.queuePollerID) diff --git a/common/lib/xmodule/xmodule/js/src/video/display.coffee b/common/lib/xmodule/xmodule/js/src/video/display.coffee index 1876330340..aadafbc8d0 100644 --- a/common/lib/xmodule/xmodule/js/src/video/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/video/display.coffee @@ -4,7 +4,6 @@ class @Video @id = @el.attr('id').replace(/video_/, '') @start = @el.data('start') @end = @el.data('end') - @caption_data_dir = @el.data('caption-data-dir') @caption_asset_path = @el.data('caption-asset-path') @show_captions = @el.data('show-captions') == "true" window.player = null diff --git a/common/lib/xmodule/xmodule/js/src/videoalpha/display/html5_video.js b/common/lib/xmodule/xmodule/js/src/videoalpha/display/html5_video.js index acdc03932c..c3cc462ab8 100644 --- a/common/lib/xmodule/xmodule/js/src/videoalpha/display/html5_video.js +++ b/common/lib/xmodule/xmodule/js/src/videoalpha/display/html5_video.js @@ -221,6 +221,15 @@ this.HTML5Video = (function () { // and end playing at the specified end time. After it was paused, or when a seek operation happeded, // the starting time and ending time will reset to the beginning and the end of the video respectively. this.video.addEventListener('canplay', function () { + // Because firefox triggers 'canplay' event every time when 'currentTime' property + // changes, we must make sure that this block of code runs only once. Otherwise, + // this will be an endless loop ('currentTime' property is changed below). + // + // Chrome is immune to this behavior. 
+ if (_this.playerState !== HTML5Video.PlayerState.UNSTARTED) { + return; + } + _this.playerState = HTML5Video.PlayerState.PAUSED; if (_this.start > _this.video.duration) { diff --git a/common/lib/xmodule/xmodule/js/src/wrapper/edit.coffee b/common/lib/xmodule/xmodule/js/src/wrapper/edit.coffee new file mode 100644 index 0000000000..a13c5a8bc7 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/wrapper/edit.coffee @@ -0,0 +1,10 @@ +class @WrapperDescriptor extends XModule.Descriptor + constructor: (@element) -> + console.log 'WrapperDescriptor' + @$items = $(@element).find(".vert-mod") + @$items.sortable( + update: (event, ui) => @update() + ) + + save: -> + children: $('.vert-mod li', @element).map((idx, el) -> $(el).data('id')).toArray() diff --git a/common/lib/xmodule/xmodule/mako_module.py b/common/lib/xmodule/xmodule/mako_module.py index dab5d5e85b..84db6ad779 100644 --- a/common/lib/xmodule/xmodule/mako_module.py +++ b/common/lib/xmodule/xmodule/mako_module.py @@ -1,5 +1,5 @@ -from x_module import XModuleDescriptor, DescriptorSystem -import logging +from .x_module import XModuleDescriptor, DescriptorSystem +from .modulestore.inheritance import own_metadata class MakoDescriptorSystem(DescriptorSystem): @@ -21,21 +21,21 @@ class MakoModuleDescriptor(XModuleDescriptor): the descriptor as the `module` parameter to that template """ - def __init__(self, system, definition=None, **kwargs): + def __init__(self, system, location, model_data): if getattr(system, 'render_template', None) is None: raise TypeError('{system} must have a render_template function' ' in order to use a MakoDescriptor'.format( system=system)) - super(MakoModuleDescriptor, self).__init__(system, definition, **kwargs) + super(MakoModuleDescriptor, self).__init__(system, location, model_data) def get_context(self): """ Return the context to render the mako template with """ - return {'module': self, - 'metadata': self.metadata, - 'editable_metadata_fields': self.editable_metadata_fields - } + 
return { + 'module': self, + 'editable_metadata_fields': self.editable_metadata_fields, + } def get_html(self): return self.system.render_template( @@ -44,5 +44,10 @@ class MakoModuleDescriptor(XModuleDescriptor): # cdodge: encapsulate a means to expose "editable" metadata fields (i.e. not internal system metadata) @property def editable_metadata_fields(self): - subset = [name for name in self.metadata.keys() if name not in self.system_metadata_fields] - return subset + fields = {} + for field, value in own_metadata(self).items(): + if field in self.system_metadata_fields: + continue + + fields[field] = value + return fields diff --git a/common/lib/xmodule/xmodule/modulestore/__init__.py b/common/lib/xmodule/xmodule/modulestore/__init__.py index a9df6c3504..2593b04472 100644 --- a/common/lib/xmodule/xmodule/modulestore/__init__.py +++ b/common/lib/xmodule/xmodule/modulestore/__init__.py @@ -10,6 +10,7 @@ from collections import namedtuple from .exceptions import InvalidLocationError, InsufficientSpecificationError from xmodule.errortracker import ErrorLog, make_error_tracker +from bson.son import SON log = logging.getLogger('mitx.' + 'modulestore') @@ -23,6 +24,15 @@ URL_RE = re.compile(""" (@(?P[^/]+))? """, re.VERBOSE) +MISSING_SLASH_URL_RE = re.compile(""" + (?P[^:]+):/ + (?P[^/]+)/ + (?P[^/]+)/ + (?P[^/]+)/ + (?P[^@]+) + (@(?P[^/]+))? + """, re.VERBOSE) + # TODO (cpennington): We should decide whether we want to expand the # list of valid characters in a location INVALID_CHARS = re.compile(r"[^\w.-]") @@ -62,6 +72,17 @@ class Location(_LocationBase): """ return Location._clean(value, INVALID_CHARS) + + @staticmethod + def clean_keeping_underscores(value): + """ + Return value, replacing INVALID_CHARS, but not collapsing multiple '_' chars. + This for cleaning asset names, as the YouTube ID's may have underscores in them, and we need the + transcript asset name to match. In the future we may want to change the behavior of _clean. 
+ """ + return INVALID_CHARS.sub('_', value) + + @staticmethod def clean_for_url_name(value): """ @@ -164,12 +185,16 @@ class Location(_LocationBase): if isinstance(location, basestring): match = URL_RE.match(location) if match is None: - log.debug('location is instance of %s but no URL match' % basestring) - raise InvalidLocationError(location) - else: - groups = match.groupdict() - check_dict(groups) - return _LocationBase.__new__(_cls, **groups) + # cdodge: + # check for a dropped slash near the i4x:// element of the location string. This can happen with some + # redirects (e.g. edx.org -> www.edx.org which I think happens in Nginx) + match = MISSING_SLASH_URL_RE.match(location) + if match is None: + log.debug('location is instance of %s but no URL match' % basestring) + raise InvalidLocationError(location) + groups = match.groupdict() + check_dict(groups) + return _LocationBase.__new__(_cls, **groups) elif isinstance(location, (list, tuple)): if len(location) not in (5, 6): log.debug('location has wrong length') @@ -399,6 +424,7 @@ class ModuleStoreBase(ModuleStore): Set up the error-tracking logic. 
''' self._location_errors = {} # location -> ErrorLog + self.metadata_inheritance_cache = None def _get_errorlog(self, location): """ @@ -432,3 +458,13 @@ class ModuleStoreBase(ModuleStore): if c.id == course_id: return c return None + + +def namedtuple_to_son(namedtuple, prefix=''): + """ + Converts a namedtuple into a SON object with the same key order + """ + son = SON() + for idx, field_name in enumerate(namedtuple._fields): + son[prefix + field_name] = namedtuple[idx] + return son diff --git a/common/lib/xmodule/xmodule/modulestore/django.py b/common/lib/xmodule/xmodule/modulestore/django.py index 0b86c2fea4..b0a65273c7 100644 --- a/common/lib/xmodule/xmodule/modulestore/django.py +++ b/common/lib/xmodule/xmodule/modulestore/django.py @@ -33,11 +33,12 @@ def modulestore(name='default'): class_ = load_function(settings.MODULESTORE[name]['ENGINE']) options = {} + options.update(settings.MODULESTORE[name]['OPTIONS']) for key in FUNCTION_KEYS: if key in options: options[key] = load_function(options[key]) - + _MODULESTORES[name] = class_( **options ) diff --git a/common/lib/xmodule/xmodule/modulestore/draft.py b/common/lib/xmodule/xmodule/modulestore/draft.py index 6124d240a7..ced8e7d42e 100644 --- a/common/lib/xmodule/xmodule/modulestore/draft.py +++ b/common/lib/xmodule/xmodule/modulestore/draft.py @@ -1,7 +1,8 @@ from datetime import datetime -from . import ModuleStoreBase, Location +from . import ModuleStoreBase, Location, namedtuple_to_son from .exceptions import ItemNotFoundError +import logging DRAFT = 'draft' @@ -15,11 +16,11 @@ def as_draft(location): def wrap_draft(item): """ - Sets `item.metadata['is_draft']` to `True` if the item is a - draft, and false otherwise. Sets the item's location to the + Sets `item.is_draft` to `True` if the item is a + draft, and `False` otherwise. 
Sets the item's location to the non-draft location in either case """ - item.metadata['is_draft'] = item.location.revision == DRAFT + setattr(item, 'is_draft', item.location.revision == DRAFT) item.location = item.location._replace(revision=None) return item @@ -55,11 +56,10 @@ class DraftModuleStore(ModuleStoreBase): get_children() to cache. None indicates to cache all descendents """ - # cdodge: we're forcing depth=0 here as the Draft store is not handling caching well try: - return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=0)) + return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=depth)) except ItemNotFoundError: - return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=0)) + return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=depth)) def get_instance(self, course_id, location, depth=0): """ @@ -67,11 +67,10 @@ class DraftModuleStore(ModuleStoreBase): TODO (vshnayder): this may want to live outside the modulestore eventually """ - # cdodge: we're forcing depth=0 here as the Draft store is not handling caching well try: - return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=0)) + return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=depth)) except ItemNotFoundError: - return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=0)) + return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=depth)) def get_items(self, location, course_id=None, depth=0): """ @@ -88,9 +87,8 @@ class DraftModuleStore(ModuleStoreBase): """ draft_loc = as_draft(location) - # cdodge: we're forcing depth=0 here as the Draft store is not handling caching well - draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=0) - items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=0) 
+ draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=depth) + items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=depth) draft_locs_found = set(item.location._replace(revision=None) for item in draft_items) non_draft_items = [ @@ -118,7 +116,7 @@ class DraftModuleStore(ModuleStoreBase): """ draft_loc = as_draft(location) draft_item = self.get_item(location) - if not draft_item.metadata['is_draft']: + if not getattr(draft_item, 'is_draft', False): self.clone_item(location, draft_loc) return super(DraftModuleStore, self).update_item(draft_loc, data) @@ -133,7 +131,7 @@ class DraftModuleStore(ModuleStoreBase): """ draft_loc = as_draft(location) draft_item = self.get_item(location) - if not draft_item.metadata['is_draft']: + if not getattr(draft_item, 'is_draft', False): self.clone_item(location, draft_loc) return super(DraftModuleStore, self).update_children(draft_loc, children) @@ -149,7 +147,7 @@ class DraftModuleStore(ModuleStoreBase): draft_loc = as_draft(location) draft_item = self.get_item(location) - if not draft_item.metadata['is_draft']: + if not getattr(draft_item, 'is_draft', False): self.clone_item(location, draft_loc) if 'is_draft' in metadata: @@ -179,17 +177,13 @@ class DraftModuleStore(ModuleStoreBase): Save a current draft to the underlying modulestore """ draft = self.get_item(location) - metadata = {} - metadata.update(draft.metadata) - metadata['published_date'] = tuple(datetime.utcnow().timetuple()) - metadata['published_by'] = published_by_id - if 'is_draft' in metadata: - del metadata['is_draft'] + draft.cms.published_date = datetime.utcnow() + draft.cms.published_by = published_by_id + super(DraftModuleStore, self).update_item(location, draft._model_data._kvs._data) + super(DraftModuleStore, self).update_children(location, draft._model_data._kvs._children) + super(DraftModuleStore, self).update_metadata(location, draft._model_data._kvs._metadata) - super(DraftModuleStore, 
self).update_item(location, draft.definition.get('data', {})) - super(DraftModuleStore, self).update_children(location, draft.definition.get('children', [])) - super(DraftModuleStore, self).update_metadata(location, metadata) self.delete_item(location) def unpublish(self, location): @@ -198,3 +192,36 @@ class DraftModuleStore(ModuleStoreBase): """ super(DraftModuleStore, self).clone_item(location, as_draft(location)) super(DraftModuleStore, self).delete_item(location) + + def _query_children_for_cache_children(self, items): + # first get non-draft in a round-trip + queried_children = [] + to_process_non_drafts = super(DraftModuleStore, self)._query_children_for_cache_children(items) + + to_process_dict = {} + for non_draft in to_process_non_drafts: + to_process_dict[Location(non_draft["_id"])] = non_draft + + # now query all draft content in another round-trip + query = { + '_id': {'$in': [namedtuple_to_son(as_draft(Location(item))) for item in items]} + } + to_process_drafts = list(self.collection.find(query)) + + # now we have to go through all drafts and replace the non-draft + # with the draft. 
This is because the semantics of the DraftStore is to + # always return the draft - if available + for draft in to_process_drafts: + draft_loc = Location(draft["_id"]) + draft_as_non_draft_loc = draft_loc._replace(revision=None) + + # does non-draft exist in the collection + # if so, replace it + if draft_as_non_draft_loc in to_process_dict: + to_process_dict[draft_as_non_draft_loc] = draft + + # convert the dict - which is used for look ups - back into a list + for key, value in to_process_dict.iteritems(): + queried_children.append(value) + + return queried_children diff --git a/common/lib/xmodule/xmodule/modulestore/inheritance.py b/common/lib/xmodule/xmodule/modulestore/inheritance.py new file mode 100644 index 0000000000..d819abe367 --- /dev/null +++ b/common/lib/xmodule/xmodule/modulestore/inheritance.py @@ -0,0 +1,67 @@ +from xblock.core import Scope + +# A list of metadata that this module can inherit from its parent module +INHERITABLE_METADATA = ( + 'graded', 'start', 'due', 'graceperiod', 'showanswer', 'rerandomize', + # TODO (ichuang): used for Fall 2012 xqa server access + 'xqa_key', + # How many days early to show a course element to beta testers (float) + # intended to be set per-course, but can be overridden in for specific + # elements. Can be a float. + 'days_early_for_beta' +) + +def compute_inherited_metadata(descriptor): + """Given a descriptor, traverse all of its descendants and do metadata + inheritance. Should be called on a CourseDescriptor after importing a + course. + + NOTE: This means that there is no such thing as lazy loading at the + moment--this accesses all the children.""" + for child in descriptor.get_children(): + inherit_metadata(child, descriptor._model_data) + compute_inherited_metadata(child) + + +def inherit_metadata(descriptor, model_data): + """ + Updates this module with metadata inherited from a containing module. 
+ Only metadata specified in self.inheritable_metadata will + be inherited + """ + if not hasattr(descriptor, '_inherited_metadata'): + setattr(descriptor, '_inherited_metadata', {}) + + # Set all inheritable metadata from kwargs that are + # in self.inheritable_metadata and aren't already set in metadata + for attr in INHERITABLE_METADATA: + if attr not in descriptor._model_data and attr in model_data: + descriptor._inherited_metadata[attr] = model_data[attr] + descriptor._model_data[attr] = model_data[attr] + + +def own_metadata(module): + """ + Return a dictionary that contains only non-inherited field keys, + mapped to their values + """ + inherited_metadata = getattr(module, '_inherited_metadata', {}) + metadata = {} + for field in module.fields + module.lms.fields: + # Only save metadata that wasn't inherited + if field.scope != Scope.settings: + continue + + if field.name in inherited_metadata and module._model_data.get(field.name) == inherited_metadata.get(field.name): + continue + + if field.name not in module._model_data: + continue + + try: + metadata[field.name] = module._model_data[field.name] + except KeyError: + # Ignore any missing keys in _model_data + pass + + return metadata diff --git a/common/lib/xmodule/xmodule/modulestore/mongo.py b/common/lib/xmodule/xmodule/modulestore/mongo.py index f4db62ac31..f1e09b024a 100644 --- a/common/lib/xmodule/xmodule/modulestore/mongo.py +++ b/common/lib/xmodule/xmodule/modulestore/mongo.py @@ -1,35 +1,114 @@ import pymongo import sys import logging +import copy -from bson.son import SON +from collections import namedtuple from fs.osfs import OSFS from itertools import repeat from path import path +from datetime import datetime +from operator import attrgetter from importlib import import_module from xmodule.errortracker import null_error_tracker, exc_info_to_str -from xmodule.x_module import XModuleDescriptor from xmodule.mako_module import MakoDescriptorSystem +from xmodule.x_module import XModuleDescriptor 
from xmodule.error_module import ErrorDescriptor +from xblock.runtime import DbModel, KeyValueStore, InvalidScopeError +from xblock.core import Scope -from . import ModuleStoreBase, Location +from . import ModuleStoreBase, Location, namedtuple_to_son from .draft import DraftModuleStore from .exceptions import (ItemNotFoundError, DuplicateItemError) +from .inheritance import own_metadata, INHERITABLE_METADATA, inherit_metadata + +log = logging.getLogger(__name__) # TODO (cpennington): This code currently operates under the assumption that # there is only one revision for each item. Once we start versioning inside the CMS, # that assumption will have to change +class MongoKeyValueStore(KeyValueStore): + """ + A KeyValueStore that maps keyed data access to one of the 3 data areas + known to the MongoModuleStore (data, children, and metadata) + """ + def __init__(self, data, children, metadata): + self._data = data + self._children = children + self._metadata = metadata + + def get(self, key): + if key.scope == Scope.children: + return self._children + elif key.scope == Scope.parent: + return None + elif key.scope == Scope.settings: + return self._metadata[key.field_name] + elif key.scope == Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + return self._data + else: + return self._data[key.field_name] + else: + raise InvalidScopeError(key.scope) + + def set(self, key, value): + if key.scope == Scope.children: + self._children = value + elif key.scope == Scope.settings: + self._metadata[key.field_name] = value + elif key.scope == Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + self._data = value + else: + self._data[key.field_name] = value + else: + raise InvalidScopeError(key.scope) + + def delete(self, key): + if key.scope == Scope.children: + self._children = [] + elif key.scope == Scope.settings: + if key.field_name in self._metadata: + del self._metadata[key.field_name] + elif key.scope == 
Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + self._data = None + else: + del self._data[key.field_name] + else: + raise InvalidScopeError(key.scope) + + def has(self, key): + if key.scope in (Scope.children, Scope.parent): + return True + elif key.scope == Scope.settings: + return key.field_name in self._metadata + elif key.scope == Scope.content: + if key.field_name == 'data' and not isinstance(self._data, dict): + return True + else: + return key.field_name in self._data + else: + return False + + +MongoUsage = namedtuple('MongoUsage', 'id, def_id') + + class CachingDescriptorSystem(MakoDescriptorSystem): """ A system that has a cache of module json that it will use to load modules from, with a backup of calling to the underlying modulestore for more data + TODO (cdodge) when the 'split module store' work has been completed we can remove all + references to metadata_inheritance_tree """ def __init__(self, modulestore, module_data, default_class, resources_fs, - error_tracker, render_template): + error_tracker, render_template, cached_metadata=None): """ modulestore: the module store that can be used to retrieve additional modules @@ -54,19 +133,49 @@ class CachingDescriptorSystem(MakoDescriptorSystem): # cdodge: other Systems have a course_id attribute defined. 
To keep things consistent, let's # define an attribute here as well, even though it's None self.course_id = None + self.cached_metadata = cached_metadata + def load_item(self, location): + """ + Return an XModule instance for the specified location + """ location = Location(location) json_data = self.module_data.get(location) if json_data is None: - return self.modulestore.get_item(location) + module = self.modulestore.get_item(location) + if module is not None: + # update our own cache after going to the DB to get cache miss + self.module_data.update(module.system.module_data) + return module else: - # TODO (vshnayder): metadata inheritance is somewhat broken because mongo, doesn't - # always load an entire course. We're punting on this until after launch, and then - # will build a proper course policy framework. + # load the module and apply the inherited metadata try: - return XModuleDescriptor.load_from_json(json_data, self, self.default_class) + class_ = XModuleDescriptor.load_class( + json_data['location']['category'], + self.default_class + ) + definition = json_data.get('definition', {}) + metadata = json_data.get('metadata', {}) + for old_name, new_name in class_.metadata_translations.items(): + if old_name in metadata: + metadata[new_name] = metadata[old_name] + del metadata[old_name] + + kvs = MongoKeyValueStore( + definition.get('data', {}), + definition.get('children', []), + metadata, + ) + + model_data = DbModel(kvs, class_, None, MongoUsage(self.course_id, location)) + module = class_(self, location, model_data) + if self.cached_metadata is not None: + metadata_to_inherit = self.cached_metadata.get(location.url(), {}) + inherit_metadata(module, metadata_to_inherit) + return module except: + log.warning("Failed to load descriptor", exc_info=True) return ErrorDescriptor.from_json( json_data, self, @@ -92,14 +201,7 @@ def location_to_query(location, wildcard=True): return query -def namedtuple_to_son(namedtuple, prefix=''): - """ - Converts a 
namedtuple into a SON object with the same key order - """ - son = SON() - for idx, field_name in enumerate(namedtuple._fields): - son[prefix + field_name] = namedtuple[idx] - return son +metadata_cache_key = attrgetter('org', 'course') class MongoModuleStore(ModuleStoreBase): @@ -111,7 +213,8 @@ class MongoModuleStore(ModuleStoreBase): def __init__(self, host, db, collection, fs_root, render_template, port=27017, default_class=None, error_tracker=null_error_tracker, - user=None, password=None, **kwargs): + user=None, password=None, request_cache=None, + metadata_inheritance_cache_subsystem=None, **kwargs): ModuleStoreBase.__init__(self) @@ -124,7 +227,6 @@ class MongoModuleStore(ModuleStoreBase): if user is not None and password is not None: self.collection.database.authenticate(user, password) - # Force mongo to report errors, at the expense of performance self.collection.safe = True @@ -142,6 +244,126 @@ class MongoModuleStore(ModuleStoreBase): self.fs_root = path(fs_root) self.error_tracker = error_tracker self.render_template = render_template + self.ignore_write_events_on_courses = [] + self.request_cache = request_cache + self.metadata_inheritance_cache_subsystem = metadata_inheritance_cache_subsystem + + def compute_metadata_inheritance_tree(self, location): + ''' + TODO (cdodge) This method can be deleted when the 'split module store' work has been completed + ''' + + # get all collections in the course, this query should not return any leaf nodes + # note this is a bit ugly as when we add new categories of containers, we have to add it here + query = { + '_id.org': location.org, + '_id.course': location.course, + '_id.category': {'$in': ['course', 'chapter', 'sequential', 'vertical']} + } + # we just want the Location, children, and inheritable metadata + record_filter = {'_id': 1, 'definition.children': 1} + + # just get the inheritable metadata since that is all we need for the computation + # this minimizes both data pushed over the wire + for attr in 
INHERITABLE_METADATA: + record_filter['metadata.{0}'.format(attr)] = 1 + + # call out to the DB + resultset = self.collection.find(query, record_filter) + + results_by_url = {} + root = None + + # now go through the results and order them by the location url + for result in resultset: + location = Location(result['_id']) + results_by_url[location.url()] = result + if location.category == 'course': + root = location.url() + + # now traverse the tree and compute down the inherited metadata + metadata_to_inherit = {} + + def _compute_inherited_metadata(url): + """ + Helper method for computing inherited metadata for a specific location url + """ + my_metadata = {} + # check for presence of metadata key. Note that a given module may not yet be fully formed. + # example: update_item -> update_children -> update_metadata sequence on new item create + # if we get called here without update_metadata called first then 'metadata' hasn't been set + # as we're not fully transactional at the DB layer. Same comment applies to below key name + # check + my_metadata = results_by_url[url].get('metadata', {}) + for key in my_metadata.keys(): + if key not in INHERITABLE_METADATA: + del my_metadata[key] + results_by_url[url]['metadata'] = my_metadata + + # go through all the children and recurse, but only if we have + # in the result set. 
Remember results will not contain leaf nodes + for child in results_by_url[url].get('definition', {}).get('children', []): + if child in results_by_url: + new_child_metadata = copy.deepcopy(my_metadata) + new_child_metadata.update(results_by_url[child].get('metadata', {})) + results_by_url[child]['metadata'] = new_child_metadata + metadata_to_inherit[child] = new_child_metadata + _compute_inherited_metadata(child) + else: + # this is likely a leaf node, so let's record what metadata we need to inherit + metadata_to_inherit[child] = my_metadata + + if root is not None: + _compute_inherited_metadata(root) + + return metadata_to_inherit + + def get_cached_metadata_inheritance_tree(self, location, force_refresh=False): + ''' + TODO (cdodge) This method can be deleted when the 'split module store' work has been completed + ''' + key = metadata_cache_key(location) + tree = {} + + if not force_refresh: + # see if we are first in the request cache (if present) + if self.request_cache is not None and key in self.request_cache.data.get('metadata_inheritance', {}): + return self.request_cache.data['metadata_inheritance'][key] + + # then look in any caching subsystem (e.g. memcached) + if self.metadata_inheritance_cache_subsystem is not None: + tree = self.metadata_inheritance_cache_subsystem.get(key, {}) + else: + logging.warning('Running MongoModuleStore without a metadata_inheritance_cache_subsystem. This is OK in localdev and testing environment. Not OK in production.') + + if not tree: + # if not in subsystem, or we are on force refresh, then we have to compute + tree = self.compute_metadata_inheritance_tree(location) + + # now write out computed tree to caching subsystem (e.g. memcached), if available + if self.metadata_inheritance_cache_subsystem is not None: + self.metadata_inheritance_cache_subsystem.set(key, tree) + + # now populate a request_cache, if available. 
NOTE, we are outside of the + # scope of the above if: statement so that after a memcache hit, it'll get + # put into the request_cache + if self.request_cache is not None: + # we can't assume the 'metadatat_inheritance' part of the request cache dict has been + # defined + if 'metadata_inheritance' not in self.request_cache.data: + self.request_cache.data['metadata_inheritance'] = {} + self.request_cache.data['metadata_inheritance'][key] = tree + + return tree + + def refresh_cached_metadata_inheritance_tree(self, location): + """ + Refresh the cached metadata inheritance tree for the org/course combination + for location + """ + pseudo_course_id = '/'.join([location.org, location.course]) + if pseudo_course_id not in self.ignore_write_events_on_courses: + self.get_cached_metadata_inheritance_tree(location, force_refresh=True) def _clean_item_data(self, item): """ @@ -150,6 +372,13 @@ class MongoModuleStore(ModuleStoreBase): item['location'] = item['_id'] del item['_id'] + def _query_children_for_cache_children(self, items): + # first get non-draft in a round-trip + query = { + '_id': {'$in': [namedtuple_to_son(Location(item)) for item in items]} + } + return list(self.collection.find(query)) + def _cache_children(self, items, depth=0): """ Returns a dictionary mapping Location -> item data, populated with json data @@ -168,27 +397,27 @@ class MongoModuleStore(ModuleStoreBase): children.extend(item.get('definition', {}).get('children', [])) data[Location(item['location'])] = item + if depth == 0: + break + # Load all children by id. 
See # http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24or # for or-query syntax + to_process = [] if children: - query = { - '_id': {'$in': [namedtuple_to_son(Location(child)) for child in children]} - } - to_process = self.collection.find(query) - else: - to_process = [] + to_process = self._query_children_for_cache_children(children) + # If depth is None, then we just recurse until we hit all the descendents if depth is not None: depth -= 1 return data - def _load_item(self, item, data_cache): + def _load_item(self, item, data_cache, apply_cached_metadata=True): """ Load an XModuleDescriptor from item, using the children stored in data_cache """ - data_dir = item.get('metadata', {}).get('data_dir', item['location']['course']) + data_dir = getattr(item, 'data_dir', item['location']['course']) root = self.fs_root / data_dir if not root.isdir(): @@ -196,6 +425,12 @@ class MongoModuleStore(ModuleStoreBase): resource_fs = OSFS(root) + cached_metadata = {} + if apply_cached_metadata: + cached_metadata = self.get_cached_metadata_inheritance_tree(Location(item['location'])) + + # TODO (cdodge): When the 'split module store' work has been completed, we should remove + # the 'metadata_inheritance_tree' parameter system = CachingDescriptorSystem( self, data_cache, @@ -203,6 +438,7 @@ class MongoModuleStore(ModuleStoreBase): resource_fs, self.error_tracker, self.render_template, + cached_metadata, ) return system.load_item(item['location']) @@ -213,7 +449,10 @@ class MongoModuleStore(ModuleStoreBase): """ data_cache = self._cache_children(items, depth) - return [self._load_item(item, data_cache) for item in items] + # if we are loading a course object, if we're not prefetching children (depth != 0) then don't + # bother with the metadata inheritance + return [self._load_item(item, data_cache, + apply_cached_metadata=(item['location']['category']!='course' or depth !=0)) for item in items] def get_courses(self): ''' @@ -261,11 +500,11 @@ class 
MongoModuleStore(ModuleStoreBase): descendents of the queried modules for more efficient results later in the request. The depth is counted in the number of calls to get_children() to cache. None indicates to cache all descendents. - """ location = Location.ensure_fully_specified(location) item = self._find_one(location) - return self._load_items([item], depth)[0] + module = self._load_items([item], depth)[0] + return module def get_instance(self, course_id, location, depth=0): """ @@ -285,7 +524,8 @@ class MongoModuleStore(ModuleStoreBase): sort=[('revision', pymongo.ASCENDING)], ) - return self._load_items(list(items), depth) + modules = self._load_items(list(items), depth) + return modules def clone_item(self, source, location): """ @@ -295,7 +535,12 @@ class MongoModuleStore(ModuleStoreBase): try: source_item = self.collection.find_one(location_to_query(source)) source_item['_id'] = Location(location).dict() - self.collection.insert(source_item) + self.collection.insert( + source_item, + # Must include this to avoid the django debug toolbar (which defines the deprecated "safe=False") + # from overriding our default value set in the init method. 
+ safe=self.collection.safe + ) item = self._load_items([source_item])[0] # VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so @@ -304,16 +549,22 @@ class MongoModuleStore(ModuleStoreBase): if location.category == 'static_tab': course = self.get_course_for_item(item.location) existing_tabs = course.tabs or [] - existing_tabs.append({'type': 'static_tab', 'name': item.metadata.get('display_name'), 'url_slug': item.location.name}) + existing_tabs.append({ + 'type': 'static_tab', + 'name': item.display_name, + 'url_slug': item.location.name + }) course.tabs = existing_tabs - self.update_metadata(course.location, course.metadata) + self.update_metadata(course.location, course._model_data._kvs._metadata) return item except pymongo.errors.DuplicateKeyError: raise DuplicateItemError(location) + # recompute (and update) the metadata inheritance tree which is cached + self.refresh_cached_metadata_inheritance_tree(Location(location)) - def get_course_for_item(self, location): + def get_course_for_item(self, location, depth=0): ''' VS[compat] cdodge: for a given Xmodule, return the course that it belongs to @@ -327,15 +578,16 @@ class MongoModuleStore(ModuleStoreBase): # know the 'name' parameter in this context, so we have # to assume there's only one item in this query even though we are not specifying a name course_search_location = ['i4x', location.org, location.course, 'course', None] - courses = self.get_items(course_search_location) + courses = self.get_items(course_search_location, depth=depth) # make sure we found exactly one match on this above course search found_cnt = len(courses) if found_cnt == 0: - raise BaseException('Could not find course at {0}'.format(course_search_location)) + raise Exception('Could not find course at {0}'.format(course_search_location)) if found_cnt > 1: - raise BaseException('Found more than one course at {0}. There should only be one!!! 
Dump = {1}'.format(course_search_location, courses)) + raise Exception('Found more than one course at {0}. There should only be one!!! ' + 'Dump = {1}'.format(course_search_location, courses)) return courses[0] @@ -352,6 +604,9 @@ class MongoModuleStore(ModuleStoreBase): {'$set': update}, multi=False, upsert=True, + # Must include this to avoid the django debug toolbar (which defines the deprecated "safe=False") + # from overriding our default value set in the init method. + safe=self.collection.safe ) if result['n'] == 0: raise ItemNotFoundError(location) @@ -377,6 +632,8 @@ class MongoModuleStore(ModuleStoreBase): """ self._update_single_item(location, {'definition.children': children}) + # recompute (and update) the metadata inheritance tree which is cached + self.refresh_cached_metadata_inheritance_tree(Location(location)) def update_metadata(self, location, metadata): """ @@ -398,10 +655,11 @@ class MongoModuleStore(ModuleStoreBase): tab['name'] = metadata.get('display_name') break course.tabs = existing_tabs - self.update_metadata(course.location, course.metadata) + self.update_metadata(course.location, own_metadata(course)) self._update_single_item(location, {'metadata': metadata}) - + # recompute (and update) the metadata inheritance tree which is cached + self.refresh_cached_metadata_inheritance_tree(loc) def delete_item(self, location): """ @@ -417,10 +675,14 @@ class MongoModuleStore(ModuleStoreBase): course = self.get_course_for_item(item.location) existing_tabs = course.tabs or [] course.tabs = [tab for tab in existing_tabs if tab.get('url_slug') != location.name] - self.update_metadata(course.location, course.metadata) - - self.collection.remove({'_id': Location(location).dict()}) + self.update_metadata(course.location, own_metadata(course)) + self.collection.remove({'_id': Location(location).dict()}, + # Must include this to avoid the django debug toolbar (which defines the deprecated "safe=False") + # from overriding our default value set in the 
init method. + safe=self.collection.safe) + # recompute (and update) the metadata inheritance tree which is cached + self.refresh_cached_metadata_inheritance_tree(Location(location)) def get_parent_locations(self, location, course_id): '''Find all locations that are the parents of this location in this @@ -441,4 +703,10 @@ class MongoModuleStore(ModuleStoreBase): # DraftModuleStore is first, because it needs to intercept calls to MongoModuleStore class DraftMongoModuleStore(DraftModuleStore, MongoModuleStore): + """ + Version of MongoModuleStore with draft capability mixed in + """ + """ + Version of MongoModuleStore with draft capability mixed in + """ pass diff --git a/common/lib/xmodule/xmodule/modulestore/store_utilities.py b/common/lib/xmodule/xmodule/modulestore/store_utilities.py index 192b012bef..2935069090 100644 --- a/common/lib/xmodule/xmodule/modulestore/store_utilities.py +++ b/common/lib/xmodule/xmodule/modulestore/store_utilities.py @@ -5,128 +5,135 @@ from xmodule.modulestore.mongo import MongoModuleStore def clone_course(modulestore, contentstore, source_location, dest_location, delete_original=False): - # first check to see if the modulestore is Mongo backed - if not isinstance(modulestore, MongoModuleStore): - raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") - - # check to see if the dest_location exists as an empty course - # we need an empty course because the app layers manage the permissions and users - if not modulestore.has_item(dest_location): - raise Exception("An empty course at {0} must have already been created. Aborting...".format(dest_location)) - - # verify that the dest_location really is an empty course, which means only one - dest_modules = modulestore.get_items([dest_location.tag, dest_location.org, dest_location.course, None, None, None]) - - if len(dest_modules) != 1: - raise Exception("Course at destination {0} is not an empty course. You can only clone into an empty course. 
Aborting...".format(dest_location)) - - # check to see if the source course is actually there - if not modulestore.has_item(source_location): - raise Exception("Cannot find a course at {0}. Aborting".format(source_location)) - - # Get all modules under this namespace which is (tag, org, course) tuple - - modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) - - for module in modules: - original_loc = Location(module.location) - - if original_loc.category != 'course': - module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, - course=dest_location.course) - else: - # on the course module we also have to update the module name - module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, - course=dest_location.course, name=dest_location.name) - - print "Cloning module {0} to {1}....".format(original_loc, module.location) - - if 'data' in module.definition: - modulestore.update_item(module.location, module.definition['data']) - - # repoint children - if 'children' in module.definition: - new_children = [] - for child_loc_url in module.definition['children']: - child_loc = Location(child_loc_url) - child_loc = child_loc._replace(tag=dest_location.tag, org=dest_location.org, - course=dest_location.course) - new_children = new_children + [child_loc.url()] - - modulestore.update_children(module.location, new_children) - - # save metadata - modulestore.update_metadata(module.location, module.metadata) - - # now iterate through all of the assets and clone them - # first the thumbnails - thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) - for thumb in thumbs: - thumb_loc = Location(thumb["_id"]) - content = contentstore.find(thumb_loc) - content.location = content.location._replace(org=dest_location.org, - course=dest_location.course) - - print "Cloning thumbnail {0} to {1}".format(thumb_loc, content.location) - - 
contentstore.save(content) - - # now iterate through all of the assets, also updating the thumbnail pointer - - assets = contentstore.get_all_content_for_course(source_location) - for asset in assets: - asset_loc = Location(asset["_id"]) - content = contentstore.find(asset_loc) - content.location = content.location._replace(org=dest_location.org, - course=dest_location.course) - - # be sure to update the pointer to the thumbnail - if content.thumbnail_location is not None: - content.thumbnail_location = content.thumbnail_location._replace(org=dest_location.org, - course=dest_location.course) - - print "Cloning asset {0} to {1}".format(asset_loc, content.location) - - contentstore.save(content) - - return True - - -def delete_course(modulestore, contentstore, source_location): # first check to see if the modulestore is Mongo backed - if not isinstance(modulestore, MongoModuleStore): - raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") + if not isinstance(modulestore, MongoModuleStore): + raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") - # check to see if the source course is actually there - if not modulestore.has_item(source_location): - raise Exception("Cannot find a course at {0}. Aborting".format(source_location)) + # check to see if the dest_location exists as an empty course + # we need an empty course because the app layers manage the permissions and users + if not modulestore.has_item(dest_location): + raise Exception("An empty course at {0} must have already been created. 
Aborting...".format(dest_location)) - # first delete all of the thumbnails - thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) - for thumb in thumbs: - thumb_loc = Location(thumb["_id"]) - id = StaticContent.get_id_from_location(thumb_loc) - print "Deleting {0}...".format(id) - contentstore.delete(id) + # verify that the dest_location really is an empty course, which means only one + dest_modules = modulestore.get_items([dest_location.tag, dest_location.org, dest_location.course, None, None, None]) - # then delete all of the assets - assets = contentstore.get_all_content_for_course(source_location) - for asset in assets: - asset_loc = Location(asset["_id"]) - id = StaticContent.get_id_from_location(asset_loc) - print "Deleting {0}...".format(id) - contentstore.delete(id) + if len(dest_modules) != 1: + raise Exception("Course at destination {0} is not an empty course. You can only clone into an empty course. Aborting...".format(dest_location)) - # then delete all course modules - modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) + # check to see if the source course is actually there + if not modulestore.has_item(source_location): + raise Exception("Cannot find a course at {0}. 
Aborting".format(source_location)) - for module in modules: - if module.category != 'course': # save deleting the course module for last - print "Deleting {0}...".format(module.location) - modulestore.delete_item(module.location) + # Get all modules under this namespace which is (tag, org, course) tuple - # finally delete the top-level course module itself - print "Deleting {0}...".format(source_location) - modulestore.delete_item(source_location) + modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) + + for module in modules: + original_loc = Location(module.location) + + if original_loc.category != 'course': + module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, + course=dest_location.course) + else: + # on the course module we also have to update the module name + module.location = module.location._replace(tag=dest_location.tag, org=dest_location.org, + course=dest_location.course, name=dest_location.name) + + print "Cloning module {0} to {1}....".format(original_loc, module.location) + + modulestore.update_item(module.location, module._model_data._kvs._data) + + # repoint children + if module.has_children: + new_children = [] + for child_loc_url in module.children: + child_loc = Location(child_loc_url) + child_loc = child_loc._replace( + tag=dest_location.tag, + org=dest_location.org, + course=dest_location.course + ) + new_children.append(child_loc.url()) + + modulestore.update_children(module.location, new_children) + + # save metadata + modulestore.update_metadata(module.location, module._model_data._kvs._metadata) + + # now iterate through all of the assets and clone them + # first the thumbnails + thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) + for thumb in thumbs: + thumb_loc = Location(thumb["_id"]) + content = contentstore.find(thumb_loc) + content.location = content.location._replace(org=dest_location.org, + 
course=dest_location.course) + + print "Cloning thumbnail {0} to {1}".format(thumb_loc, content.location) + + contentstore.save(content) + + # now iterate through all of the assets, also updating the thumbnail pointer + + assets = contentstore.get_all_content_for_course(source_location) + for asset in assets: + asset_loc = Location(asset["_id"]) + content = contentstore.find(asset_loc) + content.location = content.location._replace(org=dest_location.org, + course=dest_location.course) + + # be sure to update the pointer to the thumbnail + if content.thumbnail_location is not None: + content.thumbnail_location = content.thumbnail_location._replace(org=dest_location.org, + course=dest_location.course) + + print "Cloning asset {0} to {1}".format(asset_loc, content.location) + + contentstore.save(content) + + return True + + +def delete_course(modulestore, contentstore, source_location, commit = False): + # first check to see if the modulestore is Mongo backed + if not isinstance(modulestore, MongoModuleStore): + raise Exception("Expected a MongoModuleStore in the runtime. Aborting....") + + # check to see if the source course is actually there + if not modulestore.has_item(source_location): + raise Exception("Cannot find a course at {0}. 
Aborting".format(source_location)) + + # first delete all of the thumbnails + thumbs = contentstore.get_all_content_thumbnails_for_course(source_location) + for thumb in thumbs: + thumb_loc = Location(thumb["_id"]) + id = StaticContent.get_id_from_location(thumb_loc) + print "Deleting {0}...".format(id) + if commit: + contentstore.delete(id) + + # then delete all of the assets + assets = contentstore.get_all_content_for_course(source_location) + for asset in assets: + asset_loc = Location(asset["_id"]) + id = StaticContent.get_id_from_location(asset_loc) + print "Deleting {0}...".format(id) + if commit: + contentstore.delete(id) + + # then delete all course modules + modules = modulestore.get_items([source_location.tag, source_location.org, source_location.course, None, None, None]) + + for module in modules: + if module.category != 'course': # save deleting the course module for last + print "Deleting {0}...".format(module.location) + if commit: + modulestore.delete_item(module.location) + + # finally delete the top-level course module itself + print "Deleting {0}...".format(source_location) + if commit: + modulestore.delete_item(source_location) + + return True - return True diff --git a/common/lib/xmodule/xmodule/modulestore/tests/factories.py b/common/lib/xmodule/xmodule/modulestore/tests/factories.py index 1259da2690..1a82e1b708 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/factories.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/factories.py @@ -4,6 +4,7 @@ from uuid import uuid4 from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore from xmodule.timeparse import stringify_time +from xmodule.modulestore.inheritance import own_metadata def XMODULE_COURSE_CREATION(class_to_create, **kwargs): @@ -24,8 +25,7 @@ class XModuleCourseFactory(Factory): @classmethod def _create(cls, target_class, *args, **kwargs): - # This logic was taken from the create_new_course method in - # 
cms/djangoapps/contentstore/views.py + template = Location('i4x', 'edx', 'templates', 'course', 'Empty') org = kwargs.get('org') number = kwargs.get('number') @@ -40,19 +40,17 @@ class XModuleCourseFactory(Factory): # This metadata code was copied from cms/djangoapps/contentstore/views.py if display_name is not None: - new_course.metadata['display_name'] = display_name - - new_course.metadata['data_dir'] = uuid4().hex - new_course.metadata['start'] = stringify_time(gmtime()) + new_course.display_name = display_name + new_course.lms.start = gmtime() new_course.tabs = [{"type": "courseware"}, - {"type": "course_info", "name": "Course Info"}, - {"type": "discussion", "name": "Discussion"}, - {"type": "wiki", "name": "Wiki"}, - {"type": "progress", "name": "Progress"}] + {"type": "course_info", "name": "Course Info"}, + {"type": "discussion", "name": "Discussion"}, + {"type": "wiki", "name": "Wiki"}, + {"type": "progress", "name": "Progress"}] # Update the data in the mongo datastore - store.update_metadata(new_course.location.url(), new_course.own_metadata) + store.update_metadata(new_course.location.url(), own_metadata(new_course)) return new_course @@ -81,35 +79,59 @@ class XModuleItemFactory(Factory): @classmethod def _create(cls, target_class, *args, **kwargs): """ - kwargs must include parent_location, template. Can contain display_name - target_class is ignored + Uses *kwargs*: + + *parent_location* (required): the location of the parent module + (e.g. the parent course or section) + + *template* (required): the template to create the item from + (e.g. i4x://templates/section/Empty) + + *data* (optional): the data for the item + (e.g. 
XML problem definition for a problem item) + + *display_name* (optional): the display name of the item + + *metadata* (optional): dictionary of metadata attributes + + *target_class* is ignored """ DETACHED_CATEGORIES = ['about', 'static_tab', 'course_info'] parent_location = Location(kwargs.get('parent_location')) template = Location(kwargs.get('template')) + data = kwargs.get('data') display_name = kwargs.get('display_name') + metadata = kwargs.get('metadata', {}) store = modulestore('direct') # This code was based off that in cms/djangoapps/contentstore/views.py parent = store.get_item(parent_location) - dest_location = parent_location._replace(category=template.category, name=uuid4().hex) + + # If a display name is set, use that + dest_name = display_name.replace(" ", "_") if display_name is not None else uuid4().hex + dest_location = parent_location._replace(category=template.category, + name=dest_name) new_item = store.clone_item(template, dest_location) - # TODO: This needs to be deleted when we have proper storage for static content - new_item.metadata['data_dir'] = parent.metadata['data_dir'] - # replace the display name with an optional parameter passed in from the caller if display_name is not None: - new_item.metadata['display_name'] = display_name + new_item.display_name = display_name - store.update_metadata(new_item.location.url(), new_item.own_metadata) + # Add additional metadata or override current metadata + item_metadata = own_metadata(new_item) + item_metadata.update(metadata) + store.update_metadata(new_item.location.url(), item_metadata) + + # replace the data with the optional *data* parameter + if data is not None: + store.update_item(new_item.location, data) if new_item.location.category not in DETACHED_CATEGORIES: - store.update_children(parent_location, parent.definition.get('children', []) + [new_item.location.url()]) + store.update_children(parent_location, parent.children + [new_item.location.url()]) return new_item diff --git 
a/common/lib/xmodule/xmodule/modulestore/tests/test_location.py b/common/lib/xmodule/xmodule/modulestore/tests/test_location.py index 0772951884..f0f0e8bf48 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_location.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_location.py @@ -119,11 +119,11 @@ def test_equality(): # All the cleaning functions should do the same thing with these general_pairs = [('', ''), - (' ', '_'), - ('abc,', 'abc_'), - ('ab fg!@//\\aj', 'ab_fg_aj'), - (u"ab\xA9", "ab_"), # no unicode allowed for now - ] + (' ', '_'), + ('abc,', 'abc_'), + ('ab fg!@//\\aj', 'ab_fg_aj'), + (u"ab\xA9", "ab_"), # no unicode allowed for now + ] def test_clean(): @@ -131,7 +131,7 @@ def test_clean(): ('a:b', 'a_b'), # no colons in non-name components ('a-b', 'a-b'), # dashes ok ('a.b', 'a.b'), # dot ok - ] + ] for input, output in pairs: assert_equals(Location.clean(input), output) @@ -141,17 +141,17 @@ def test_clean_for_url_name(): ('a:b', 'a:b'), # colons ok in names ('a-b', 'a-b'), # dashes ok in names ('a.b', 'a.b'), # dot ok in names - ] + ] for input, output in pairs: assert_equals(Location.clean_for_url_name(input), output) def test_clean_for_html(): pairs = general_pairs + [ - ("a:b", "a_b"), # no colons for html use - ("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.) - ('a.b', 'a_b'), # no dots. - ] + ("a:b", "a_b"), # no colons for html use + ("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.) + ('a.b', 'a_b'), # no dots. 
+ ] for input, output in pairs: assert_equals(Location.clean_for_html(input), output) diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py index 94ea622907..469eedac05 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py @@ -12,7 +12,7 @@ def check_path_to_location(modulestore): ("edX/toy/2012_Fall", "Overview", "Welcome", None)), ("i4x://edX/toy/chapter/Overview", ("edX/toy/2012_Fall", "Overview", None, None)), - ) + ) course_id = "edX/toy/2012_Fall" for location, expected in should_work: @@ -20,6 +20,6 @@ def check_path_to_location(modulestore): not_found = ( "i4x://edX/toy/video/WelcomeX", "i4x://edX/toy/course/NotHome" - ) + ) for location in not_found: assert_raises(ItemNotFoundError, path_to_location, modulestore, course_id, location) diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py index 6f6f47ba85..061d70d09f 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_mongo.py @@ -1,6 +1,7 @@ import pymongo -from nose.tools import assert_equals, assert_raises, assert_not_equals, with_setup +from mock import Mock +from nose.tools import assert_equals, assert_raises, assert_not_equals, with_setup, assert_false from pprint import pprint from xmodule.modulestore import Location diff --git a/common/lib/xmodule/xmodule/modulestore/xml.py b/common/lib/xmodule/xmodule/modulestore/xml.py index 1bd27189e9..677f8b7d6a 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml.py +++ b/common/lib/xmodule/xmodule/modulestore/xml.py @@ -23,13 +23,14 @@ from xmodule.html_module import HtmlDescriptor from . 
import ModuleStoreBase, Location from .exceptions import ItemNotFoundError +from .inheritance import compute_inherited_metadata edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False, remove_comments=True, remove_blank_text=True) etree.set_default_parser(edx_xml_parser) -log = logging.getLogger('mitx.' + __name__) +log = logging.getLogger(__name__) # VS[compat] @@ -73,7 +74,8 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): # VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check) # tags that really need unique names--they store (or should store) state. - need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter', 'videosequence', 'timelimit') + need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter', + 'videosequence', 'poll_question', 'timelimit') attr = xml_data.attrib tag = xml_data.tag @@ -161,7 +163,6 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): etree.tostring(xml_data, encoding='unicode'), self, self.org, self.course, xmlstore.default_class) except Exception as err: - print err, self.load_error_modules if not self.load_error_modules: raise @@ -174,7 +175,7 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): # Normally, we don't want lots of exception traces in our logs from common # content problems. But if you're debugging the xml loading code itself, # uncomment the next line. 
- # log.exception(msg) + log.exception(msg) self.error_tracker(msg) err_msg = msg + "\n" + exc_info_to_str(sys.exc_info()) @@ -186,12 +187,13 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): err_msg ) - descriptor.metadata['data_dir'] = course_dir + setattr(descriptor, 'data_dir', course_dir) xmlstore.modules[course_id][descriptor.location] = descriptor - for child in descriptor.get_children(): - parent_tracker.add_parent(child.location, descriptor.location) + if hasattr(descriptor, 'children'): + for child in descriptor.get_children(): + parent_tracker.add_parent(child.location, descriptor.location) return descriptor render_template = lambda: '' @@ -318,8 +320,6 @@ class XMLModuleStore(ModuleStoreBase): # Didn't load course. Instead, save the errors elsewhere. self.errored_courses[course_dir] = errorlog - - def __unicode__(self): ''' String representation - for debugging @@ -345,8 +345,6 @@ class XMLModuleStore(ModuleStoreBase): log.warning(msg + " " + str(err)) return {} - - def load_course(self, course_dir, tracker): """ Load a course into this module store @@ -430,7 +428,7 @@ class XMLModuleStore(ModuleStoreBase): # breaks metadata inheritance via get_children(). Instead # (actually, in addition to, for now), we do a final inheritance pass # after we have the course descriptor. 
- XModuleDescriptor.compute_inherited_metadata(course_descriptor) + compute_inherited_metadata(course_descriptor) # now import all pieces of course_info which is expected to be stored # in /info or /info/ @@ -449,7 +447,6 @@ class XMLModuleStore(ModuleStoreBase): def load_extra_content(self, system, course_descriptor, category, base_dir, course_dir, url_name): - self._load_extra_content(system, course_descriptor, category, base_dir, course_dir) # then look in a override folder based on the course run @@ -460,26 +457,29 @@ class XMLModuleStore(ModuleStoreBase): def _load_extra_content(self, system, course_descriptor, category, path, course_dir): for filepath in glob.glob(path / '*'): - if not os.path.isdir(filepath): - with open(filepath) as f: - try: - html = f.read().decode('utf-8') - # tabs are referenced in policy.json through a 'slug' which is just the filename without the .html suffix - slug = os.path.splitext(os.path.basename(filepath))[0] - loc = Location('i4x', course_descriptor.location.org, course_descriptor.location.course, category, slug) - module = HtmlDescriptor(system, definition={'data': html}, **{'location': loc}) - # VS[compat]: - # Hack because we need to pull in the 'display_name' for static tabs (because we need to edit them) - # from the course policy - if category == "static_tab": - for tab in course_descriptor.tabs or []: - if tab.get('url_slug') == slug: - module.metadata['display_name'] = tab['name'] - module.metadata['data_dir'] = course_dir - self.modules[course_descriptor.id][module.location] = module - except Exception, e: - logging.exception("Failed to load {0}. Skipping... 
Exception: {1}".format(filepath, str(e))) - system.error_tracker("ERROR: " + str(e)) + if not os.path.isfile(filepath): + continue + + with open(filepath) as f: + try: + html = f.read().decode('utf-8') + # tabs are referenced in policy.json through a 'slug' which is just the filename without the .html suffix + slug = os.path.splitext(os.path.basename(filepath))[0] + loc = Location('i4x', course_descriptor.location.org, course_descriptor.location.course, category, slug) + module = HtmlDescriptor(system, loc, {'data': html}) + # VS[compat]: + # Hack because we need to pull in the 'display_name' for static tabs (because we need to edit them) + # from the course policy + if category == "static_tab": + for tab in course_descriptor.tabs or []: + if tab.get('url_slug') == slug: + module.display_name = tab['name'] + module.data_dir = course_dir + self.modules[course_descriptor.id][module.location] = module + except Exception, e: + logging.exception("Failed to load {0}. Skipping... Exception: {1}".format(filepath, str(e))) + system.error_tracker("ERROR: " + str(e)) + def get_instance(self, course_id, location, depth=0): """ diff --git a/common/lib/xmodule/xmodule/modulestore/xml_exporter.py b/common/lib/xmodule/xmodule/modulestore/xml_exporter.py index e8d3fb0f82..a5a8ee3855 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml_exporter.py +++ b/common/lib/xmodule/xmodule/modulestore/xml_exporter.py @@ -1,45 +1,44 @@ import logging from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore +from xmodule.modulestore.inheritance import own_metadata from fs.osfs import OSFS from json import dumps def export_to_xml(modulestore, contentstore, course_location, root_dir, course_dir, draft_modulestore = None): - course = modulestore.get_item(course_location) + course = modulestore.get_item(course_location) - fs = OSFS(root_dir) - export_fs = fs.makeopendir(course_dir) + fs = OSFS(root_dir) + export_fs = fs.makeopendir(course_dir) - xml = 
course.export_to_xml(export_fs) - with export_fs.open('course.xml', 'w') as course_xml: - course_xml.write(xml) + xml = course.export_to_xml(export_fs) + with export_fs.open('course.xml', 'w') as course_xml: + course_xml.write(xml) - # export the static assets - contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/') + # export the static assets + contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/') - # export the static tabs - export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html') + # export the static tabs + export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html') - # export the custom tags - export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags') + # export the custom tags + export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags') - # export the course updates - export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html') + # export the course updates + export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html') - # export the grading policy - policies_dir = export_fs.makeopendir('policies') - course_run_policy_dir = policies_dir.makeopendir(course.location.name) - if 'grading_policy' in course.definition['data']: + # export the grading policy + policies_dir = export_fs.makeopendir('policies') + course_run_policy_dir = policies_dir.makeopendir(course.location.name) with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy: - grading_policy.write(dumps(course.definition['data']['grading_policy'])) + grading_policy.write(dumps(course.grading_policy)) - # export all of the course metadata in policy.json - with course_run_policy_dir.open('policy.json', 'w') as course_policy: - policy = {} - policy = {'course/' + course.location.name: 
course.metadata} - course_policy.write(dumps(policy)) + # export all of the course metadata in policy.json + with course_run_policy_dir.open('policy.json', 'w') as course_policy: + policy = {'course/' + course.location.name: own_metadata(course)} + course_policy.write(dumps(policy)) # export everything from the draft store, unfortunately this will create lots of duplicates if draft_modulestore is not None: @@ -61,11 +60,11 @@ def export_to_xml(modulestore, contentstore, course_location, root_dir, course_d def export_extra_content(export_fs, modulestore, course_location, category_type, dirname, file_suffix=''): - query_loc = Location('i4x', course_location.org, course_location.course, category_type, None) - items = modulestore.get_items(query_loc) + query_loc = Location('i4x', course_location.org, course_location.course, category_type, None) + items = modulestore.get_items(query_loc) - if len(items) > 0: - item_dir = export_fs.makeopendir(dirname) - for item in items: - with item_dir.open(item.location.name + file_suffix, 'w') as item_file: - item_file.write(item.definition['data'].encode('utf8')) + if len(items) > 0: + item_dir = export_fs.makeopendir(dirname) + for item in items: + with item_dir.open(item.location.name + file_suffix, 'w') as item_file: + item_file.write(item.data.encode('utf8')) diff --git a/common/lib/xmodule/xmodule/modulestore/xml_importer.py b/common/lib/xmodule/xmodule/modulestore/xml_importer.py index 9fcd75d6f4..1d3de93b38 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml_importer.py +++ b/common/lib/xmodule/xmodule/modulestore/xml_importer.py @@ -4,10 +4,13 @@ import mimetypes from lxml.html import rewrite_links as lxml_rewrite_links from path import path +from xblock.core import Scope + from .xml import XMLModuleStore from .exceptions import DuplicateItemError from xmodule.modulestore import Location from xmodule.contentstore.content import StaticContent, XASSET_SRCREF_PREFIX +from .inheritance import own_metadata log = 
logging.getLogger(__name__) @@ -20,6 +23,8 @@ def import_static_content(modules, course_loc, course_data_path, static_content_ # now import all static assets static_dir = course_data_path / subpath + verbose = True + for dirname, dirnames, filenames in os.walk(static_dir): for filename in filenames: @@ -95,6 +100,79 @@ def verify_content_links(module, base_dir, static_content_store, link, remap_dic return link +def import_module_from_xml(modulestore, static_content_store, course_data_path, module, target_location_namespace=None, verbose=False): + # remap module to the new namespace + if target_location_namespace is not None: + # This looks a bit wonky as we need to also change the 'name' of the imported course to be what + # the caller passed in + if module.location.category != 'course': + module.location = module.location._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course) + else: + module.location = module.location._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course, name=target_location_namespace.name) + + # then remap children pointers since they too will be re-namespaced + if module.has_children: + children_locs = module.children + new_locs = [] + for child in children_locs: + child_loc = Location(child) + new_child_loc = child_loc._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course) + + new_locs.append(new_child_loc.url()) + + module.children = new_locs + + if hasattr(module, 'data'): + # cdodge: now go through any link references to '/static/' and make sure we've imported + # it as a StaticContent asset + try: + remap_dict = {} + + # use the rewrite_links as a utility means to enumerate through all links + # in the module data. 
We use that to load that reference into our asset store + # IMPORTANT: There appears to be a bug in lxml.rewrite_link which makes us not be able to + # do the rewrites natively in that code. + # For example, what I'm seeing is -> + # Note the dropped element closing tag. This causes the LMS to fail when rendering modules - that's + # no good, so we have to do this kludge + if isinstance(module.data, str) or isinstance(module.data, unicode): # some module 'data' fields are non strings which blows up the link traversal code + lxml_rewrite_links(module.data, lambda link: verify_content_links(module, course_data_path, + static_content_store, link, remap_dict)) + + for key in remap_dict.keys(): + module.data = module.data.replace(key, remap_dict[key]) + + except Exception: + logging.exception("failed to rewrite links on {0}. Continuing...".format(module.location)) + + modulestore.update_item(module.location, module.data) + + if module.has_children: + modulestore.update_children(module.location, module.children) + + modulestore.update_metadata(module.location, own_metadata(module)) + + +def import_course_from_xml(modulestore, static_content_store, course_data_path, module, target_location_namespace=None, verbose=False): + # cdodge: more hacks (what else). Seems like we have a problem when importing a course (like 6.002) which + # does not have any tabs defined in the policy file. 
The import goes fine and then displays fine in LMS, + # but if someone tries to add a new tab in the CMS, then the LMS barfs because it expects that - + # if there is *any* tabs - then there at least needs to be some predefined ones + if module.tabs is None or len(module.tabs) == 0: + module.tabs = [{"type": "courseware"}, + {"type": "course_info", "name": "Course Info"}, + {"type": "discussion", "name": "Discussion"}, + {"type": "wiki", "name": "Wiki"}] # note, add 'progress' when we can support it on Edge + + # a bit of a hack, but typically the "course image" which is shown on marketing pages is hard coded to /images/course_image.jpg + # so let's make sure we import in case there are no other references to it in the modules + verify_content_links(module, course_data_path, static_content_store, '/static/images/course_image.jpg') + import_module_from_xml(modulestore, static_content_store, course_data_path, module, target_location_namespace, verbose=verbose) + + def import_from_xml(store, data_dir, course_dirs=None, default_class='xmodule.raw_module.RawDescriptor', load_error_modules=True, static_content_store=None, target_location_namespace=None, verbose=False): @@ -125,100 +203,127 @@ def import_from_xml(store, data_dir, course_dirs=None, course_items = [] for course_id in module_store.modules.keys(): - course_data_path = None - course_location = None + if target_location_namespace is not None: + pseudo_course_id = '/'.join([target_location_namespace.org, target_location_namespace.course]) + else: + course_id_components = course_id.split('/') + pseudo_course_id = '/'.join([course_id_components[0], course_id_components[1]]) - if verbose: - log.debug("Scanning {0} for course module...".format(course_id)) + try: + # turn off all write signalling while importing as this is a high volume operation + if pseudo_course_id not in store.ignore_write_events_on_courses: + store.ignore_write_events_on_courses.append(pseudo_course_id) - # Quick scan to get course module as we 
need some info from there. Also we need to make sure that the - # course module is committed first into the store - for module in module_store.modules[course_id].itervalues(): - if module.category == 'course': - course_data_path = path(data_dir) / module.metadata['data_dir'] - course_location = module.location - - module = remap_namespace(module, target_location_namespace) - - # cdodge: more hacks (what else). Seems like we have a problem when importing a course (like 6.002) which - # does not have any tabs defined in the policy file. The import goes fine and then displays fine in LMS, - # but if someone tries to add a new tab in the CMS, then the LMS barfs because it expects that - - # if there is *any* tabs - then there at least needs to be some predefined ones - if module.tabs is None or len(module.tabs) == 0: - module.tabs = [{"type": "courseware"}, - {"type": "course_info", "name": "Course Info"}, - {"type": "discussion", "name": "Discussion"}, - {"type": "wiki", "name": "Wiki"}] # note, add 'progress' when we can support it on Edge - - - store.update_item(module.location, module.definition['data']) - if 'children' in module.definition: - store.update_children(module.location, module.definition['children']) - store.update_metadata(module.location, dict(module.own_metadata)) - - # a bit of a hack, but typically the "course image" which is shown on marketing pages is hard coded to /images/course_image.jpg - # so let's make sure we import in case there are no other references to it in the modules - verify_content_links(module, course_data_path, static_content_store, '/static/images/course_image.jpg') - - course_items.append(module) - - - # then import all the static content - if static_content_store is not None: - _namespace_rename = target_location_namespace if target_location_namespace is not None else course_location - - # first pass to find everything in /static/ - import_static_content(module_store.modules[course_id], course_location, course_data_path, 
static_content_store, - _namespace_rename, subpath='static', verbose=verbose) - - # finally loop through all the modules - for module in module_store.modules[course_id].itervalues(): - - if module.category == 'course': - # we've already saved the course module up at the top of the loop - # so just skip over it in the inner loop - continue - - # remap module to the new namespace - if target_location_namespace is not None: - module = remap_namespace(module, target_location_namespace) + course_data_path = None + course_location = None if verbose: - log.debug('importing module location {0}'.format(module.location)) + log.debug("Scanning {0} for course module...".format(course_id)) - if 'data' in module.definition: - module_data = module.definition['data'] + # Quick scan to get course module as we need some info from there. Also we need to make sure that the + # course module is committed first into the store + for module in module_store.modules[course_id].itervalues(): + if module.category == 'course': + course_data_path = path(data_dir) / module.data_dir + course_location = module.location - # cdodge: now go through any link references to '/static/' and make sure we've imported - # it as a StaticContent asset - try: - remap_dict = {} + module = remap_namespace(module, target_location_namespace) - # use the rewrite_links as a utility means to enumerate through all links - # in the module data. We use that to load that reference into our asset store - # IMPORTANT: There appears to be a bug in lxml.rewrite_link which makes us not be able to - # do the rewrites natively in that code. - # For example, what I'm seeing is -> - # Note the dropped element closing tag. 
This causes the LMS to fail when rendering modules - that's - # no good, so we have to do this kludge - if isinstance(module_data, str) or isinstance(module_data, unicode): # some module 'data' fields are non strings which blows up the link traversal code - lxml_rewrite_links(module_data, lambda link: verify_content_links(module, course_data_path, - static_content_store, link, remap_dict)) + # cdodge: more hacks (what else). Seems like we have a problem when importing a course (like 6.002) which + # does not have any tabs defined in the policy file. The import goes fine and then displays fine in LMS, + # but if someone tries to add a new tab in the CMS, then the LMS barfs because it expects that - + # if there is *any* tabs - then there at least needs to be some predefined ones + if module.tabs is None or len(module.tabs) == 0: + module.tabs = [{"type": "courseware"}, + {"type": "course_info", "name": "Course Info"}, + {"type": "discussion", "name": "Discussion"}, + {"type": "wiki", "name": "Wiki"}] # note, add 'progress' when we can support it on Edge - for key in remap_dict.keys(): - module_data = module_data.replace(key, remap_dict[key]) - except Exception, e: - logging.exception("failed to rewrite links on {0}. 
Continuing...".format(module.location)) + if hasattr(module, 'data'): + store.update_item(module.location, module.data) + store.update_children(module.location, module.children) + store.update_metadata(module.location, dict(own_metadata(module))) - store.update_item(module.location, module_data) + # a bit of a hack, but typically the "course image" which is shown on marketing pages is hard coded to /images/course_image.jpg + # so let's make sure we import in case there are no other references to it in the modules + verify_content_links(module, course_data_path, static_content_store, '/static/images/course_image.jpg') - if 'children' in module.definition: - store.update_children(module.location, module.definition['children']) + course_items.append(module) - # NOTE: It's important to use own_metadata here to avoid writing - # inherited metadata everywhere. - store.update_metadata(module.location, dict(module.own_metadata)) + + # then import all the static content + if static_content_store is not None: + _namespace_rename = target_location_namespace if target_location_namespace is not None else course_location + + # first pass to find everything in /static/ + import_static_content(module_store.modules[course_id], course_location, course_data_path, static_content_store, + _namespace_rename, subpath='static', verbose=verbose) + + # finally loop through all the modules + for module in module_store.modules[course_id].itervalues(): + + if module.category == 'course': + # we've already saved the course module up at the top of the loop + # so just skip over it in the inner loop + continue + + # remap module to the new namespace + if target_location_namespace is not None: + module = remap_namespace(module, target_location_namespace) + + if verbose: + log.debug('importing module location {0}'.format(module.location)) + + content = {} + for field in module.fields: + if field.scope != Scope.content: + continue + try: + content[field.name] = module._model_data[field.name] + 
except KeyError: + # Ignore any missing keys in _model_data + pass + + if 'data' in content: + module_data = content['data'] + + # cdodge: now go through any link references to '/static/' and make sure we've imported + # it as a StaticContent asset + try: + remap_dict = {} + + # use the rewrite_links as a utility means to enumerate through all links + # in the module data. We use that to load that reference into our asset store + # IMPORTANT: There appears to be a bug in lxml.rewrite_link which makes us not be able to + # do the rewrites natively in that code. + # For example, what I'm seeing is -> + # Note the dropped element closing tag. This causes the LMS to fail when rendering modules - that's + # no good, so we have to do this kludge + if isinstance(module_data, str) or isinstance(module_data, unicode): # some module 'data' fields are non strings which blows up the link traversal code + lxml_rewrite_links(module_data, lambda link: verify_content_links(module, course_data_path, + static_content_store, link, remap_dict)) + + for key in remap_dict.keys(): + module_data = module_data.replace(key, remap_dict[key]) + + except Exception, e: + logging.exception("failed to rewrite links on {0}. Continuing...".format(module.location)) + + store.update_item(module.location, content) + + if hasattr(module, 'children') and module.children != []: + store.update_children(module.location, module.children) + + # NOTE: It's important to use own_metadata here to avoid writing + # inherited metadata everywhere. 
+ store.update_metadata(module.location, dict(own_metadata(module))) + finally: + # turn back on all write signalling + if pseudo_course_id in store.ignore_write_events_on_courses: + store.ignore_write_events_on_courses.remove(pseudo_course_id) + store.refresh_cached_metadata_inheritance_tree(target_location_namespace if + target_location_namespace is not None else course_location) # now import any 'draft' items import_course_draft(store, course_data_path, target_location_namespace) @@ -229,6 +334,7 @@ def import_from_xml(store, data_dir, course_dirs=None, def import_course_draft(store, course_data_path, target_location_namespace): pass + def remap_namespace(module, target_location_namespace): if target_location_namespace is None: return module @@ -243,20 +349,40 @@ def remap_namespace(module, target_location_namespace): course=target_location_namespace.course, name=target_location_namespace.name) # then remap children pointers since they too will be re-namespaced - children_locs = module.definition.get('children') - if children_locs is not None: - new_locs = [] - for child in children_locs: - child_loc = Location(child) - new_child_loc = child_loc._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, - course=target_location_namespace.course) + if hasattr(module,'children'): + children_locs = module.children + if children_locs is not None and children_locs != []: + new_locs = [] + for child in children_locs: + child_loc = Location(child) + new_child_loc = child_loc._replace(tag=target_location_namespace.tag, org=target_location_namespace.org, + course=target_location_namespace.course) - new_locs.append(new_child_loc.url()) + new_locs.append(new_child_loc.url()) - module.definition['children'] = new_locs + module.children = new_locs return module +def validate_no_non_editable_metadata(module_store, course_id, category, allowed=[]): + ''' + Assert that there is no metadata within a particular category that we can't support editing + However 
we always allow display_name and 'xml_attributes' + ''' + allowed = allowed + ['xml_attributes', 'display_name'] + + err_cnt = 0 + for module_loc in module_store.modules[course_id]: + module = module_store.modules[course_id][module_loc] + if module.location.category == category: + my_metadata = dict(own_metadata(module)) + for key in my_metadata.keys(): + if key not in allowed: + err_cnt = err_cnt + 1 + print 'ERROR: found metadata on {0}. Studio will not support editing this piece of metadata, so it is not allowed. Metadata: {1} = {2}'.format(module.location.url(), key, my_metadata[key]) + + return err_cnt + def validate_category_hierarchy(module_store, course_id, parent_category, expected_child_category): err_cnt = 0 @@ -268,7 +394,7 @@ def validate_category_hierarchy(module_store, course_id, parent_category, expect parents.append(module) for parent in parents: - for child_loc in [Location(child) for child in parent.definition.get('children', [])]: + for child_loc in [Location(child) for child in parent.children]: if child_loc.category != expected_child_category: err_cnt += 1 print 'ERROR: child {0} of parent {1} was expected to be category of {2} but was {3}'.format( @@ -280,7 +406,7 @@ def validate_category_hierarchy(module_store, course_id, parent_category, expect def validate_data_source_path_existence(path, is_err=True, extra_msg=None): _cnt = 0 if not os.path.exists(path): - print ("{0}: Expected folder at {1}. {2}".format('ERROR' if is_err == True else 'WARNING', path, extra_msg if + print ("{0}: Expected folder at {1}. 
{2}".format('ERROR' if is_err == True else 'WARNING', path, extra_msg if extra_msg is not None else '')) _cnt = 1 return _cnt @@ -342,6 +468,13 @@ def perform_xlint(data_dir, course_dirs, err_cnt += validate_category_hierarchy(module_store, course_id, "chapter", "sequential") # constrain that sequentials only have 'verticals' err_cnt += validate_category_hierarchy(module_store, course_id, "sequential", "vertical") + # don't allow metadata on verticals, since we can't edit them in studio + err_cnt += validate_no_non_editable_metadata(module_store, course_id, "vertical") + # don't allow metadata on chapters, since we can't edit them in studio + err_cnt += validate_no_non_editable_metadata(module_store, course_id, "chapter",['start']) + # don't allow metadata on sequences that we can't edit + err_cnt += validate_no_non_editable_metadata(module_store, course_id, "sequential", + ['due','format','start','graded']) # check for a presence of a course marketing video location_elements = course_id.split('/') @@ -358,3 +491,5 @@ def perform_xlint(data_dir, course_dirs, print "This course can be imported, but some errors may occur during the run of the course. It is recommend that you fix your courseware before importing" else: print "This course can be imported successfully." 
+ + return err_cnt diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py new file mode 100644 index 0000000000..9aa77fde52 --- /dev/null +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py @@ -0,0 +1 @@ +__author__ = 'vik' diff --git a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py similarity index 64% rename from common/lib/xmodule/xmodule/combined_open_ended_modulev1.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py index 8bd7df86c1..eaa43c0d86 100644 --- a/common/lib/xmodule/xmodule/combined_open_ended_modulev1.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py @@ -1,46 +1,26 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree from lxml.html import rewrite_links -from path import path -import os -import sys - -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .x_module import XModule -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location +from xmodule.timeinfo import TimeInfo +from xmodule.capa_module import ComplexEncoder +from xmodule.editing_module import EditingDescriptor +from xmodule.progress import Progress +from xmodule.stringify import stringify_children +from xmodule.xml_module import XmlDescriptor import self_assessment_module import open_ended_module -from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError -from .stringify import stringify_children -import dateutil -import dateutil.parser -import datetime -from 
timeparse import parse_timedelta +from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST log = logging.getLogger("mitx.courseware") # Set the default number of max attempts. Should be 1 for production # Set higher for debugging/testing # attempts specified in xml definition overrides this. -MAX_ATTEMPTS = 10000 - -# Set maximum available number of points. -# Overriden by max_score specified in xml. -MAX_SCORE = 1 +MAX_ATTEMPTS = 1 #The highest score allowed for the overall xmodule and for each rubric point -MAX_SCORE_ALLOWED = 3 +MAX_SCORE_ALLOWED = 50 #If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress #Metadata overrides this. @@ -54,9 +34,14 @@ ACCEPT_FILE_UPLOAD = False TRUE_DICT = ["True", True, "TRUE", "true"] HUMAN_TASK_TYPE = { - 'selfassessment' : "Self Assessment", - 'openended' : "External Grader", - } + 'selfassessment': "Self Assessment", + 'openended': "edX Assessment", +} + +#Default value that controls whether or not to skip basic spelling checks in the controller +#Metadata overrides this +SKIP_BASIC_CHECKS = False + class CombinedOpenEndedV1Module(): """ @@ -72,7 +57,7 @@ class CombinedOpenEndedV1Module(): 'save_assessment' -- Saves the student assessment (or external grader assessment) 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) ajax actions implemented by combined open ended module are: - 'reset' -- resets the whole combined open ended module and returns to the first child module + 'reset' -- resets the whole combined open ended module and returns to the first child moduleresource_string 'next_problem' -- moves to the next child module 'get_results' -- gets results from a given child module @@ -89,22 +74,17 @@ class CombinedOpenEndedV1Module(): INTERMEDIATE_DONE = 'intermediate_done' DONE = 'done' - js = {'coffee': [resource_string(__name__, 
'js/src/combinedopenended/display.coffee'), - resource_string(__name__, 'js/src/collapsible.coffee'), - resource_string(__name__, 'js/src/javascript_loader.coffee'), - ]} - js_module_name = "CombinedOpenEnded" - - css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} + #Where the templates live for this problem + TEMPLATE_DIR = "combinedopenended" def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, metadata = None, static_data = None, **kwargs): + instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs): """ Definition file should have one or many task blocks, a rubric block, and a prompt block: Sample file: - + Blah blah rubric. @@ -135,16 +115,9 @@ class CombinedOpenEndedV1Module(): """ - self.metadata = metadata - self.display_name = metadata.get('display_name', "Open Ended") - self.rewrite_content_links = static_data.get('rewrite_content_links',"") - - - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} + self.instance_state = instance_state + self.display_name = instance_state.get('display_name', "Open Ended") + self.rewrite_content_links = static_data.get('rewrite_content_links', "") #We need to set the location here so the child modules can use it system.set('location', location) @@ -157,54 +130,41 @@ class CombinedOpenEndedV1Module(): #Overall state of the combined open ended module self.state = instance_state.get('state', self.INITIAL) - self.attempts = instance_state.get('attempts', 0) + self.student_attempts = instance_state.get('student_attempts', 0) #Allow reset is true if student has failed the criteria to move to the next child task - self.allow_reset = instance_state.get('ready_to_reset', False) - self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS)) - self.is_scored = self.metadata.get('is_graded', IS_SCORED) in TRUE_DICT - self.accept_file_upload = 
self.metadata.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT + self.ready_to_reset = instance_state.get('ready_to_reset', False) + self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS) + self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT + self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT + self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT - display_due_date_string = self.metadata.get('due', None) - if display_due_date_string is not None: - try: - self.display_due_date = dateutil.parser.parse(display_due_date_string) - except ValueError: - log.error("Could not parse due date {0} for location {1}".format(display_due_date_string, location)) - raise - else: - self.display_due_date = None + display_due_date_string = self.instance_state.get('due', None) - grace_period_string = self.metadata.get('graceperiod', None) - if grace_period_string is not None and self.display_due_date: - try: - self.grace_period = parse_timedelta(grace_period_string) - self.close_date = self.display_due_date + self.grace_period - except: - log.error("Error parsing the grace period {0} for location {1}".format(grace_period_string, location)) - raise - else: - self.grace_period = None - self.close_date = self.display_due_date + grace_period_string = self.instance_state.get('graceperiod', None) + try: + self.timeinfo = TimeInfo(display_due_date_string, grace_period_string) + except: + log.error("Error parsing due date information in location {0}".format(location)) + raise + self.display_due_date = self.timeinfo.display_due_date - # Used for progress / grading. Currently get credit just for - # completion (doesn't matter if you self-assessed correct/incorrect). 
- self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) - - rubric_renderer = CombinedOpenEndedRubric(system, True) + self.rubric_renderer = CombinedOpenEndedRubric(system, True) rubric_string = stringify_children(definition['rubric']) - rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED, self._max_score) + self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED) #Static data is passed to the child modules to render self.static_data = { 'max_score': self._max_score, - 'max_attempts': self.max_attempts, + 'max_attempts': self.attempts, 'prompt': definition['prompt'], 'rubric': definition['rubric'], 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, - 'close_date' : self.close_date, - } + 'close_date': self.timeinfo.close_date, + 's3_interface': self.system.s3_interface, + 'skip_basic_checks': self.skip_basic_checks, + } self.task_xml = definition['task_xml'] self.location = location @@ -230,10 +190,10 @@ class CombinedOpenEndedV1Module(): last_response = last_response_data['response'] loaded_task_state = json.loads(current_task_state) - if loaded_task_state['state'] == self.INITIAL: - loaded_task_state['state'] = self.ASSESSING - loaded_task_state['created'] = True - loaded_task_state['history'].append({'answer': last_response}) + if loaded_task_state['child_state'] == self.INITIAL: + loaded_task_state['child_state'] = self.ASSESSING + loaded_task_state['child_created'] = True + loaded_task_state['child_history'].append({'answer': last_response}) current_task_state = json.dumps(loaded_task_state) return current_task_state @@ -247,15 +207,15 @@ class CombinedOpenEndedV1Module(): child_modules = { 'openended': open_ended_module.OpenEndedModule, 'selfassessment': self_assessment_module.SelfAssessmentModule, - } + } child_descriptors = { 'openended': open_ended_module.OpenEndedDescriptor, 'selfassessment': 
self_assessment_module.SelfAssessmentDescriptor, - } + } children = { 'modules': child_modules, 'descriptors': child_descriptors, - } + } return children def setup_next_task(self, reset=False): @@ -272,8 +232,8 @@ class CombinedOpenEndedV1Module(): self.current_task_xml = self.task_xml[self.current_task_number] if self.current_task_number > 0: - self.allow_reset = self.check_allow_reset() - if self.allow_reset: + self.ready_to_reset = self.check_allow_reset() + if self.ready_to_reset: self.current_task_number = self.current_task_number - 1 current_task_type = self.get_tag_name(self.current_task_xml) @@ -291,31 +251,34 @@ class CombinedOpenEndedV1Module(): self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system) if current_task_state is None and self.current_task_number == 0: self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, self.static_data) + self.current_task_parsed_xml, self.current_task_descriptor, + self.static_data) self.task_states.append(self.current_task.get_instance_state()) self.state = self.ASSESSING elif current_task_state is None and self.current_task_number > 0: last_response_data = self.get_last_response(self.current_task_number - 1) last_response = last_response_data['response'] current_task_state = json.dumps({ - 'state': self.ASSESSING, + 'child_state': self.ASSESSING, 'version': self.STATE_VERSION, 'max_score': self._max_score, - 'attempts': 0, - 'created': True, - 'history': [{'answer': last_response}], - }) + 'child_attempts': 0, + 'child_created': True, + 'child_history': [{'answer': last_response}], + }) self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, - instance_state=current_task_state) + self.current_task_parsed_xml, self.current_task_descriptor, + self.static_data, + instance_state=current_task_state) 
self.task_states.append(self.current_task.get_instance_state()) self.state = self.ASSESSING else: if self.current_task_number > 0 and not reset: current_task_state = self.overwrite_state(current_task_state) self.current_task = child_task_module(self.system, self.location, - self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, - instance_state=current_task_state) + self.current_task_parsed_xml, self.current_task_descriptor, + self.static_data, + instance_state=current_task_state) return True @@ -326,17 +289,17 @@ class CombinedOpenEndedV1Module(): Input: None Output: the allow_reset attribute of the current module. """ - if not self.allow_reset: + if not self.ready_to_reset: if self.current_task_number > 0: last_response_data = self.get_last_response(self.current_task_number - 1) current_response_data = self.get_current_attributes(self.current_task_number) - if(current_response_data['min_score_to_attempt'] > last_response_data['score'] - or current_response_data['max_score_to_attempt'] < last_response_data['score']): + if (current_response_data['min_score_to_attempt'] > last_response_data['score'] + or current_response_data['max_score_to_attempt'] < last_response_data['score']): self.state = self.DONE - self.allow_reset = True + self.ready_to_reset = True - return self.allow_reset + return self.ready_to_reset def get_context(self): """ @@ -350,14 +313,16 @@ class CombinedOpenEndedV1Module(): context = { 'items': [{'content': task_html}], 'ajax_url': self.system.ajax_url, - 'allow_reset': self.allow_reset, + 'allow_reset': self.ready_to_reset, 'state': self.state, 'task_count': len(self.task_xml), 'task_number': self.current_task_number + 1, - 'status': self.get_status(), + 'status': self.get_status(False), 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, - } + 'location': self.location, + 'legend_list': LEGEND_LIST, + } return context @@ -368,7 +333,7 @@ class CombinedOpenEndedV1Module(): Output: rendered 
html """ context = self.get_context() - html = self.system.render_template('combined_open_ended.html', context) + html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_nonsystem(self): @@ -379,7 +344,7 @@ class CombinedOpenEndedV1Module(): Output: HTML rendered directly via Mako """ context = self.get_context() - html = self.system.render_template('combined_open_ended.html', context) + html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_base(self): @@ -390,7 +355,15 @@ class CombinedOpenEndedV1Module(): """ self.update_task_states() html = self.current_task.get_html(self.system) - return_html = rewrite_links(html, self.rewrite_content_links) + return_html = html + try: + #Without try except block, get this error: + # File "/home/vik/mitx_all/mitx/common/lib/xmodule/xmodule/x_module.py", line 263, in rewrite_content_links + # if link.startswith(XASSET_SRCREF_PREFIX): + # Placing try except so that if the error is fixed, this code will start working again. 
+ return_html = rewrite_links(html, self.rewrite_content_links) + except: + pass return return_html def get_current_attributes(self, task_number): @@ -426,11 +399,14 @@ class CombinedOpenEndedV1Module(): task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system) task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, - self.static_data, instance_state=task_state) + self.static_data, instance_state=task_state) last_response = task.latest_answer() last_score = task.latest_score() last_post_assessment = task.latest_post_assessment(self.system) last_post_feedback = "" + feedback_dicts = [{}] + grader_ids = [0] + submission_ids = [0] if task_type == "openended": last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False) if isinstance(last_post_assessment, list): @@ -441,9 +417,21 @@ class CombinedOpenEndedV1Module(): else: last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment) last_post_assessment = last_post_evaluation + rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', ""), self.system) + rubric_scores = rubric_data['rubric_scores'] + grader_types = rubric_data['grader_types'] + feedback_items = rubric_data['feedback_items'] + feedback_dicts = rubric_data['feedback_dicts'] + grader_ids = rubric_data['grader_ids'] + submission_ids = rubric_data['submission_ids'] + elif task_type == "selfassessment": + rubric_scores = last_post_assessment + grader_types = ['SA'] + feedback_items = [''] + last_post_assessment = "" last_correctness = task.is_last_response_correct() max_score = task.max_score() - state = task.state + state = task.child_state if task_type in HUMAN_TASK_TYPE: human_task_name = HUMAN_TASK_TYPE[task_type] else: @@ -453,6 +441,16 @@ class CombinedOpenEndedV1Module(): human_state = task.HUMAN_NAMES[state] else: human_state = state + if len(grader_types) > 0: + grader_type = 
grader_types[0] + else: + grader_type = "IN" + + if grader_type in HUMAN_GRADER_TYPE: + human_grader_name = HUMAN_GRADER_TYPE[grader_type] + else: + human_grader_name = grader_type + last_response_dict = { 'response': last_response, 'score': last_score, @@ -465,8 +463,15 @@ class CombinedOpenEndedV1Module(): 'correct': last_correctness, 'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt, - } - + 'rubric_scores': rubric_scores, + 'grader_types': grader_types, + 'feedback_items': feedback_items, + 'grader_type': grader_type, + 'human_grader_type': human_grader_name, + 'feedback_dicts': feedback_dicts, + 'grader_ids': grader_ids, + 'submission_ids': submission_ids, + } return last_response_dict def update_task_states(self): @@ -476,10 +481,10 @@ class CombinedOpenEndedV1Module(): Output: boolean indicating whether or not the task state changed. """ changed = False - if not self.allow_reset: + if not self.ready_to_reset: self.task_states[self.current_task_number] = self.current_task.get_instance_state() current_task_state = json.loads(self.task_states[self.current_task_number]) - if current_task_state['state'] == self.DONE: + if current_task_state['child_state'] == self.DONE: self.current_task_number += 1 if self.current_task_number >= (len(self.task_xml)): self.state = self.DONE @@ -502,17 +507,101 @@ class CombinedOpenEndedV1Module(): pass return return_html + def get_rubric(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. 
+ """ + all_responses = [] + loop_up_to_task = self.current_task_number + 1 + for i in xrange(0, loop_up_to_task): + all_responses.append(self.get_last_response(i)) + rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if + len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][ + 0] in HUMAN_GRADER_TYPE.keys()] + grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if + len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][ + 0] in HUMAN_GRADER_TYPE.keys()] + feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if + len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][ + 0] in HUMAN_GRADER_TYPE.keys()] + rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), + rubric_scores, + grader_types, feedback_items) + + response_dict = all_responses[-1] + context = { + 'results': rubric_html, + 'task_name': 'Scored Rubric', + 'class_name': 'combined-rubric-container' + } + html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context) + return {'html': html, 'success': True} + + def get_legend(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. + """ + context = { + 'legend_list': LEGEND_LIST, + } + html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context) + return {'html': html, 'success': True} + def get_results(self, get): """ Gets the results of a given grader via ajax. Input: AJAX get dictionary Output: Dictionary to be rendered via ajax that contains the result html. 
""" - task_number = int(get['task_number']) self.update_task_states() - response_dict = self.get_last_response(task_number) - context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1} - html = self.system.render_template('combined_open_ended_results.html', context) + loop_up_to_task = self.current_task_number + 1 + all_responses = [] + for i in xrange(0, loop_up_to_task): + all_responses.append(self.get_last_response(i)) + context_list = [] + for ri in all_responses: + for i in xrange(0, len(ri['rubric_scores'])): + feedback = ri['feedback_dicts'][i].get('feedback', '') + rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), + ri['rubric_scores'][i]) + if rubric_data['success']: + rubric_html = rubric_data['html'] + else: + rubric_html = '' + context = { + 'rubric_html': rubric_html, + 'grader_type': ri['grader_type'], + 'feedback': feedback, + 'grader_id': ri['grader_ids'][i], + 'submission_id': ri['submission_ids'][i], + } + context_list.append(context) + feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), { + 'context_list': context_list, + 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, + 'human_grader_types': HUMAN_GRADER_TYPE, + 'rows': 50, + 'cols': 50, + }) + context = { + 'results': feedback_table, + 'task_name': "Feedback", + 'class_name': "result-container", + } + html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context) + return {'html': html, 'success': True} + + def get_status_ajax(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. 
+ """ + html = self.get_status(True) return {'html': html, 'success': True} def handle_ajax(self, dispatch, get): @@ -529,7 +618,10 @@ class CombinedOpenEndedV1Module(): handlers = { 'next_problem': self.next_problem, 'reset': self.reset, - 'get_results': self.get_results + 'get_results': self.get_results, + 'get_combined_rubric': self.get_rubric, + 'get_status': self.get_status_ajax, + 'get_legend': self.get_legend, } if dispatch not in handlers: @@ -546,7 +638,7 @@ class CombinedOpenEndedV1Module(): Output: Dictionary to be rendered """ self.update_task_states() - return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.allow_reset} + return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.ready_to_reset} def reset(self, get): """ @@ -555,23 +647,26 @@ class CombinedOpenEndedV1Module(): Output: AJAX dictionary to tbe rendered """ if self.state != self.DONE: - if not self.allow_reset: + if not self.ready_to_reset: return self.out_of_sync_error(get) - if self.attempts > self.max_attempts: + if self.student_attempts > self.attempts: return { 'success': False, - 'error': 'Too many attempts.' + #This is a student_facing_error + 'error': ('You have attempted this question {0} times. 
' + 'You are only allowed to attempt it {1} times.').format( + self.student_attempts, self.attempts) } self.state = self.INITIAL - self.allow_reset = False + self.ready_to_reset = False for i in xrange(0, len(self.task_xml)): self.current_task_number = i self.setup_next_task(reset=True) self.current_task.reset(self.system) self.task_states[self.current_task_number] = self.current_task.get_instance_state() self.current_task_number = 0 - self.allow_reset = False + self.ready_to_reset = False self.setup_next_task() return {'success': True, 'html': self.get_html_nonsystem()} @@ -587,13 +682,13 @@ class CombinedOpenEndedV1Module(): 'current_task_number': self.current_task_number, 'state': self.state, 'task_states': self.task_states, - 'attempts': self.attempts, - 'ready_to_reset': self.allow_reset, - } + 'student_attempts': self.student_attempts, + 'ready_to_reset': self.ready_to_reset, + } return json.dumps(state) - def get_status(self): + def get_status(self, render_via_ajax): """ Gets the status panel to be displayed at the top right. Input: None @@ -604,8 +699,15 @@ class CombinedOpenEndedV1Module(): task_data = self.get_last_response(i) task_data.update({'task_number': i + 1}) status.append(task_data) - context = {'status_list': status} - status_html = self.system.render_template("combined_open_ended_status.html", context) + + context = { + 'status_list': status, + 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, + 'legend_list': LEGEND_LIST, + 'render_via_ajax': render_via_ajax, + } + status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), + context) return status_html @@ -616,7 +718,7 @@ class CombinedOpenEndedV1Module(): entirely, in which case they will be in the self.DONE state), and if it is scored or not. @return: Boolean corresponding to the above. 
""" - return (self.state == self.DONE or self.allow_reset) and self.is_scored + return (self.state == self.DONE or self.ready_to_reset) and self.is_scored def get_score(self): """ @@ -638,7 +740,7 @@ class CombinedOpenEndedV1Module(): score_dict = { 'score': score, 'total': max_score, - } + } return score_dict @@ -667,7 +769,7 @@ class CombinedOpenEndedV1Module(): return progress_object -class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): +class CombinedOpenEndedV1Descriptor(): """ Module for adding combined open ended questions """ @@ -679,8 +781,8 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "combinedopenended" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" + def __init__(self, system): + self.system = system @classmethod def definition_from_xml(cls, xml_object, system): @@ -697,7 +799,10 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): expected_children = ['task', 'rubric', 'prompt'] for child in expected_children: if len(xml_object.xpath(child)) == 0: - raise ValueError("Combined Open Ended definition must include at least one '{0}' tag".format(child)) + #This is a staff_facing_error + raise ValueError( + "Combined Open Ended definition must include at least one '{0}' tag. 
Contact the learning sciences group for assistance.".format( + child)) def parse_task(k): """Assumes that xml_object has child k""" @@ -722,4 +827,4 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor): for child in ['task']: add_child(child) - return elt \ No newline at end of file + return elt diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py new file mode 100644 index 0000000000..6245d4d31c --- /dev/null +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py @@ -0,0 +1,323 @@ +import logging +from lxml import etree + +log = logging.getLogger(__name__) + +GRADER_TYPE_IMAGE_DICT = { + 'SA': '/static/images/self_assessment_icon.png', + 'PE': '/static/images/peer_grading_icon.png', + 'ML': '/static/images/ml_grading_icon.png', + 'IN': '/static/images/peer_grading_icon.png', + 'BC': '/static/images/ml_grading_icon.png', +} + +HUMAN_GRADER_TYPE = { + 'SA': 'Self-Assessment', + 'PE': 'Peer-Assessment', + 'IN': 'Instructor-Assessment', + 'ML': 'AI-Assessment', + 'BC': 'AI-Assessment', +} + +DO_NOT_DISPLAY = ['BC', 'IN'] + +LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() + if k not in DO_NOT_DISPLAY] + + +class RubricParsingError(Exception): + def __init__(self, msg): + self.msg = msg + + +class CombinedOpenEndedRubric(object): + TEMPLATE_DIR = "combinedopenended/openended" + + def __init__(self, system, view_only=False): + self.has_score = False + self.view_only = view_only + self.system = system + + def render_rubric(self, rubric_xml, score_list=None): + ''' + render_rubric: takes in an xml string and outputs the corresponding + html for that xml, given the type of rubric we're generating + Input: + rubric_xml: an string that has not been parsed into xml that + represents this particular rubric + Output: + html: 
the html that corresponds to the xml given + ''' + success = False + try: + rubric_categories = self.extract_categories(rubric_xml) + if score_list and len(score_list) == len(rubric_categories): + for i in xrange(0, len(rubric_categories)): + category = rubric_categories[i] + for j in xrange(0, len(category['options'])): + if score_list[i] == j: + rubric_categories[i]['options'][j]['selected'] = True + rubric_scores = [cat['score'] for cat in rubric_categories] + max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) + max_score = max(max_scores) + rubric_template = '{0}/open_ended_rubric.html'.format(self.TEMPLATE_DIR) + if self.view_only: + rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR) + html = self.system.render_template(rubric_template, + {'categories': rubric_categories, + 'has_score': self.has_score, + 'view_only': self.view_only, + 'max_score': max_score, + 'combined_rubric': False + }) + success = True + except: + #This is a staff_facing_error + error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format( + rubric_xml) + log.exception(error_message) + raise RubricParsingError(error_message) + return {'success': success, 'html': html, 'rubric_scores': rubric_scores} + + def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed): + rubric_dict = self.render_rubric(rubric_string) + success = rubric_dict['success'] + rubric_feedback = rubric_dict['html'] + if not success: + #This is a staff_facing_error + error_message = "Could not parse rubric : {0} for location {1}. 
Contact the learning sciences group for assistance.".format( + rubric_string, location.url()) + log.error(error_message) + raise RubricParsingError(error_message) + + rubric_categories = self.extract_categories(rubric_string) + total = 0 + for category in rubric_categories: + total = total + len(category['options']) - 1 + if len(category['options']) > (max_score_allowed + 1): + #This is a staff_facing_error + error_message = "Number of score points in rubric {0} higher than the max allowed, which is {1}. Contact the learning sciences group for assistance.".format( + len(category['options']), max_score_allowed) + log.error(error_message) + raise RubricParsingError(error_message) + + return int(total) + + def extract_categories(self, element): + ''' + Contstruct a list of categories such that the structure looks like: + [ { category: "Category 1 Name", + options: [{text: "Option 1 Name", points: 0}, {text:"Option 2 Name", points: 5}] + }, + { category: "Category 2 Name", + options: [{text: "Option 1 Name", points: 0}, + {text: "Option 2 Name", points: 1}, + {text: "Option 3 Name", points: 2]}] + + ''' + if isinstance(element, basestring): + element = etree.fromstring(element) + categories = [] + for category in element: + if category.tag != 'category': + #This is a staff_facing_error + raise RubricParsingError( + "[extract_categories] Expected a tag: got {0} instead. 
Contact the learning sciences group for assistance.".format( + category.tag)) + else: + categories.append(self.extract_category(category)) + return categories + + def extract_category(self, category): + ''' + construct an individual category + {category: "Category 1 Name", + options: [{text: "Option 1 text", points: 1}, + {text: "Option 2 text", points: 2}]} + + all sorting and auto-point generation occurs in this function + ''' + descriptionxml = category[0] + optionsxml = category[1:] + scorexml = category[1] + score = None + if scorexml.tag == 'score': + score_text = scorexml.text + optionsxml = category[2:] + score = int(score_text) + self.has_score = True + # if we are missing the score tag and we are expecting one + elif self.has_score: + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format( + descriptionxml.text)) + + + # parse description + if descriptionxml.tag != 'description': + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format( + descriptionxml.tag)) + + description = descriptionxml.text + + cur_points = 0 + options = [] + autonumbering = True + # parse options + for option in optionsxml: + if option.tag != 'option': + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format( + option.tag)) + else: + pointstr = option.get("points") + if pointstr: + autonumbering = False + # try to parse this into an int + try: + points = int(pointstr) + except ValueError: + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: expected points to have int, got {0} instead. 
Contact the learning sciences group for assistance.".format( + pointstr)) + elif autonumbering: + # use the generated one if we're in the right mode + points = cur_points + cur_points = cur_points + 1 + else: + raise Exception( + "[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.") + + selected = score == points + optiontext = option.text + options.append({'text': option.text, 'points': points, 'selected': selected}) + + # sort and check for duplicates + options = sorted(options, key=lambda option: option['points']) + CombinedOpenEndedRubric.validate_options(options) + + return {'description': description, 'options': options, 'score': score} + + def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types): + success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types, + feedback_types) + rubric_categories = self.extract_categories(rubric_xml) + max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories) + max_score = max(max_scores) + for i in xrange(0, len(rubric_categories)): + category = rubric_categories[i] + for j in xrange(0, len(category['options'])): + rubric_categories[i]['options'][j]['grader_types'] = [] + for tuple in score_tuples: + if tuple[1] == i and tuple[2] == j: + for grader_type in tuple[3]: + rubric_categories[i]['options'][j]['grader_types'].append(grader_type) + + html = self.system.render_template('{0}/open_ended_combined_rubric.html'.format(self.TEMPLATE_DIR), + {'categories': rubric_categories, + 'has_score': True, + 'view_only': True, + 'max_score': max_score, + 'combined_rubric': True, + 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, + 'human_grader_types': HUMAN_GRADER_TYPE, + }) + return html + + @staticmethod + def validate_options(options): + ''' + Validates a set of options. 
This can and should be extended to filter out other bad edge cases + ''' + if len(options) == 0: + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.") + if len(options) == 1: + return + prev = options[0]['points'] + for option in options[1:]: + if prev == option['points']: + #This is a staff_facing_error + raise RubricParsingError( + "[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.") + else: + prev = option['points'] + + @staticmethod + def reformat_scores_for_rendering(scores, score_types, feedback_types): + """ + Takes in a list of rubric scores, the types of those scores, and feedback associated with them + Outputs a reformatted list of score tuples (count, rubric category, rubric score, [graders that gave this score], [feedback types]) + @param scores: + @param score_types: + @param feedback_types: + @return: + """ + success = False + if len(scores) == 0: + #This is a dev_facing_error + log.error("Score length is 0 when trying to reformat rubric scores for rendering.") + return success, "" + + if len(scores) != len(score_types) or len(feedback_types) != len(scores): + #This is a dev_facing_error + log.error("Length mismatches when trying to reformat rubric scores for rendering. 
" + "Scores: {0}, Score Types: {1} Feedback Types: {2}".format(scores, score_types, feedback_types)) + return success, "" + + score_lists = [] + score_type_list = [] + feedback_type_list = [] + for i in xrange(0, len(scores)): + score_cont_list = scores[i] + for j in xrange(0, len(score_cont_list)): + score_list = score_cont_list[j] + score_lists.append(score_list) + score_type_list.append(score_types[i][j]) + feedback_type_list.append(feedback_types[i][j]) + + score_list_len = len(score_lists[0]) + for i in xrange(0, len(score_lists)): + score_list = score_lists[i] + if len(score_list) != score_list_len: + return success, "" + + score_tuples = [] + for i in xrange(0, len(score_lists)): + for j in xrange(0, len(score_lists[i])): + tuple = [1, j, score_lists[i][j], [], []] + score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, tuple) + score_tuples[tup_ind][0] += 1 + score_tuples[tup_ind][3].append(score_type_list[i]) + score_tuples[tup_ind][4].append(feedback_type_list[i]) + + success = True + return success, score_tuples + + @staticmethod + def check_for_tuple_matches(tuples, tuple): + """ + Checks to see if a tuple in a list of tuples is a match for tuple. + If not match, creates a new tuple matching tuple. 
+ @param tuples: list of tuples + @param tuple: tuples to match + @return: a new list of tuples, and the index of the tuple that matches tuple + """ + category = tuple[1] + score = tuple[2] + tup_ind = -1 + for t in xrange(0, len(tuples)): + if tuples[t][1] == category and tuples[t][2] == score: + tup_ind = t + break + + if tup_ind == -1: + tuples.append([0, category, score, [], []]) + tup_ind = len(tuples) - 1 + return tuples, tup_ind diff --git a/lms/djangoapps/open_ended_grading/controller_query_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py similarity index 76% rename from lms/djangoapps/open_ended_grading/controller_query_service.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py index 83d5617bd2..08f2a95387 100644 --- a/lms/djangoapps/open_ended_grading/controller_query_service.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py @@ -1,14 +1,5 @@ -import json import logging -import requests -from requests.exceptions import RequestException, ConnectionError, HTTPError -import sys -from xmodule.grading_service_module import GradingService, GradingServiceError - -from django.conf import settings -from django.http import HttpResponse, Http404 -from xmodule.x_module import ModuleSystem -from mitxmako.shortcuts import render_to_string +from .grading_service_module import GradingService log = logging.getLogger(__name__) @@ -17,9 +8,12 @@ class ControllerQueryService(GradingService): """ Interface to staff grading backend. 
""" - def __init__(self, config): - config['system'] = ModuleSystem(None, None, None, render_to_string, None) + + def __init__(self, config, system): + config['system'] = system super(ControllerQueryService, self).__init__(config) + self.url = config['url'] + config['grading_controller'] + self.login_url = self.url + '/login/' self.check_eta_url = self.url + '/get_submission_eta/' self.is_unique_url = self.url + '/is_name_unique/' self.combined_notifications_url = self.url + '/combined_notifications/' @@ -66,7 +60,7 @@ class ControllerQueryService(GradingService): def get_flagged_problem_list(self, course_id): params = { 'course_id': course_id, - } + } response = self.get(self.flagged_problem_list_url, params) return response @@ -77,7 +71,21 @@ class ControllerQueryService(GradingService): 'student_id': student_id, 'submission_id': submission_id, 'action_type': action_type - } + } response = self.post(self.take_action_on_flags_url, params) return response + + +def convert_seconds_to_human_readable(seconds): + if seconds < 60: + human_string = "{0} seconds".format(seconds) + elif seconds < 60 * 60: + human_string = "{0} minutes".format(round(seconds / 60, 1)) + elif seconds < (24 * 60 * 60): + human_string = "{0} hours".format(round(seconds / (60 * 60), 1)) + else: + human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1)) + + eta_string = "{0}".format(human_string) + return eta_string diff --git a/common/lib/xmodule/xmodule/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py similarity index 80% rename from common/lib/xmodule/xmodule/grading_service_module.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py index 10c6f16adb..f3f6568b1e 100644 --- a/common/lib/xmodule/xmodule/grading_service_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py @@ -5,7 +5,7 @@ import requests from requests.exceptions import 
RequestException, ConnectionError, HTTPError import sys -from xmodule.combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError +from .combined_open_ended_rubric import CombinedOpenEndedRubric from lxml import etree log = logging.getLogger(__name__) @@ -19,11 +19,10 @@ class GradingService(object): """ Interface to staff grading backend. """ + def __init__(self, config): self.username = config['username'] self.password = config['password'] - self.url = config['url'] - self.login_url = self.url + '/login/' self.session = requests.session() self.system = config['system'] @@ -36,8 +35,8 @@ class GradingService(object): Returns the decoded json dict of the response. """ response = self.session.post(self.login_url, - {'username': self.username, - 'password': self.password, }) + {'username': self.username, + 'password': self.password, }) response.raise_for_status() @@ -49,10 +48,12 @@ class GradingService(object): """ try: op = lambda: self.session.post(url, data=data, - allow_redirects=allow_redirects) + allow_redirects=allow_redirects) r = self._try_with_login(op) except (RequestException, ConnectionError, HTTPError) as err: # reraise as promised GradingServiceError, but preserve stacktrace. + #This is a dev_facing_error + log.error("Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)) raise GradingServiceError, str(err), sys.exc_info()[2] return r.text @@ -63,12 +64,14 @@ class GradingService(object): """ log.debug(params) op = lambda: self.session.get(url, - allow_redirects=allow_redirects, - params=params) + allow_redirects=allow_redirects, + params=params) try: r = self._try_with_login(op) except (RequestException, ConnectionError, HTTPError) as err: # reraise as promised GradingServiceError, but preserve stacktrace. + #This is a dev_facing_error + log.error("Problem getting data from the grading controller. 
URL: {0}, params: {1}".format(url, params)) raise GradingServiceError, str(err), sys.exc_info()[2] return r.text @@ -90,7 +93,7 @@ class GradingService(object): r = self._login() if r and not r.get('success'): log.warning("Couldn't log into staff_grading backend. Response: %s", - r) + r) # try again response = operation() response.raise_for_status() @@ -114,16 +117,20 @@ class GradingService(object): if 'rubric' in response_json: rubric = response_json['rubric'] rubric_renderer = CombinedOpenEndedRubric(self.system, view_only) - success, rubric_html = rubric_renderer.render_rubric(rubric) + rubric_dict = rubric_renderer.render_rubric(rubric) + success = rubric_dict['success'] + rubric_html = rubric_dict['html'] response_json['rubric'] = rubric_html return response_json # if we can't parse the rubric into HTML, except etree.XMLSyntaxError, RubricParsingError: + #This is a dev_facing_error log.exception("Cannot parse rubric string. Raw string: {0}" .format(rubric)) return {'success': False, 'error': 'Error displaying submission'} except ValueError: + #This is a dev_facing_error log.exception("Error parsing response: {0}".format(response)) return {'success': False, 'error': "Error displaying submission"} diff --git a/common/lib/xmodule/xmodule/open_ended_image_submission.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py similarity index 90% rename from common/lib/xmodule/xmodule/open_ended_image_submission.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py index 66500146ed..2eb9502269 100644 --- a/common/lib/xmodule/xmodule/open_ended_image_submission.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py @@ -5,6 +5,7 @@ to send them to S3. 
try: from PIL import Image + ENABLE_PIL = True except: ENABLE_PIL = False @@ -13,11 +14,6 @@ from urlparse import urlparse import requests from boto.s3.connection import S3Connection from boto.s3.key import Key -#TODO: Settings import is needed now in order to specify the URL and keys for amazon s3 (to upload images). -#Eventually, the goal is to replace the global django settings import with settings specifically -#for this module. There is no easy way to do this now, so piggybacking on the django settings -#makes sense. -from django.conf import settings import pickle import logging import re @@ -40,7 +36,7 @@ ALLOWABLE_IMAGE_SUFFIXES = [ ] #Maximum allowed dimensions (x and y) for an uploaded image -MAX_ALLOWED_IMAGE_DIM = 1500 +MAX_ALLOWED_IMAGE_DIM = 2000 #Dimensions to which image is resized before it is evaluated for color count, etc MAX_IMAGE_DIM = 150 @@ -56,6 +52,7 @@ class ImageProperties(object): """ Class to check properties of an image and to validate if they are allowed. """ + def __init__(self, image_data): """ Initializes class variables @@ -97,7 +94,7 @@ class ImageProperties(object): g = rgb[1] b = rgb[2] check_r = (r > 60) - check_g = (r * 0.4) < g < (r * 0.85) + check_g = (r * 0.4) < g < (r * 0.85) check_b = (r * 0.2) < b < (r * 0.7) colors_okay = check_r and check_b and check_g except: @@ -146,6 +143,7 @@ class URLProperties(object): Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable links to the peer grading image functionality of the external grading service. """ + def __init__(self, url_string): self.url_string = url_string @@ -180,7 +178,7 @@ class URLProperties(object): Runs all available url tests @return: True if URL passes tests, false if not. 
""" - url_is_okay = self.check_suffix() and self.check_if_parses() and self.check_domain() + url_is_okay = self.check_suffix() and self.check_if_parses() return url_is_okay def check_domain(self): @@ -217,11 +215,11 @@ def run_image_tests(image): success = image_properties.run_tests() except: log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image," - "or an issue with the deployment configuration of PIL/Pillow") + "or an issue with the deployment configuration of PIL/Pillow") return success -def upload_to_s3(file_to_upload, keyname): +def upload_to_s3(file_to_upload, keyname, s3_interface): ''' Upload file to S3 using provided keyname. @@ -237,8 +235,8 @@ def upload_to_s3(file_to_upload, keyname): #im.save(out_im, 'PNG') try: - conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY) - bucketname = str(settings.AWS_STORAGE_BUCKET_NAME) + conn = S3Connection(s3_interface['access_key'], s3_interface['secret_access_key']) + bucketname = str(s3_interface['storage_bucket_name']) bucket = conn.create_bucket(bucketname.lower()) k = Key(bucket) @@ -256,8 +254,10 @@ def upload_to_s3(file_to_upload, keyname): return True, public_url except: - error_message = "Could not connect to S3." - log.exception(error_message) + #This is a dev_facing_error + error_message = "Could not connect to S3 to upload peer grading image. 
Trying to utilize bucket: {0}".format( + bucketname.lower()) + log.error(error_message) return False, error_message diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py similarity index 72% rename from common/lib/xmodule/xmodule/open_ended_module.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py index 0ad6a26995..8373700837 100644 --- a/common/lib/xmodule/xmodule/open_ended_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py @@ -5,28 +5,16 @@ hints, answers, and assessment judgment (currently only correct/incorrect). Parses xml definition file--see below for exact format. """ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree -from lxml.html import rewrite_links -from path import path -import os -import sys -import hashlib import capa.xqueue_interface as xqueue_interface -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location +from xmodule.capa_module import ComplexEncoder +from xmodule.editing_module import EditingDescriptor +from xmodule.progress import Progress +from xmodule.stringify import stringify_children +from xmodule.xml_module import XmlDescriptor from capa.util import * import openendedchild @@ -34,7 +22,7 @@ from numpy import median from datetime import datetime -from combined_open_ended_rubric import CombinedOpenEndedRubric +from .combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("mitx.courseware") @@ -52,6 +40,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild): """ + TEMPLATE_DIR = 
"combinedopenended/openended" + def setup_response(self, system, location, definition, descriptor): """ Sets up the response type. @@ -71,19 +61,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild): self.submission_id = None self.grader_id = None + error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance." if oeparam is None: - raise ValueError("No oeparam found in problem xml.") - if self.prompt is None: - raise ValueError("No prompt found in problem xml.") - if self.rubric is None: - raise ValueError("No rubric found in problem xml.") + #This is a staff_facing_error + raise ValueError(error_message.format('oeparam')) + if self.child_prompt is None: + raise ValueError(error_message.format('prompt')) + if self.child_rubric is None: + raise ValueError(error_message.format('rubric')) - self._parse(oeparam, self.prompt, self.rubric, system) + self._parse(oeparam, self.child_prompt, self.child_rubric, system) - if self.created == True and self.state == self.ASSESSING: - self.created = False + if self.child_created == True and self.child_state == self.ASSESSING: + self.child_created = False self.send_to_grader(self.latest_answer(), system) - self.created = False + self.child_created = False def _parse(self, oeparam, prompt, rubric, system): ''' @@ -97,8 +89,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload prompt_string = stringify_children(prompt) rubric_string = stringify_children(rubric) - self.prompt = prompt_string - self.rubric = rubric_string + self.child_prompt = prompt_string + self.child_rubric = rubric_string grader_payload = oeparam.find('grader_payload') grader_payload = grader_payload.text if grader_payload is not None else '' @@ -110,19 +102,23 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # __init__ adds it (easiest way to get problem location into # response types) except TypeError, 
ValueError: - log.exception("Grader payload %r is not a json object!", grader_payload) + #This is a dev_facing_error + log.exception( + "Grader payload from external open ended grading server is not a json object! Object: {0}".format( + grader_payload)) self.initial_display = find_with_default(oeparam, 'initial_display', '') self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.') parsed_grader_payload.update({ - 'location': system.location.url(), + 'location': self.location_string, 'course_id': system.course_id, 'prompt': prompt_string, 'rubric': rubric_string, 'initial_display': self.initial_display, 'answer': self.answer, - 'problem_id': self.display_name + 'problem_id': self.display_name, + 'skip_basic_checks': self.skip_basic_checks, }) updated_grader_payload = json.dumps(parsed_grader_payload) @@ -135,7 +131,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @param system: ModuleSystem @return: Success indicator """ - self.state = self.DONE + self.child_state = self.DONE return {'success': True} def message_post(self, get, system): @@ -145,24 +141,29 @@ class OpenEndedModule(openendedchild.OpenEndedChild): """ event_info = dict() - event_info['problem_id'] = system.location.url() + event_info['problem_id'] = self.location_string event_info['student_id'] = system.anonymous_student_id event_info['survey_responses'] = get survey_responses = event_info['survey_responses'] for tag in ['feedback', 'submission_id', 'grader_id', 'score']: if tag not in survey_responses: - return {'success': False, 'msg': "Could not find needed tag {0}".format(tag)} + #This is a student_facing_error + return {'success': False, + 'msg': "Could not find needed tag {0} in the survey responses. 
Please try submitting again.".format( + tag)} try: submission_id = int(survey_responses['submission_id']) grader_id = int(survey_responses['grader_id']) feedback = str(survey_responses['feedback'].encode('ascii', 'ignore')) score = int(survey_responses['score']) except: + #This is a dev_facing_error error_message = ("Could not parse submission id, grader id, " "or feedback from message_post ajax call. Here is the message data: {0}".format( survey_responses)) log.exception(error_message) + #This is a student_facing_error return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."} qinterface = system.xqueue['interface'] @@ -170,10 +171,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): anonymous_student_id = system.anonymous_student_id queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + anonymous_student_id + - str(len(self.history))) + str(len(self.child_history))) xheader = xqueue_interface.make_xheader( - lms_callback_url=system.xqueue['callback_url'], + lms_callback_url=system.xqueue['construct_callback'](), lms_key=queuekey, queue_name=self.message_queue_name ) @@ -190,15 +191,16 @@ class OpenEndedModule(openendedchild.OpenEndedChild): } (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) + body=json.dumps(contents)) #Convert error to a success value success = True if error: success = False - self.state = self.DONE + self.child_state = self.DONE + #This is a student_facing_message return {'success': success, 'msg': "Successfully submitted your feedback."} def send_to_grader(self, submission, system): @@ -220,11 +222,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # Generate header queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + anonymous_student_id + - str(len(self.history))) + str(len(self.child_history))) - xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'], - lms_key=queuekey, - 
queue_name=self.queue_name) + xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['construct_callback'](), + lms_key=queuekey, + queue_name=self.queue_name) contents = self.payload.copy() @@ -242,7 +244,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # Submit request. When successful, 'msg' is the prior length of the queue (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) + body=json.dumps(contents)) # State associated with the queueing request queuestate = {'key': queuekey, @@ -263,11 +265,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): self.record_latest_score(new_score_msg['score']) self.record_latest_post_assessment(score_msg) - self.state = self.POST_ASSESSMENT + self.child_state = self.POST_ASSESSMENT return True - def get_answers(self): """ Gets and shows the answer for this problem. @@ -301,11 +302,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild): # We want to display available feedback in a particular order. # This dictionary specifies which goes first--lower first. 
- priorities = { # These go at the start of the feedback + priorities = {# These go at the start of the feedback 'spelling': 0, 'grammar': 1, # needs to be after all the other feedback 'markup_text': 3} + do_not_render = ['topicality', 'prompt-overlap'] default_priority = 2 @@ -347,22 +349,31 @@ class OpenEndedModule(openendedchild.OpenEndedChild): for tag in ['success', 'feedback', 'submission_id', 'grader_id']: if tag not in response_items: - return format_feedback('errors', 'Error getting feedback') + #This is a student_facing_error + return format_feedback('errors', 'Error getting feedback from grader.') feedback_items = response_items['feedback'] try: feedback = json.loads(feedback_items) except (TypeError, ValueError): - log.exception("feedback_items have invalid json %r", feedback_items) - return format_feedback('errors', 'Could not parse feedback') + #This is a dev_facing_error + log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items)) + #This is a student_facing_error + return format_feedback('errors', 'Error getting feedback from grader.') if response_items['success']: if len(feedback) == 0: - return format_feedback('errors', 'No feedback available') + #This is a student_facing_error + return format_feedback('errors', 'No feedback available from grader.') + + for tag in do_not_render: + if tag in feedback: + feedback.pop(tag) feedback_lst = sorted(feedback.items(), key=get_priority) feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst) else: + #This is a student_facing_error feedback_list_part1 = format_feedback('errors', response_items['feedback']) feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value) @@ -381,23 +392,26 @@ class OpenEndedModule(openendedchild.OpenEndedChild): rubric_feedback = "" feedback = self._convert_longform_feedback_to_html(response_items) + rubric_scores = [] if response_items['rubric_scores_complete'] == True: rubric_renderer 
= CombinedOpenEndedRubric(system, True) - success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml']) + rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml']) + success = rubric_dict['success'] + rubric_feedback = rubric_dict['html'] + rubric_scores = rubric_dict['rubric_scores'] if not response_items['success']: - return system.render_template("open_ended_error.html", - {'errors': feedback}) + return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR), + {'errors': feedback}) - feedback_template = system.render_template("open_ended_feedback.html", { + feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), { 'grader_type': response_items['grader_type'], 'score': "{0} / {1}".format(response_items['score'], self.max_score()), 'feedback': feedback, 'rubric_feedback': rubric_feedback }) - return feedback_template - + return feedback_template, rubric_scores def _parse_score_msg(self, score_msg, system, join_feedback=True): """ @@ -420,18 +434,30 @@ class OpenEndedModule(openendedchild.OpenEndedChild): correct: Correctness of submission (Boolean) score: Points to be assigned (numeric, can be float) """ - fail = {'valid': False, 'score': 0, 'feedback': ''} + fail = { + 'valid': False, + 'score': 0, + 'feedback': '', + 'rubric_scores': [[0]], + 'grader_types': [''], + 'feedback_items': [''], + 'feedback_dicts': [{}], + 'grader_ids': [0], + 'submission_ids': [0], + } try: score_result = json.loads(score_msg) except (TypeError, ValueError): - error_message = ("External grader message should be a JSON-serialized dict." + #This is a dev_facing_error + error_message = ("External open ended grader message should be a JSON-serialized dict." 
" Received score_msg = {0}".format(score_msg)) log.error(error_message) fail['feedback'] = error_message return fail if not isinstance(score_result, dict): - error_message = ("External grader message should be a JSON-serialized dict." + #This is a dev_facing_error + error_message = ("External open ended grader message should be a JSON-serialized dict." " Received score_result = {0}".format(score_result)) log.error(error_message) fail['feedback'] = error_message @@ -439,14 +465,20 @@ class OpenEndedModule(openendedchild.OpenEndedChild): for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']: if tag not in score_result: - error_message = ("External grader message is missing required tag: {0}" + #This is a dev_facing_error + error_message = ("External open ended grader message is missing required tag: {0}" .format(tag)) log.error(error_message) fail['feedback'] = error_message return fail - #This is to support peer grading + #This is to support peer grading if isinstance(score_result['score'], list): feedback_items = [] + rubric_scores = [] + grader_types = [] + feedback_dicts = [] + grader_ids = [] + submission_ids = [] for i in xrange(0, len(score_result['score'])): new_score_result = { 'score': score_result['score'][i], @@ -458,7 +490,17 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'rubric_scores_complete': score_result['rubric_scores_complete'][i], 'rubric_xml': score_result['rubric_xml'][i], } - feedback_items.append(self._format_feedback(new_score_result, system)) + feedback_template, rubric_score = self._format_feedback(new_score_result, system) + feedback_items.append(feedback_template) + rubric_scores.append(rubric_score) + grader_types.append(score_result['grader_type']) + try: + feedback_dict = json.loads(score_result['feedback'][i]) + except: + pass + feedback_dicts.append(feedback_dict) + grader_ids.append(score_result['grader_id'][i]) + submission_ids.append(score_result['submission_id']) if join_feedback: 
feedback = "".join(feedback_items) else: @@ -466,13 +508,33 @@ class OpenEndedModule(openendedchild.OpenEndedChild): score = int(median(score_result['score'])) else: #This is for instructor and ML grading - feedback = self._format_feedback(score_result, system) + feedback, rubric_score = self._format_feedback(score_result, system) score = score_result['score'] + rubric_scores = [rubric_score] + grader_types = [score_result['grader_type']] + feedback_items = [feedback] + try: + feedback_dict = json.loads(score_result['feedback']) + except: + pass + feedback_dicts = [feedback_dict] + grader_ids = [score_result['grader_id']] + submission_ids = [score_result['submission_id']] self.submission_id = score_result['submission_id'] self.grader_id = score_result['grader_id'] - return {'valid': True, 'score': score, 'feedback': feedback} + return { + 'valid': True, + 'score': score, + 'feedback': feedback, + 'rubric_scores': rubric_scores, + 'grader_types': grader_types, + 'feedback_items': feedback_items, + 'feedback_dicts': feedback_dicts, + 'grader_ids': grader_ids, + 'submission_ids': submission_ids, + } def latest_post_assessment(self, system, short_feedback=False, join_feedback=True): """ @@ -480,16 +542,16 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @param short_feedback: If the long feedback is wanted or not @return: Returns formatted feedback """ - if not self.history: + if not self.child_history: return "" - feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system, - join_feedback=join_feedback) + feedback_dict = self._parse_score_msg(self.child_history[-1].get('post_assessment', ""), system, + join_feedback=join_feedback) if not short_feedback: return feedback_dict['feedback'] if feedback_dict['valid'] else '' if feedback_dict['valid']: short_feedback = self._convert_longform_feedback_to_html( - json.loads(self.history[-1].get('post_assessment', ""))) + json.loads(self.child_history[-1].get('post_assessment', ""))) 
return short_feedback if feedback_dict['valid'] else '' def format_feedback_with_evaluation(self, system, feedback): @@ -499,7 +561,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @return: Rendered html """ context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50} - html = system.render_template('open_ended_evaluation.html', context) + html = system.render_template('{0}/open_ended_evaluation.html'.format(self.TEMPLATE_DIR), context) return html def handle_ajax(self, dispatch, get, system): @@ -521,7 +583,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): } if dispatch not in handlers: - return 'Error' + #This is a dev_facing_error + log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) + #This is a dev_facing_error + return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) before = self.get_progress() d = handlers[dispatch](get, system) @@ -539,7 +604,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): @param system: Modulesystem (needed to align with other ajax functions) @return: Returns the current state """ - state = self.state + state = self.child_state return {'state': state} def save_answer(self, get, system): @@ -555,22 +620,28 @@ class OpenEndedModule(openendedchild.OpenEndedChild): if closed: return msg - if self.state != self.INITIAL: + if self.child_state != self.INITIAL: return self.out_of_sync_error(get) # add new history element with answer and empty score and hint. 
success, get = self.append_image_to_student_answer(get) error_message = "" if success: - get['student_answer'] = OpenEndedModule.sanitize_html(get['student_answer']) - self.new_history_entry(get['student_answer']) - self.send_to_grader(get['student_answer'], system) - self.change_state(self.ASSESSING) + success, allowed_to_submit, error_message = self.check_if_student_can_submit() + if allowed_to_submit: + get['student_answer'] = OpenEndedModule.sanitize_html(get['student_answer']) + self.new_history_entry(get['student_answer']) + self.send_to_grader(get['student_answer'], system) + self.change_state(self.ASSESSING) + else: + #Error message already defined + success = False else: + #This is a student_facing_error error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box." return { - 'success': True, + 'success': success, 'error': error_message, 'student_response': get['student_answer'] } @@ -595,21 +666,24 @@ class OpenEndedModule(openendedchild.OpenEndedChild): Output: Rendered HTML """ #set context variables and render template - if self.state != self.INITIAL: + eta_string = None + if self.child_state != self.INITIAL: latest = self.latest_answer() previous_answer = latest if latest is not None else self.initial_display post_assessment = self.latest_post_assessment(system) score = self.latest_score() correct = 'correct' if self.is_submission_correct(score) else 'incorrect' + if self.child_state == self.ASSESSING: + eta_string = self.get_eta() else: post_assessment = "" correct = "" previous_answer = self.initial_display context = { - 'prompt': self.prompt, + 'prompt': self.child_prompt, 'previous_answer': previous_answer, - 'state': self.state, + 'state': self.child_state, 'allow_reset': self._allow_reset(), 'rows': 30, 'cols': 80, @@ -618,12 +692,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): 'child_type': 'openended', 'correct': correct, 
'accept_file_upload': self.accept_file_upload, + 'eta_message': eta_string, } - html = system.render_template('open_ended.html', context) + html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context) return html -class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): +class OpenEndedDescriptor(): """ Module for adding open ended response questions to courses """ @@ -635,8 +710,8 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "openended" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" + def __init__(self, system): + self.system =system @classmethod def definition_from_xml(cls, xml_object, system): @@ -650,13 +725,16 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): """ for child in ['openendedparam']: if len(xml_object.xpath(child)) != 1: - raise ValueError("Open Ended definition must include exactly one '{0}' tag".format(child)) + #This is a staff_facing_error + raise ValueError( + "Open Ended definition must include exactly one '{0}' tag. 
Contact the learning sciences group for assistance.".format( + child)) def parse(k): """Assumes that xml_object has child k""" return xml_object.xpath(k)[0] - return {'oeparam': parse('openendedparam'), } + return {'oeparam': parse('openendedparam')} def definition_to_xml(self, resource_fs): diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py similarity index 63% rename from common/lib/xmodule/xmodule/openendedchild.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py index ba2de5c930..b9341f0cbe 100644 --- a/common/lib/xmodule/xmodule/openendedchild.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py @@ -1,29 +1,19 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging -from lxml import etree -from lxml.html import rewrite_links from lxml.html.clean import Cleaner, autolink_html -from path import path -import os -import sys -import hashlib -import capa.xqueue_interface as xqueue_interface import re -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .xml_module import XmlDescriptor +from xmodule.capa_module import ComplexEncoder +import open_ended_image_submission +from xmodule.editing_module import EditingDescriptor +from xmodule.html_checker import check_html +from xmodule.progress import Progress +from xmodule.stringify import stringify_children +from xmodule.xml_module import XmlDescriptor from xmodule.modulestore import Location from capa.util import * -import open_ended_image_submission +from .peer_grading_service import PeerGradingService, MockPeerGradingService +import controller_query_service from datetime import datetime @@ -68,17 +58,21 @@ class 
OpenEndedChild(object): #This is used to tell students where they are at in the module HUMAN_NAMES = { - 'initial': 'Started', - 'assessing': 'Being scored', - 'post_assessment': 'Scoring finished', - 'done': 'Problem complete', + 'initial': 'Not started', + 'assessing': 'In progress', + 'post_assessment': 'Done', + 'done': 'Done', } - def __init__(self, system, location, definition, descriptor, static_data, + def __init__(self, system, location, definition, descriptor, static_data, instance_state=None, shared_state=None, **kwargs): # Load instance state + if instance_state is not None: - instance_state = json.loads(instance_state) + try: + instance_state = json.loads(instance_state) + except: + log.error("Could not load instance state for open ended. Setting it to nothing.: {0}".format(instance_state)) else: instance_state = {} @@ -86,24 +80,39 @@ class OpenEndedChild(object): # None for any element, and score and hint can be None for the last (current) # element. # Scores are on scale from 0 to max_score - self.history = instance_state.get('history', []) - self.state = instance_state.get('state', self.INITIAL) + self.child_history=instance_state.get('child_history',[]) + self.child_state=instance_state.get('child_state', self.INITIAL) + self.child_created = instance_state.get('child_created', False) + self.child_attempts = instance_state.get('child_attempts', 0) - self.created = instance_state.get('created', False) - - self.attempts = instance_state.get('attempts', 0) self.max_attempts = static_data['max_attempts'] - - self.prompt = static_data['prompt'] - self.rubric = static_data['rubric'] + self.child_prompt = static_data['prompt'] + self.child_rubric = static_data['rubric'] self.display_name = static_data['display_name'] self.accept_file_upload = static_data['accept_file_upload'] self.close_date = static_data['close_date'] + self.s3_interface = static_data['s3_interface'] + self.skip_basic_checks = static_data['skip_basic_checks'] + self._max_score = 
static_data['max_score'] # Used for progress / grading. Currently get credit just for # completion (doesn't matter if you self-assessed correct/incorrect). - self._max_score = static_data['max_score'] + if system.open_ended_grading_interface: + self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system) + self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface, + system) + else: + self.peer_gs = MockPeerGradingService() + self.controller_qs = None + + self.system = system + + self.location_string = location + try: + self.location_string = self.location_string.url() + except: + pass self.setup_response(system, location, definition, descriptor) @@ -127,43 +136,45 @@ class OpenEndedChild(object): if self.closed(): return True, { 'success': False, - 'error': 'This problem is now closed.' + #This is a student_facing_error + 'error': 'The problem close date has passed, and this problem is now closed.' } - elif self.attempts > self.max_attempts: + elif self.child_attempts > self.max_attempts: return True, { 'success': False, - 'error': 'Too many attempts.' + #This is a student_facing_error + 'error': 'You have attempted this problem {0} times. 
You are allowed {1} attempts.'.format( + self.child_attempts, self.max_attempts + ) } else: return False, {} - - def latest_answer(self): """Empty string if not available""" - if not self.history: + if not self.child_history: return "" - return self.history[-1].get('answer', "") + return self.child_history[-1].get('answer', "") def latest_score(self): """None if not available""" - if not self.history: + if not self.child_history: return None - return self.history[-1].get('score') + return self.child_history[-1].get('score') def latest_post_assessment(self, system): """Empty string if not available""" - if not self.history: + if not self.child_history: return "" - return self.history[-1].get('post_assessment', "") + return self.child_history[-1].get('post_assessment', "") @staticmethod def sanitize_html(answer): try: answer = autolink_html(answer) cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True, - host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS, - whitelist_tags=set(['embed', 'iframe', 'a', 'img'])) + host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS, + whitelist_tags=set(['embed', 'iframe', 'a', 'img'])) clean_html = cleaner.clean_html(answer) clean_html = re.sub(r'

        $', '', re.sub(r'^

        ', '', clean_html)) except: @@ -177,30 +188,30 @@ class OpenEndedChild(object): @return: None """ answer = OpenEndedChild.sanitize_html(answer) - self.history.append({'answer': answer}) + self.child_history.append({'answer': answer}) def record_latest_score(self, score): """Assumes that state is right, so we're adding a score to the latest history element""" - self.history[-1]['score'] = score + self.child_history[-1]['score'] = score def record_latest_post_assessment(self, post_assessment): """Assumes that state is right, so we're adding a score to the latest history element""" - self.history[-1]['post_assessment'] = post_assessment + self.child_history[-1]['post_assessment'] = post_assessment def change_state(self, new_state): """ A centralized place for state changes--allows for hooks. If the current state matches the old state, don't run any hooks. """ - if self.state == new_state: + if self.child_state == new_state: return - self.state = new_state + self.child_state = new_state - if self.state == self.DONE: - self.attempts += 1 + if self.child_state == self.DONE: + self.child_attempts += 1 def get_instance_state(self): """ @@ -209,17 +220,17 @@ class OpenEndedChild(object): state = { 'version': self.STATE_VERSION, - 'history': self.history, - 'state': self.state, + 'child_history': self.child_history, + 'child_state': self.child_state, 'max_score': self._max_score, - 'attempts': self.attempts, - 'created': False, + 'child_attempts': self.child_attempts, + 'child_created': False, } return json.dumps(state) def _allow_reset(self): """Can the module be reset?""" - return (self.state == self.DONE and self.attempts < self.max_attempts) + return (self.child_state == self.DONE and self.child_attempts < self.max_attempts) def max_score(self): """ @@ -251,9 +262,10 @@ class OpenEndedChild(object): ''' if self._max_score > 0: try: - return Progress(self.get_score()['score'], self._max_score) + return Progress(int(self.get_score()['score']), int(self._max_score)) 
except Exception as err: - log.exception("Got bad progress") + #This is a dev_facing_error + log.exception("Got bad progress from open ended child module. Max Score: {0}".format(self._max_score)) return None return None @@ -261,10 +273,12 @@ class OpenEndedChild(object): """ return dict out-of-sync error message, and also log. """ - log.warning("Assessment module state out sync. state: %r, get: %r. %s", - self.state, get, msg) + #This is a dev_facing_error + log.warning("Open ended child state out sync. state: %r, get: %r. %s", + self.child_state, get, msg) + #This is a student_facing_error return {'success': False, - 'error': 'The problem state got out-of-sync'} + 'error': 'The problem state got out-of-sync. Please try reloading the page.'} def get_html(self): """ @@ -287,7 +301,7 @@ class OpenEndedChild(object): @return: Boolean correct. """ correct = False - if(isinstance(score, (int, long, float, complex))): + if (isinstance(score, (int, long, float, complex))): score_ratio = int(score) / float(self.max_score()) correct = (score_ratio >= 0.66) return correct @@ -321,7 +335,8 @@ class OpenEndedChild(object): try: image_data.seek(0) - success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key) + success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, + self.s3_interface) except: log.exception("Could not upload image to S3.") @@ -379,9 +394,9 @@ class OpenEndedChild(object): #In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely #a config issue (development vs deployment). For now, just treat this as a "success" log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, " - "but the image was not able to be uploaded to S3. 
This could indicate a config" - "issue with this deployment, but it could also indicate a problem with S3 or with the" - "student image itself.") + "but the image was not able to be uploaded to S3. This could indicate a config" + "issue with this deployment, but it could also indicate a problem with S3 or with the" + "student image itself.") overall_success = True elif not has_file_to_upload: #If there is no file to upload, probably the student has embedded the link in the answer text @@ -410,3 +425,57 @@ class OpenEndedChild(object): success = True return success, string + + def check_if_student_can_submit(self): + location = self.location_string + + student_id = self.system.anonymous_student_id + success = False + allowed_to_submit = True + response = {} + #This is a student_facing_error + error_string = ("You need to peer grade {0} more in order to make another submission. " + "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.") + try: + response = self.peer_gs.get_data_for_location(self.location_string, student_id) + count_graded = response['count_graded'] + count_required = response['count_required'] + student_sub_count = response['student_sub_count'] + success = True + except: + #This is a dev_facing_error + log.error("Could not contact external open ended graders for location {0} and student {1}".format( + self.location_string, student_id)) + #This is a student_facing_error + error_message = "Could not contact the graders. Please notify course staff." 
+ return success, allowed_to_submit, error_message + if count_graded >= count_required: + return success, allowed_to_submit, "" + else: + allowed_to_submit = False + #This is a student_facing_error + error_message = error_string.format(count_required - count_graded, count_graded, count_required, + student_sub_count) + return success, allowed_to_submit, error_message + + def get_eta(self): + if self.controller_qs: + response = self.controller_qs.check_for_eta(self.location_string) + try: + response = json.loads(response) + except: + pass + else: + return "" + + success = response['success'] + if isinstance(success, basestring): + success = (success.lower() == "true") + + if success: + eta = controller_query_service.convert_seconds_to_human_readable(response['eta']) + eta_string = "Please check back for your response in at most {0}.".format(eta) + else: + eta_string = "" + + return eta_string diff --git a/common/lib/xmodule/xmodule/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py similarity index 78% rename from common/lib/xmodule/xmodule/peer_grading_service.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py index 8c50b6ff0a..85c7a98132 100644 --- a/common/lib/xmodule/xmodule/peer_grading_service.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py @@ -1,18 +1,7 @@ import json import logging -import requests -from requests.exceptions import RequestException, ConnectionError, HTTPError -import sys -#TODO: Settings import is needed now in order to specify the URL where to find the peer grading service. -#Eventually, the goal is to replace the global django settings import with settings specifically -#for this xmodule. There is no easy way to do this now, so piggybacking on the django settings -#makes sense. 
-from django.conf import settings - -from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError -from lxml import etree -from grading_service_module import GradingService, GradingServiceError +from .grading_service_module import GradingService log = logging.getLogger(__name__) @@ -25,9 +14,12 @@ class PeerGradingService(GradingService): """ Interface with the grading controller for peer grading """ + def __init__(self, config, system): config['system'] = system super(PeerGradingService, self).__init__(config) + self.url = config['url'] + config['peer_grading'] + self.login_url = self.url + '/login/' self.get_next_submission_url = self.url + '/get_next_submission/' self.save_grade_url = self.url + '/save_grade/' self.is_student_calibrated_url = self.url + '/is_student_calibrated/' @@ -39,16 +31,17 @@ class PeerGradingService(GradingService): self.system = system def get_data_for_location(self, problem_location, student_id): - response = self.get(self.get_data_for_location_url, - {'location': problem_location, 'student_id': student_id}) + params = {'location': problem_location, 'student_id': student_id} + response = self.get(self.get_data_for_location_url, params) return self.try_to_decode(response) def get_next_submission(self, problem_location, grader_id): response = self.get(self.get_next_submission_url, - {'location': problem_location, 'grader_id': grader_id}) + {'location': problem_location, 'grader_id': grader_id}) return self.try_to_decode(self._render_rubric(response)) - def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged): + def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, + submission_flagged): data = {'grader_id': grader_id, 'submission_id': submission_id, 'score': score, @@ -98,6 +91,7 @@ class PeerGradingService(GradingService): pass return text + """ This is a mock peer grading service that can be used 
for unit tests without making actual service calls to the grading controller @@ -115,7 +109,7 @@ class MockPeerGradingService(object): 'max_score': 4}) def save_grade(self, location, grader_id, submission_id, - score, feedback, submission_key): + score, feedback, submission_key, rubric_scores, submission_flagged): return json.dumps({'success': True}) def is_student_calibrated(self, problem_location, grader_id): @@ -131,7 +125,8 @@ class MockPeerGradingService(object): 'max_score': 4}) def save_calibration_essay(self, problem_location, grader_id, - calibration_essay_id, submission_key, score, feedback): + calibration_essay_id, submission_key, score, + feedback, rubric_scores): return {'success': True, 'actual_score': 2} def get_problem_list(self, course_id, grader_id): @@ -142,25 +137,3 @@ class MockPeerGradingService(object): json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5}) ]}) - -_service = None - - -def peer_grading_service(system): - """ - Return a peer grading service instance--if settings.MOCK_PEER_GRADING is True, - returns a mock one, otherwise a real one. - - Caches the result, so changing the setting after the first call to this - function will have no effect. 
- """ - global _service - if _service is not None: - return _service - - if settings.MOCK_PEER_GRADING: - _service = MockPeerGradingService() - else: - _service = PeerGradingService(settings.PEER_GRADING_INTERFACE, system) - - return _service diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py similarity index 63% rename from common/lib/xmodule/xmodule/self_assessment_module.py rename to common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py index c8d1fe7a28..5fb901d49c 100644 --- a/common/lib/xmodule/xmodule/self_assessment_module.py +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py @@ -1,27 +1,13 @@ -import copy -from fs.errors import ResourceNotFoundError -import itertools import json import logging from lxml import etree -from lxml.html import rewrite_links -from path import path -import os -import sys -from pkg_resources import resource_string - -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress -from .stringify import stringify_children -from .x_module import XModule -from .xml_module import XmlDescriptor -from xmodule.modulestore import Location +from xmodule.capa_module import ComplexEncoder +from xmodule.progress import Progress +from xmodule.stringify import stringify_children import openendedchild -from combined_open_ended_rubric import CombinedOpenEndedRubric +from .combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("mitx.courseware") @@ -43,6 +29,12 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): """ + TEMPLATE_DIR = "combinedopenended/selfassessment" + # states + INITIAL = 'initial' + ASSESSING = 'assessing' + REQUEST_HINT = 'request_hint' + DONE = 'done' def setup_response(self, system, location, definition, descriptor): """ @@ -53,10 
+45,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): @param descriptor: SelfAssessmentDescriptor @return: None """ - self.submit_message = definition['submitmessage'] - self.hint_prompt = definition['hintprompt'] - self.prompt = stringify_children(self.prompt) - self.rubric = stringify_children(self.rubric) + self.child_prompt = stringify_children(self.child_prompt) + self.child_rubric = stringify_children(self.child_rubric) def get_html(self, system): """ @@ -65,29 +55,26 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): @return: Rendered HTML """ #set context variables and render template - if self.state != self.INITIAL: + if self.child_state != self.INITIAL: latest = self.latest_answer() previous_answer = latest if latest is not None else '' else: previous_answer = '' context = { - 'prompt': self.prompt, + 'prompt': self.child_prompt, 'previous_answer': previous_answer, 'ajax_url': system.ajax_url, 'initial_rubric': self.get_rubric_html(system), - 'initial_hint': "", - 'initial_message': self.get_message_html(), - 'state': self.state, + 'state': self.child_state, 'allow_reset': self._allow_reset(), 'child_type': 'selfassessment', 'accept_file_upload': self.accept_file_upload, } - html = system.render_template('self_assessment_prompt.html', context) + html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context) return html - def handle_ajax(self, dispatch, get, system): """ This is called by courseware.module_render, to handle an AJAX call. @@ -106,9 +93,11 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): } if dispatch not in handlers: - return 'Error' + #This is a dev_facing_error + log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) + #This is a dev_facing_error + return json.dumps({'error': 'Error handling action. 
Please try again.', 'success': False}) - log.debug(get) before = self.get_progress() d = handlers[dispatch](get, system) after = self.get_progress() @@ -122,61 +111,54 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): """ Return the appropriate version of the rubric, based on the state. """ - if self.state == self.INITIAL: + if self.child_state == self.INITIAL: return '' rubric_renderer = CombinedOpenEndedRubric(system, False) - success, rubric_html = rubric_renderer.render_rubric(self.rubric) + rubric_dict = rubric_renderer.render_rubric(self.child_rubric) + success = rubric_dict['success'] + rubric_html = rubric_dict['html'] # we'll render it context = {'rubric': rubric_html, 'max_score': self._max_score, } - if self.state == self.ASSESSING: + if self.child_state == self.ASSESSING: context['read_only'] = False - elif self.state in (self.POST_ASSESSMENT, self.DONE): + elif self.child_state in (self.POST_ASSESSMENT, self.DONE): context['read_only'] = True else: - raise ValueError("Illegal state '%r'" % self.state) + #This is a dev_facing_error + raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state)) - return system.render_template('self_assessment_rubric.html', context) + return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context) def get_hint_html(self, system): """ Return the appropriate version of the hint view, based on state. 
""" - if self.state in (self.INITIAL, self.ASSESSING): + if self.child_state in (self.INITIAL, self.ASSESSING): return '' - if self.state == self.DONE: + if self.child_state == self.DONE: # display the previous hint latest = self.latest_post_assessment(system) hint = latest if latest is not None else '' else: hint = '' - context = {'hint_prompt': self.hint_prompt, - 'hint': hint} + context = {'hint': hint} - if self.state == self.POST_ASSESSMENT: + if self.child_state == self.POST_ASSESSMENT: context['read_only'] = False - elif self.state == self.DONE: + elif self.child_state == self.DONE: context['read_only'] = True else: - raise ValueError("Illegal state '%r'" % self.state) - - return system.render_template('self_assessment_hint.html', context) - - def get_message_html(self): - """ - Return the appropriate version of the message view, based on state. - """ - if self.state != self.DONE: - return "" - - return """

        {0}
        """.format(self.submit_message) + #This is a dev_facing_error + raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state)) + return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context) def save_answer(self, get, system): """ @@ -195,17 +177,23 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): if closed: return msg - if self.state != self.INITIAL: + if self.child_state != self.INITIAL: return self.out_of_sync_error(get) error_message = "" # add new history element with answer and empty score and hint. success, get = self.append_image_to_student_answer(get) if success: - get['student_answer'] = SelfAssessmentModule.sanitize_html(get['student_answer']) - self.new_history_entry(get['student_answer']) - self.change_state(self.ASSESSING) + success, allowed_to_submit, error_message = self.check_if_student_can_submit() + if allowed_to_submit: + get['student_answer'] = SelfAssessmentModule.sanitize_html(get['student_answer']) + self.new_history_entry(get['student_answer']) + self.change_state(self.ASSESSING) + else: + #Error message already defined + success = False else: + #This is a student_facing_error error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box." 
return { @@ -230,27 +218,35 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): 'message_html' only if success is true """ - if self.state != self.ASSESSING: + if self.child_state != self.ASSESSING: return self.out_of_sync_error(get) try: score = int(get['assessment']) + score_list = get.getlist('score_list[]') + for i in xrange(0, len(score_list)): + score_list[i] = int(score_list[i]) except ValueError: - return {'success': False, 'error': "Non-integer score value"} + #This is a dev_facing_error + log.error("Non-integer score value passed to save_assessment ,or no score list present.") + #This is a student_facing_error + return {'success': False, 'error': "Error saving your score. Please notify course staff."} + #Record score as assessment and rubric scores as post assessment self.record_latest_score(score) + self.record_latest_post_assessment(json.dumps(score_list)) d = {'success': True, } self.change_state(self.DONE) - d['message_html'] = self.get_message_html() d['allow_reset'] = self._allow_reset() - d['state'] = self.state + d['state'] = self.child_state return d def save_hint(self, get, system): ''' + Not used currently, as hints have been removed from the system. Save the hint. Returns a dict { 'success': bool, 'message_html': message_html, @@ -259,7 +255,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): with the error key only present if success is False and message_html only if True. ''' - if self.state != self.POST_ASSESSMENT: + if self.child_state != self.POST_ASSESSMENT: # Note: because we only ask for hints on wrong answers, may not have # the same number of hints and answers. 
return self.out_of_sync_error(get) @@ -268,11 +264,21 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): self.change_state(self.DONE) return {'success': True, - 'message_html': self.get_message_html(), + 'message_html': '', 'allow_reset': self._allow_reset()} + def latest_post_assessment(self, system): + latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system) + try: + rubric_scores = json.loads(latest_post_assessment) + except: + #This is a dev_facing_error + log.error("Cannot parse rubric scores in self assessment module from {0}".format(latest_post_assessment)) + rubric_scores = [] + return [rubric_scores] -class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): + +class SelfAssessmentDescriptor(): """ Module for adding self assessment questions to courses """ @@ -284,9 +290,8 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): has_score = True template_dir_name = "selfassessment" - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" - css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/html/edit.scss')]} + def __init__(self, system): + self.system =system @classmethod def definition_from_xml(cls, xml_object, system): @@ -299,29 +304,30 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): 'hintprompt': 'some-html' } """ - expected_children = ['submitmessage', 'hintprompt'] + expected_children = [] for child in expected_children: if len(xml_object.xpath(child)) != 1: - raise ValueError("Self assessment definition must include exactly one '{0}' tag".format(child)) + #This is a staff_facing_error + raise ValueError( + "Self assessment definition must include exactly one '{0}' tag. 
Contact the learning sciences group for assistance.".format( + child)) def parse(k): """Assumes that xml_object has child k""" return stringify_children(xml_object.xpath(k)[0]) - return {'submitmessage': parse('submitmessage'), - 'hintprompt': parse('hintprompt'), - } + return {} def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' elt = etree.Element('selfassessment') def add_child(k): - child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_str = '<{tag}>{body}'.format(tag=k, body=getattr(self, k)) child_node = etree.fromstring(child_str) elt.append(child_node) - for child in ['submitmessage', 'hintprompt']: + for child in []: add_child(child) return elt diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/xblock_field_types.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/xblock_field_types.py new file mode 100644 index 0000000000..2dcb7a4cda --- /dev/null +++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/xblock_field_types.py @@ -0,0 +1,14 @@ +from xblock.core import Integer, Float + + +class StringyFloat(Float): + """ + A model type that converts from string to floats when reading from json + """ + + def from_json(self, value): + try: + return float(value) + except: + return None + diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py index 20f71f3b3c..564356fcc3 100644 --- a/common/lib/xmodule/xmodule/peer_grading_module.py +++ b/common/lib/xmodule/xmodule/peer_grading_module.py @@ -1,39 +1,21 @@ -""" -This module provides an interface on the grading-service backend -for peer grading - -Use peer_grading_service() to get the version specified -in settings.PEER_GRADING_INTERFACE - -""" import json import logging -import requests -import sys -from django.conf import settings - -from combined_open_ended_rubric import CombinedOpenEndedRubric from lxml import etree -import copy -import itertools -import 
json -import logging -from lxml.html import rewrite_links -import os - +from datetime import datetime from pkg_resources import resource_string -from .capa_module import only_one, ComplexEncoder -from .editing_module import EditingDescriptor -from .html_checker import check_html -from progress import Progress +from .capa_module import ComplexEncoder from .stringify import stringify_children from .x_module import XModule -from .xml_module import XmlDescriptor +from xmodule.raw_module import RawDescriptor from xmodule.modulestore import Location +from xmodule.modulestore.django import modulestore +from .timeinfo import TimeInfo +from xblock.core import Object, Integer, Boolean, String, Scope +from xmodule.open_ended_grading_classes.xblock_field_types import StringyFloat -from peer_grading_service import peer_grading_service, GradingServiceError +from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, GradingServiceError, MockPeerGradingService log = logging.getLogger(__name__) @@ -43,58 +25,88 @@ TRUE_DICT = [True, "True", "true", "TRUE"] MAX_SCORE = 1 IS_GRADED = True +EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff." 
-class PeerGradingModule(XModule): + +class PeerGradingFields(object): + use_for_single_location = Boolean(help="Whether to use this for a single location or as a panel.", + default=USE_FOR_SINGLE_LOCATION, scope=Scope.settings) + link_to_location = String(help="The location this problem is linked to.", default=LINK_TO_LOCATION, + scope=Scope.settings) + is_graded = Boolean(help="Whether or not this module is scored.", default=IS_GRADED, scope=Scope.settings) + display_due_date_string = String(help="Due date that should be displayed.", default=None, scope=Scope.settings) + grace_period_string = String(help="Amount of grace to give on the due date.", default=None, scope=Scope.settings) + max_grade = Integer(help="The maximum grade that a student can receieve for this problem.", default=MAX_SCORE, + scope=Scope.settings) + student_data_for_location = Object(help="Student data for a given peer grading problem.", default=json.dumps({}), + scope=Scope.student_state) + weight = StringyFloat(help="How much to weight this problem by", scope=Scope.settings) + + +class PeerGradingModule(PeerGradingFields, XModule): _VERSION = 1 js = {'coffee': [resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'), resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/javascript_loader.coffee'), - ]} + ]} js_module_name = "PeerGrading" css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} + def __init__(self, system, location, descriptor, model_data): + XModule.__init__(self, system, 
location, descriptor, model_data) #We need to set the location here so the child modules can use it system.set('location', location) self.system = system - self.peer_gs = peer_grading_service(self.system) + if (self.system.open_ended_grading_interface): + self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system) + else: + self.peer_gs = MockPeerGradingService() - self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION) - if isinstance(self.use_for_single_location, basestring): - self.use_for_single_location = (self.use_for_single_location in TRUE_DICT) + if self.use_for_single_location in TRUE_DICT: + try: + self.linked_problem = modulestore().get_instance(self.system.course_id, self.link_to_location) + except: + log.error("Linked location {0} for peer grading module {1} does not exist".format( + self.link_to_location, self.location)) + raise + due_date = self.linked_problem._model_data.get('peer_grading_due', None) + if due_date: + self._model_data['due'] = due_date - self.is_graded = self.metadata.get('is_graded', IS_GRADED) - if isinstance(self.is_graded, basestring): - self.is_graded = (self.is_graded in TRUE_DICT) + try: + self.timeinfo = TimeInfo(self.display_due_date_string, self.grace_period_string) + except: + log.error("Error parsing due date information in location {0}".format(location)) + raise - self.link_to_location = self.metadata.get('link_to_location', USE_FOR_SINGLE_LOCATION) - if self.use_for_single_location == True: - #This will raise an exception if the location is invalid - link_to_location_object = Location(self.link_to_location) + self.display_due_date = self.timeinfo.display_due_date + + try: + self.student_data_for_location = json.loads(self.student_data_for_location) + except: + pass self.ajax_url = self.system.ajax_url if not self.ajax_url.endswith("/"): self.ajax_url = self.ajax_url + "/" - self.student_data_for_location = 
instance_state.get('student_data_for_location', {}) - self.max_grade = instance_state.get('max_grade', MAX_SCORE) if not isinstance(self.max_grade, (int, long)): #This could result in an exception, but not wrapping in a try catch block so it moves up the stack self.max_grade = int(self.max_grade) + def closed(self): + return self._closed(self.timeinfo) + + def _closed(self, timeinfo): + if timeinfo.close_date is not None and datetime.utcnow() > timeinfo.close_date: + return True + return False + + def _err_response(self, msg): """ Return a HttpResponse with a json dump with success=False, and the given error message. @@ -114,7 +126,9 @@ class PeerGradingModule(XModule): Needs to be implemented by inheritors. Renders the HTML that students see. @return: """ - if not self.use_for_single_location: + if self.closed(): + return self.peer_grading_closed() + if self.use_for_single_location not in TRUE_DICT: return self.peer_grading() else: return self.peer_grading_problem({'location': self.link_to_location})['html'] @@ -131,10 +145,13 @@ class PeerGradingModule(XModule): 'save_grade': self.save_grade, 'save_calibration_essay': self.save_calibration_essay, 'problem': self.peer_grading_problem, - } + } if dispatch not in handlers: - return 'Error' + #This is a dev_facing_error + log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) + #This is a dev_facing_error + return json.dumps({'error': 'Error handling action. 
Please try again.', 'success': False}) d = handlers[dispatch](get) @@ -142,7 +159,7 @@ class PeerGradingModule(XModule): def query_data_for_location(self): student_id = self.system.anonymous_student_id - location = self.system.location + location = self.link_to_location success = False response = {} @@ -152,6 +169,7 @@ class PeerGradingModule(XModule): count_required = response['count_required'] success = True except GradingServiceError: + #This is a dev_facing_error log.exception("Error getting location data from controller for location {0}, student {1}" .format(location, student_id)) @@ -161,7 +179,7 @@ class PeerGradingModule(XModule): pass def get_score(self): - if not self.use_for_single_location or not self.is_graded: + if self.use_for_single_location not in TRUE_DICT or self.is_graded not in TRUE_DICT: return None try: @@ -170,19 +188,21 @@ class PeerGradingModule(XModule): except: success, response = self.query_data_for_location() if not success: - log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format( - self.system.location, self.system.anonymous_student_id - )) + log.exception( + "No instance data found and could not get data from controller for loc {0} student {1}".format( + self.system.location.url(), self.system.anonymous_student_id + )) return None count_graded = response['count_graded'] count_required = response['count_required'] if count_required > 0 and count_graded >= count_required: + #Ensures that once a student receives a final score for peer grading, that it does not change. 
self.student_data_for_location = response score_dict = { 'score': int(count_graded >= count_required), 'total': self.max_grade, - } + } return score_dict @@ -193,7 +213,7 @@ class PeerGradingModule(XModule): randomization, and 5/7 on another ''' max_grade = None - if self.use_for_single_location and self.is_graded: + if self.use_for_single_location in TRUE_DICT and self.is_graded in TRUE_DICT: max_grade = self.max_grade return max_grade @@ -226,10 +246,12 @@ class PeerGradingModule(XModule): response = self.peer_gs.get_next_submission(location, grader_id) return response except GradingServiceError: + #This is a dev_facing_error log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}" .format(self.peer_gs.url, location, grader_id)) + #This is a student_facing_error return {'success': False, - 'error': 'Could not connect to grading service'} + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR} def save_grade(self, get): """ @@ -247,7 +269,8 @@ class PeerGradingModule(XModule): error: if there was an error in the submission, this is the error message """ - required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged']) + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', + 'submission_flagged']) success, message = self._check_required(get, required) if not success: return self._err_response(message) @@ -263,17 +286,19 @@ class PeerGradingModule(XModule): try: response = self.peer_gs.save_grade(location, grader_id, submission_id, - score, feedback, submission_key, rubric_scores, submission_flagged) + score, feedback, submission_key, rubric_scores, submission_flagged) return response except GradingServiceError: - log.exception("""Error saving grade. server url: {0}, location: {1}, submission_id:{2}, + #This is a dev_facing_error + log.exception("""Error saving grade to open ended grading service. 
server url: {0}, location: {1}, submission_id:{2}, submission_key: {3}, score: {4}""" .format(self.peer_gs.url, - location, submission_id, submission_key, score) + location, submission_id, submission_key, score) ) + #This is a student_facing_error return { 'success': False, - 'error': 'Could not connect to grading service' + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR } def is_student_calibrated(self, get): @@ -306,11 +331,13 @@ class PeerGradingModule(XModule): response = self.peer_gs.is_student_calibrated(location, grader_id) return response except GradingServiceError: - log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}" + #This is a dev_facing_error + log.exception("Error from open ended grading service. server url: {0}, grader_id: {0}, location: {1}" .format(self.peer_gs.url, grader_id, location)) + #This is a student_facing_error return { 'success': False, - 'error': 'Could not connect to grading service' + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR } def show_calibration_essay(self, get): @@ -349,16 +376,20 @@ class PeerGradingModule(XModule): response = self.peer_gs.show_calibration_essay(location, grader_id) return response except GradingServiceError: - log.exception("Error from grading service. server url: {0}, location: {0}" + #This is a dev_facing_error + log.exception("Error from open ended grading service. server url: {0}, location: {0}" .format(self.peer_gs.url, location)) + #This is a student_facing_error return {'success': False, - 'error': 'Could not connect to grading service'} + 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR} # if we can't parse the rubric into HTML, except etree.XMLSyntaxError: + #This is a dev_facing_error log.exception("Cannot parse rubric string. Raw string: {0}" .format(rubric)) + #This is a student_facing_error return {'success': False, - 'error': 'Error displaying submission'} + 'error': 'Error displaying submission. 
Please notify course staff.'} def save_calibration_essay(self, get): @@ -394,11 +425,25 @@ class PeerGradingModule(XModule): try: response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id, - submission_key, score, feedback, rubric_scores) + submission_key, score, feedback, rubric_scores) return response except GradingServiceError: - log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id)) - return self._err_response('Could not connect to grading service') + #This is a dev_facing_error + log.exception( + "Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format( + location, submission_id, submission_key, grader_id)) + #This is a student_facing_error + return self._err_response('There was an error saving your score. Please notify course staff.') + + def peer_grading_closed(self): + ''' + Show the Peer grading closed template + ''' + html = self.system.render_template('peer_grading/peer_grading_closed.html', { + 'use_for_single_location': self.use_for_single_location + }) + return html + def peer_grading(self, get=None): ''' @@ -419,12 +464,52 @@ class PeerGradingModule(XModule): problem_list = problem_list_dict['problem_list'] except GradingServiceError: - error_text = "Error occured while contacting the grading service" + #This is a student_facing_error + error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR + log.error(error_text) success = False # catch error if if the json loads fails except ValueError: - error_text = "Could not get problem list" + #This is a student_facing_error + error_text = "Could not get list of problems to peer grade. Please notify course staff." 
+ log.error(error_text) success = False + except: + log.exception("Could not contact peer grading service.") + success = False + + + def _find_corresponding_module_for_location(location): + ''' + find the peer grading module that links to the given location + ''' + try: + return modulestore().get_instance(self.system.course_id, location) + except: + # the linked problem doesn't exist + log.error("Problem {0} does not exist in this course".format(location)) + raise + + + for problem in problem_list: + problem_location = problem['location'] + descriptor = _find_corresponding_module_for_location(problem_location) + if descriptor: + problem['due'] = descriptor._model_data.get('peer_grading_due', None) + grace_period_string = descriptor._model_data.get('graceperiod', None) + try: + problem_timeinfo = TimeInfo(problem['due'], grace_period_string) + except: + log.error("Malformed due date or grace period string for location {0}".format(problem_location)) + raise + if self._closed(problem_timeinfo): + problem['closed'] = True + else: + problem['closed'] = False + else: + # if we can't find the due date, assume that it doesn't have one + problem['due'] = None + problem['closed'] = False ajax_url = self.ajax_url html = self.system.render_template('peer_grading/peer_grading.html', { @@ -436,7 +521,7 @@ class PeerGradingModule(XModule): # Checked above 'staff_access': False, 'use_single_location': self.use_for_single_location, - }) + }) return html @@ -444,9 +529,12 @@ class PeerGradingModule(XModule): ''' Show individual problem interface ''' - if get == None or get.get('location') == None: - if not self.use_for_single_location: + if get is None or get.get('location') is None: + if self.use_for_single_location not in TRUE_DICT: #This is an error case, because it must be set to use a single location to be called without get parameters + #This is a dev_facing_error + log.error( + "Peer grading problem in peer_grading_module called with no get parameters, but 
use_for_single_location is False.") return {'html': "", 'success': False} problem_location = self.link_to_location @@ -462,7 +550,7 @@ class PeerGradingModule(XModule): # Checked above 'staff_access': False, 'use_single_location': self.use_for_single_location, - }) + }) return {'html': html, 'success': True} @@ -475,65 +563,19 @@ class PeerGradingModule(XModule): state = { 'student_data_for_location': self.student_data_for_location, - } + } return json.dumps(state) -class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor): +class PeerGradingDescriptor(PeerGradingFields, RawDescriptor): """ - Module for adding combined open ended questions + Module for adding peer grading questions """ - mako_template = "widgets/html-edit.html" + mako_template = "widgets/raw-edit.html" module_class = PeerGradingModule filename_extension = "xml" stores_state = True has_score = True template_dir_name = "peer_grading" - - js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} - js_module_name = "HTMLEditingDescriptor" - - @classmethod - def definition_from_xml(cls, xml_object, system): - """ - Pull out the individual tasks, the rubric, and the prompt, and parse - - Returns: - { - 'rubric': 'some-html', - 'prompt': 'some-html', - 'task_xml': dictionary of xml strings, - } - """ - log.debug("In definition") - expected_children = [] - for child in expected_children: - if len(xml_object.xpath(child)) == 0: - raise ValueError("Peer grading definition must include at least one '{0}' tag".format(child)) - - def parse_task(k): - """Assumes that xml_object has child k""" - return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))] - - def parse(k): - """Assumes that xml_object has child k""" - return xml_object.xpath(k)[0] - - return {} - - - def definition_to_xml(self, resource_fs): - '''Return an xml element representing this definition.''' - elt = etree.Element('peergrading') - - def add_child(k): - child_str = 
'<{tag}>{body}'.format(tag=k, body=self.definition[k]) - child_node = etree.fromstring(child_str) - elt.append(child_node) - - for child in ['task']: - add_child(child) - - return elt diff --git a/common/lib/xmodule/xmodule/plugin.py b/common/lib/xmodule/xmodule/plugin.py new file mode 100644 index 0000000000..5cf9c647aa --- /dev/null +++ b/common/lib/xmodule/xmodule/plugin.py @@ -0,0 +1,64 @@ +import pkg_resources +import logging + +log = logging.getLogger(__name__) + +class PluginNotFoundError(Exception): + pass + + +class Plugin(object): + """ + Base class for a system that uses entry_points to load plugins. + + Implementing classes are expected to have the following attributes: + + entry_point: The name of the entry point to load plugins from + """ + + _plugin_cache = None + + @classmethod + def load_class(cls, identifier, default=None): + """ + Loads a single class instance specified by identifier. If identifier + specifies more than a single class, then logs a warning and returns the + first class identified. + + If default is not None, will return default if no entry_point matching + identifier is found. Otherwise, will raise a ModuleMissingError + """ + if cls._plugin_cache is None: + cls._plugin_cache = {} + + if identifier not in cls._plugin_cache: + identifier = identifier.lower() + classes = list(pkg_resources.iter_entry_points( + cls.entry_point, name=identifier)) + + if len(classes) > 1: + log.warning("Found multiple classes for {entry_point} with " + "identifier {id}: {classes}. 
" + "Returning the first one.".format( + entry_point=cls.entry_point, + id=identifier, + classes=", ".join( + class_.module_name for class_ in classes))) + + if len(classes) == 0: + if default is not None: + return default + raise PluginNotFoundError(identifier) + + cls._plugin_cache[identifier] = classes[0].load() + return cls._plugin_cache[identifier] + + @classmethod + def load_classes(cls): + """ + Returns a list of containing the identifiers and their corresponding classes for all + of the available instances of this plugin + """ + return [(class_.name, class_.load()) + for class_ + in pkg_resources.iter_entry_points(cls.entry_point)] diff --git a/common/lib/xmodule/xmodule/poll_module.py b/common/lib/xmodule/xmodule/poll_module.py new file mode 100644 index 0000000000..0fb3bfb496 --- /dev/null +++ b/common/lib/xmodule/xmodule/poll_module.py @@ -0,0 +1,205 @@ +"""Poll module is ungraded xmodule used by students to +to do set of polls. + +On the client side we show: +If student does not yet anwered - Question with set of choices. +If student have answered - Question with statistics for each answers. + +Student can't change his answer. 
+""" + +import cgi +import json +import logging +from copy import deepcopy +from collections import OrderedDict + +from lxml import etree +from pkg_resources import resource_string + +from xmodule.x_module import XModule +from xmodule.stringify import stringify_children +from xmodule.mako_module import MakoModuleDescriptor +from xmodule.xml_module import XmlDescriptor +from xblock.core import Scope, String, Object, Boolean, List + +log = logging.getLogger(__name__) + + +class PollFields(object): + # Name of poll to use in links to this poll + display_name = String(help="Display name for this module", scope=Scope.settings) + + voted = Boolean(help="Whether this student has voted on the poll", scope=Scope.student_state, default=False) + poll_answer = String(help="Student answer", scope=Scope.student_state, default='') + poll_answers = Object(help="All possible answers for the poll fro other students", scope=Scope.content) + + answers = List(help="Poll answers from xml", scope=Scope.content, default=[]) + question = String(help="Poll question", scope=Scope.content, default='') + + +class PollModule(PollFields, XModule): + """Poll Module""" + js = { + 'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')], + 'js': [resource_string(__name__, 'js/src/poll/logme.js'), + resource_string(__name__, 'js/src/poll/poll.js'), + resource_string(__name__, 'js/src/poll/poll_main.js')] + } + css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]} + js_module_name = "Poll" + + def handle_ajax(self, dispatch, get): + """Ajax handler. + + Args: + dispatch: string request slug + get: dict request get parameters + + Returns: + json string + """ + if dispatch in self.poll_answers and not self.voted: + # FIXME: fix this, when xblock will support mutable types. + # Now we use this hack. 
+ temp_poll_answers = self.poll_answers + temp_poll_answers[dispatch] += 1 + self.poll_answers = temp_poll_answers + + self.voted = True + self.poll_answer = dispatch + return json.dumps({'poll_answers': self.poll_answers, + 'total': sum(self.poll_answers.values()), + 'callback': {'objectName': 'Conditional'} + }) + elif dispatch == 'get_state': + return json.dumps({'poll_answer': self.poll_answer, + 'poll_answers': self.poll_answers, + 'total': sum(self.poll_answers.values()) + }) + elif dispatch == 'reset_poll' and self.voted and \ + self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false': + self.voted = False + + # FIXME: fix this, when xblock will support mutable types. + # Now we use this hack. + temp_poll_answers = self.poll_answers + temp_poll_answers[self.poll_answer] -= 1 + self.poll_answers = temp_poll_answers + + self.poll_answer = '' + return json.dumps({'status': 'success'}) + else: # return error message + return json.dumps({'error': 'Unknown Command!'}) + + def get_html(self): + """Renders parameters to template.""" + params = { + 'element_id': self.location.html_id(), + 'element_class': self.location.category, + 'ajax_url': self.system.ajax_url, + 'configuration_json': self.dump_poll(), + } + self.content = self.system.render_template('poll.html', params) + return self.content + + def dump_poll(self): + """Dump poll information. + + Returns: + string - Serialize json. + """ + # FIXME: hack for resolving caching `default={}` during definition + # poll_answers field + if self.poll_answers is None: + self.poll_answers = {} + + answers_to_json = OrderedDict() + + # FIXME: fix this, when xblock support mutable types. + # Now we use this hack. + temp_poll_answers = self.poll_answers + + # Fill self.poll_answers, prepare data for template context. + for answer in self.answers: + # Set default count for answer = 0. 
+ if answer['id'] not in temp_poll_answers: + temp_poll_answers[answer['id']] = 0 + answers_to_json[answer['id']] = cgi.escape(answer['text']) + self.poll_answers = temp_poll_answers + + return json.dumps({'answers': answers_to_json, + 'question': cgi.escape(self.question), + # to show answered poll after reload: + 'poll_answer': self.poll_answer, + 'poll_answers': self.poll_answers if self.voted else {}, + 'total': sum(self.poll_answers.values()) if self.voted else 0, + 'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()}) + + +class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor): + _tag_name = 'poll_question' + _child_tag_name = 'answer' + + module_class = PollModule + template_dir_name = 'poll' + stores_state = True + + @classmethod + def definition_from_xml(cls, xml_object, system): + """Pull out the data into dictionary. + + Args: + xml_object: xml from file. + system: `system` object. + + Returns: + (definition, children) - tuple + definition - dict: + { + 'answers': , + 'question': + } + """ + # Check for presense of required tags in xml. 
+ if len(xml_object.xpath(cls._child_tag_name)) == 0: + raise ValueError("Poll_question definition must include \ + at least one 'answer' tag") + + xml_object_copy = deepcopy(xml_object) + answers = [] + for element_answer in xml_object_copy.findall(cls._child_tag_name): + answer_id = element_answer.get('id', None) + if answer_id: + answers.append({ + 'id': answer_id, + 'text': stringify_children(element_answer) + }) + xml_object_copy.remove(element_answer) + + definition = { + 'answers': answers, + 'question': stringify_children(xml_object_copy) + } + children = [] + + return (definition, children) + + def definition_to_xml(self, resource_fs): + """Return an xml element representing to this definition.""" + poll_str = '<{tag_name}>{text}'.format( + tag_name=self._tag_name, text=self.question) + xml_object = etree.fromstring(poll_str) + xml_object.set('display_name', self.display_name) + + def add_child(xml_obj, answer): + child_str = '<{tag_name} id="{id}">{text}'.format( + tag_name=self._child_tag_name, id=answer['id'], + text=answer['text']) + child_node = etree.fromstring(child_str) + xml_object.append(child_node) + + for answer in self.answers: + add_child(xml_object, answer) + + return xml_object diff --git a/common/lib/xmodule/xmodule/randomize_module.py b/common/lib/xmodule/xmodule/randomize_module.py index b336789193..6620ab3cf7 100644 --- a/common/lib/xmodule/xmodule/randomize_module.py +++ b/common/lib/xmodule/xmodule/randomize_module.py @@ -1,19 +1,19 @@ -import json import logging import random -from xmodule.mako_module import MakoModuleDescriptor from xmodule.x_module import XModule -from xmodule.xml_module import XmlDescriptor -from xmodule.modulestore import Location from xmodule.seq_module import SequenceDescriptor -from pkg_resources import resource_string +from xblock.core import Scope, Integer log = logging.getLogger('mitx.' 
+ __name__) -class RandomizeModule(XModule): +class RandomizeFields(object): + choice = Integer(help="Which random child was chosen", scope=Scope.student_state) + + +class RandomizeModule(RandomizeFields, XModule): """ Chooses a random child module. Chooses the same one every time for each student. @@ -35,30 +35,23 @@ class RandomizeModule(XModule): grading interaction is a tangle between super and subclasses of descriptors and modules. """ - - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) # NOTE: calling self.get_children() creates a circular reference-- # it calls get_child_descriptors() internally, but that doesn't work until # we've picked a choice num_choices = len(self.descriptor.get_children()) - self.choice = None - if instance_state is not None: - state = json.loads(instance_state) - self.choice = state.get('choice', None) - if self.choice > num_choices: - # Oops. Children changed. Reset. - self.choice = None + if self.choice > num_choices: + # Oops. Children changed. Reset. + self.choice = None if self.choice is None: # choose one based on the system seed, or randomly if that's not available if num_choices > 0: - if system.seed is not None: - self.choice = system.seed % num_choices + if self.system.seed is not None: + self.choice = self.system.seed % num_choices else: self.choice = random.randrange(0, num_choices) @@ -72,11 +65,6 @@ class RandomizeModule(XModule): self.child_descriptor = None self.child = None - - def get_instance_state(self): - return json.dumps({'choice': self.choice}) - - def get_child_descriptors(self): """ For grading--return just the chosen child. 
@@ -98,7 +86,7 @@ class RandomizeModule(XModule): return self.child.get_icon_class() if self.child else 'other' -class RandomizeDescriptor(SequenceDescriptor): +class RandomizeDescriptor(RandomizeFields, SequenceDescriptor): # the editing interface can be the same as for sequences -- just a container module_class = RandomizeModule @@ -107,6 +95,7 @@ class RandomizeDescriptor(SequenceDescriptor): stores_state = True def definition_to_xml(self, resource_fs): + xml_object = etree.Element('randomize') for child in self.get_children(): xml_object.append( diff --git a/common/lib/xmodule/xmodule/raw_module.py b/common/lib/xmodule/xmodule/raw_module.py index 4a2bfbceaf..2c6e157018 100644 --- a/common/lib/xmodule/xmodule/raw_module.py +++ b/common/lib/xmodule/xmodule/raw_module.py @@ -3,6 +3,7 @@ from xmodule.editing_module import XMLEditingDescriptor from xmodule.xml_module import XmlDescriptor import logging import sys +from xblock.core import String, Scope log = logging.getLogger(__name__) @@ -12,17 +13,19 @@ class RawDescriptor(XmlDescriptor, XMLEditingDescriptor): Module that provides a raw editing view of its data and children. It requires that the definition xml is valid. """ + data = String(help="XML data for the module", scope=Scope.content) + @classmethod def definition_from_xml(cls, xml_object, system): - return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')} + return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, [] def definition_to_xml(self, resource_fs): try: - return etree.fromstring(self.definition['data']) + return etree.fromstring(self.data) except etree.XMLSyntaxError as err: # Can't recover here, so just add some info and # re-raise - lines = self.definition['data'].split('\n') + lines = self.data.split('\n') line, offset = err.position msg = ("Unable to create xml for problem {loc}. 
" "Context: '{context}'".format( diff --git a/common/lib/xmodule/xmodule/schematic_module.py b/common/lib/xmodule/xmodule/schematic_module.py index 21dd33a897..d15d629c24 100644 --- a/common/lib/xmodule/xmodule/schematic_module.py +++ b/common/lib/xmodule/xmodule/schematic_module.py @@ -1,6 +1,6 @@ import json -from x_module import XModule, XModuleDescriptor +from .x_module import XModule, XModuleDescriptor class ModuleDescriptor(XModuleDescriptor): diff --git a/common/lib/xmodule/xmodule/seq_module.py b/common/lib/xmodule/xmodule/seq_module.py index 36011744f5..f8e982f1a0 100644 --- a/common/lib/xmodule/xmodule/seq_module.py +++ b/common/lib/xmodule/xmodule/seq_module.py @@ -8,6 +8,7 @@ from xmodule.xml_module import XmlDescriptor from xmodule.x_module import XModule from xmodule.progress import Progress from xmodule.exceptions import NotFoundError +from xblock.core import Integer, Scope from pkg_resources import resource_string log = logging.getLogger(__name__) @@ -17,7 +18,15 @@ log = logging.getLogger(__name__) class_priority = ['video', 'problem'] -class SequenceModule(XModule): +class SequenceFields(object): + has_children = True + + # NOTE: Position is 1-indexed. This is silly, but there are now student + # positions saved on prod, so it's not easy to fix. + position = Integer(help="Last tab viewed in this sequence", scope=Scope.student_state) + + +class SequenceModule(SequenceFields, XModule): ''' Layout module which lays out content in a temporal sequence ''' js = {'coffee': [resource_string(__name__, @@ -26,22 +35,13 @@ class SequenceModule(XModule): css = {'scss': [resource_string(__name__, 'css/sequence/display.scss')]} js_module_name = "Sequence" - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - # NOTE: Position is 1-indexed. 
This is silly, but there are now student - # positions saved on prod, so it's not easy to fix. - self.position = 1 - if instance_state is not None: - state = json.loads(instance_state) - if 'position' in state: - self.position = int(state['position']) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) # if position is specified in system, then use that instead - if system.get('position'): - self.position = int(system.get('position')) + if self.system.get('position'): + self.position = int(self.system.get('position')) self.rendered = False @@ -70,6 +70,11 @@ class SequenceModule(XModule): raise NotFoundError('Unexpected dispatch type') def render(self): + # If we're rendering this sequence, but no position is set yet, + # default the position to the first element + if self.position is None: + self.position = 1 + if self.rendered: return ## Returns a set of all types of all sub-children @@ -79,9 +84,9 @@ class SequenceModule(XModule): childinfo = { 'content': child.get_html(), 'title': "\n".join( - grand_child.display_name.strip() + grand_child.display_name for grand_child in child.get_children() - if 'display_name' in grand_child.metadata + if grand_child.display_name is not None ), 'progress_status': Progress.to_js_status_str(progress), 'progress_detail': Progress.to_js_detail_str(progress), @@ -89,7 +94,7 @@ class SequenceModule(XModule): 'id': child.id, } if childinfo['title'] == '': - childinfo['title'] = child.metadata.get('display_name', '') + childinfo['title'] = child.display_name_with_default contents.append(childinfo) params = {'items': contents, @@ -112,11 +117,11 @@ class SequenceModule(XModule): return new_class -class SequenceDescriptor(MakoModuleDescriptor, XmlDescriptor): +class SequenceDescriptor(SequenceFields, MakoModuleDescriptor, XmlDescriptor): mako_template = 'widgets/sequence-edit.html' module_class = SequenceModule - stores_state = True # For remembering where in the sequence the student is + stores_state = True 
# For remembering where in the sequence the student is js = {'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')]} js_module_name = "SequenceDescriptor" @@ -132,7 +137,7 @@ class SequenceDescriptor(MakoModuleDescriptor, XmlDescriptor): if system.error_tracker is not None: system.error_tracker("ERROR: " + str(e)) continue - return {'children': children} + return {}, children def definition_to_xml(self, resource_fs): xml_object = etree.Element('sequential') diff --git a/common/lib/xmodule/xmodule/stringify.py b/common/lib/xmodule/xmodule/stringify.py index 5a640e91b1..35587d3b09 100644 --- a/common/lib/xmodule/xmodule/stringify.py +++ b/common/lib/xmodule/xmodule/stringify.py @@ -1,4 +1,5 @@ -from itertools import chain +# -*- coding: utf-8 -*- + from lxml import etree diff --git a/common/lib/xmodule/xmodule/template_module.py b/common/lib/xmodule/xmodule/template_module.py index 5f376945eb..d79d2a163e 100644 --- a/common/lib/xmodule/xmodule/template_module.py +++ b/common/lib/xmodule/xmodule/template_module.py @@ -28,11 +28,6 @@ class CustomTagModule(XModule): More information given in the text """ - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - def get_html(self): return self.descriptor.rendered_html @@ -62,19 +57,15 @@ class CustomTagDescriptor(RawDescriptor): # cdodge: look up the template as a module template_loc = self.location._replace(category='custom_tag_template', name=template_name) - template_module = self.system.load_item(template_loc) - template_module_data = template_module.definition['data'] + template_module = modulestore().get_instance(system.course_id, template_loc) + template_module_data = template_module.data template = Template(template_module_data) return template.render(**params) - def __init__(self, system, definition, **kwargs): - '''Render and save 
the template for this descriptor instance''' - super(CustomTagDescriptor, self).__init__(system, definition, **kwargs) - @property def rendered_html(self): - return self.render_template(self.system, self.definition['data']) + return self.render_template(self.system, self.data) def export_to_file(self): """ diff --git a/common/lib/xmodule/xmodule/templates/annotatable/default.yaml b/common/lib/xmodule/xmodule/templates/annotatable/default.yaml new file mode 100644 index 0000000000..31dd489fb4 --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/annotatable/default.yaml @@ -0,0 +1,20 @@ +--- +metadata: + display_name: 'Annotation' +data: | + + +

        Enter your (optional) instructions for the exercise in HTML format.

        +

        Annotations are specified by an <annotation> tag which may have the following attributes:

        +
          +
        • title (optional). Title of the annotation. Defaults to Commentary if omitted.
        • +
        • body (required). Text of the annotation.
        • +
        • problem (optional). Numeric index of the problem associated with this annotation. This is a zero-based index, so the first problem on the page would have problem="0".
        • +
        • highlight (optional). Possible values: yellow, red, orange, green, blue, or purple. Defaults to yellow if this attribute is omitted.
        • +
        +
        +

        Add your HTML with annotation spans here.

        +

        Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut sodales laoreet est, egestas gravida felis egestas nec. Aenean at volutpat erat. Cras commodo viverra nibh in aliquam.

        +

        Nulla facilisi. Pellentesque id vestibulum libero. Suspendisse potenti. Morbi scelerisque nisi vitae felis dictum mattis. Nam sit amet magna elit. Nullam volutpat cursus est, sit amet sagittis odio vulputate et. Curabitur euismod, orci in vulputate imperdiet, augue lorem tempor purus, id aliquet augue turpis a est. Aenean a sagittis libero. Praesent fringilla pretium magna, non condimentum risus elementum nec. Pellentesque faucibus elementum pharetra. Pellentesque vitae metus eros.

        +
        +children: [] diff --git a/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml new file mode 100644 index 0000000000..515d9071b1 --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/combinedopenended/default.yaml @@ -0,0 +1,44 @@ +--- +metadata: + display_name: Open Ended Response + max_attempts: 1 + is_graded: False + version: 1 + display_name: Open Ended Response + skip_spelling_checks: False + accept_file_upload: False + weight: "" +data: | + + + + + Category 1 + + + + + + +

        Why is the sky blue?

        +
        + + + + + + + Enter essay here. + This is the answer. + {"grader_settings" : "peer_grading.conf", "problem_id" : "700x/Demo"} + + + +
        + + +children: [] diff --git a/common/lib/xmodule/xmodule/templates/course/empty.yaml b/common/lib/xmodule/xmodule/templates/course/empty.yaml index cb2f3bcec6..89f1bfcf21 100644 --- a/common/lib/xmodule/xmodule/templates/course/empty.yaml +++ b/common/lib/xmodule/xmodule/templates/course/empty.yaml @@ -2,5 +2,123 @@ metadata: display_name: Empty start: 2020-10-10T10:00 + checklists: [ + {"short_description" : "Getting Started With Studio", + "items" : [{"short_description": "Add Course Team Members", + "long_description": "Grant your collaborators permission to edit your course so you can work together.", + "is_checked": false, + "action_url": "ManageUsers", + "action_text": "Edit Course Team", + "action_external": false}, + {"short_description": "Set Important Dates for Your Course", + "long_description": "Establish your course's student enrollment and launch dates on the Schedule and Details page.", + "is_checked": false, + "action_url": "SettingsDetails", + "action_text": "Edit Course Details & Schedule", + "action_external": false}, + {"short_description": "Draft Your Course's Grading Policy", + "long_description": "Set up your assignment types and grading policy even if you haven't created all your assignments.", + "is_checked": false, + "action_url": "SettingsGrading", + "action_text": "Edit Grading Settings", + "action_external": false}, + {"short_description": "Explore the Other Studio Checklists", + "long_description": "Discover other available course authoring tools, and find help when you need it.", + "is_checked": false, + "action_url": "", + "action_text": "", + "action_external": false}] + }, + {"short_description" : "Draft a Rough Course Outline", + "items" : [{"short_description": "Create Your First Section and Subsection", + "long_description": "Use your course outline to build your first Section and Subsection.", + "is_checked": false, + "action_url": "CourseOutline", + "action_text": "Edit Course Outline", + "action_external": false}, + 
{"short_description": "Set Section Release Dates", + "long_description": "Specify the release dates for each Section in your course. Sections become visible to students on their release dates.", + "is_checked": false, + "action_url": "CourseOutline", + "action_text": "Edit Course Outline", + "action_external": false}, + {"short_description": "Designate a Subsection as Graded", + "long_description": "Set a Subsection to be graded as a specific assignment type. Assignments within graded Subsections count toward a student's final grade.", + "is_checked": false, + "action_url": "CourseOutline", + "action_text": "Edit Course Outline", + "action_external": false}, + {"short_description": "Reordering Course Content", + "long_description": "Use drag and drop to reorder the content in your course.", + "is_checked": false, + "action_url": "CourseOutline", + "action_text": "Edit Course Outline", + "action_external": false}, + {"short_description": "Renaming Sections", + "long_description": "Rename Sections by clicking the Section name from the Course Outline.", + "is_checked": false, + "action_url": "CourseOutline", + "action_text": "Edit Course Outline", + "action_external": false}, + {"short_description": "Deleting Course Content", + "long_description": "Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is no Undo function.", + "is_checked": false, + "action_url": "CourseOutline", + "action_text": "Edit Course Outline", + "action_external": false}, + {"short_description": "Add an Instructor-Only Section to Your Outline", + "long_description": "Some course authors find using a section for unsorted, in-progress work useful. 
To do this, create a section and set the release date to the distant future.", + "is_checked": false, + "action_url": "CourseOutline", + "action_text": "Edit Course Outline", + "action_external": false}] + }, + {"short_description" : "Explore edX's Support Tools", + "items" : [{"short_description": "Explore the Studio Help Forum", + "long_description": "Access the Studio Help forum from the menu that appears when you click your user name in the top right corner of Studio.", + "is_checked": false, + "action_url": "http://help.edge.edx.org/", + "action_text": "Visit Studio Help", + "action_external": true}, + {"short_description": "Enroll in edX 101", + "long_description": "Register for edX 101, edX's primer for course creation.", + "is_checked": false, + "action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about", + "action_text": "Register for edX 101", + "action_external": true}, + {"short_description": "Download the Studio Documentation", + "long_description": "Download the searchable Studio reference documentation in PDF form.", + "is_checked": false, + "action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf", + "action_text": "Download Documentation", + "action_external": true}] + }, + {"short_description" : "Draft Your Course About Page", + "items" : [{"short_description": "Draft a Course Description", + "long_description": "Courses on edX have an About page that includes a course video, description, and more. Draft the text students will read before deciding to enroll in your course.", + "is_checked": false, + "action_url": "SettingsDetails", + "action_text": "Edit Course Schedule & Details", + "action_external": false}, + {"short_description": "Add Staff Bios", + "long_description": "Showing prospective students who their instructor will be is helpful. 
Include staff bios on the course About page.", + "is_checked": false, + "action_url": "SettingsDetails", + "action_text": "Edit Course Schedule & Details", + "action_external": false}, + {"short_description": "Add Course FAQs", + "long_description": "Include a short list of frequently asked questions about your course.", + "is_checked": false, + "action_url": "SettingsDetails", + "action_text": "Edit Course Schedule & Details", + "action_external": false}, + {"short_description": "Add Course Prerequisites", + "long_description": "Let students know what knowledge and/or skills they should have before they enroll in your course.", + "is_checked": false, + "action_url": "SettingsDetails", + "action_text": "Edit Course Schedule & Details", + "action_external": false}] + } + ] data: { 'textbooks' : [ ], 'wiki_slug' : null } children: [] diff --git a/common/lib/xmodule/xmodule/templates/html/empty.yaml b/common/lib/xmodule/xmodule/templates/html/empty.yaml index 1262ed37cf..b6d867d7d6 100644 --- a/common/lib/xmodule/xmodule/templates/html/empty.yaml +++ b/common/lib/xmodule/xmodule/templates/html/empty.yaml @@ -1,6 +1,7 @@ --- metadata: - display_name: Empty + display_name: Blank HTML Page + empty: True data: | diff --git a/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml b/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml new file mode 100644 index 0000000000..1ba8f978d6 --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/peer_grading/default.yaml @@ -0,0 +1,14 @@ +--- +metadata: + display_name: Peer Grading Interface + attempts: 1 + use_for_single_location: False + link_to_location: None + is_graded: False + max_grade: 1 + weight: "" +data: | + + + +children: [] diff --git a/common/lib/xmodule/xmodule/templates/problem/circuitschematic.yaml b/common/lib/xmodule/xmodule/templates/problem/circuitschematic.yaml index f56b17b1b9..a94b824cfb 100644 --- a/common/lib/xmodule/xmodule/templates/problem/circuitschematic.yaml +++ 
b/common/lib/xmodule/xmodule/templates/problem/circuitschematic.yaml @@ -1,6 +1,7 @@ + --- metadata: - display_name: Circuit Schematic + display_name: Circuit Schematic Builder rerandomize: never showanswer: always weight: "" diff --git a/common/lib/xmodule/xmodule/templates/problem/customgrader.yaml b/common/lib/xmodule/xmodule/templates/problem/customgrader.yaml index 6ada6f97f3..aadbe4075a 100644 --- a/common/lib/xmodule/xmodule/templates/problem/customgrader.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/customgrader.yaml @@ -1,6 +1,6 @@ --- metadata: - display_name: Custom Grader + display_name: Custom Python-Evaluated Input rerandomize: never showanswer: always weight: "" @@ -8,7 +8,7 @@ metadata: data: |

        - A custom response problem accepts one or more lines of text input from the + A custom python-evaluated input problem accepts one or more lines of text input from the student, and evaluates the inputs for correctness based on evaluation using a python script embedded within the problem.

        diff --git a/common/lib/xmodule/xmodule/templates/problem/empty.yaml b/common/lib/xmodule/xmodule/templates/problem/empty.yaml index 346f49609c..39c9e7671c 100644 --- a/common/lib/xmodule/xmodule/templates/problem/empty.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/empty.yaml @@ -1,10 +1,11 @@ --- metadata: - display_name: Empty + display_name: Blank Common Problem rerandomize: never showanswer: always markdown: "" weight: "" + empty: True attempts: "" data: | diff --git a/common/lib/xmodule/xmodule/templates/problem/emptyadvanced.yaml b/common/lib/xmodule/xmodule/templates/problem/emptyadvanced.yaml new file mode 100644 index 0000000000..bba7b3a8ac --- /dev/null +++ b/common/lib/xmodule/xmodule/templates/problem/emptyadvanced.yaml @@ -0,0 +1,13 @@ +--- +metadata: + display_name: Blank Advanced Problem + rerandomize: never + showanswer: always + weight: "" + attempts: "" + empty: True +data: | + + + +children: [] diff --git a/common/lib/xmodule/xmodule/templates/problem/forumularesponse.yaml b/common/lib/xmodule/xmodule/templates/problem/forumularesponse.yaml index 5b30a0497d..b4c53a107b 100644 --- a/common/lib/xmodule/xmodule/templates/problem/forumularesponse.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/forumularesponse.yaml @@ -1,6 +1,6 @@ --- metadata: - display_name: Formula Response + display_name: Math Expression Input rerandomize: never showanswer: always weight: "" @@ -8,7 +8,7 @@ metadata: data: |

        - A formula response problem accepts a line of text representing a mathematical expression from the + A math expression input problem accepts a line of text representing a mathematical expression from the student, and evaluates the input for equivalence to a mathematical expression provided by the grader. Correctness is based on numerical sampling of the symbolic expressions.

        diff --git a/common/lib/xmodule/xmodule/templates/problem/imageresponse.yaml b/common/lib/xmodule/xmodule/templates/problem/imageresponse.yaml index 069c157852..3ef619d54b 100644 --- a/common/lib/xmodule/xmodule/templates/problem/imageresponse.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/imageresponse.yaml @@ -1,6 +1,6 @@ --- metadata: - display_name: Image Response + display_name: Image Mapped Input rerandomize: never showanswer: always weight: "" @@ -8,7 +8,7 @@ metadata: data: |

        - An image response problem presents an image for the student. Input is + An image mapped input problem presents an image for the student. Input is given by the location of mouse clicks on the image. Correctness of input can be evaluated based on expected dimensions of a rectangle.

        diff --git a/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml b/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml index 434354e4c7..81cb9dc353 100644 --- a/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/latex_problem.yaml @@ -1,7 +1,7 @@ --- metadata: display_name: Problem Written in LaTeX - source_processor_url: https://qisx.mit.edu:5443/latex2edx + source_processor_url: https://studio-input-filter.mitx.mit.edu/latex2edx source_code: | % Nearly any kind of edX problem can be authored using Latex as % the source language. Write latex as usual, including equations. The diff --git a/common/lib/xmodule/xmodule/templates/problem/multiplechoice.yaml b/common/lib/xmodule/xmodule/templates/problem/multiplechoice.yaml index 9e61324ae1..3a35a35199 100644 --- a/common/lib/xmodule/xmodule/templates/problem/multiplechoice.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/multiplechoice.yaml @@ -26,10 +26,6 @@ metadata: ( ) The vegetable peeler - ( ) Android - - ( ) The Beatles - [explanation] The release of the iPod allowed consumers to carry their entire music library with them in a @@ -51,8 +47,6 @@ data: | Napster The iPod The vegetable peeler - Android - The Beatles diff --git a/common/lib/xmodule/xmodule/templates/problem/numericalresponse.yaml b/common/lib/xmodule/xmodule/templates/problem/numericalresponse.yaml index e0a5776222..1dc46f5f51 100644 --- a/common/lib/xmodule/xmodule/templates/problem/numericalresponse.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/numericalresponse.yaml @@ -1,12 +1,12 @@ --- metadata: - display_name: Numerical Response + display_name: Numerical Input rerandomize: never showanswer: always weight: "" attempts: "" markdown: - "A numerical response problem accepts a line of text input from the + "A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its 
numerical value. @@ -45,7 +45,7 @@ metadata: data: |

        - A numerical response problem accepts a line of text input from the + A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value.

        diff --git a/common/lib/xmodule/xmodule/templates/problem/optionresponse.yaml b/common/lib/xmodule/xmodule/templates/problem/optionresponse.yaml index 1a42a5a009..f523c7fdc5 100644 --- a/common/lib/xmodule/xmodule/templates/problem/optionresponse.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/optionresponse.yaml @@ -1,12 +1,12 @@ --- metadata: - display_name: Option Response + display_name: Dropdown rerandomize: never showanswer: always weight: "" attempts: "" markdown: - "OptionResponse gives a limited set of options for students to respond with, and presents those options + "Dropdown problems give a limited set of options for students to respond with, and present those options in a format that encourages them to search for a specific answer rather than being immediately presented with options from which to recognize the correct answer. @@ -14,30 +14,30 @@ metadata: The answer options and the identification of the correct answer is defined in the optioninput tag. - Translation between Option Response and __________ is extremely straightforward: + Translation between Dropdown and __________ is extremely straightforward: - [[(Multiple Choice), String Response, Numerical Response, External Response, Image Response]] + [[(Multiple Choice), Text Input, Numerical Input, External Response, Image Response]] [explanation] Multiple Choice also allows students to select from a variety of pre-written responses, although the - format makes it easier for students to read very long response options. Optionresponse also differs + format makes it easier for students to read very long response options. Dropdowns also differ slightly because students are more likely to think of an answer and then search for it rather than relying purely on recognition to answer the question. [explanation] " data: | -

        OptionResponse gives a limited set of options for students to respond with, and presents those options +

        Dropdown problems give a limited set of options for students to respond with, and present those options in a format that encourages them to search for a specific answer rather than being immediately presented with options from which to recognize the correct answer.

        The answer options and the identification of the correct answer is defined in the optioninput tag.

        -

        Translation between Option Response and __________ is extremely straightforward: +

        Translation between Dropdown and __________ is extremely straightforward: - +

        diff --git a/common/lib/xmodule/xmodule/templates/problem/string_response.yaml b/common/lib/xmodule/xmodule/templates/problem/string_response.yaml index 1761ea8f67..c018d3f6cf 100644 --- a/common/lib/xmodule/xmodule/templates/problem/string_response.yaml +++ b/common/lib/xmodule/xmodule/templates/problem/string_response.yaml @@ -1,15 +1,15 @@ --- metadata: - display_name: String Response + display_name: Text Input rerandomize: never showanswer: always weight: "" attempts: "" # Note, the extra newlines are needed to make the yaml parser add blank lines instead of folding markdown: - "A string response problem accepts a line of text input from the + "A text input problem accepts a line of text from the student, and evaluates the input for correctness based on an expected - answer within each input box. + answer. The answer is correct if it matches every character of the expected answer. This can be a problem with @@ -30,9 +30,9 @@ data: |

        - A string response problem accepts a line of text input from the + A text input problem accepts a line of text from the student, and evaluates the input for correctness based on an expected - answer within each input box. + answer. The answer is correct if it matches every character of the expected answer. This can be a problem with international spelling, dates, or anything where the format of the answer is not clear.

        diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index 04e7ee19b1..1a10654f6c 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -19,20 +19,45 @@ import xmodule from xmodule.x_module import ModuleSystem from mock import Mock -test_system = ModuleSystem( - ajax_url='courses/course_id/modx/a_location', - track_function=Mock(), - get_module=Mock(), - # "render" to just the context... - render_template=lambda template, context: str(context), - replace_urls=Mock(), - user=Mock(is_staff=False), - filestore=Mock(), - debug=True, - xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10}, - node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), - anonymous_student_id='student' -) +open_ended_grading_interface = { + 'url': 'http://sandbox-grader-001.m.edx.org/peer_grading', + 'username': 'incorrect_user', + 'password': 'incorrect_pass', + 'staff_grading' : 'staff_grading', + 'peer_grading' : 'peer_grading', + 'grading_controller' : 'grading_controller' + } + + +def test_system(): + """ + Construct a test ModuleSystem instance. + + By default, the render_template() method simply returns + the context it is passed as a string. 
+ You can override this behavior by monkey patching: + + system = test_system() + system.render_template = my_render_func + + where my_render_func is a function of the form + my_render_func(template, context) + """ + return ModuleSystem( + ajax_url='courses/course_id/modx/a_location', + track_function=Mock(), + get_module=Mock(), + render_template=lambda template, context: str(context), + replace_urls=lambda html: str(html), + user=Mock(is_staff=False), + filestore=Mock(), + debug=True, + xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10}, + node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), + xblock_model_data=lambda descriptor: descriptor._model_data, + anonymous_student_id='student', + open_ended_grading_interface= open_ended_grading_interface + ) class ModelsTest(unittest.TestCase): diff --git a/common/lib/xmodule/xmodule/tests/test_annotatable_module.py b/common/lib/xmodule/xmodule/tests/test_annotatable_module.py new file mode 100644 index 0000000000..43eae8e43e --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_annotatable_module.py @@ -0,0 +1,127 @@ +"""Module annotatable tests""" + +import unittest + +from lxml import etree +from mock import Mock + +from xmodule.annotatable_module import AnnotatableModule +from xmodule.modulestore import Location + +from . import test_system + +class AnnotatableModuleTestCase(unittest.TestCase): + location = Location(["i4x", "edX", "toy", "annotatable", "guided_discussion"]) + sample_xml = ''' + + Read the text. +

        + Sing, + O goddess, + the anger of Achilles son of Peleus, + that brought countless ills upon the Achaeans. Many a brave soul did it send + hurrying down to Hades, and many a hero did it yield a prey to dogs and +

        vultures, for so were the counsels + of Jove fulfilled from the day on which the son of Atreus, king of men, and great + Achilles, first fell out with one another.
        +

        + The Iliad of Homer by Samuel Butler +
        + ''' + descriptor = Mock() + module_data = {'data': sample_xml} + + def setUp(self): + self.annotatable = AnnotatableModule(test_system(), self.location, self.descriptor, self.module_data) + + def test_annotation_data_attr(self): + el = etree.fromstring('test') + + expected_attr = { + 'data-comment-body': {'value': 'foo', '_delete': 'body' }, + 'data-comment-title': {'value': 'bar', '_delete': 'title'}, + 'data-problem-id': {'value': '0', '_delete': 'problem'} + } + + actual_attr = self.annotatable._get_annotation_data_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_default(self): + xml = 'test' + el = etree.fromstring(xml) + + expected_attr = { 'class': { 'value': 'annotatable-span highlight' } } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_with_valid_highlight(self): + xml = 'test' + + for color in self.annotatable.highlight_colors: + el = etree.fromstring(xml.format(highlight=color)) + value = 'annotatable-span highlight highlight-{highlight}'.format(highlight=color) + + expected_attr = { 'class': { + 'value': value, + '_delete': 'highlight' } + } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def test_annotation_class_attr_with_invalid_highlight(self): + xml = 'test' + + for invalid_color in ['rainbow', 'blink', 'invisible', '', None]: + el = etree.fromstring(xml.format(highlight=invalid_color)) + expected_attr = { 'class': { + 'value': 'annotatable-span highlight', + '_delete': 'highlight' } + } + actual_attr = self.annotatable._get_annotation_class_attr(0, el) + + self.assertTrue(type(actual_attr) is dict) + self.assertDictEqual(expected_attr, actual_attr) + + def 
test_render_annotation(self): + expected_html = 'z' + expected_el = etree.fromstring(expected_html) + + actual_el = etree.fromstring('z') + self.annotatable._render_annotation(0, actual_el) + + self.assertEqual(expected_el.tag, actual_el.tag) + self.assertEqual(expected_el.text, actual_el.text) + self.assertDictEqual(dict(expected_el.attrib), dict(actual_el.attrib)) + + def test_render_content(self): + content = self.annotatable._render_content() + el = etree.fromstring(content) + + self.assertEqual('div', el.tag, 'root tag is a div') + + expected_num_annotations = 5 + actual_num_annotations = el.xpath('count(//span[contains(@class,"annotatable-span")])') + self.assertEqual(expected_num_annotations, actual_num_annotations, 'check number of annotations') + + def test_get_html(self): + context = self.annotatable.get_html() + for key in ['display_name', 'element_id', 'content_html', 'instructions_html']: + self.assertIn(key, context) + + def test_extract_instructions(self): + xmltree = etree.fromstring(self.sample_xml) + + expected_xml = u"
        Read the text.
        " + actual_xml = self.annotatable._extract_instructions(xmltree) + self.assertIsNotNone(actual_xml) + self.assertEqual(expected_xml.strip(), actual_xml.strip()) + + xmltree = etree.fromstring('foo') + actual = self.annotatable._extract_instructions(xmltree) + self.assertIsNone(actual) diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py index a22fcdb5f6..1fefbb64cd 100644 --- a/common/lib/xmodule/xmodule/tests/test_capa_module.py +++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py @@ -1,13 +1,20 @@ import datetime import json -from mock import Mock +from mock import Mock, MagicMock, patch from pprint import pprint import unittest +import random +import xmodule +import capa +from capa.responsetypes import StudentInputError, \ + LoncapaProblemError, ResponseError from xmodule.capa_module import CapaModule from xmodule.modulestore import Location from lxml import etree +from django.http import QueryDict + from . import test_system @@ -33,6 +40,18 @@ class CapaFactory(object): CapaFactory.num += 1 return CapaFactory.num + @staticmethod + def input_key(): + """ Return the input key to use when passing GET parameters """ + return ("input_" + CapaFactory.answer_key()) + + @staticmethod + def answer_key(): + """ Return the key stored in the capa problem answer dict """ + return ("-".join(['i4x', 'edX', 'capa_test', 'problem', + 'SampleProblem%d' % CapaFactory.num]) + + "_2_1") + @staticmethod def create(graceperiod=None, due=None, @@ -42,6 +61,8 @@ class CapaFactory(object): force_save_button=None, attempts=None, problem_state=None, + correct=False, + done=None ): """ All parameters are optional, and are added to the created problem if specified. @@ -59,40 +80,42 @@ class CapaFactory(object): attempts: also added to instance state. Will be converted to an int. 
""" - definition = {'data': CapaFactory.sample_problem_xml, } location = Location(["i4x", "edX", "capa_test", "problem", "SampleProblem{0}".format(CapaFactory.next_num())]) - metadata = {} - if graceperiod is not None: - metadata['graceperiod'] = graceperiod - if due is not None: - metadata['due'] = due - if max_attempts is not None: - metadata['attempts'] = max_attempts - if showanswer is not None: - metadata['showanswer'] = showanswer - if force_save_button is not None: - metadata['force_save_button'] = force_save_button - if rerandomize is not None: - metadata['rerandomize'] = rerandomize + model_data = {'data': CapaFactory.sample_problem_xml} + if graceperiod is not None: + model_data['graceperiod'] = graceperiod + if due is not None: + model_data['due'] = due + if max_attempts is not None: + model_data['max_attempts'] = max_attempts + if showanswer is not None: + model_data['showanswer'] = showanswer + if force_save_button is not None: + model_data['force_save_button'] = force_save_button + if rerandomize is not None: + model_data['rerandomize'] = rerandomize + if done is not None: + model_data['done'] = done descriptor = Mock(weight="1") - instance_state_dict = {} if problem_state is not None: - instance_state_dict = problem_state + model_data.update(problem_state) if attempts is not None: # converting to int here because I keep putting "0" and "1" in the tests # since everything else is a string. - instance_state_dict['attempts'] = int(attempts) - if len(instance_state_dict) > 0: - instance_state = json.dumps(instance_state_dict) - else: - instance_state = None + model_data['attempts'] = int(attempts) - module = CapaModule(test_system, location, - definition, descriptor, - instance_state, None, metadata=metadata) + system = test_system() + system.render_template = Mock(return_value="
        Test Template HTML
        ") + module = CapaModule(system, location, descriptor, model_data) + + if correct: + # TODO: probably better to actually set the internal state properly, but... + module.get_score = lambda: {'score': 1, 'total': 1} + else: + module.get_score = lambda: {'score': 0, 'total': 1} return module @@ -100,7 +123,6 @@ class CapaFactory(object): class CapaModuleTest(unittest.TestCase): - def setUp(self): now = datetime.datetime.now() day_delta = datetime.timedelta(days=1) @@ -120,6 +142,20 @@ class CapaModuleTest(unittest.TestCase): self.assertNotEqual(module.url_name, other_module.url_name, "Factory should be creating unique names for each problem") + + + + def test_correct(self): + """ + Check that the factory creates correct and incorrect problems properly. + """ + module = CapaFactory.create() + self.assertEqual(module.get_score()['score'], 0) + + other_module = CapaFactory.create(correct=True) + self.assertEqual(other_module.get_score()['score'], 1) + + def test_showanswer_default(self): """ Make sure the show answer logic does the right thing. @@ -152,6 +188,7 @@ class CapaModuleTest(unittest.TestCase): max_attempts="1", attempts="0", due=self.yesterday_str) + self.assertTrue(after_due_date.answer_available()) @@ -178,7 +215,7 @@ class CapaModuleTest(unittest.TestCase): for everyone--e.g. after due date + grace period. """ - # can see after attempts used up, even with due date in the future + # can't see after attempts used up, even with due date in the future used_all_attempts = CapaFactory.create(showanswer='past_due', max_attempts="1", attempts="1", @@ -209,3 +246,752 @@ class CapaModuleTest(unittest.TestCase): due=self.yesterday_str, graceperiod=self.two_day_delta_str) self.assertFalse(still_in_grace.answer_available()) + + def test_showanswer_finished(self): + """ + With showanswer="finished" should show answer after the problem is closed, + or after the answer is correct. 
+ """ + + # can see after attempts used up, even with due date in the future + used_all_attempts = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="1", + due=self.tomorrow_str) + self.assertTrue(used_all_attempts.answer_available()) + + + # can see after due date + past_due_date = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="0", + due=self.yesterday_str) + self.assertTrue(past_due_date.answer_available()) + + + # can't see because attempts left and wrong + attempts_left_open = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="0", + due=self.tomorrow_str) + self.assertFalse(attempts_left_open.answer_available()) + + # _can_ see because attempts left and right + correct_ans = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="0", + due=self.tomorrow_str, + correct=True) + self.assertTrue(correct_ans.answer_available()) + + + # Can see even though grace period hasn't expired, because have no more + # attempts. 
+ still_in_grace = CapaFactory.create(showanswer='finished', + max_attempts="1", + attempts="1", + due=self.yesterday_str, + graceperiod=self.two_day_delta_str) + self.assertTrue(still_in_grace.answer_available()) + + + def test_closed(self): + + # Attempts < Max attempts --> NOT closed + module = CapaFactory.create(max_attempts="1", attempts="0") + self.assertFalse(module.closed()) + + # Attempts < Max attempts --> NOT closed + module = CapaFactory.create(max_attempts="2", attempts="1") + self.assertFalse(module.closed()) + + # Attempts = Max attempts --> closed + module = CapaFactory.create(max_attempts="1", attempts="1") + self.assertTrue(module.closed()) + + # Attempts > Max attempts --> closed + module = CapaFactory.create(max_attempts="1", attempts="2") + self.assertTrue(module.closed()) + + # Max attempts = 0 --> closed + module = CapaFactory.create(max_attempts="0", attempts="2") + self.assertTrue(module.closed()) + + # Past due --> closed + module = CapaFactory.create(max_attempts="1", attempts="0", + due=self.yesterday_str) + self.assertTrue(module.closed()) + + + def test_parse_get_params(self): + + # We have to set up Django settings in order to use QueryDict + from django.conf import settings + settings.configure() + + # Valid GET param dict + valid_get_dict = self._querydict_from_dict({'input_1': 'test', + 'input_1_2': 'test', + 'input_1_2_3': 'test', + 'input_[]_3': 'test', + 'input_4': None, + 'input_5': [], + 'input_6': 5}) + + result = CapaModule.make_dict_of_responses(valid_get_dict) + + # Expect that we get a dict with "input" stripped from key names + # and that we get the same values back + for key in result.keys(): + original_key = "input_" + key + self.assertTrue(original_key in valid_get_dict, + "Output dict should have key %s" % original_key) + self.assertEqual(valid_get_dict[original_key], result[key]) + + + # Valid GET param dict with list keys + valid_get_dict = self._querydict_from_dict({'input_2[]': ['test1', 'test2']}) + result = 
CapaModule.make_dict_of_responses(valid_get_dict) + self.assertTrue('2' in result) + self.assertEqual(['test1', 'test2'], result['2']) + + # If we use [] at the end of a key name, we should always + # get a list, even if there's just one value + valid_get_dict = self._querydict_from_dict({'input_1[]': 'test'}) + result = CapaModule.make_dict_of_responses(valid_get_dict) + self.assertEqual(result['1'], ['test']) + + # If we have no underscores in the name, then the key is invalid + invalid_get_dict = self._querydict_from_dict({'input': 'test'}) + with self.assertRaises(ValueError): + result = CapaModule.make_dict_of_responses(invalid_get_dict) + + + # Two equivalent names (one list, one non-list) + # One of the values would overwrite the other, so detect this + # and raise an exception + invalid_get_dict = self._querydict_from_dict({'input_1[]': 'test 1', + 'input_1': 'test 2'}) + with self.assertRaises(ValueError): + result = CapaModule.make_dict_of_responses(invalid_get_dict) + + def _querydict_from_dict(self, param_dict): + """ Create a Django QueryDict from a Python dictionary """ + + # QueryDict objects are immutable by default, so we make + # a copy that we can update. 
+ querydict = QueryDict('') + copyDict = querydict.copy() + + for (key, val) in param_dict.items(): + + # QueryDicts handle lists differently from ordinary values, + # so we have to specifically tell the QueryDict that + # this is a list + if type(val) is list: + copyDict.setlist(key, val) + else: + copyDict[key] = val + + return copyDict + + + def test_check_problem_correct(self): + + module = CapaFactory.create(attempts=1) + + # Simulate that all answers are marked correct, no matter + # what the input is, by patching CorrectMap.is_correct() + # Also simulate rendering the HTML + with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct,\ + patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html: + mock_is_correct.return_value = True + mock_html.return_value = "Test HTML" + + # Check the problem + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + # Expect that the problem is marked correct + self.assertEqual(result['success'], 'correct') + + # Expect that we get the (mocked) HTML + self.assertEqual(result['contents'], 'Test HTML') + + # Expect that the number of attempts is incremented by 1 + self.assertEqual(module.attempts, 2) + + def test_check_problem_incorrect(self): + + module = CapaFactory.create(attempts=0) + + # Simulate marking the input incorrect + with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct: + mock_is_correct.return_value = False + + # Check the problem + get_request_dict = {CapaFactory.input_key(): '0'} + result = module.check_problem(get_request_dict) + + # Expect that the problem is marked correct + self.assertEqual(result['success'], 'incorrect') + + # Expect that the number of attempts is incremented by 1 + self.assertEqual(module.attempts, 1) + + + def test_check_problem_closed(self): + module = CapaFactory.create(attempts=3) + + # Problem closed -- cannot submit + # Simulate that CapaModule.closed() always returns True + with 
patch('xmodule.capa_module.CapaModule.closed') as mock_closed: + mock_closed.return_value = True + with self.assertRaises(xmodule.exceptions.NotFoundError): + get_request_dict = {CapaFactory.input_key(): '3.14'} + module.check_problem(get_request_dict) + + # Expect that number of attempts NOT incremented + self.assertEqual(module.attempts, 3) + + def test_check_problem_resubmitted_with_randomize(self): + # Randomize turned on + module = CapaFactory.create(rerandomize='always', attempts=0) + + # Simulate that the problem is completed + module.done = True + + # Expect that we cannot submit + with self.assertRaises(xmodule.exceptions.NotFoundError): + get_request_dict = {CapaFactory.input_key(): '3.14'} + module.check_problem(get_request_dict) + + # Expect that number of attempts NOT incremented + self.assertEqual(module.attempts, 0) + + def test_check_problem_resubmitted_no_randomize(self): + # Randomize turned off + module = CapaFactory.create(rerandomize='never', attempts=0, done=True) + + # Expect that we can submit successfully + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + self.assertEqual(result['success'], 'correct') + + # Expect that number of attempts IS incremented + self.assertEqual(module.attempts, 1) + + def test_check_problem_queued(self): + module = CapaFactory.create(attempts=1) + + # Simulate that the problem is queued + with patch('capa.capa_problem.LoncapaProblem.is_queued') \ + as mock_is_queued,\ + patch('capa.capa_problem.LoncapaProblem.get_recentmost_queuetime') \ + as mock_get_queuetime: + + mock_is_queued.return_value = True + mock_get_queuetime.return_value = datetime.datetime.now() + + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + # Expect an AJAX alert message in 'success' + self.assertTrue('You must wait' in result['success']) + + # Expect that the number of attempts is NOT incremented + 
self.assertEqual(module.attempts, 1) + + + def test_check_problem_error(self): + + # Try each exception that capa_module should handle + for exception_class in [StudentInputError, + LoncapaProblemError, + ResponseError]: + + # Create the module + module = CapaFactory.create(attempts=1) + + # Ensure that the user is NOT staff + module.system.user_is_staff = False + + # Simulate answering a problem that raises the exception + with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade: + mock_grade.side_effect = exception_class('test error') + + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + # Expect an AJAX alert message in 'success' + expected_msg = 'Error: test error' + self.assertEqual(expected_msg, result['success']) + + # Expect that the number of attempts is NOT incremented + self.assertEqual(module.attempts, 1) + + def test_check_problem_error_with_staff_user(self): + + # Try each exception that capa module should handle + for exception_class in [StudentInputError, + LoncapaProblemError, + ResponseError]: + + # Create the module + module = CapaFactory.create(attempts=1) + + # Ensure that the user IS staff + module.system.user_is_staff = True + + # Simulate answering a problem that raises an exception + with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade: + mock_grade.side_effect = exception_class('test error') + + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.check_problem(get_request_dict) + + # Expect an AJAX alert message in 'success' + self.assertTrue('test error' in result['success']) + + # We DO include traceback information for staff users + self.assertTrue('Traceback' in result['success']) + + # Expect that the number of attempts is NOT incremented + self.assertEqual(module.attempts, 1) + + + def test_reset_problem(self): + module = CapaFactory.create(done=True) + module.new_lcp = Mock(wraps=module.new_lcp) + + # Stub out HTML 
rendering + with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html: + mock_html.return_value = "
        Test HTML
        " + + # Reset the problem + get_request_dict = {} + result = module.reset_problem(get_request_dict) + + # Expect that the request was successful + self.assertTrue('success' in result and result['success']) + + # Expect that the problem HTML is retrieved + self.assertTrue('html' in result) + self.assertEqual(result['html'], "
        Test HTML
        ") + + # Expect that the problem was reset + module.new_lcp.assert_called_once_with({'seed': None}) + + + def test_reset_problem_closed(self): + module = CapaFactory.create() + + # Simulate that the problem is closed + with patch('xmodule.capa_module.CapaModule.closed') as mock_closed: + mock_closed.return_value = True + + # Try to reset the problem + get_request_dict = {} + result = module.reset_problem(get_request_dict) + + # Expect that the problem was NOT reset + self.assertTrue('success' in result and not result['success']) + + + def test_reset_problem_not_done(self): + # Simulate that the problem is NOT done + module = CapaFactory.create(done=False) + + # Try to reset the problem + get_request_dict = {} + result = module.reset_problem(get_request_dict) + + # Expect that the problem was NOT reset + self.assertTrue('success' in result and not result['success']) + + + def test_save_problem(self): + module = CapaFactory.create(done=False) + + # Save the problem + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that answers are saved to the problem + expected_answers = {CapaFactory.answer_key(): '3.14'} + self.assertEqual(module.lcp.student_answers, expected_answers) + + # Expect that the result is success + self.assertTrue('success' in result and result['success']) + + + def test_save_problem_closed(self): + module = CapaFactory.create(done=False) + + # Simulate that the problem is closed + with patch('xmodule.capa_module.CapaModule.closed') as mock_closed: + mock_closed.return_value = True + + # Try to save the problem + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that the result is failure + self.assertTrue('success' in result and not result['success']) + + + def test_save_problem_submitted_with_randomize(self): + module = CapaFactory.create(rerandomize='always', done=True) + + # Try to save + get_request_dict = 
{CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that we cannot save + self.assertTrue('success' in result and not result['success']) + + + def test_save_problem_submitted_no_randomize(self): + module = CapaFactory.create(rerandomize='never', done=True) + + # Try to save + get_request_dict = {CapaFactory.input_key(): '3.14'} + result = module.save_problem(get_request_dict) + + # Expect that we succeed + self.assertTrue('success' in result and result['success']) + + def test_check_button_name(self): + + # If last attempt, button name changes to "Final Check" + # Just in case, we also check what happens if we have + # more attempts than allowed. + attempts = random.randint(1, 10) + module = CapaFactory.create(attempts=attempts - 1, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Final Check") + + module = CapaFactory.create(attempts=attempts, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Final Check") + + module = CapaFactory.create(attempts=attempts + 1, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Final Check") + + # Otherwise, button name is "Check" + module = CapaFactory.create(attempts=attempts - 2, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Check") + + module = CapaFactory.create(attempts=attempts - 3, max_attempts=attempts) + self.assertEqual(module.check_button_name(), "Check") + + # If no limit on attempts, then always show "Check" + module = CapaFactory.create(attempts=attempts - 3) + self.assertEqual(module.check_button_name(), "Check") + + module = CapaFactory.create(attempts=0) + self.assertEqual(module.check_button_name(), "Check") + + def test_should_show_check_button(self): + + attempts = random.randint(1, 10) + + # If we're after the deadline, do NOT show check button + module = CapaFactory.create(due=self.yesterday_str) + self.assertFalse(module.should_show_check_button()) + + # If user is out of 
attempts, do NOT show the check button + module = CapaFactory.create(attempts=attempts, max_attempts=attempts) + self.assertFalse(module.should_show_check_button()) + + # If survey question (max_attempts = 0), do NOT show the check button + module = CapaFactory.create(max_attempts=0) + self.assertFalse(module.should_show_check_button()) + + # If user submitted a problem but hasn't reset, + # do NOT show the check button + # Note: we can only reset when rerandomize="always" + module = CapaFactory.create(rerandomize="always", done=True) + self.assertFalse(module.should_show_check_button()) + + # Otherwise, DO show the check button + module = CapaFactory.create() + self.assertTrue(module.should_show_check_button()) + + # If the user has submitted the problem + # and we do NOT have a reset button, then we can show the check button + # Setting rerandomize to "never" ensures that the reset button + # is not shown + module = CapaFactory.create(rerandomize="never", done=True) + self.assertTrue(module.should_show_check_button()) + + + def test_should_show_reset_button(self): + + attempts = random.randint(1, 10) + + # If we're after the deadline, do NOT show the reset button + module = CapaFactory.create(due=self.yesterday_str, done=True) + self.assertFalse(module.should_show_reset_button()) + + # If the user is out of attempts, do NOT show the reset button + module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True) + self.assertFalse(module.should_show_reset_button()) + + # If we're NOT randomizing, then do NOT show the reset button + module = CapaFactory.create(rerandomize="never", done=True) + self.assertFalse(module.should_show_reset_button()) + + # If the user hasn't submitted an answer yet, + # then do NOT show the reset button + module = CapaFactory.create(done=False) + self.assertFalse(module.should_show_reset_button()) + + # Otherwise, DO show the reset button + module = CapaFactory.create(done=True) + 
self.assertTrue(module.should_show_reset_button()) + + # If survey question for capa (max_attempts = 0), + # DO show the reset button + module = CapaFactory.create(max_attempts=0, done=True) + self.assertTrue(module.should_show_reset_button()) + + + def test_should_show_save_button(self): + + attempts = random.randint(1, 10) + + # If we're after the deadline, do NOT show the save button + module = CapaFactory.create(due=self.yesterday_str, done=True) + self.assertFalse(module.should_show_save_button()) + + # If the user is out of attempts, do NOT show the save button + module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True) + self.assertFalse(module.should_show_save_button()) + + # If user submitted a problem but hasn't reset, do NOT show the save button + module = CapaFactory.create(rerandomize="always", done=True) + self.assertFalse(module.should_show_save_button()) + + # If the user has unlimited attempts and we are not randomizing, + # then do NOT show a save button + # because they can keep using "Check" + module = CapaFactory.create(max_attempts=None, rerandomize="never", done=False) + self.assertFalse(module.should_show_save_button()) + + module = CapaFactory.create(max_attempts=None, rerandomize="never", done=True) + self.assertFalse(module.should_show_save_button()) + + # Otherwise, DO show the save button + module = CapaFactory.create(done=False) + self.assertTrue(module.should_show_save_button()) + + # If we're not randomizing and we have limited attempts, then we can save + module = CapaFactory.create(rerandomize="never", max_attempts=2, done=True) + self.assertTrue(module.should_show_save_button()) + + # If survey question for capa (max_attempts = 0), + # DO show the save button + module = CapaFactory.create(max_attempts=0, done=False) + self.assertTrue(module.should_show_save_button()) + + def test_should_show_save_button_force_save_button(self): + # If we're after the deadline, do NOT show the save button + # even though 
we're forcing a save + module = CapaFactory.create(due=self.yesterday_str, + force_save_button="true", + done=True) + self.assertFalse(module.should_show_save_button()) + + # If the user is out of attempts, do NOT show the save button + attempts = random.randint(1, 10) + module = CapaFactory.create(attempts=attempts, + max_attempts=attempts, + force_save_button="true", + done=True) + self.assertFalse(module.should_show_save_button()) + + # Otherwise, if we force the save button, + # then show it even if we would ordinarily + # require a reset first + module = CapaFactory.create(force_save_button="true", + rerandomize="always", + done=True) + self.assertTrue(module.should_show_save_button()) + + def test_no_max_attempts(self): + module = CapaFactory.create(max_attempts='') + html = module.get_problem_html() + # assert that we got here without exploding + + + def test_get_problem_html(self): + module = CapaFactory.create() + + # We've tested the show/hide button logic in other tests, + # so here we hard-wire the values + show_check_button = bool(random.randint(0, 1) % 2) + show_reset_button = bool(random.randint(0, 1) % 2) + show_save_button = bool(random.randint(0, 1) % 2) + + module.should_show_check_button = Mock(return_value=show_check_button) + module.should_show_reset_button = Mock(return_value=show_reset_button) + module.should_show_save_button = Mock(return_value=show_save_button) + + # Mock the system rendering function + module.system.render_template = Mock(return_value="
        Test Template HTML
        ") + + # Patch the capa problem's HTML rendering + with patch('capa.capa_problem.LoncapaProblem.get_html') as mock_html: + mock_html.return_value = "
        Test Problem HTML
        " + + # Render the problem HTML + html = module.get_problem_html(encapsulate=False) + + # Also render the problem encapsulated in a
        + html_encapsulated = module.get_problem_html(encapsulate=True) + + # Expect that we get the rendered template back + self.assertEqual(html, "
        Test Template HTML
        ") + + # Check the rendering context + render_args, _ = module.system.render_template.call_args + self.assertEqual(len(render_args), 2) + + template_name = render_args[0] + self.assertEqual(template_name, "problem.html") + + context = render_args[1] + self.assertEqual(context['problem']['html'], "
        Test Problem HTML
        ") + self.assertEqual(bool(context['check_button']), show_check_button) + self.assertEqual(bool(context['reset_button']), show_reset_button) + self.assertEqual(bool(context['save_button']), show_save_button) + + # Assert that the encapsulated html contains the original html + self.assertTrue(html in html_encapsulated) + + + def test_get_problem_html_error(self): + """ + In production, when an error occurs with the problem HTML + rendering, a "dummy" problem is created with an error + message to display to the user. + """ + module = CapaFactory.create() + + # Save the original problem so we can compare it later + original_problem = module.lcp + + # Simulate throwing an exception when the capa problem + # is asked to render itself as HTML + module.lcp.get_html = Mock(side_effect=Exception("Test")) + + # Stub out the test_system rendering function + module.system.render_template = Mock(return_value="
        Test Template HTML
        ") + + # Turn off DEBUG + module.system.DEBUG = False + + # Try to render the module with DEBUG turned off + html = module.get_problem_html() + + # Check the rendering context + render_args, _ = module.system.render_template.call_args + context = render_args[1] + self.assertTrue("error" in context['problem']['html']) + + # Expect that the module has created a new dummy problem with the error + self.assertNotEqual(original_problem, module.lcp) + + + def test_random_seed_no_change(self): + + # Run the test for each possible rerandomize value + for rerandomize in ['never', 'per_student', 'always', 'onreset']: + module = CapaFactory.create(rerandomize=rerandomize) + + # Get the seed + # By this point, the module should have persisted the seed + seed = module.seed + self.assertTrue(seed is not None) + + # If we're not rerandomizing, the seed is always set + # to the same value (1) + if rerandomize == 'never': + self.assertEqual(seed, 1) + + # Check the problem + get_request_dict = { CapaFactory.input_key(): '3.14'} + module.check_problem(get_request_dict) + + # Expect that the seed is the same + self.assertEqual(seed, module.seed) + + # Save the problem + module.save_problem(get_request_dict) + + # Expect that the seed is the same + self.assertEqual(seed, module.seed) + + def test_random_seed_with_reset(self): + + def _reset_and_get_seed(module): + ''' + Reset the XModule and return the module's seed + ''' + + # Simulate submitting an attempt + # We need to do this, or reset_problem() will + # fail with a complaint that we haven't submitted + # the problem yet. 
+ module.done = True + + # Reset the problem + module.reset_problem({}) + + # Return the seed + return module.seed + + def _retry_and_check(num_tries, test_func): + ''' + Returns True if *test_func* was successful + (returned True) within *num_tries* attempts + + *test_func* must be a function + of the form test_func() -> bool + ''' + success = False + for i in range(num_tries): + if test_func() is True: + success = True + break + return success + + # Run the test for each possible rerandomize value + for rerandomize in ['never', 'per_student', 'always', 'onreset']: + module = CapaFactory.create(rerandomize=rerandomize) + + # Get the seed + # By this point, the module should have persisted the seed + seed = module.seed + self.assertTrue(seed is not None) + + # We do NOT want the seed to reset if rerandomize + # is set to 'never' -- it should still be 1 + # The seed also stays the same if we're randomizing + # 'per_student': the same student should see the same problem + if rerandomize in ['never', 'per_student']: + self.assertEqual(seed, _reset_and_get_seed(module)) + + # Otherwise, we expect the seed to change + # to another valid seed + else: + + # Since there's a small chance we might get the + # same seed again, give it 5 chances + # to generate a different seed + success = _retry_and_check(5, + lambda: _reset_and_get_seed(module) != seed) + + self.assertTrue(module.seed != None) + msg = 'Could not get a new seed from reset after 5 tries' + self.assertTrue(success, msg) diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py index c2b27e4953..1950389399 100644 --- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py +++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py @@ -2,16 +2,23 @@ import json from mock import Mock, MagicMock, ANY import unittest -from xmodule.openendedchild import OpenEndedChild -from xmodule.open_ended_module import OpenEndedModule 
-from xmodule.combined_open_ended_modulev1 import CombinedOpenEndedV1Module +from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild +from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule +from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module +from xmodule.combined_open_ended_module import CombinedOpenEndedModule from xmodule.modulestore import Location from lxml import etree import capa.xqueue_interface as xqueue_interface from datetime import datetime +import logging + +log = logging.getLogger(__name__) from . import test_system + +import test_util_open_ended + """ Tests for the various pieces of the CombinedOpenEndedGrading system @@ -37,37 +44,38 @@ class OpenEndedChildTest(unittest.TestCase): max_score = 1 static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'close_date': None - } + 'max_attempts': 20, + 'prompt': prompt, + 'rubric': rubric, + 'max_score': max_score, + 'display_name': 'Name', + 'accept_file_upload': False, + 'close_date': None, + 's3_interface': "", + 'open_ended_grading_interface': {}, + 'skip_basic_checks': False, + } definition = Mock() descriptor = Mock() def setUp(self): - self.openendedchild = OpenEndedChild(test_system, self.location, - self.definition, self.descriptor, self.static_data, self.metadata) + self.test_system = test_system() + self.openendedchild = OpenEndedChild(self.test_system, self.location, + self.definition, self.descriptor, self.static_data, self.metadata) def test_latest_answer_empty(self): answer = self.openendedchild.latest_answer() self.assertEqual(answer, "") - def test_latest_score_empty(self): answer = self.openendedchild.latest_score() self.assertEqual(answer, None) - def test_latest_post_assessment_empty(self): - answer = self.openendedchild.latest_post_assessment(test_system) + answer = 
self.openendedchild.latest_post_assessment(self.test_system) self.assertEqual(answer, "") - def test_new_history_entry(self): new_answer = "New Answer" self.openendedchild.new_history_entry(new_answer) @@ -93,7 +101,6 @@ class OpenEndedChildTest(unittest.TestCase): score = self.openendedchild.latest_score() self.assertEqual(score, 4) - def test_record_latest_post_assessment(self): new_answer = "New Answer" self.openendedchild.new_history_entry(new_answer) @@ -101,7 +108,7 @@ class OpenEndedChildTest(unittest.TestCase): post_assessment = "Post assessment" self.openendedchild.record_latest_post_assessment(post_assessment) self.assertEqual(post_assessment, - self.openendedchild.latest_post_assessment(test_system)) + self.openendedchild.latest_post_assessment(self.test_system)) def test_get_score(self): new_answer = "New Answer" @@ -118,24 +125,22 @@ class OpenEndedChildTest(unittest.TestCase): self.assertEqual(score['score'], new_score) self.assertEqual(score['total'], self.static_data['max_score']) - def test_reset(self): - self.openendedchild.reset(test_system) + self.openendedchild.reset(self.test_system) state = json.loads(self.openendedchild.get_instance_state()) - self.assertEqual(state['state'], OpenEndedChild.INITIAL) - + self.assertEqual(state['child_state'], OpenEndedChild.INITIAL) def test_is_last_response_correct(self): new_answer = "New Answer" self.openendedchild.new_history_entry(new_answer) self.openendedchild.record_latest_score(self.static_data['max_score']) self.assertEqual(self.openendedchild.is_last_response_correct(), - 'correct') + 'correct') self.openendedchild.new_history_entry(new_answer) self.openendedchild.record_latest_score(0) self.assertEqual(self.openendedchild.is_last_response_correct(), - 'incorrect') + 'incorrect') class OpenEndedModuleTest(unittest.TestCase): @@ -153,15 +158,18 @@ class OpenEndedModuleTest(unittest.TestCase): max_score = 4 static_data = { - 'max_attempts': 20, - 'prompt': prompt, - 'rubric': rubric, - 'max_score': 
max_score, - 'display_name': 'Name', - 'accept_file_upload': False, - 'rewrite_content_links' : "", - 'close_date': None, - } + 'max_attempts': 20, + 'prompt': prompt, + 'rubric': rubric, + 'max_score': max_score, + 'display_name': 'Name', + 'accept_file_upload': False, + 'rewrite_content_links': "", + 'close_date': None, + 's3_interface': test_util_open_ended.S3_INTERFACE, + 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, + 'skip_basic_checks': False, + } oeparam = etree.XML(''' @@ -174,90 +182,98 @@ class OpenEndedModuleTest(unittest.TestCase): descriptor = Mock() def setUp(self): - test_system.location = self.location + self.test_system = test_system() + + self.test_system.location = self.location self.mock_xqueue = MagicMock() self.mock_xqueue.send_to_queue.return_value = (None, "Message") - test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 1} - self.openendedmodule = OpenEndedModule(test_system, self.location, - self.definition, self.descriptor, self.static_data, self.metadata) + + def constructed_callback(dispatch="score_update"): + return dispatch + + self.test_system.xqueue = {'interface': self.mock_xqueue, 'construct_callback': constructed_callback, + 'default_queuename': 'testqueue', + 'waittime': 1} + self.openendedmodule = OpenEndedModule(self.test_system, self.location, + self.definition, self.descriptor, self.static_data, self.metadata) def test_message_post(self): get = {'feedback': 'feedback text', - 'submission_id': '1', - 'grader_id': '1', - 'score': 3} + 'submission_id': '1', + 'grader_id': '1', + 'score': 3} qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) - student_info = {'anonymous_student_id': test_system.anonymous_student_id, - 'submission_time': qtime} + student_info = {'anonymous_student_id': self.test_system.anonymous_student_id, + 'submission_time': qtime} contents = { - 'feedback': get['feedback'], - 
'submission_id': int(get['submission_id']), - 'grader_id': int(get['grader_id']), - 'score': get['score'], - 'student_info': json.dumps(student_info) - } + 'feedback': get['feedback'], + 'submission_id': int(get['submission_id']), + 'grader_id': int(get['grader_id']), + 'score': get['score'], + 'student_info': json.dumps(student_info) + } - result = self.openendedmodule.message_post(get, test_system) + result = self.openendedmodule.message_post(get, self.test_system) self.assertTrue(result['success']) # make sure it's actually sending something we want to the queue self.mock_xqueue.send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY) state = json.loads(self.openendedmodule.get_instance_state()) - self.assertIsNotNone(state['state'], OpenEndedModule.DONE) + self.assertIsNotNone(state['child_state'], OpenEndedModule.DONE) def test_send_to_grader(self): submission = "This is a student submission" qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) - student_info = {'anonymous_student_id': test_system.anonymous_student_id, - 'submission_time': qtime} + student_info = {'anonymous_student_id': self.test_system.anonymous_student_id, + 'submission_time': qtime} contents = self.openendedmodule.payload.copy() contents.update({ 'student_info': json.dumps(student_info), 'student_response': submission, 'max_score': self.max_score - }) - result = self.openendedmodule.send_to_grader(submission, test_system) + }) + result = self.openendedmodule.send_to_grader(submission, self.test_system) self.assertTrue(result) self.mock_xqueue.send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY) def update_score_single(self): self.openendedmodule.new_history_entry("New Entry") score_msg = { - 'correct': True, - 'score': 4, - 'msg': 'Grader Message', - 'feedback': "Grader Feedback" - } + 'correct': True, + 'score': 4, + 'msg': 'Grader Message', + 'feedback': "Grader Feedback" + } get = {'queuekey': "abcd", - 'xqueue_body': score_msg} - 
self.openendedmodule.update_score(get, test_system) + 'xqueue_body': score_msg} + self.openendedmodule.update_score(get, self.test_system) def update_score_single(self): self.openendedmodule.new_history_entry("New Entry") feedback = { - "success": True, - "feedback": "Grader Feedback" - } + "success": True, + "feedback": "Grader Feedback" + } score_msg = { - 'correct': True, - 'score': 4, - 'msg': 'Grader Message', - 'feedback': json.dumps(feedback), - 'grader_type': 'IN', - 'grader_id': '1', - 'submission_id': '1', - 'success': True, - 'rubric_scores': [0], - 'rubric_scores_complete': True, - 'rubric_xml': etree.tostring(self.rubric) - } + 'correct': True, + 'score': 4, + 'msg': 'Grader Message', + 'feedback': json.dumps(feedback), + 'grader_type': 'IN', + 'grader_id': '1', + 'submission_id': '1', + 'success': True, + 'rubric_scores': [0], + 'rubric_scores_complete': True, + 'rubric_xml': etree.tostring(self.rubric) + } get = {'queuekey': "abcd", - 'xqueue_body': json.dumps(score_msg)} - self.openendedmodule.update_score(get, test_system) + 'xqueue_body': json.dumps(score_msg)} + self.openendedmodule.update_score(get, self.test_system) def test_latest_post_assessment(self): self.update_score_single() - assessment = self.openendedmodule.latest_post_assessment(test_system) + assessment = self.openendedmodule.latest_post_assessment(self.test_system) self.assertFalse(assessment == '') # check for errors self.assertFalse('errors' in assessment) @@ -271,7 +287,18 @@ class OpenEndedModuleTest(unittest.TestCase): class CombinedOpenEndedModuleTest(unittest.TestCase): location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"]) - + definition_template = """ + + {rubric} + {prompt} + + {task1} + + + {task2} + + + """ prompt = "This is a question prompt" rubric = ''' @@ -285,15 +312,18 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): metadata = {'attempts': '10', 'max_score': max_score} static_data = { - 'max_attempts': 20, - 'prompt': 
prompt, - 'rubric': rubric, - 'max_score': max_score, - 'display_name': 'Name', - 'accept_file_upload' : False, - 'rewrite_content_links' : "", - 'close_date' : "", - } + 'max_attempts': 20, + 'prompt': prompt, + 'rubric': rubric, + 'max_score': max_score, + 'display_name': 'Name', + 'accept_file_upload': False, + 'rewrite_content_links': "", + 'close_date': "", + 's3_interface': test_util_open_ended.S3_INTERFACE, + 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, + 'skip_basic_checks': False, + } oeparam = etree.XML(''' @@ -315,17 +345,31 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): ''' task_xml2 = ''' - - Enter essay here. - This is the answer. - {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} - - ''' + + Enter essay here. + This is the answer. + {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} + + ''' definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]} - descriptor = Mock() + full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2) + descriptor = Mock(data=full_definition) + test_system = test_system() + combinedoe_container = CombinedOpenEndedModule(test_system, + location, + descriptor, + model_data={'data': full_definition, 'weight' : '1'}) def setUp(self): - self.combinedoe = CombinedOpenEndedV1Module(test_system, self.location, self.definition, self.descriptor, static_data = self.static_data, metadata=self.metadata) + # TODO: this constructor call is definitely wrong, but neither branch + # of the merge matches the module constructor. Someone (Vik?) should fix this. 
+ self.combinedoe = CombinedOpenEndedV1Module(self.test_system, + self.location, + self.definition, + self.descriptor, + static_data=self.static_data, + metadata=self.metadata, + instance_state={}) def test_get_tag_name(self): name = self.combinedoe.get_tag_name("Tag") @@ -346,3 +390,19 @@ class CombinedOpenEndedModuleTest(unittest.TestCase): changed = self.combinedoe.update_task_states() self.assertTrue(changed) + + def test_get_max_score(self): + changed = self.combinedoe.update_task_states() + self.combinedoe.state = "done" + self.combinedoe.is_scored = True + max_score = self.combinedoe.max_score() + self.assertEqual(max_score, 1) + + def test_container_get_max_score(self): + #The progress view requires that this function be exposed + max_score = self.combinedoe_container.max_score() + self.assertEqual(max_score, None) + + def test_container_weight(self): + weight = self.combinedoe_container.weight + self.assertEqual(weight,1) diff --git a/common/lib/xmodule/xmodule/tests/test_conditional.py b/common/lib/xmodule/xmodule/tests/test_conditional.py index 361a6ea785..1b2da0b74a 100644 --- a/common/lib/xmodule/xmodule/tests/test_conditional.py +++ b/common/lib/xmodule/xmodule/tests/test_conditional.py @@ -56,6 +56,9 @@ class ConditionalModuleTest(unittest.TestCase): '''Get a dummy system''' return DummySystem(load_error_modules) + def setUp(self): + self.test_system = test_system() + def get_course(self, name): """Get a test course by directory name. 
If there's more than one, error.""" print "Importing {0}".format(name) @@ -70,52 +73,51 @@ class ConditionalModuleTest(unittest.TestCase): """Make sure that conditional module works""" print "Starting import" - course = self.get_course('conditional') + course = self.get_course('conditional_and_poll') print "Course: ", course print "id: ", course.id - instance_states = dict(problem=None) - shared_state = None - def inner_get_module(descriptor): if isinstance(descriptor, Location): location = descriptor descriptor = self.modulestore.get_instance(course.id, location, depth=None) location = descriptor.location - instance_state = instance_states.get(location.category, None) - print "inner_get_module, location=%s, inst_state=%s" % (location, instance_state) - return descriptor.xmodule_constructor(test_system)(instance_state, shared_state) + return descriptor.xmodule(self.test_system) - location = Location(["i4x", "edX", "cond_test", "conditional", "condone"]) + # edx - HarvardX + # cond_test - ER22x + location = Location(["i4x", "HarvardX", "ER22x", "conditional", "condone"]) def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None): return text - test_system.replace_urls = replace_urls - test_system.get_module = inner_get_module + self.test_system.replace_urls = replace_urls + self.test_system.get_module = inner_get_module module = inner_get_module(location) print "module: ", module - print "module definition: ", module.definition + print "module.conditions_map: ", module.conditions_map print "module children: ", module.get_children() print "module display items (children): ", module.get_display_items() html = module.get_html() print "html type: ", type(html) print "html: ", html - html_expect = "{'ajax_url': 'courses/course_id/modx/a_location', 'element_id': 'i4x-edX-cond_test-conditional-condone', 'id': 'i4x://edX/cond_test/conditional/condone'}" + html_expect = "{'ajax_url': 'courses/course_id/modx/a_location', 'element_id': 
'i4x-HarvardX-ER22x-conditional-condone', 'id': 'i4x://HarvardX/ER22x/conditional/condone', 'depends': 'i4x-HarvardX-ER22x-problem-choiceprob'}" self.assertEqual(html, html_expect) gdi = module.get_display_items() print "gdi=", gdi ajax = json.loads(module.handle_ajax('', '')) - self.assertTrue('xmodule.conditional_module' in ajax['html']) print "ajax: ", ajax + html = ajax['html'] + self.assertFalse(any(['This is a secret' in item for item in html])) # now change state of the capa problem to make it completed - instance_states['problem'] = json.dumps({'attempts': 1}) + inner_get_module(Location('i4x://HarvardX/ER22x/problem/choiceprob')).attempts = 1 ajax = json.loads(module.handle_ajax('', '')) - self.assertTrue('This is a secret' in ajax['html']) print "post-attempt ajax: ", ajax + html = ajax['html'] + self.assertTrue(any(['This is a secret' in item for item in html])) diff --git a/common/lib/xmodule/xmodule/tests/test_content.py b/common/lib/xmodule/xmodule/tests/test_content.py index 1bcd2f4ebe..e73c33197c 100644 --- a/common/lib/xmodule/xmodule/tests/test_content.py +++ b/common/lib/xmodule/xmodule/tests/test_content.py @@ -19,9 +19,14 @@ class ContentTest(unittest.TestCase): content = StaticContent('loc', 'name', 'content_type', 'data') self.assertIsNone(content.thumbnail_location) - def test_generate_thumbnail_nonimage(self): + def test_generate_thumbnail_image(self): contentStore = ContentStore() - content = Content(Location(u'c4x', u'mitX', u'800', u'asset', u'monsters.jpg'), None) + content = Content(Location(u'c4x', u'mitX', u'800', u'asset', u'monsters__.jpg'), None) (thumbnail_content, thumbnail_file_location) = contentStore.generate_thumbnail(content) self.assertIsNone(thumbnail_content) - self.assertEqual(Location(u'c4x', u'mitX', u'800', u'thumbnail', u'monsters.jpg'), thumbnail_file_location) + self.assertEqual(Location(u'c4x', u'mitX', u'800', u'thumbnail', u'monsters__.jpg'), thumbnail_file_location) + def test_compute_location(self): + # We 
had a bug that __ got converted into a single _. Make sure that substitution of INVALID_CHARS (like space) + # still happen. + asset_location = StaticContent.compute_location('mitX', '400', 'subs__1eo_jXvZnE .srt.sjson') + self.assertEqual(Location(u'c4x', u'mitX', u'400', u'asset', u'subs__1eo_jXvZnE_.srt.sjson', None), asset_location) diff --git a/common/lib/xmodule/xmodule/tests/test_course_module.py b/common/lib/xmodule/xmodule/tests/test_course_module.py index 712b095696..eda9cf386c 100644 --- a/common/lib/xmodule/xmodule/tests/test_course_module.py +++ b/common/lib/xmodule/xmodule/tests/test_course_module.py @@ -1,5 +1,6 @@ import unittest from time import strptime + from fs.memoryfs import MemoryFS from mock import Mock, patch @@ -39,7 +40,7 @@ class DummySystem(ImportSystem): class IsNewCourseTestCase(unittest.TestCase): """Make sure the property is_new works on courses""" @staticmethod - def get_dummy_course(start, announcement=None, is_new=None): + def get_dummy_course(start, announcement=None, is_new=None, advertised_start=None): """Get a dummy course""" system = DummySystem(load_error_modules=True) @@ -49,71 +50,103 @@ class IsNewCourseTestCase(unittest.TestCase): is_new = to_attrb('is_new', is_new) announcement = to_attrb('announcement', announcement) + advertised_start = to_attrb('advertised_start', advertised_start) start_xml = ''' + {is_new} + {advertised_start}> Two houses, ... 
'''.format(org=ORG, course=COURSE, start=start, is_new=is_new, - announcement=announcement) + announcement=announcement, advertised_start=advertised_start) return system.process_xml(start_xml) @patch('xmodule.course_module.time.gmtime') def test_sorting_score(self, gmtime_mock): gmtime_mock.return_value = NOW - dates = [('2012-10-01T12:00', '2012-09-01T12:00'), # 0 - ('2012-12-01T12:00', '2012-11-01T12:00'), # 1 - ('2013-02-01T12:00', '2012-12-01T12:00'), # 2 - ('2013-02-01T12:00', '2012-11-10T12:00'), # 3 - ('2013-02-01T12:00', None), # 4 - ('2013-03-01T12:00', None), # 5 - ('2013-04-01T12:00', None), # 6 - ('2012-11-01T12:00', None), # 7 - ('2012-09-01T12:00', None), # 8 - ('1990-01-01T12:00', None), # 9 - ('2013-01-02T12:00', None), # 10 - ('2013-01-10T12:00', '2012-12-31T12:00'), # 11 - ('2013-01-10T12:00', '2013-01-01T12:00'), # 12 + + day1 = '2012-01-01T12:00' + day2 = '2012-01-02T12:00' + + dates = [ + # Announce date takes priority over actual start + # and courses announced on a later date are newer + # than courses announced for an earlier date + ((day1, day2, None), (day1, day1, None), self.assertLess), + ((day1, day1, None), (day2, day1, None), self.assertEqual), + + # Announce dates take priority over advertised starts + ((day1, day2, day1), (day1, day1, day1), self.assertLess), + ((day1, day1, day2), (day2, day1, day2), self.assertEqual), + + # Later start == newer course + ((day2, None, None), (day1, None, None), self.assertLess), + ((day1, None, None), (day1, None, None), self.assertEqual), + + # Non-parseable advertised starts are ignored in preference to actual starts + ((day2, None, "Spring"), (day1, None, "Fall"), self.assertLess), + ((day1, None, "Spring"), (day1, None, "Fall"), self.assertEqual), + + # Partially parsable advertised starts should take priority over start dates + ((day2, None, "October 2013"), (day2, None, "October 2012"), self.assertLess), + ((day2, None, "October 2013"), (day1, None, "October 2013"), self.assertEqual), + + # 
Parseable advertised starts take priority over start dates + ((day1, None, day2), (day1, None, day1), self.assertLess), + ((day2, None, day2), (day1, None, day2), self.assertEqual), ] - data = [] - for i, d in enumerate(dates): - descriptor = self.get_dummy_course(start=d[0], announcement=d[1]) - score = descriptor.sorting_score - data.append((score, i)) - - result = [d[1] for d in sorted(data)] - assert(result == [12, 11, 2, 3, 1, 0, 6, 5, 4, 10, 7, 8, 9]) - + for a, b, assertion in dates: + a_score = self.get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score + b_score = self.get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score + print "Comparing %s to %s" % (a, b) + assertion(a_score, b_score) @patch('xmodule.course_module.time.gmtime') - def test_is_new(self, gmtime_mock): + def test_start_date_text(self, gmtime_mock): + gmtime_mock.return_value = NOW + + settings = [ + # start, advertized, result + ('2012-12-02T12:00', None, 'Dec 02, 2012'), + ('2012-12-02T12:00', '2011-11-01T12:00', 'Nov 01, 2011'), + ('2012-12-02T12:00', 'Spring 2012', 'Spring 2012'), + ('2012-12-02T12:00', 'November, 2011', 'November, 2011'), + ] + + for s in settings: + d = self.get_dummy_course(start=s[0], advertised_start=s[1]) + print "Checking start=%s advertised=%s" % (s[0], s[1]) + self.assertEqual(d.start_date_text, s[2]) + + @patch('xmodule.course_module.time.gmtime') + def test_is_newish(self, gmtime_mock): gmtime_mock.return_value = NOW descriptor = self.get_dummy_course(start='2012-12-02T12:00', is_new=True) - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=False) - assert(descriptor.is_new is False) + assert(descriptor.is_newish is False) descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=True) - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) descriptor = 
self.get_dummy_course(start='2013-01-15T12:00') - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) - descriptor = self.get_dummy_course(start='2013-03-00T12:00') - assert(descriptor.is_new is True) + descriptor = self.get_dummy_course(start='2013-03-01T12:00') + assert(descriptor.is_newish is True) descriptor = self.get_dummy_course(start='2012-10-15T12:00') - assert(descriptor.is_new is False) + assert(descriptor.is_newish is False) descriptor = self.get_dummy_course(start='2012-12-31T12:00') - assert(descriptor.is_new is True) + assert(descriptor.is_newish is True) diff --git a/common/lib/xmodule/xmodule/tests/test_export.py b/common/lib/xmodule/xmodule/tests/test_export.py index da1b04bd94..443014f9ef 100644 --- a/common/lib/xmodule/xmodule/tests/test_export.py +++ b/common/lib/xmodule/xmodule/tests/test_export.py @@ -4,7 +4,7 @@ from fs.osfs import OSFS from nose.tools import assert_equals, assert_true from path import path from tempfile import mkdtemp -from shutil import copytree +import shutil from xmodule.modulestore.xml import XMLModuleStore @@ -18,27 +18,16 @@ TEST_DIR = TEST_DIR / 'test' DATA_DIR = TEST_DIR / 'data' -def strip_metadata(descriptor, key): - """ - Recursively strips tag from all children. - """ - print "strip {key} from {desc}".format(key=key, desc=descriptor.location.url()) - descriptor.metadata.pop(key, None) - for d in descriptor.get_children(): - strip_metadata(d, key) - - def strip_filenames(descriptor): """ Recursively strips 'filename' from all children's definitions. """ print "strip filename from {desc}".format(desc=descriptor.location.url()) - descriptor.definition.pop('filename', None) + descriptor._model_data.pop('filename', None) for d in descriptor.get_children(): strip_filenames(d) - class RoundTripTestCase(unittest.TestCase): ''' Check that our test courses roundtrip properly. Same course imported , than exported, then imported again. 
@@ -46,11 +35,11 @@ class RoundTripTestCase(unittest.TestCase): Thus we make sure that export and import work properly. ''' def check_export_roundtrip(self, data_dir, course_dir): - root_dir = path(mkdtemp()) + root_dir = path(self.temp_dir) print "Copying test course to temp dir {0}".format(root_dir) data_dir = path(data_dir) - copytree(data_dir / course_dir, root_dir / course_dir) + shutil.copytree(data_dir / course_dir, root_dir / course_dir) print "Starting import" initial_import = XMLModuleStore(root_dir, course_dirs=[course_dir]) @@ -77,10 +66,6 @@ class RoundTripTestCase(unittest.TestCase): exported_course = courses2[0] print "Checking course equality" - # HACK: data_dir metadata tags break equality because they - # aren't real metadata, and depend on paths. Remove them. - strip_metadata(initial_course, 'data_dir') - strip_metadata(exported_course, 'data_dir') # HACK: filenames change when changing file formats # during imports from old-style courses. Ignore them. @@ -105,9 +90,10 @@ class RoundTripTestCase(unittest.TestCase): self.assertEquals(initial_import.modules[course_id][location], second_import.modules[course_id][location]) - def setUp(self): self.maxDiff = None + self.temp_dir = mkdtemp() + self.addCleanup(shutil.rmtree, self.temp_dir) def test_toy_roundtrip(self): self.check_export_roundtrip(DATA_DIR, "toy") @@ -118,6 +104,9 @@ class RoundTripTestCase(unittest.TestCase): def test_full_roundtrip(self): self.check_export_roundtrip(DATA_DIR, "full") + def test_conditional_and_poll_roundtrip(self): + self.check_export_roundtrip(DATA_DIR, "conditional_and_poll") + def test_selfassessment_roundtrip(self): #Test selfassessment xmodule to see if it exports correctly self.check_export_roundtrip(DATA_DIR, "self_assessment") diff --git a/common/lib/xmodule/xmodule/tests/test_fields.py b/common/lib/xmodule/xmodule/tests/test_fields.py new file mode 100644 index 0000000000..7c8872efc1 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_fields.py @@ -0,0 
+1,80 @@ +"""Tests for Date class defined in fields.py.""" +import datetime +import unittest +from django.utils.timezone import UTC +from xmodule.fields import Date +import time + +class DateTest(unittest.TestCase): + date = Date() + + @staticmethod + def struct_to_datetime(struct_time): + return datetime.datetime(struct_time.tm_year, struct_time.tm_mon, + struct_time.tm_mday, struct_time.tm_hour, + struct_time.tm_min, struct_time.tm_sec, tzinfo=UTC()) + + def compare_dates(self, date1, date2, expected_delta): + dt1 = DateTest.struct_to_datetime(date1) + dt2 = DateTest.struct_to_datetime(date2) + self.assertEqual(dt1 - dt2, expected_delta, str(date1) + "-" + + str(date2) + "!=" + str(expected_delta)) + + def test_from_json(self): + '''Test conversion from iso compatible date strings to struct_time''' + self.compare_dates( + DateTest.date.from_json("2013-01-01"), + DateTest.date.from_json("2012-12-31"), + datetime.timedelta(days=1)) + self.compare_dates( + DateTest.date.from_json("2013-01-01T00"), + DateTest.date.from_json("2012-12-31T23"), + datetime.timedelta(hours=1)) + self.compare_dates( + DateTest.date.from_json("2013-01-01T00:00"), + DateTest.date.from_json("2012-12-31T23:59"), + datetime.timedelta(minutes=1)) + self.compare_dates( + DateTest.date.from_json("2013-01-01T00:00:00"), + DateTest.date.from_json("2012-12-31T23:59:59"), + datetime.timedelta(seconds=1)) + self.compare_dates( + DateTest.date.from_json("2013-01-01T00:00:00Z"), + DateTest.date.from_json("2012-12-31T23:59:59Z"), + datetime.timedelta(seconds=1)) + self.compare_dates( + DateTest.date.from_json("2012-12-31T23:00:01-01:00"), + DateTest.date.from_json("2013-01-01T00:00:00+01:00"), + datetime.timedelta(hours=1, seconds=1)) + + def test_return_None(self): + self.assertIsNone(DateTest.date.from_json("")) + self.assertIsNone(DateTest.date.from_json(None)) + self.assertIsNone(DateTest.date.from_json(['unknown value'])) + + def test_old_due_date_format(self): + current = datetime.datetime.today() + 
self.assertEqual( + time.struct_time((current.year, 3, 12, 12, 0, 0, 1, 71, 0)), + DateTest.date.from_json("March 12 12:00")) + self.assertEqual( + time.struct_time((current.year, 12, 4, 16, 30, 0, 2, 338, 0)), + DateTest.date.from_json("December 4 16:30")) + + def test_to_json(self): + ''' + Test converting time reprs to iso dates + ''' + self.assertEqual( + DateTest.date.to_json( + time.strptime("2012-12-31T23:59:59Z", "%Y-%m-%dT%H:%M:%SZ")), + "2012-12-31T23:59:59Z") + self.assertEqual( + DateTest.date.to_json( + DateTest.date.from_json("2012-12-31T23:59:59Z")), + "2012-12-31T23:59:59Z") + self.assertEqual( + DateTest.date.to_json( + DateTest.date.from_json("2012-12-31T23:00:01-01:00")), + "2013-01-01T00:00:01Z") + diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py b/common/lib/xmodule/xmodule/tests/test_graders.py index 27416b1d5c..1a9ba50dc4 100644 --- a/common/lib/xmodule/xmodule/tests/test_graders.py +++ b/common/lib/xmodule/xmodule/tests/test_graders.py @@ -6,32 +6,34 @@ from xmodule.graders import Score, aggregate_scores class GradesheetTest(unittest.TestCase): + '''Tests the aggregate_scores method''' def test_weighted_grading(self): scores = [] Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible) - all, graded = aggregate_scores(scores) - self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary")) - self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) + all_total, graded_total = aggregate_scores(scores) + self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary")) + self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary")) scores.append(Score(earned=0, possible=5, graded=False, section="summary")) - all, graded = aggregate_scores(scores) - self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary")) - self.assertEqual(graded, Score(earned=0, possible=0, 
graded=True, section="summary")) + all_total, graded_total = aggregate_scores(scores) + self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary")) + self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary")) scores.append(Score(earned=3, possible=5, graded=True, section="summary")) - all, graded = aggregate_scores(scores) - self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary")) - self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary")) + all_total, graded_total = aggregate_scores(scores) + self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary")) + self.assertAlmostEqual(graded_total, Score(earned=3, possible=5, graded=True, section="summary")) scores.append(Score(earned=2, possible=5, graded=True, section="summary")) - all, graded = aggregate_scores(scores) - self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary")) - self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary")) + all_total, graded_total = aggregate_scores(scores) + self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary")) + self.assertAlmostEqual(graded_total, Score(earned=5, possible=10, graded=True, section="summary")) class GraderTest(unittest.TestCase): + '''Tests grader implementations''' empty_gradesheet = { } @@ -44,136 +46,152 @@ class GraderTest(unittest.TestCase): test_gradesheet = { 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'), - Score(earned=16, possible=16.0, graded=True, section='hw2')], - #The dropped scores should be from the assignments that don't exist yet + Score(earned=16, possible=16.0, graded=True, section='hw2')], + # The dropped scores should be from the assignments that don't exist yet 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped - 
Score(earned=1, possible=1.0, graded=True, section='lab2'), - Score(earned=1, possible=1.0, graded=True, section='lab3'), - Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped - Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped - Score(earned=6, possible=7.0, graded=True, section='lab6'), - Score(earned=5, possible=6.0, graded=True, section='lab7')], + Score(earned=1, possible=1.0, graded=True, section='lab2'), + Score(earned=1, possible=1.0, graded=True, section='lab3'), + Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped + Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped + Score(earned=6, possible=7.0, graded=True, section='lab6'), + Score(earned=5, possible=6.0, graded=True, section='lab7')], 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ], } - def test_SingleSectionGrader(self): - midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") - lab4Grader = graders.SingleSectionGrader("Lab", "lab4") - badLabGrader = graders.SingleSectionGrader("Lab", "lab42") + def test_single_section_grader(self): + midterm_grader = graders.SingleSectionGrader("Midterm", "Midterm Exam") + lab4_grader = graders.SingleSectionGrader("Lab", "lab4") + bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42") - for graded in [midtermGrader.grade(self.empty_gradesheet), - midtermGrader.grade(self.incomplete_gradesheet), - badLabGrader.grade(self.test_gradesheet)]: + for graded in [midterm_grader.grade(self.empty_gradesheet), + midterm_grader.grade(self.incomplete_gradesheet), + bad_lab_grader.grade(self.test_gradesheet)]: self.assertEqual(len(graded['section_breakdown']), 1) self.assertEqual(graded['percent'], 0.0) - graded = midtermGrader.grade(self.test_gradesheet) + graded = midterm_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.505) self.assertEqual(len(graded['section_breakdown']), 1) - graded = 
lab4Grader.grade(self.test_gradesheet) + graded = lab4_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.2) self.assertEqual(len(graded['section_breakdown']), 1) - def test_AssignmentFormatGrader(self): - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0) - #Even though the minimum number is 3, this should grade correctly when 7 assignments are found - overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2) - labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) + def test_assignment_format_grader(self): + homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2) + no_drop_grader = graders.AssignmentFormatGrader("Homework", 12, 0) + # Even though the minimum number is 3, this should grade correctly when 7 assignments are found + overflow_grader = graders.AssignmentFormatGrader("Lab", 3, 2) + lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3) - #Test the grading of an empty gradesheet - for graded in [homeworkGrader.grade(self.empty_gradesheet), - noDropGrader.grade(self.empty_gradesheet), - homeworkGrader.grade(self.incomplete_gradesheet), - noDropGrader.grade(self.incomplete_gradesheet)]: + # Test the grading of an empty gradesheet + for graded in [homework_grader.grade(self.empty_gradesheet), + no_drop_grader.grade(self.empty_gradesheet), + homework_grader.grade(self.incomplete_gradesheet), + no_drop_grader.grade(self.incomplete_gradesheet)]: self.assertAlmostEqual(graded['percent'], 0.0) - #Make sure the breakdown includes 12 sections, plus one summary + # Make sure the breakdown includes 12 sections, plus one summary self.assertEqual(len(graded['section_breakdown']), 12 + 1) - graded = homeworkGrader.grade(self.test_gradesheet) + graded = homework_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments self.assertEqual(len(graded['section_breakdown']), 12 + 1) - graded = 
noDropGrader.grade(self.test_gradesheet) + graded = no_drop_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments self.assertEqual(len(graded['section_breakdown']), 12 + 1) - graded = overflowGrader.grade(self.test_gradesheet) + graded = overflow_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments self.assertEqual(len(graded['section_breakdown']), 7 + 1) - graded = labGrader.grade(self.test_gradesheet) + graded = lab_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.9226190476190477) self.assertEqual(len(graded['section_breakdown']), 7 + 1) - def test_WeightedSubsectionsGrader(self): - #First, a few sub graders - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) - midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") + def test_assignment_format_grader_on_single_section_entry(self): + midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0) + # Test the grading on a section with one item: + for graded in [midterm_grader.grade(self.empty_gradesheet), + midterm_grader.grade(self.incomplete_gradesheet)]: + self.assertAlmostEqual(graded['percent'], 0.0) + # Make sure the breakdown includes just the one summary + self.assertEqual(len(graded['section_breakdown']), 0 + 1) + self.assertEqual(graded['section_breakdown'][0]['label'], 'Midterm') - weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25), - (labGrader, labGrader.category, 0.25), - (midtermGrader, midtermGrader.category, 0.5)]) + graded = midterm_grader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.505) + self.assertEqual(len(graded['section_breakdown']), 0 + 1) - overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5), - 
(labGrader, labGrader.category, 0.5), - (midtermGrader, midtermGrader.category, 0.5)]) + def test_weighted_subsections_grader(self): + # First, a few sub graders + homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2) + lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3) + # phasing out the use of SingleSectionGraders, and instead using AssignmentFormatGraders that + # will act like SingleSectionGraders on single sections. + midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0) - #The midterm should have all weight on this one - zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), - (labGrader, labGrader.category, 0.0), - (midtermGrader, midtermGrader.category, 0.5)]) + weighted_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.25), + (lab_grader, lab_grader.category, 0.25), + (midterm_grader, midterm_grader.category, 0.5)]) - #This should always have a final percent of zero - allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), - (labGrader, labGrader.category, 0.0), - (midtermGrader, midtermGrader.category, 0.0)]) + over_one_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.5), + (lab_grader, lab_grader.category, 0.5), + (midterm_grader, midterm_grader.category, 0.5)]) - emptyGrader = graders.WeightedSubsectionsGrader([]) + # The midterm should have all weight on this one + zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0), + (lab_grader, lab_grader.category, 0.0), + (midterm_grader, midterm_grader.category, 0.5)]) - graded = weightedGrader.grade(self.test_gradesheet) + # This should always have a final percent of zero + all_zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0), + (lab_grader, lab_grader.category, 0.0), + (midterm_grader, 
midterm_grader.category, 0.0)]) + + empty_grader = graders.WeightedSubsectionsGrader([]) + + graded = weighted_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.5106547619047619) self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) self.assertEqual(len(graded['grade_breakdown']), 3) - graded = overOneWeightsGrader.grade(self.test_gradesheet) + graded = over_one_weights_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.7688095238095238) self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) self.assertEqual(len(graded['grade_breakdown']), 3) - graded = zeroWeightsGrader.grade(self.test_gradesheet) + graded = zero_weights_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.2525) self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) self.assertEqual(len(graded['grade_breakdown']), 3) - graded = allZeroWeightsGrader.grade(self.test_gradesheet) + graded = all_zero_weights_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.0) self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) self.assertEqual(len(graded['grade_breakdown']), 3) - for graded in [weightedGrader.grade(self.empty_gradesheet), - weightedGrader.grade(self.incomplete_gradesheet), - zeroWeightsGrader.grade(self.empty_gradesheet), - allZeroWeightsGrader.grade(self.empty_gradesheet)]: + for graded in [weighted_grader.grade(self.empty_gradesheet), + weighted_grader.grade(self.incomplete_gradesheet), + zero_weights_grader.grade(self.empty_gradesheet), + all_zero_weights_grader.grade(self.empty_gradesheet)]: self.assertAlmostEqual(graded['percent'], 0.0) self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) self.assertEqual(len(graded['grade_breakdown']), 3) - graded = emptyGrader.grade(self.test_gradesheet) + graded = empty_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 
0.0) self.assertEqual(len(graded['section_breakdown']), 0) self.assertEqual(len(graded['grade_breakdown']), 0) - def test_graderFromConf(self): + def test_grader_from_conf(self): - #Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test - #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs. + # Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test + # in test_graders.WeightedSubsectionsGrader, but generate the graders with confs. - weightedGrader = graders.grader_from_conf([ + weighted_grader = graders.grader_from_conf([ { 'type': "Homework", 'min_count': 12, @@ -196,25 +214,25 @@ class GraderTest(unittest.TestCase): }, ]) - emptyGrader = graders.grader_from_conf([]) + empty_grader = graders.grader_from_conf([]) - graded = weightedGrader.grade(self.test_gradesheet) + graded = weighted_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.5106547619047619) self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) self.assertEqual(len(graded['grade_breakdown']), 3) - graded = emptyGrader.grade(self.test_gradesheet) + graded = empty_grader.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.0) self.assertEqual(len(graded['section_breakdown']), 0) self.assertEqual(len(graded['grade_breakdown']), 0) - #Test that graders can also be used instead of lists of dictionaries - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - homeworkGrader2 = graders.grader_from_conf(homeworkGrader) + # Test that graders can also be used instead of lists of dictionaries + homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2) + homework_grader2 = graders.grader_from_conf(homework_grader) - graded = homeworkGrader2.grade(self.test_gradesheet) + graded = homework_grader2.grade(self.test_gradesheet) self.assertAlmostEqual(graded['percent'], 0.11) 
self.assertEqual(len(graded['section_breakdown']), 12 + 1) - #TODO: How do we test failure cases? The parser only logs an error when - #it can't parse something. Maybe it should throw exceptions? + # TODO: How do we test failure cases? The parser only logs an error when + # it can't parse something. Maybe it should throw exceptions? diff --git a/common/lib/xmodule/xmodule/tests/test_import.py b/common/lib/xmodule/xmodule/tests/test_import.py index 42072ffe4d..37b1d35938 100644 --- a/common/lib/xmodule/xmodule/tests/test_import.py +++ b/common/lib/xmodule/xmodule/tests/test_import.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + from path import path import unittest from fs.memoryfs import MemoryFS @@ -12,6 +14,7 @@ from xmodule.errortracker import make_error_tracker from xmodule.modulestore import Location from xmodule.modulestore.xml import ImportSystem, XMLModuleStore from xmodule.modulestore.exceptions import ItemNotFoundError +from xmodule.modulestore.inheritance import compute_inherited_metadata from .test_export import DATA_DIR @@ -75,7 +78,6 @@ class ImportTestCase(BaseCourseTestCase): self.assertEqual(descriptor.__class__.__name__, 'ErrorDescriptor') - def test_unique_url_names(self): '''Check that each error gets its very own url_name''' bad_xml = '''''' @@ -87,7 +89,6 @@ class ImportTestCase(BaseCourseTestCase): self.assertNotEqual(descriptor1.location, descriptor2.location) - def test_reimport(self): '''Make sure an already-exported error xml tag loads properly''' @@ -103,8 +104,10 @@ class ImportTestCase(BaseCourseTestCase): self.assertEqual(re_import_descriptor.__class__.__name__, 'ErrorDescriptor') - self.assertEqual(descriptor.definition['data'], - re_import_descriptor.definition['data']) + self.assertEqual(descriptor.contents, + re_import_descriptor.contents) + self.assertEqual(descriptor.error_msg, + re_import_descriptor.error_msg) def test_fixed_xml_tag(self): """Make sure a tag that's been fixed exports as the original tag type""" @@ -138,23 +141,20 
@@ class ImportTestCase(BaseCourseTestCase): url_name = 'test1' start_xml = ''' + due="{due}" url_name="{url_name}" unicorn="purple"> Two houses, ... - '''.format(grace=v, org=ORG, course=COURSE, url_name=url_name) + '''.format(due=v, org=ORG, course=COURSE, url_name=url_name) descriptor = system.process_xml(start_xml) + compute_inherited_metadata(descriptor) - print descriptor, descriptor.metadata - self.assertEqual(descriptor.metadata['graceperiod'], v) - self.assertEqual(descriptor.metadata['unicorn'], 'purple') + print descriptor, descriptor._model_data + self.assertEqual(descriptor.lms.due, v) - # Check that the child inherits graceperiod correctly + # Check that the child inherits due correctly child = descriptor.get_children()[0] - self.assertEqual(child.metadata['graceperiod'], v) - - # check that the child does _not_ inherit any unicorns - self.assertTrue('unicorn' not in child.metadata) + self.assertEqual(child.lms.due, v) # Now export and check things resource_fs = MemoryFS() @@ -181,12 +181,12 @@ class ImportTestCase(BaseCourseTestCase): # did we successfully strip the url_name from the definition contents? self.assertTrue('url_name' not in course_xml.attrib) - # Does the chapter tag now have a graceperiod attribute? + # Does the chapter tag now have a due attribute? 
# hardcoded path to child with resource_fs.open('chapter/ch.xml') as f: chapter_xml = etree.fromstring(f.read()) self.assertEqual(chapter_xml.tag, 'chapter') - self.assertFalse('graceperiod' in chapter_xml.attrib) + self.assertFalse('due' in chapter_xml.attrib) def test_is_pointer_tag(self): """ @@ -224,13 +224,12 @@ class ImportTestCase(BaseCourseTestCase): def check_for_key(key, node): "recursive check for presence of key" print "Checking {0}".format(node.location.url()) - self.assertTrue(key in node.metadata) + self.assertTrue(key in node._model_data) for c in node.get_children(): check_for_key(key, c) check_for_key('graceperiod', course) - def test_policy_loading(self): """Make sure that when two courses share content with the same org and course names, policy applies to the right one.""" @@ -252,8 +251,7 @@ class ImportTestCase(BaseCourseTestCase): # Also check that keys from policy are run through the # appropriate attribute maps -- 'graded' should be True, not 'true' - self.assertEqual(toy.metadata['graded'], True) - + self.assertEqual(toy.lms.graded, True) def test_definition_loading(self): """When two courses share the same org and course name and @@ -271,9 +269,8 @@ class ImportTestCase(BaseCourseTestCase): location = Location(["i4x", "edX", "toy", "video", "Welcome"]) toy_video = modulestore.get_instance(toy_id, location) two_toy_video = modulestore.get_instance(two_toy_id, location) - self.assertEqual(toy_video.metadata['youtube'], "1.0:p2Q6BrNhdh8") - self.assertEqual(two_toy_video.metadata['youtube'], "1.0:p2Q6BrNhdh9") - + self.assertEqual(etree.fromstring(toy_video.data).get('youtube'), "1.0:p2Q6BrNhdh8") + self.assertEqual(etree.fromstring(two_toy_video.data).get('youtube'), "1.0:p2Q6BrNhdh9") def test_colon_in_url_name(self): """Ensure that colons in url_names convert to file paths properly""" @@ -331,6 +328,22 @@ class ImportTestCase(BaseCourseTestCase): self.assertEqual(len(video.url_name), len('video_') + 12) + def 
test_poll_and_conditional_xmodule(self): + modulestore = XMLModuleStore(DATA_DIR, course_dirs=['conditional_and_poll']) + + course = modulestore.get_courses()[0] + chapters = course.get_children() + ch1 = chapters[0] + sections = ch1.get_children() + + self.assertEqual(len(sections), 1) + + location = course.location + location = Location(location.tag, location.org, location.course, + 'sequential', 'Problem_Demos') + module = modulestore.get_instance(course.id, location) + self.assertEqual(len(module.children), 2) + def test_error_on_import(self): '''Check that when load_error_module is false, an exception is raised, rather than returning an ErrorModule''' @@ -354,7 +367,7 @@ class ImportTestCase(BaseCourseTestCase): render_string_from_sample_gst_xml = """ \ """.strip() - self.assertEqual(gst_sample.definition['render'], render_string_from_sample_gst_xml) + self.assertEqual(gst_sample.render, render_string_from_sample_gst_xml) def test_cohort_config(self): """ @@ -370,13 +383,13 @@ class ImportTestCase(BaseCourseTestCase): self.assertFalse(course.is_cohorted) # empty config -> False - course.metadata['cohort_config'] = {} + course.cohort_config = {} self.assertFalse(course.is_cohorted) # false config -> False - course.metadata['cohort_config'] = {'cohorted': False} + course.cohort_config = {'cohorted': False} self.assertFalse(course.is_cohorted) # and finally... 
- course.metadata['cohort_config'] = {'cohorted': True} + course.cohort_config = {'cohorted': True} self.assertTrue(course.is_cohorted) diff --git a/common/lib/xmodule/xmodule/tests/test_logic.py b/common/lib/xmodule/xmodule/tests/test_logic.py new file mode 100644 index 0000000000..018b40427e --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_logic.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- + +import json +import unittest + +from xmodule.poll_module import PollDescriptor +from xmodule.conditional_module import ConditionalDescriptor + + +class LogicTest(unittest.TestCase): + """Base class for testing xmodule logic.""" + descriptor_class = None + raw_model_data = {} + + def setUp(self): + class EmptyClass: pass + + self.system = None + self.location = None + self.descriptor = EmptyClass() + + self.xmodule_class = self.descriptor_class.module_class + self.xmodule = self.xmodule_class(self.system, self.location, + self.descriptor, self.raw_model_data) + + def ajax_request(self, dispatch, get): + return json.loads(self.xmodule.handle_ajax(dispatch, get)) + + +class PollModuleTest(LogicTest): + descriptor_class = PollDescriptor + raw_model_data = { + 'poll_answers': {'Yes': 1, 'Dont_know': 0, 'No': 0}, + 'voted': False, + 'poll_answer': '' + } + + def test_bad_ajax_request(self): + response = self.ajax_request('bad_answer', {}) + self.assertDictEqual(response, {'error': 'Unknown Command!'}) + + def test_good_ajax_request(self): + response = self.ajax_request('No', {}) + + poll_answers = response['poll_answers'] + total = response['total'] + callback = response['callback'] + + self.assertDictEqual(poll_answers, {'Yes': 1, 'Dont_know': 0, 'No': 1}) + self.assertEqual(total, 2) + self.assertDictEqual(callback, {'objectName': 'Conditional'}) + self.assertEqual(self.xmodule.poll_answer, 'No') + + +class ConditionalModuleTest(LogicTest): + descriptor_class = ConditionalDescriptor + + def test_ajax_request(self): + # Mock is_condition_satisfied + 
self.xmodule.is_condition_satisfied = lambda: True + setattr(self.xmodule.descriptor, 'get_children', lambda: []) + + response = self.ajax_request('No', {}) + html = response['html'] + + self.assertEqual(html, []) diff --git a/common/lib/xmodule/xmodule/tests/test_randomize_module.py b/common/lib/xmodule/xmodule/tests/test_randomize_module.py index 456fd379a5..59cf5a59f3 100644 --- a/common/lib/xmodule/xmodule/tests/test_randomize_module.py +++ b/common/lib/xmodule/xmodule/tests/test_randomize_module.py @@ -13,7 +13,7 @@ COURSE = 'test_course' START = '2013-01-01T01:00:00' -from test_course_module import DummySystem as DummyImportSystem +from .test_course_module import DummySystem as DummyImportSystem from . import test_system diff --git a/common/lib/xmodule/xmodule/tests/test_self_assessment.py b/common/lib/xmodule/xmodule/tests/test_self_assessment.py index 617b2b142a..593b3fea01 100644 --- a/common/lib/xmodule/xmodule/tests/test_self_assessment.py +++ b/common/lib/xmodule/xmodule/tests/test_self_assessment.py @@ -1,16 +1,17 @@ import json -from mock import Mock +from mock import Mock, MagicMock import unittest -from xmodule.self_assessment_module import SelfAssessmentModule +from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule from xmodule.modulestore import Location from lxml import etree from . 
import test_system +import test_util_open_ended + class SelfAssessmentTest(unittest.TestCase): - rubric = ''' Response Quality @@ -23,13 +24,11 @@ class SelfAssessmentTest(unittest.TestCase): 'prompt': prompt, 'submitmessage': 'Shall we submit now?', 'hintprompt': 'Consider this...', - } + } location = Location(["i4x", "edX", "sa_test", "selfassessment", "SampleQuestion"]) - metadata = {'attempts': '10'} - descriptor = Mock() def setUp(self): @@ -40,40 +39,62 @@ class SelfAssessmentTest(unittest.TestCase): 'attempts': 2}) static_data = { - 'max_attempts': 10, - 'rubric': etree.XML(self.rubric), - 'prompt': self.prompt, - 'max_score': 1, - 'display_name': "Name", - 'accept_file_upload': False, - 'close_date': None - } + 'max_attempts': 10, + 'rubric': etree.XML(self.rubric), + 'prompt': self.prompt, + 'max_score': 1, + 'display_name': "Name", + 'accept_file_upload': False, + 'close_date': None, + 's3_interface': test_util_open_ended.S3_INTERFACE, + 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE, + 'skip_basic_checks': False, + } - self.module = SelfAssessmentModule(test_system, self.location, - self.definition, self.descriptor, - static_data, - state, metadata=self.metadata) + self.module = SelfAssessmentModule(test_system(), self.location, + self.definition, + self.descriptor, + static_data) def test_get_html(self): - html = self.module.get_html(test_system) + html = self.module.get_html(self.module.system) self.assertTrue("This is sample prompt text" in html) def test_self_assessment_flow(self): + responses = {'assessment': '0', 'score_list[]': ['0', '0']} + + def get_fake_item(name): + return responses[name] + + def get_data_for_location(self, location, student): + return { + 'count_graded': 0, + 'count_required': 0, + 'student_sub_count': 0, + } + + mock_query_dict = MagicMock() + mock_query_dict.__getitem__.side_effect = get_fake_item + mock_query_dict.getlist = get_fake_item + + self.module.peer_gs.get_data_for_location = 
get_data_for_location self.assertEqual(self.module.get_score()['score'], 0) - self.module.save_answer({'student_answer': "I am an answer"}, test_system) - self.assertEqual(self.module.state, self.module.ASSESSING) - - self.module.save_assessment({'assessment': '0'}, test_system) - self.assertEqual(self.module.state, self.module.DONE) + self.module.save_answer({'student_answer': "I am an answer"}, + self.module.system) + self.assertEqual(self.module.child_state, self.module.ASSESSING) + self.module.save_assessment(mock_query_dict, self.module.system) + self.assertEqual(self.module.child_state, self.module.DONE) d = self.module.reset({}) self.assertTrue(d['success']) - self.assertEqual(self.module.state, self.module.INITIAL) + self.assertEqual(self.module.child_state, self.module.INITIAL) # if we now assess as right, skip the REQUEST_HINT state - self.module.save_answer({'student_answer': 'answer 4'}, test_system) - self.module.save_assessment({'assessment': '1'}, test_system) - self.assertEqual(self.module.state, self.module.DONE) + self.module.save_answer({'student_answer': 'answer 4'}, + self.module.system) + responses['assessment'] = '1' + self.module.save_assessment(mock_query_dict, self.module.system) + self.assertEqual(self.module.child_state, self.module.DONE) diff --git a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py new file mode 100644 index 0000000000..db580f1e0e --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py @@ -0,0 +1,14 @@ +OPEN_ENDED_GRADING_INTERFACE = { + 'url': 'http://127.0.0.1:3033/', + 'username': 'incorrect', + 'password': 'incorrect', + 'staff_grading': 'staff_grading', + 'peer_grading': 'peer_grading', + 'grading_controller': 'grading_controller' +} + +S3_INTERFACE = { + 'aws_access_key': "", + 'aws_secret_key': "", + "aws_bucket_name": "", +} \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/timeinfo.py 
b/common/lib/xmodule/xmodule/timeinfo.py new file mode 100644 index 0000000000..615a7b2c73 --- /dev/null +++ b/common/lib/xmodule/xmodule/timeinfo.py @@ -0,0 +1,39 @@ +import dateutil +import dateutil.parser +import datetime +from .timeparse import parse_timedelta + +import logging +log = logging.getLogger(__name__) + +class TimeInfo(object): + """ + This is a simple object that calculates and stores datetime information for an XModule + based on the due date string and the grace period string + + So far it parses out three different pieces of time information: + self.display_due_date - the 'official' due date that gets displayed to students + self.grace_period - the length of the grace period + self.close_date - the real due date + + """ + def __init__(self, display_due_date_string, grace_period_string): + if display_due_date_string is not None: + try: + self.display_due_date = dateutil.parser.parse(display_due_date_string) + except ValueError: + log.error("Could not parse due date {0}".format(display_due_date_string)) + raise + else: + self.display_due_date = None + + if grace_period_string is not None and self.display_due_date: + try: + self.grace_period = parse_timedelta(grace_period_string) + self.close_date = self.display_due_date + self.grace_period + except: + log.error("Error parsing the grace period {0}".format(grace_period_string)) + raise + else: + self.grace_period = None + self.close_date = self.display_due_date diff --git a/common/lib/xmodule/xmodule/timelimit_module.py b/common/lib/xmodule/xmodule/timelimit_module.py index 9abb5d183f..efa47a5dca 100644 --- a/common/lib/xmodule/xmodule/timelimit_module.py +++ b/common/lib/xmodule/xmodule/timelimit_module.py @@ -9,35 +9,31 @@ from xmodule.xml_module import XmlDescriptor from xmodule.x_module import XModule from xmodule.progress import Progress from xmodule.exceptions import NotFoundError +from xblock.core import Float, String, Boolean, Scope log = logging.getLogger(__name__) -class 
TimeLimitModule(XModule): - ''' + +class TimeLimitFields(object): + beginning_at = Float(help="The time this timer was started", scope=Scope.student_state) + ending_at = Float(help="The time this timer will end", scope=Scope.student_state) + accomodation_code = String(help="A code indicating accommodations to be given the student", scope=Scope.student_state) + time_expired_redirect_url = String(help="Url to redirect users to after the timelimit has expired", scope=Scope.settings) + duration = Float(help="The length of this timer", scope=Scope.settings) + suppress_toplevel_navigation = Boolean(help="Whether the toplevel navigation should be suppressed when viewing this module", scope=Scope.settings) + + +class TimeLimitModule(TimeLimitFields, XModule): + ''' Wrapper module which imposes a time constraint for the completion of its child. ''' - def __init__(self, system, location, definition, descriptor, instance_state=None, - shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) self.rendered = False - self.beginning_at = None - self.ending_at = None - self.accommodation_code = None - - if instance_state is not None: - state = json.loads(instance_state) - if 'beginning_at' in state: - self.beginning_at = state['beginning_at'] - if 'ending_at' in state: - self.ending_at = state['ending_at'] - if 'accommodation_code' in state: - self.accommodation_code = state['accommodation_code'] - # For a timed activity, we are only interested here # in time-related accommodations, and these should be disjoint. # (For proctored exams, it is possible to have multiple accommodations @@ -50,7 +46,7 @@ class TimeLimitModule(XModule): ) def _get_accommodated_duration(self, duration): - ''' + ''' Get duration for activity, as adjusted for accommodations. Input and output are expressed in seconds. 
''' @@ -70,35 +66,25 @@ class TimeLimitModule(XModule): @property def has_begun(self): return self.beginning_at is not None - - @property + + @property def has_ended(self): if not self.ending_at: return False return self.ending_at < time() - + def begin(self, duration): - ''' + ''' Sets the starting time and ending time for the activity, based on the duration provided (in seconds). ''' self.beginning_at = time() modified_duration = self._get_accommodated_duration(duration) self.ending_at = self.beginning_at + modified_duration - + def get_remaining_time_in_ms(self): return int((self.ending_at - time()) * 1000) - def get_instance_state(self): - state = {} - if self.beginning_at: - state['beginning_at'] = self.beginning_at - if self.ending_at: - state['ending_at'] = self.ending_at - if self.accommodation_code: - state['accommodation_code'] = self.accommodation_code - return json.dumps(state) - def get_html(self): self.render() return self.content @@ -133,12 +119,12 @@ class TimeLimitModule(XModule): else: return "other" -class TimeLimitDescriptor(XMLEditingDescriptor, XmlDescriptor): +class TimeLimitDescriptor(TimeLimitFields, XMLEditingDescriptor, XmlDescriptor): module_class = TimeLimitModule # For remembering when a student started, and when they should end - stores_state = True + stores_state = True @classmethod def definition_from_xml(cls, xml_object, system): @@ -151,7 +137,7 @@ class TimeLimitDescriptor(XMLEditingDescriptor, XmlDescriptor): if system.error_tracker is not None: system.error_tracker("ERROR: " + str(e)) continue - return {'children': children} + return {}, children def definition_to_xml(self, resource_fs): xml_object = etree.Element('timelimit') diff --git a/common/lib/xmodule/xmodule/vertical_module.py b/common/lib/xmodule/xmodule/vertical_module.py index 5827ea96a9..610d180c11 100644 --- a/common/lib/xmodule/xmodule/vertical_module.py +++ b/common/lib/xmodule/xmodule/vertical_module.py @@ -8,11 +8,15 @@ from pkg_resources import resource_string 
class_priority = ['video', 'problem'] -class VerticalModule(XModule): +class VerticalFields(object): + has_children = True + + +class VerticalModule(VerticalFields, XModule): ''' Layout module for laying out submodules vertically.''' - def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, instance_state, shared_state, **kwargs) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) self.contents = None def get_html(self): @@ -42,7 +46,7 @@ class VerticalModule(XModule): return new_class -class VerticalDescriptor(SequenceDescriptor): +class VerticalDescriptor(VerticalFields, SequenceDescriptor): module_class = VerticalModule js = {'coffee': [resource_string(__name__, 'js/src/vertical/edit.coffee')]} diff --git a/common/lib/xmodule/xmodule/video_module.py b/common/lib/xmodule/xmodule/video_module.py index 27388f7630..0203299b40 100644 --- a/common/lib/xmodule/xmodule/video_module.py +++ b/common/lib/xmodule/xmodule/video_module.py @@ -8,9 +8,8 @@ from django.http import Http404 from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor -from xmodule.modulestore.xml import XMLModuleStore -from xmodule.modulestore.django import modulestore from xmodule.contentstore.content import StaticContent +from xblock.core import Integer, Scope, String import datetime import time @@ -18,7 +17,13 @@ import time log = logging.getLogger(__name__) -class VideoModule(XModule): +class VideoFields(object): + data = String(help="XML data for the problem", scope=Scope.content) + position = Integer(help="Current position in the video", scope=Scope.student_state, default=0) + display_name = String(help="Display name for this module", scope=Scope.settings) + + +class VideoModule(VideoFields, XModule): video_time = 0 icon_class = 'video' @@ -32,23 +37,16 @@ class VideoModule(XModule): css = {'scss': 
[resource_string(__name__, 'css/video/display.scss')]} js_module_name = "Video" - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - xmltree = etree.fromstring(self.definition['data']) + def __init__(self, *args, **kwargs): + XModule.__init__(self, *args, **kwargs) + + xmltree = etree.fromstring(self.data) self.youtube = xmltree.get('youtube') - self.position = 0 self.show_captions = xmltree.get('show_captions', 'true') self.source = self._get_source(xmltree) self.track = self._get_track(xmltree) self.start_time, self.end_time = self._get_timeframe(xmltree) - if instance_state is not None: - state = json.loads(instance_state) - if 'position' in state: - self.position = int(float(state['position'])) - def _get_source(self, xmltree): # find the first valid source return self._get_first_external(xmltree, 'source') @@ -120,13 +118,6 @@ class VideoModule(XModule): return self.youtube def get_html(self): - if isinstance(modulestore(), XMLModuleStore): - # VS[compat] - # cdodge: filesystem static content support. 
- caption_asset_path = "/static/{0}/subs/".format(self.metadata['data_dir']) - else: - caption_asset_path = StaticContent.get_base_url_path_for_course_assets(self.location) + '/subs_' - # We normally let JS parse this, but in the case that we need a hacked # out player because YouTube has broken their \ No newline at end of file diff --git a/common/test/data/conditional_and_poll/chapter/Staff.xml b/common/test/data/conditional_and_poll/chapter/Staff.xml new file mode 100644 index 0000000000..e1d5216f6d --- /dev/null +++ b/common/test/data/conditional_and_poll/chapter/Staff.xml @@ -0,0 +1,3 @@ + + + diff --git a/common/test/data/conditional_and_poll/conditional/condone.xml b/common/test/data/conditional_and_poll/conditional/condone.xml new file mode 100644 index 0000000000..80b061e244 --- /dev/null +++ b/common/test/data/conditional_and_poll/conditional/condone.xml @@ -0,0 +1,3 @@ + + + diff --git a/common/test/data/conditional_and_poll/course.xml b/common/test/data/conditional_and_poll/course.xml new file mode 120000 index 0000000000..f4f5c17b87 --- /dev/null +++ b/common/test/data/conditional_and_poll/course.xml @@ -0,0 +1 @@ +roots/2013_Spring.xml \ No newline at end of file diff --git a/common/test/data/conditional_and_poll/course/2013_Spring.xml b/common/test/data/conditional_and_poll/course/2013_Spring.xml new file mode 100644 index 0000000000..2eea422a2f --- /dev/null +++ b/common/test/data/conditional_and_poll/course/2013_Spring.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + diff --git a/common/test/data/conditional_and_poll/creating_course.xml b/common/test/data/conditional_and_poll/creating_course.xml new file mode 100644 index 0000000000..4c90f1c2ec --- /dev/null +++ b/common/test/data/conditional_and_poll/creating_course.xml @@ -0,0 +1,8 @@ + diff --git a/common/test/data/conditional_and_poll/html/secret_page.xml b/common/test/data/conditional_and_poll/html/secret_page.xml new file mode 100644 index 0000000000..63be3cfa8d --- /dev/null +++ 
b/common/test/data/conditional_and_poll/html/secret_page.xml @@ -0,0 +1,4 @@ + +

        This is a secret!

        + + diff --git a/common/test/data/conditional_and_poll/info/2013_Spring/handouts.html b/common/test/data/conditional_and_poll/info/2013_Spring/handouts.html new file mode 100644 index 0000000000..35f2c89474 --- /dev/null +++ b/common/test/data/conditional_and_poll/info/2013_Spring/handouts.html @@ -0,0 +1,3 @@ +
          +
        1. A list of course handouts, or an empty file if there are none.
        2. +
        diff --git a/common/test/data/conditional_and_poll/info/2013_Spring/updates.html b/common/test/data/conditional_and_poll/info/2013_Spring/updates.html new file mode 100644 index 0000000000..9744c1699d --- /dev/null +++ b/common/test/data/conditional_and_poll/info/2013_Spring/updates.html @@ -0,0 +1,10 @@ + +
          + +
        1. December 9

          +
          +

          Announcement text

          +
          +
        2. + +
        diff --git a/common/test/data/conditional_and_poll/policies/2013_Spring/policy.json b/common/test/data/conditional_and_poll/policies/2013_Spring/policy.json new file mode 100644 index 0000000000..e2a204815c --- /dev/null +++ b/common/test/data/conditional_and_poll/policies/2013_Spring/policy.json @@ -0,0 +1,8 @@ +{ + "course/2013_Spring": { + "start": "2099-01-01T00:00", + "advertised_start" : "Spring 2013", + "display_name": "Justice" + } + +} diff --git a/common/test/data/conditional_and_poll/problem/choiceprob.xml b/common/test/data/conditional_and_poll/problem/choiceprob.xml new file mode 100644 index 0000000000..fa91954977 --- /dev/null +++ b/common/test/data/conditional_and_poll/problem/choiceprob.xml @@ -0,0 +1,22 @@ + + + +

        Consider a hypothetical magnetic field pointing out of your computer screen. Now imagine an electron traveling from right to left in the plane of your screen. A diagram of this situation is show below…

        +
        + +

        a. The magnitude of the force experienced by the electron is proportional the product of which of the following? (Select all that apply.)

        + + + + +Magnetic field strength… +Electric field strength… +Electric charge of the electron… +Radius of the electron… +Mass of the electron… +Velocity of the electron… + + + + +
        diff --git a/common/test/data/conditional_and_poll/roots/2013_Spring.xml b/common/test/data/conditional_and_poll/roots/2013_Spring.xml new file mode 100644 index 0000000000..1b97a5a714 --- /dev/null +++ b/common/test/data/conditional_and_poll/roots/2013_Spring.xml @@ -0,0 +1,2 @@ + + diff --git a/common/test/data/conditional_and_poll/sequential/Problem_Demos.xml b/common/test/data/conditional_and_poll/sequential/Problem_Demos.xml new file mode 100644 index 0000000000..e10298336d --- /dev/null +++ b/common/test/data/conditional_and_poll/sequential/Problem_Demos.xml @@ -0,0 +1,31 @@ + + + +

        What's the Right Thing to Do?

        +

        Suppose four shipwrecked sailors are stranded at sea in a lifeboat, without + food or water. Would it be wrong for three of them to kill and eat the cabin + boy, in order to save their own lives?

        + Yes + No + Don't know +
        + +

        What's the Right Thing to Do?

        +

        Suppose four shipwrecked sailors are stranded at sea in a lifeboat, without + food or water. Would it be wrong for three of them to kill and eat the cabin + boy, in order to save their own lives?

        + Yes + No + Don't know +
        +
        + + + + Condition: first_poll - Yes + + In first condition. + + + +
        diff --git a/common/test/data/conditional_and_poll/static/README b/common/test/data/conditional_and_poll/static/README new file mode 100644 index 0000000000..e22f378b5e --- /dev/null +++ b/common/test/data/conditional_and_poll/static/README @@ -0,0 +1,5 @@ +Images, handouts, and other statically-served content should go ONLY +in this directory. + +Images for the front page should go in static/images. The frontpage +banner MUST be named course_image.jpg \ No newline at end of file diff --git a/common/test/data/conditional_and_poll/static/images/course_image.jpg b/common/test/data/conditional_and_poll/static/images/course_image.jpg new file mode 100644 index 0000000000..b6a64b9396 Binary files /dev/null and b/common/test/data/conditional_and_poll/static/images/course_image.jpg differ diff --git a/common/test/data/conditional_and_poll/static/images/professor-sandel.jpg b/common/test/data/conditional_and_poll/static/images/professor-sandel.jpg new file mode 100644 index 0000000000..41bde60165 Binary files /dev/null and b/common/test/data/conditional_and_poll/static/images/professor-sandel.jpg differ diff --git a/common/test/data/full/vertical/vertical_89.xml b/common/test/data/full/vertical/vertical_89.xml index c2b68b6bc2..cf2dd23462 100644 --- a/common/test/data/full/vertical/vertical_89.xml +++ b/common/test/data/full/vertical/vertical_89.xml @@ -7,4 +7,9 @@ + +

        Have you changed your mind?

        + Yes + No +
        diff --git a/common/test/data/simple/course.xml b/common/test/data/simple/course.xml index 86dc8df45c..660411384f 100644 --- a/common/test/data/simple/course.xml +++ b/common/test/data/simple/course.xml @@ -15,7 +15,7 @@
        - +