+
+
+
+
diff --git a/common/lib/xmodule/xmodule/js/spec/combinedopenended/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/combinedopenended/display_spec.coffee
new file mode 100644
index 0000000000..f2e8da7990
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/spec/combinedopenended/display_spec.coffee
@@ -0,0 +1,111 @@
+describe 'CombinedOpenEnded', ->
+ beforeEach ->
+ spyOn Logger, 'log'
+ # load up some fixtures
+ loadFixtures 'combined-open-ended.html'
+ jasmine.Clock.useMock()
+ @element = $('.course-content')
+
+
+ describe 'constructor', ->
+ beforeEach ->
+ spyOn(Collapsible, 'setCollapsibles')
+ @combined = new CombinedOpenEnded @element
+
+ it 'set the element', ->
+ expect(@combined.element).toEqual @element
+
+ it 'get the correct values from data fields', ->
+ expect(@combined.ajax_url).toEqual '/courses/MITx/6.002x/2012_Fall/modx/i4x://MITx/6.002x/combinedopenended/CombinedOE'
+ expect(@combined.state).toEqual 'assessing'
+ expect(@combined.task_count).toEqual 2
+ expect(@combined.task_number).toEqual 1
+
+ it 'subelements are made collapsible', ->
+ expect(Collapsible.setCollapsibles).toHaveBeenCalled()
+
+
+ describe 'poll', ->
+ beforeEach =>
+ # setup the spies
+ @combined = new CombinedOpenEnded @element
+ spyOn(@combined, 'reload').andCallFake -> return 0
+ window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5
+
+ it 'polls at the correct intervals', =>
+ fakeResponseContinue = state: 'not done'
+ spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseContinue)
+ @combined.poll()
+ expect(window.setTimeout).toHaveBeenCalledWith(@combined.poll, 10000)
+ expect(window.queuePollerID).toBe(5)
+
+ it 'polling stops properly', =>
+ fakeResponseDone = state: "done"
+ spyOn($, 'postWithPrefix').andCallFake (url, callback) -> callback(fakeResponseDone)
+ @combined.poll()
+ expect(window.queuePollerID).toBeUndefined()
+ expect(window.setTimeout).not.toHaveBeenCalled()
+
+ describe 'rebind', ->
+ beforeEach ->
+ @combined = new CombinedOpenEnded @element
+ spyOn(@combined, 'queueing').andCallFake -> return 0
+ spyOn(@combined, 'skip_post_assessment').andCallFake -> return 0
+ window.setTimeout = jasmine.createSpy().andCallFake (callback, timeout) -> return 5
+
+ it 'when our child is in an assessing state', ->
+ @combined.child_state = 'assessing'
+ @combined.rebind()
+ expect(@combined.answer_area.attr("disabled")).toBe("disabled")
+ expect(@combined.submit_button.val()).toBe("Submit assessment")
+ expect(@combined.queueing).toHaveBeenCalled()
+
+ it 'when our child state is initial', ->
+ @combined.child_state = 'initial'
+ @combined.rebind()
+ expect(@combined.answer_area.attr("disabled")).toBeUndefined()
+ expect(@combined.submit_button.val()).toBe("Submit")
+
+ it 'when our child state is post_assessment', ->
+ @combined.child_state = 'post_assessment'
+ @combined.rebind()
+ expect(@combined.answer_area.attr("disabled")).toBe("disabled")
+ expect(@combined.submit_button.val()).toBe("Submit post-assessment")
+
+ it 'when our child state is done', ->
+ spyOn(@combined, 'next_problem').andCallFake ->
+ @combined.child_state = 'done'
+ @combined.rebind()
+ expect(@combined.answer_area.attr("disabled")).toBe("disabled")
+ expect(@combined.next_problem).toHaveBeenCalled()
+
+ describe 'next_problem', ->
+ beforeEach ->
+ @combined = new CombinedOpenEnded @element
+ @combined.child_state = 'done'
+
+ it 'handling a successful call', ->
+ fakeResponse =
+ success: true
+ html: "dummy html"
+ allow_reset: false
+ spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse)
+ spyOn(@combined, 'reinitialize')
+ spyOn(@combined, 'rebind')
+ @combined.next_problem()
+ expect($.postWithPrefix).toHaveBeenCalled()
+ expect(@combined.reinitialize).toHaveBeenCalledWith(@combined.element)
+ expect(@combined.rebind).toHaveBeenCalled()
+ expect(@combined.answer_area.val()).toBe('')
+ expect(@combined.child_state).toBe('initial')
+
+ it 'handling an unsuccessful call', ->
+ fakeResponse =
+ success: false
+ error: 'This is an error'
+ spyOn($, 'postWithPrefix').andCallFake (url, val, callback) -> callback(fakeResponse)
+ @combined.next_problem()
+ expect(@combined.errors_area.html()).toBe(fakeResponse.error)
+
+
+
diff --git a/common/lib/xmodule/xmodule/js/spec/helper.coffee b/common/lib/xmodule/xmodule/js/spec/helper.coffee
index dc01241861..fbc89f7bd9 100644
--- a/common/lib/xmodule/xmodule/js/spec/helper.coffee
+++ b/common/lib/xmodule/xmodule/js/spec/helper.coffee
@@ -64,7 +64,6 @@ jasmine.stubVideoPlayer = (context, enableParts, createPlayer=true) ->
if createPlayer
return new VideoPlayer(video: context.video)
-spyOn(window, 'onunload')
# Stub jQuery.cookie
$.cookie = jasmine.createSpy('jQuery.cookie').andReturn '1.0'
diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
index 1c0ace9e59..5890686c0e 100644
--- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
@@ -140,15 +140,15 @@ class @Problem
allowed_files = $(element).data("allowed_files")
for file in element.files
if allowed_files.length != 0 and file.name not in allowed_files
- unallowed_file_submitted = true
- errors.push "You submitted #{file.name}; only #{allowed_files} are allowed."
+ unallowed_file_submitted = true
+ errors.push "You submitted #{file.name}; only #{allowed_files} are allowed."
if file.name in required_files
- required_files.splice(required_files.indexOf(file.name), 1)
+ required_files.splice(required_files.indexOf(file.name), 1)
if file.size > max_filesize
file_too_large = true
errors.push 'Your file "' + file.name '" is too large (max size: ' + max_filesize/(1000*1000) + ' MB)'
fd.append(element.id, file)
- if element.files.length == 0
+ if element.files.length == 0
file_not_selected = true
fd.append(element.id, '') # In case we want to allow submissions with no file
if required_files.length != 0
@@ -157,7 +157,7 @@ class @Problem
else
fd.append(element.id, element.value)
-
+
if file_not_selected
errors.push 'You did not select any files to submit'
diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
index 370ef8d136..52fd4c2547 100644
--- a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee
@@ -12,6 +12,7 @@ class @CombinedOpenEnded
@state = @el.data('state')
@task_count = @el.data('task-count')
@task_number = @el.data('task-number')
+ @accept_file_upload = @el.data('accept-file-upload')
@allow_reset = @el.data('allow_reset')
@reset_button = @$('.reset-button')
@@ -44,6 +45,8 @@ class @CombinedOpenEnded
@skip_button = @$('.skip-button')
@skip_button.click @skip_post_assessment
+ @file_upload_area = @$('.file-upload')
+ @can_upload_files = false
@open_ended_child= @$('.open-ended-child')
@find_assessment_elements()
@@ -55,6 +58,16 @@ class @CombinedOpenEnded
$: (selector) ->
$(selector, @el)
+ show_results_current: () =>
+ data = {'task_number' : @task_number-1}
+ $.postWithPrefix "#{@ajax_url}/get_results", data, (response) =>
+ if response.success
+ @results_container.after(response.html).remove()
+ @results_container = $('div.result-container')
+ @submit_evaluation_button = $('.submit-evaluation-button')
+ @submit_evaluation_button.click @message_post
+ Collapsible.setCollapsibles(@results_container)
+
show_results: (event) =>
status_item = $(event.target).parent().parent()
status_number = status_item.data('status-number')
@@ -67,7 +80,7 @@ class @CombinedOpenEnded
@submit_evaluation_button.click @message_post
Collapsible.setCollapsibles(@results_container)
else
- @errors_area.html(response.error)
+ @gentle_alert response.error
message_post: (event)=>
Logger.log 'message_post', @answers
@@ -108,22 +121,28 @@ class @CombinedOpenEnded
@submit_button.show()
@reset_button.hide()
@next_problem_button.hide()
+ @hide_file_upload()
@hint_area.attr('disabled', false)
if @child_state == 'done'
@rubric_wrapper.hide()
if @child_type=="openended"
@skip_button.hide()
if @allow_reset=="True"
+ @show_results_current
@reset_button.show()
@submit_button.hide()
@answer_area.attr("disabled", true)
+ @replace_text_inputs()
@hint_area.attr('disabled', true)
else if @child_state == 'initial'
@answer_area.attr("disabled", false)
@submit_button.prop('value', 'Submit')
@submit_button.click @save_answer
+ @setup_file_upload()
else if @child_state == 'assessing'
@answer_area.attr("disabled", true)
+ @replace_text_inputs()
+ @hide_file_upload()
@submit_button.prop('value', 'Submit assessment')
@submit_button.click @save_assessment
if @child_type == "openended"
@@ -134,6 +153,7 @@ class @CombinedOpenEnded
@skip_button.show()
@skip_post_assessment()
@answer_area.attr("disabled", true)
+ @replace_text_inputs()
@submit_button.prop('value', 'Submit post-assessment')
if @child_type=="selfassessment"
@submit_button.click @save_hint
@@ -142,6 +162,7 @@ class @CombinedOpenEnded
else if @child_state == 'done'
@rubric_wrapper.hide()
@answer_area.attr("disabled", true)
+ @replace_text_inputs()
@hint_area.attr('disabled', true)
@submit_button.hide()
if @child_type=="openended"
@@ -149,6 +170,7 @@ class @CombinedOpenEnded
if @task_number<@task_count
@next_problem()
else
+ @show_results_current()
@reset_button.show()
@@ -160,17 +182,41 @@ class @CombinedOpenEnded
save_answer: (event) =>
event.preventDefault()
+ max_filesize = 2*1000*1000 #2MB
if @child_state == 'initial'
- data = {'student_answer' : @answer_area.val()}
- $.postWithPrefix "#{@ajax_url}/save_answer", data, (response) =>
- if response.success
- @rubric_wrapper.html(response.rubric_html)
- @rubric_wrapper.show()
- @child_state = 'assessing'
- @find_assessment_elements()
- @rebind()
+ files = ""
+ if @can_upload_files == true
+ files = $('.file-upload-box')[0].files[0]
+ if files != undefined
+ if files.size > max_filesize
+ @can_upload_files = false
+ files = ""
else
- @errors_area.html(response.error)
+ @can_upload_files = false
+
+ fd = new FormData()
+ fd.append('student_answer', @answer_area.val())
+ fd.append('student_file', files)
+ fd.append('can_upload_files', @can_upload_files)
+
+ settings =
+ type: "POST"
+ data: fd
+ processData: false
+ contentType: false
+ success: (response) =>
+ if response.success
+ @rubric_wrapper.html(response.rubric_html)
+ @rubric_wrapper.show()
+ @answer_area.html(response.student_response)
+ @child_state = 'assessing'
+ @find_assessment_elements()
+ @rebind()
+ else
+ @gentle_alert response.error
+
+ $.ajaxWithPrefix("#{@ajax_url}/save_answer",settings)
+
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
@@ -260,6 +306,7 @@ class @CombinedOpenEnded
@gentle_alert "Moved to next step."
else
@gentle_alert "Your score did not meet the criteria to move to the next step."
+ @show_results_current()
else
@errors_area.html(response.error)
else
@@ -282,6 +329,31 @@ class @CombinedOpenEnded
$.postWithPrefix "#{@ajax_url}/check_for_score", (response) =>
if response.state == "done" or response.state=="post_assessment"
delete window.queuePollerID
- location.reload()
+ @reload
else
window.queuePollerID = window.setTimeout(@poll, 10000)
+
+ setup_file_upload: =>
+ if window.File and window.FileReader and window.FileList and window.Blob
+ if @accept_file_upload == "True"
+ @can_upload_files = true
+ @file_upload_area.html('')
+ @file_upload_area.show()
+ else
+ @gentle_alert 'File uploads are required for this question, but are not supported in this browser. Try the newest version of google chrome. Alternatively, if you have uploaded the image to the web, you can paste a link to it into the answer box.'
+
+ hide_file_upload: =>
+ if @accept_file_upload == "True"
+ @file_upload_area.hide()
+
+ replace_text_inputs: =>
+ answer_class = @answer_area.attr('class')
+ answer_id = @answer_area.attr('id')
+ answer_val = @answer_area.val()
+ new_text = ''
+ new_text = "#{answer_val}"
+ @answer_area.replaceWith(new_text)
+
+ # wrap this so that it can be mocked
+ reload: ->
+ location.reload()
diff --git a/common/lib/xmodule/xmodule/modulestore/xml_exporter.py b/common/lib/xmodule/xmodule/modulestore/xml_exporter.py
index e0bf0ec1d3..3522b45718 100644
--- a/common/lib/xmodule/xmodule/modulestore/xml_exporter.py
+++ b/common/lib/xmodule/xmodule/modulestore/xml_exporter.py
@@ -17,4 +17,26 @@ def export_to_xml(modulestore, contentstore, course_location, root_dir, course_d
# export the static assets
contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/')
+ # export the static tabs
+ export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html')
+
+ # export the custom tags
+ export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags')
+
+ # export the course updates
+ export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html')
+
+
+def export_extra_content(export_fs, modulestore, course_location, category_type, dirname, file_suffix = ''):
+ query_loc = Location('i4x', course_location.org, course_location.course, category_type, None)
+ items = modulestore.get_items(query_loc)
+
+ if len(items) > 0:
+ item_dir = export_fs.makeopendir(dirname)
+ for item in items:
+ with item_dir.open(item.location.name + file_suffix, 'w') as item_file:
+ item_file.write(item.definition['data'].encode('utf8'))
+
+
+
\ No newline at end of file
diff --git a/common/lib/xmodule/xmodule/open_ended_image_submission.py b/common/lib/xmodule/xmodule/open_ended_image_submission.py
new file mode 100644
index 0000000000..abfb2d80ba
--- /dev/null
+++ b/common/lib/xmodule/xmodule/open_ended_image_submission.py
@@ -0,0 +1,261 @@
+"""
+This contains functions and classes used to evaluate if images are acceptable (do not show improper content, etc), and
+to send them to S3.
+"""
+
+try:
+ from PIL import Image
+ ENABLE_PIL = True
+except:
+ ENABLE_PIL = False
+
+from urlparse import urlparse
+import requests
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+from django.conf import settings
+import pickle
+import logging
+import re
+
+log = logging.getLogger(__name__)
+
+#Domains where any image linked to can be trusted to have acceptable content.
+TRUSTED_IMAGE_DOMAINS = [
+ 'wikipedia',
+ 'edxuploads.s3.amazonaws.com',
+ 'wikimedia',
+]
+
+#Suffixes that are allowed in image urls
+ALLOWABLE_IMAGE_SUFFIXES = [
+ 'jpg',
+ 'png',
+ 'gif',
+ 'jpeg'
+]
+
+#Maximum allowed dimensions (x and y) for an uploaded image
+MAX_ALLOWED_IMAGE_DIM = 1000
+
+#Dimensions to which image is resized before it is evaluated for color count, etc
+MAX_IMAGE_DIM = 150
+
+#Maximum number of colors that should be counted in ImageProperties
+MAX_COLORS_TO_COUNT = 16
+
+#Maximum number of colors allowed in an uploaded image
+MAX_COLORS = 400
+
+class ImageProperties(object):
+ """
+ Class to check properties of an image and to validate if they are allowed.
+ """
+ def __init__(self, image_data):
+ """
+ Initializes class variables
+ @param image_data: File-like object containing raw image data, opened with PIL
+ @return: None
+ """
+ self.image = Image.open(image_data)
+ image_size = self.image.size
+ self.image_too_large = False
+ if image_size[0] > MAX_ALLOWED_IMAGE_DIM or image_size[1] > MAX_ALLOWED_IMAGE_DIM:
+ self.image_too_large = True
+ if image_size[0] > MAX_IMAGE_DIM or image_size[1] > MAX_IMAGE_DIM:
+ self.image = self.image.resize((MAX_IMAGE_DIM, MAX_IMAGE_DIM))
+ self.image_size = self.image.size
+
+ def count_colors(self):
+ """
+ Counts the number of colors in an image, and matches them to the max allowed
+ @return: boolean true if color count is acceptable, false otherwise
+ """
+ colors = self.image.getcolors(MAX_COLORS_TO_COUNT)
+ if colors is None:
+ color_count = MAX_COLORS_TO_COUNT
+ else:
+ color_count = len(colors)
+
+ too_many_colors = (color_count <= MAX_COLORS)
+ return too_many_colors
+
+ def check_if_rgb_is_skin(self, rgb):
+ """
+ Checks if a given input rgb tuple/list is a skin tone
+ @param rgb: RGB tuple
+ @return: Boolean, True if the RGB tuple falls in the skin tone range
+ """
+ colors_okay = False
+ try:
+ r = rgb[0]
+ g = rgb[1]
+ b = rgb[2]
+ check_r = (r > 60)
+ check_g = (r * 0.4) < g < (r * 0.85)
+ check_b = (r * 0.2) < b < (r * 0.7)
+ colors_okay = check_r and check_b and check_g
+ except:
+ pass
+
+ return colors_okay
+
+ def get_skin_ratio(self):
+ """
+ Gets the ratio of skin tone colors in an image
+ @return: True if the ratio is low enough to be acceptable, false otherwise
+ """
+ colors = self.image.getcolors(MAX_COLORS_TO_COUNT)
+ is_okay = True
+ if colors is not None:
+ skin = sum([count for count, rgb in colors if self.check_if_rgb_is_skin(rgb)])
+ total_colored_pixels = sum([count for count, rgb in colors])
+ bad_color_val = float(skin) / total_colored_pixels
+ if bad_color_val > .4:
+ is_okay = False
+
+ return is_okay
+
+ def run_tests(self):
+ """
+ Does all available checks on an image to ensure that it is okay (size, skin ratio, colors)
+ @return: Boolean indicating whether or not image passes all checks
+ """
+ image_is_okay = False
+ try:
+ image_is_okay = self.count_colors() and self.get_skin_ratio() and not self.image_too_large
+ except:
+ log.exception("Could not run image tests.")
+
+ return image_is_okay
+
+
+class URLProperties(object):
+ """
+ Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
+ links to the peer grading image functionality of the external grading service.
+ """
+ def __init__(self, url_string):
+ self.url_string = url_string
+
+ def check_if_parses(self):
+ """
+ Check to see if a URL parses properly
+ @return: success (True if parses, false if not)
+ """
+ success = False
+ try:
+ self.parsed_url = urlparse(self.url_string)
+ success = True
+ except:
+ pass
+
+ return success
+
+ def check_suffix(self):
+ """
+ Checks the suffix of a url to make sure that it is allowed
+ @return: True if suffix is okay, false if not
+ """
+ good_suffix = False
+ for suffix in ALLOWABLE_IMAGE_SUFFIXES:
+ if self.url_string.endswith(suffix):
+ good_suffix = True
+ break
+ return good_suffix
+
+ def run_tests(self):
+ """
+ Runs all available url tests
+ @return: True if URL passes tests, false if not.
+ """
+ url_is_okay = self.check_suffix() and self.check_if_parses() and self.check_domain()
+ return url_is_okay
+
+ def check_domain(self):
+ """
+ Checks to see if url is from a trusted domain
+ """
+ success = False
+ for domain in TRUSTED_IMAGE_DOMAINS:
+ if domain in self.url_string:
+ success = True
+ return success
+ return success
+
+def run_url_tests(url_string):
+ """
+ Creates a URLProperties object and runs all tests
+ @param url_string: A URL in string format
+ @return: Boolean indicating whether or not URL has passed all tests
+ """
+ url_properties = URLProperties(url_string)
+ return url_properties.run_tests()
+
+
+def run_image_tests(image):
+ """
+ Runs all available image tests
+ @param image: PIL Image object
+ @return: Boolean indicating whether or not all tests have been passed
+ """
+ success = False
+ try:
+ image_properties = ImageProperties(image)
+ success = image_properties.run_tests()
+ except:
+ log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image,"
+ "or an issue with the deployment configuration of PIL/Pillow")
+ return success
+
+
+def upload_to_s3(file_to_upload, keyname):
+ '''
+ Upload file to S3 using provided keyname.
+
+ Returns:
+ public_url: URL to access uploaded file
+ '''
+
+ #This commented out code is kept here in case we change the uploading method and require images to be
+ #converted before they are sent to S3.
+ #TODO: determine if commented code is needed and remove
+ #im = Image.open(file_to_upload)
+ #out_im = cStringIO.StringIO()
+ #im.save(out_im, 'PNG')
+
+ try:
+ conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
+ bucketname = str(settings.AWS_STORAGE_BUCKET_NAME)
+ bucket = conn.create_bucket(bucketname.lower())
+
+ k = Key(bucket)
+ k.key = keyname
+ k.set_metadata('filename', file_to_upload.name)
+ k.set_contents_from_file(file_to_upload)
+
+ #This commented out code is kept here in case we change the uploading method and require images to be
+ #converted before they are sent to S3.
+ #k.set_contents_from_string(out_im.getvalue())
+ #k.set_metadata("Content-Type", 'images/png')
+
+ k.set_acl("public-read")
+ public_url = k.generate_url(60 * 60 * 24 * 365) # URL timeout in seconds.
+
+ return True, public_url
+ except:
+ return False, "Could not connect to S3."
+
+
+def get_from_s3(s3_public_url):
+ """
+ Gets an image from a given S3 url
+ @param s3_public_url: The URL where an image is located
+ @return: The image data
+ """
+ r = requests.get(s3_public_url, timeout=2)
+ data = r.text
+ return data
+
+
+
diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py
index 3117d9566a..94d45d96e3 100644
--- a/common/lib/xmodule/xmodule/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_module.py
@@ -258,7 +258,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
"""
new_score_msg = self._parse_score_msg(score_msg, system)
if not new_score_msg['valid']:
- score_msg['feedback'] = 'Invalid grader reply. Please contact the course staff.'
+ new_score_msg['feedback'] = 'Invalid grader reply. Please contact the course staff.'
self.record_latest_score(new_score_msg['score'])
self.record_latest_post_assessment(score_msg)
@@ -378,12 +378,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
Return error message or feedback template
"""
- log.debug(response_items)
- rubric_feedback=""
+ rubric_feedback = ""
feedback = self._convert_longform_feedback_to_html(response_items)
- if response_items['rubric_scores_complete']==True:
+ if response_items['rubric_scores_complete'] == True:
rubric_renderer = CombinedOpenEndedRubric(system, True)
- rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])
+ success, rubric_feedback = rubric_renderer.render_rubric(response_items['rubric_xml'])
if not response_items['success']:
return system.render_template("open_ended_error.html",
@@ -393,7 +392,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'grader_type': response_items['grader_type'],
'score': "{0} / {1}".format(response_items['score'], self.max_score()),
'feedback': feedback,
- 'rubric_feedback' : rubric_feedback
+ 'rubric_feedback': rubric_feedback
})
return feedback_template
@@ -406,6 +405,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'score': Numeric value (floating point is okay) to assign to answer
'msg': grader_msg
'feedback' : feedback from grader
+ 'grader_type': what type of grader resulted in this score
+ 'grader_id': id of the grader
+ 'submission_id' : id of the submission
+ 'success': whether or not this submission was successful
+ 'rubric_scores': a list of rubric scores
+ 'rubric_scores_complete': boolean if rubric scores are complete
+ 'rubric_xml': the xml of the rubric in string format
}
Returns (valid_score_msg, correct, score, msg):
@@ -437,7 +443,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
log.error(error_message)
fail['feedback'] = error_message
return fail
- #This is to support peer grading
+ #This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
for i in xrange(0, len(score_result['score'])):
@@ -448,8 +454,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
- 'rubric_scores_complete' : score_result['rubric_scores_complete'][i],
- 'rubric_xml' : score_result['rubric_xml'][i],
+ 'rubric_scores_complete': score_result['rubric_scores_complete'][i],
+ 'rubric_xml': score_result['rubric_xml'][i],
}
feedback_items.append(self._format_feedback(new_score_result, system))
if join_feedback:
@@ -476,7 +482,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
if not self.history:
return ""
- feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system, join_feedback=join_feedback)
+ feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system,
+ join_feedback=join_feedback)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
@@ -554,11 +561,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return self.out_of_sync_error(get)
# add new history element with answer and empty score and hint.
- self.new_history_entry(get['student_answer'])
- self.send_to_grader(get['student_answer'], system)
- self.change_state(self.ASSESSING)
+ success, get = self.append_image_to_student_answer(get)
+ error_message = ""
+ if success:
+ get['student_answer'] = OpenEndedModule.sanitize_html(get['student_answer'])
+ self.new_history_entry(get['student_answer'])
+ self.send_to_grader(get['student_answer'], system)
+ self.change_state(self.ASSESSING)
+ else:
+ error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
- return {'success': True, }
+ return {
+ 'success': True,
+ 'error': error_message,
+ 'student_response': get['student_answer']
+ }
def update_score(self, get, system):
"""
@@ -602,8 +619,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'msg': post_assessment,
'child_type': 'openended',
'correct': correct,
+ 'accept_file_upload': self.accept_file_upload,
}
- log.debug(context)
html = system.render_template('open_ended.html', context)
return html
diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py
index 62d203987a..7151ac0723 100644
--- a/common/lib/xmodule/xmodule/openendedchild.py
+++ b/common/lib/xmodule/xmodule/openendedchild.py
@@ -5,11 +5,13 @@ import json
import logging
from lxml import etree
from lxml.html import rewrite_links
+from lxml.html.clean import Cleaner, autolink_html
from path import path
import os
import sys
import hashlib
import capa.xqueue_interface as xqueue_interface
+import re
from pkg_resources import resource_string
@@ -21,6 +23,7 @@ from .stringify import stringify_children
from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
from capa.util import *
+import open_ended_image_submission
from datetime import datetime
@@ -94,6 +97,7 @@ class OpenEndedChild(object):
self.prompt = static_data['prompt']
self.rubric = static_data['rubric']
self.display_name = static_data['display_name']
+ self.accept_file_upload = static_data['accept_file_upload']
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
@@ -113,7 +117,7 @@ class OpenEndedChild(object):
pass
def latest_answer(self):
- """None if not available"""
+ """Empty string if not available"""
if not self.history:
return ""
return self.history[-1].get('answer', "")
@@ -125,17 +129,31 @@ class OpenEndedChild(object):
return self.history[-1].get('score')
def latest_post_assessment(self, system):
- """None if not available"""
+ """Empty string if not available"""
if not self.history:
return ""
return self.history[-1].get('post_assessment', "")
+ @staticmethod
+ def sanitize_html(answer):
+ try:
+ answer = autolink_html(answer)
+ cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True,
+ host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
+ whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
+ clean_html = cleaner.clean_html(answer)
+ clean_html = re.sub(r'$', '', re.sub(r'^
', '', clean_html))
+ except:
+ clean_html = answer
+ return clean_html
+
def new_history_entry(self, answer):
"""
Adds a new entry to the history dictionary
@param answer: The student supplied answer
@return: None
"""
+ answer = OpenEndedChild.sanitize_html(answer)
self.history.append({'answer': answer})
def record_latest_score(self, score):
@@ -260,5 +278,115 @@ class OpenEndedChild(object):
correctness = 'correct' if self.is_submission_correct(score) else 'incorrect'
return correctness
+ def upload_image_to_s3(self, image_data):
+ """
+ Uploads an image to S3
+ Image_data: InMemoryUploadedFileObject that responds to read() and seek()
+ @return: Tuple of (success, image_ok, s3_public_url) for the uploaded object
+ """
+ success = False
+ s3_public_url = ""
+ image_ok = False
+ try:
+ image_data.seek(0)
+ image_ok = open_ended_image_submission.run_image_tests(image_data)
+ except:
+ log.exception("Could not create image and check it.")
+
+ if image_ok:
+ image_key = image_data.name + datetime.now().strftime("%Y%m%d%H%M%S")
+
+ try:
+ image_data.seek(0)
+ success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key)
+ except:
+ log.exception("Could not upload image to S3.")
+
+ return success, image_ok, s3_public_url
+
+ def check_for_image_and_upload(self, get_data):
+ """
+ Checks to see if an image was passed back in the AJAX query. If so, it will upload it to S3
+ @param get_data: AJAX get data
+ @return: Flags has_file_to_upload, uploaded_to_s3, image_ok,
+ and the html image tag corresponding to the uploaded image
+ """
+ has_file_to_upload = False
+ uploaded_to_s3 = False
+ image_tag = ""
+ image_ok = False
+ if 'can_upload_files' in get_data:
+ if get_data['can_upload_files'] == 'true':
+ has_file_to_upload = True
+ file = get_data['student_file'][0]
+ uploaded_to_s3, image_ok, s3_public_url = self.upload_image_to_s3(file)
+ if uploaded_to_s3:
+ image_tag = self.generate_image_tag_from_url(s3_public_url, file.name)
+
+ return has_file_to_upload, uploaded_to_s3, image_ok, image_tag
+
+ def generate_image_tag_from_url(self, s3_public_url, image_name):
+ """
+ Makes an image tag from a given URL
+ @param s3_public_url: URL of the image
+ @param image_name: Name of the image
+ @return: An HTML image tag string referencing the given URL
+ """
+ image_template = """
+ {1}
+ """.format(s3_public_url, image_name)
+ return image_template
+
+ def append_image_to_student_answer(self, get_data):
+ """
+ Adds an image to a student answer after uploading it to S3
+ @param get_data: AJAX get data
+ @return: Boolean success, updated AJAX get data
+ """
+ overall_success = False
+ if not self.accept_file_upload:
+ #If the question does not accept file uploads, do not do anything
+ return True, get_data
+
+ has_file_to_upload, uploaded_to_s3, image_ok, image_tag = self.check_for_image_and_upload(get_data)
+ if uploaded_to_s3 and has_file_to_upload and image_ok:
+ get_data['student_answer'] += image_tag
+ overall_success = True
+ elif has_file_to_upload and not uploaded_to_s3 and image_ok:
+ #In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely
+ #a config issue (development vs deployment). For now, just treat this as a "success"
+ log.warning("Student AJAX post to combined open ended xmodule indicated that it contained an image, "
+ "but the image was not able to be uploaded to S3. This could indicate a config"
+ "issue with this deployment, but it could also indicate a problem with S3 or with the"
+ "student image itself.")
+ overall_success = True
+ elif not has_file_to_upload:
+ #If there is no file to upload, probably the student has embedded the link in the answer text
+ success, get_data['student_answer'] = self.check_for_url_in_text(get_data['student_answer'])
+ overall_success = success
+
+ return overall_success, get_data
+
+ def check_for_url_in_text(self, string):
+ """
+ Checks for urls in a string
+ @param string: Arbitrary string
+ @return: Boolean success, the edited string
+ """
+ success = False
+ links = re.findall(r'(https?://\S+)', string)
+ if len(links)>0:
+ for link in links:
+ success = open_ended_image_submission.run_url_tests(link)
+ if not success:
+ string = re.sub(link, '', string)
+ else:
+ string = re.sub(link, self.generate_image_tag_from_url(link,link), string)
+ success = True
+
+ return success, string
+
+
+
diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py
index fb1d306708..38a60e11f5 100644
--- a/common/lib/xmodule/xmodule/self_assessment_module.py
+++ b/common/lib/xmodule/xmodule/self_assessment_module.py
@@ -80,6 +80,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'state': self.state,
'allow_reset': self._allow_reset(),
'child_type': 'selfassessment',
+ 'accept_file_upload': self.accept_file_upload,
}
html = system.render_template('self_assessment_prompt.html', context)
@@ -106,6 +107,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
if dispatch not in handlers:
return 'Error'
+ log.debug(get)
before = self.get_progress()
d = handlers[dispatch](get, system)
after = self.get_progress()
@@ -123,7 +125,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return ''
rubric_renderer = CombinedOpenEndedRubric(system, True)
- rubric_html = rubric_renderer.render_rubric(self.rubric)
+ success, rubric_html = rubric_renderer.render_rubric(self.rubric)
# we'll render it
context = {'rubric': rubric_html,
@@ -200,13 +202,21 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
if self.state != self.INITIAL:
return self.out_of_sync_error(get)
+ error_message = ""
# add new history element with answer and empty score and hint.
- self.new_history_entry(get['student_answer'])
- self.change_state(self.ASSESSING)
+ success, get = self.append_image_to_student_answer(get)
+ if success:
+ get['student_answer'] = SelfAssessmentModule.sanitize_html(get['student_answer'])
+ self.new_history_entry(get['student_answer'])
+ self.change_state(self.ASSESSING)
+ else:
+ error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
return {
- 'success': True,
- 'rubric_html': self.get_rubric_html(system)
+ 'success': success,
+ 'rubric_html': self.get_rubric_html(system),
+ 'error': error_message,
+ 'student_response': get['student_answer'],
}
def save_assessment(self, get, system):
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
new file mode 100644
index 0000000000..c89f5ee848
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -0,0 +1,339 @@
+import json
+from mock import Mock, MagicMock, ANY
+import unittest
+
+from xmodule.openendedchild import OpenEndedChild
+from xmodule.open_ended_module import OpenEndedModule
+from xmodule.combined_open_ended_module import CombinedOpenEndedModule
+
+from xmodule.modulestore import Location
+from lxml import etree
+import capa.xqueue_interface as xqueue_interface
+from datetime import datetime
+
+from . import test_system
+"""
+Tests for the various pieces of the CombinedOpenEndedGrading system
+
+OpenEndedChild
+OpenEndedModule
+
+"""
+
+class OpenEndedChildTest(unittest.TestCase):
+ location = Location(["i4x", "edX", "sa_test", "selfassessment",
+ "SampleQuestion"])
+
+ metadata = json.dumps({'attempts': '10'})
+ prompt = etree.XML("This is a question prompt")
+ rubric = '''
+
+ Response Quality
+
+
+ '''
+ max_score = 4
+
+ static_data = {
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name',
+ 'accept_file_upload' : False,
+ }
+ definition = Mock()
+ descriptor = Mock()
+
+ def setUp(self):
+ self.openendedchild = OpenEndedChild(test_system, self.location,
+ self.definition, self.descriptor, self.static_data, self.metadata)
+
+
+ def test_latest_answer_empty(self):
+ answer = self.openendedchild.latest_answer()
+ self.assertEqual(answer, "")
+
+
+ def test_latest_score_empty(self):
+ answer = self.openendedchild.latest_score()
+ self.assertEqual(answer, None)
+
+
+ def test_latest_post_assessment_empty(self):
+ answer = self.openendedchild.latest_post_assessment(test_system)
+ self.assertEqual(answer, "")
+
+
+ def test_new_history_entry(self):
+ new_answer = "New Answer"
+ self.openendedchild.new_history_entry(new_answer)
+ answer = self.openendedchild.latest_answer()
+ self.assertEqual(answer, new_answer)
+
+ new_answer = "Newer Answer"
+ self.openendedchild.new_history_entry(new_answer)
+ answer = self.openendedchild.latest_answer()
+ self.assertEqual(new_answer, answer)
+
+ def test_record_latest_score(self):
+ new_answer = "New Answer"
+ self.openendedchild.new_history_entry(new_answer)
+ new_score = 3
+ self.openendedchild.record_latest_score(new_score)
+ score = self.openendedchild.latest_score()
+ self.assertEqual(score, 3)
+
+ new_score = 4
+ self.openendedchild.new_history_entry(new_answer)
+ self.openendedchild.record_latest_score(new_score)
+ score = self.openendedchild.latest_score()
+ self.assertEqual(score, 4)
+
+
+ def test_record_latest_post_assessment(self):
+ new_answer = "New Answer"
+ self.openendedchild.new_history_entry(new_answer)
+
+ post_assessment = "Post assessment"
+ self.openendedchild.record_latest_post_assessment(post_assessment)
+ self.assertEqual(post_assessment,
+ self.openendedchild.latest_post_assessment(test_system))
+
+ def test_get_score(self):
+ new_answer = "New Answer"
+ self.openendedchild.new_history_entry(new_answer)
+
+ score = self.openendedchild.get_score()
+ self.assertEqual(score['score'], 0)
+ self.assertEqual(score['total'], self.static_data['max_score'])
+
+ new_score = 4
+ self.openendedchild.new_history_entry(new_answer)
+ self.openendedchild.record_latest_score(new_score)
+ score = self.openendedchild.get_score()
+ self.assertEqual(score['score'], new_score)
+ self.assertEqual(score['total'], self.static_data['max_score'])
+
+
+ def test_reset(self):
+ self.openendedchild.reset(test_system)
+ state = json.loads(self.openendedchild.get_instance_state())
+ self.assertEqual(state['state'], OpenEndedChild.INITIAL)
+
+
+ def test_is_last_response_correct(self):
+ new_answer = "New Answer"
+ self.openendedchild.new_history_entry(new_answer)
+ self.openendedchild.record_latest_score(self.static_data['max_score'])
+ self.assertEqual(self.openendedchild.is_last_response_correct(),
+ 'correct')
+
+ self.openendedchild.new_history_entry(new_answer)
+ self.openendedchild.record_latest_score(0)
+ self.assertEqual(self.openendedchild.is_last_response_correct(),
+ 'incorrect')
+
+class OpenEndedModuleTest(unittest.TestCase):
+ location = Location(["i4x", "edX", "sa_test", "selfassessment",
+ "SampleQuestion"])
+
+ metadata = json.dumps({'attempts': '10'})
+ prompt = etree.XML("This is a question prompt")
+ rubric = etree.XML('''
+
+ Response Quality
+
+
+ ''')
+ max_score = 4
+
+ static_data = {
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name',
+ 'accept_file_upload': False,
+ }
+
+ oeparam = etree.XML('''
+
+ Enter essay here.
+ This is the answer.
+ {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
+
+ ''')
+ definition = {'oeparam': oeparam}
+ descriptor = Mock()
+
+ def setUp(self):
+ test_system.location = self.location
+ self.mock_xqueue = MagicMock()
+ self.mock_xqueue.send_to_queue.return_value=(None, "Message")
+ test_system.xqueue = {'interface':self.mock_xqueue, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 1}
+ self.openendedmodule = OpenEndedModule(test_system, self.location,
+ self.definition, self.descriptor, self.static_data, self.metadata)
+
+ def test_message_post(self):
+ get = {'feedback': 'feedback text',
+ 'submission_id': '1',
+ 'grader_id': '1',
+ 'score': 3}
+ qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
+ student_info = {'anonymous_student_id': test_system.anonymous_student_id,
+ 'submission_time': qtime}
+ contents = {
+ 'feedback': get['feedback'],
+ 'submission_id': int(get['submission_id']),
+ 'grader_id': int(get['grader_id']),
+ 'score': get['score'],
+ 'student_info': json.dumps(student_info)
+ }
+
+ result = self.openendedmodule.message_post(get, test_system)
+ self.assertTrue(result['success'])
+ # make sure it's actually sending something we want to the queue
+ self.mock_xqueue.send_to_queue.assert_called_with(body = json.dumps(contents), header=ANY)
+
+ state = json.loads(self.openendedmodule.get_instance_state())
+ self.assertIsNotNone(state['state'], OpenEndedModule.DONE)
+
+ def test_send_to_grader(self):
+ submission = "This is a student submission"
+ qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
+ student_info = {'anonymous_student_id': test_system.anonymous_student_id,
+ 'submission_time': qtime}
+ contents = self.openendedmodule.payload.copy()
+ contents.update({
+ 'student_info': json.dumps(student_info),
+ 'student_response': submission,
+ 'max_score': self.max_score
+ })
+ result = self.openendedmodule.send_to_grader(submission, test_system)
+ self.assertTrue(result)
+ self.mock_xqueue.send_to_queue.assert_called_with(body = json.dumps(contents), header=ANY)
+
+ def update_score_single(self):
+ self.openendedmodule.new_history_entry("New Entry")
+ score_msg = {
+ 'correct': True,
+ 'score': 4,
+ 'msg' : 'Grader Message',
+ 'feedback': "Grader Feedback"
+ }
+ get = {'queuekey': "abcd",
+ 'xqueue_body': score_msg}
+ self.openendedmodule.update_score(get, test_system)
+
+ def update_score_single(self):
+ self.openendedmodule.new_history_entry("New Entry")
+ feedback = {
+ "success": True,
+ "feedback": "Grader Feedback"
+ }
+ score_msg = {
+ 'correct': True,
+ 'score': 4,
+ 'msg' : 'Grader Message',
+ 'feedback': json.dumps(feedback),
+ 'grader_type': 'IN',
+ 'grader_id': '1',
+ 'submission_id': '1',
+ 'success': True,
+ 'rubric_scores': [0],
+ 'rubric_scores_complete': True,
+ 'rubric_xml': etree.tostring(self.rubric)
+ }
+ get = {'queuekey': "abcd",
+ 'xqueue_body': json.dumps(score_msg)}
+ self.openendedmodule.update_score(get, test_system)
+
+ def test_latest_post_assessment(self):
+ self.update_score_single()
+ assessment = self.openendedmodule.latest_post_assessment(test_system)
+ self.assertFalse(assessment == '')
+ # check for errors
+ self.assertFalse('errors' in assessment)
+
+ def test_update_score(self):
+ self.update_score_single()
+ score = self.openendedmodule.latest_score()
+ self.assertEqual(score, 4)
+
+class CombinedOpenEndedModuleTest(unittest.TestCase):
+ location = Location(["i4x", "edX", "open_ended", "combinedopenended",
+ "SampleQuestion"])
+
+ prompt = "This is a question prompt"
+ rubric = '''
+
+ Response Quality
+
+
+ '''
+ max_score = 3
+
+ metadata = {'attempts': '10', 'max_score': max_score}
+
+ static_data = json.dumps({
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name'
+ })
+
+ oeparam = etree.XML('''
+
+ Enter essay here.
+ This is the answer.
+ {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
+
+ ''')
+
+ task_xml1 = '''
+
+
+ What hint about this problem would you give to someone?
+
+
+ Save Succcesful. Thanks for participating!
+
+
+ '''
+ task_xml2 = '''
+
+
+ Enter essay here.
+ This is the answer.
+ {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
+
+ '''
+ definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
+ descriptor = Mock()
+
+ def setUp(self):
+ self.combinedoe = CombinedOpenEndedModule(test_system, self.location, self.definition, self.descriptor, self.static_data, metadata=self.metadata)
+
+ def test_get_tag_name(self):
+ name = self.combinedoe.get_tag_name("Tag")
+ self.assertEqual(name, "t")
+
+ def test_get_last_response(self):
+ response_dict = self.combinedoe.get_last_response(0)
+ self.assertEqual(response_dict['type'], "selfassessment")
+ self.assertEqual(response_dict['max_score'], self.max_score)
+ self.assertEqual(response_dict['state'], CombinedOpenEndedModule.INITIAL)
+
+ def test_update_task_states(self):
+ changed = self.combinedoe.update_task_states()
+ self.assertFalse(changed)
+
+ current_task = self.combinedoe.current_task
+ current_task.change_state(CombinedOpenEndedModule.DONE)
+ changed = self.combinedoe.update_task_states()
+
+ self.assertTrue(changed)
+
+
diff --git a/common/lib/xmodule/xmodule/tests/test_self_assessment.py b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
index 9013794dbb..c5fb82e412 100644
--- a/common/lib/xmodule/xmodule/tests/test_self_assessment.py
+++ b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
@@ -10,8 +10,16 @@ from . import test_system
class SelfAssessmentTest(unittest.TestCase):
- definition = {'rubric': 'A rubric',
- 'prompt': 'Who?',
+ rubric = '''
+
+ Response Quality
+
+
+ '''
+
+ prompt = etree.XML("This is sample prompt text.")
+ definition = {'rubric': rubric,
+ 'prompt': prompt,
'submitmessage': 'Shall we submit now?',
'hintprompt': 'Consider this...',
}
@@ -23,47 +31,47 @@ class SelfAssessmentTest(unittest.TestCase):
descriptor = Mock()
- def test_import(self):
+ def setUp(self):
state = json.dumps({'student_answers': ["Answer 1", "answer 2", "answer 3"],
'scores': [0, 1],
'hints': ['o hai'],
'state': SelfAssessmentModule.INITIAL,
'attempts': 2})
- rubric = '''
-
- Response Quality
-
-
- '''
-
- prompt = etree.XML("Text")
static_data = {
'max_attempts': 10,
- 'rubric': etree.XML(rubric),
- 'prompt': prompt,
+ 'rubric': etree.XML(self.rubric),
+ 'prompt': self.prompt,
'max_score': 1,
- 'display_name': "Name"
+ 'display_name': "Name",
+ 'accept_file_upload' : False,
}
- module = SelfAssessmentModule(test_system, self.location,
+ self.module = SelfAssessmentModule(test_system, self.location,
self.definition, self.descriptor,
static_data, state, metadata=self.metadata)
- self.assertEqual(module.get_score()['score'], 0)
+ def test_get_html(self):
+ html = self.module.get_html(test_system)
+ self.assertTrue("This is sample prompt text" in html)
+
+ def test_self_assessment_flow(self):
+
+ self.assertEqual(self.module.get_score()['score'], 0)
+
+ self.module.save_answer({'student_answer': "I am an answer"}, test_system)
+ self.assertEqual(self.module.state, self.module.ASSESSING)
+
+ self.module.save_assessment({'assessment': '0'}, test_system)
+ self.assertEqual(self.module.state, self.module.DONE)
- module.save_answer({'student_answer': "I am an answer"}, test_system)
- self.assertEqual(module.state, module.ASSESSING)
-
- module.save_assessment({'assessment': '0'}, test_system)
- self.assertEqual(module.state, module.DONE)
-
- d = module.reset({})
+ d = self.module.reset({})
self.assertTrue(d['success'])
- self.assertEqual(module.state, module.INITIAL)
+ self.assertEqual(self.module.state, self.module.INITIAL)
# if we now assess as right, skip the REQUEST_HINT state
- module.save_answer({'student_answer': 'answer 4'}, test_system)
- module.save_assessment({'assessment': '1'}, test_system)
- self.assertEqual(module.state, module.DONE)
+ self.module.save_answer({'student_answer': 'answer 4'}, test_system)
+ self.module.save_assessment({'assessment': '1'}, test_system)
+ self.assertEqual(self.module.state, self.module.DONE)
+
diff --git a/common/lib/xmodule/xmodule/x_module.py b/common/lib/xmodule/xmodule/x_module.py
index 84b2dd4fbb..5387a9b083 100644
--- a/common/lib/xmodule/xmodule/x_module.py
+++ b/common/lib/xmodule/xmodule/x_module.py
@@ -406,7 +406,7 @@ class ResourceTemplates(object):
log.warning("Skipping unknown template file %s" % template_file)
continue
template_content = resource_string(__name__, os.path.join(dirname, template_file))
- template = yaml.load(template_content)
+ template = yaml.safe_load(template_content)
templates.append(Template(**template))
return templates
diff --git a/common/test/data/full/tabs/resources.html b/common/test/data/full/tabs/resources.html
new file mode 100644
index 0000000000..bf78c92fb1
--- /dev/null
+++ b/common/test/data/full/tabs/resources.html
@@ -0,0 +1 @@
+
This is another sample tab
\ No newline at end of file
diff --git a/lms/djangoapps/courseware/tabs.py b/lms/djangoapps/courseware/tabs.py
index 2ece7f0404..0a7c723cb5 100644
--- a/lms/djangoapps/courseware/tabs.py
+++ b/lms/djangoapps/courseware/tabs.py
@@ -18,8 +18,10 @@ from django.core.urlresolvers import reverse
from fs.errors import ResourceNotFoundError
-from lxml.html import rewrite_links
+from courseware.access import has_access
+from static_replace import replace_urls
+from lxml.html import rewrite_links
from module_render import get_module
from courseware.access import has_access
from static_replace import replace_urls
@@ -27,13 +29,10 @@ from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.x_module import XModule
-
-
-
-from open_ended_grading.peer_grading_service import PeerGradingService
-from open_ended_grading.staff_grading_service import StaffGradingService
from student.models import unique_id_for_user
+from open_ended_grading import open_ended_notifications
+
log = logging.getLogger(__name__)
class InvalidTabsException(Exception):
@@ -118,49 +117,45 @@ def _textbooks(tab, user, course, active_page):
def _staff_grading(tab, user, course, active_page):
if has_access(user, course, 'staff'):
link = reverse('staff_grading', args=[course.id])
- staff_gs = StaffGradingService(settings.STAFF_GRADING_INTERFACE)
- pending_grading=False
- tab_name = "Staff grading"
- img_path= ""
- try:
- notifications = json.loads(staff_gs.get_notifications(course.id))
- if notifications['success']:
- if notifications['staff_needs_to_grade']:
- pending_grading=True
- except:
- #Non catastrophic error, so no real action
- log.info("Problem with getting notifications from staff grading service.")
- if pending_grading:
- img_path = "/static/images/slider-handle.png"
+ tab_name = "Staff grading"
+
+ notifications = open_ended_notifications.staff_grading_notifications(course, user)
+ pending_grading = notifications['pending_grading']
+ img_path = notifications['img_path']
tab = [CourseTab(tab_name, link, active_page == "staff_grading", pending_grading, img_path)]
return tab
return []
def _peer_grading(tab, user, course, active_page):
+
if user.is_authenticated():
link = reverse('peer_grading', args=[course.id])
- peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE)
- pending_grading=False
tab_name = "Peer grading"
- img_path= ""
- try:
- notifications = json.loads(peer_gs.get_notifications(course.id,unique_id_for_user(user)))
- if notifications['success']:
- if notifications['student_needs_to_peer_grade']:
- pending_grading=True
- except:
- #Non catastrophic error, so no real action
- log.info("Problem with getting notifications from peer grading service.")
- if pending_grading:
- img_path = "/static/images/slider-handle.png"
+ notifications = open_ended_notifications.peer_grading_notifications(course, user)
+ pending_grading = notifications['pending_grading']
+ img_path = notifications['img_path']
tab = [CourseTab(tab_name, link, active_page == "peer_grading", pending_grading, img_path)]
return tab
return []
+def _combined_open_ended_grading(tab, user, course, active_page):
+ if user.is_authenticated():
+ link = reverse('open_ended_notifications', args=[course.id])
+ tab_name = "Open Ended Panel"
+
+ notifications = open_ended_notifications.combined_notifications(course, user)
+ pending_grading = notifications['pending_grading']
+ img_path = notifications['img_path']
+
+ tab = [CourseTab(tab_name, link, active_page == "open_ended", pending_grading, img_path)]
+ return tab
+ return []
+
+
#### Validators
@@ -198,6 +193,7 @@ VALID_TAB_TYPES = {
'static_tab': TabImpl(key_checker(['name', 'url_slug']), _static_tab),
'peer_grading': TabImpl(null_validator, _peer_grading),
'staff_grading': TabImpl(null_validator, _staff_grading),
+ 'open_ended': TabImpl(null_validator, _combined_open_ended_grading),
}
@@ -326,4 +322,4 @@ def get_static_tab_contents(request, cache, course, tab):
if tab_module is not None:
html = tab_module.get_html()
- return html
+ return html
\ No newline at end of file
diff --git a/lms/djangoapps/courseware/tests/factories.py b/lms/djangoapps/courseware/tests/factories.py
new file mode 100644
index 0000000000..6950e28565
--- /dev/null
+++ b/lms/djangoapps/courseware/tests/factories.py
@@ -0,0 +1,44 @@
+import factory
+from student.models import (User, UserProfile, Registration,
+ CourseEnrollmentAllowed)
+from django.contrib.auth.models import Group
+from datetime import datetime
+import uuid
+
+class UserProfileFactory(factory.Factory):
+ FACTORY_FOR = UserProfile
+
+ user = None
+ name = 'Robot Studio'
+ courseware = 'course.xml'
+
+class RegistrationFactory(factory.Factory):
+ FACTORY_FOR = Registration
+
+ user = None
+ activation_key = uuid.uuid4().hex
+
+class UserFactory(factory.Factory):
+ FACTORY_FOR = User
+
+ username = 'robot'
+ email = 'robot@edx.org'
+ password = 'test'
+ first_name = 'Robot'
+ last_name = 'Tester'
+ is_staff = False
+ is_active = True
+ is_superuser = False
+ last_login = datetime.now()
+ date_joined = datetime.now()
+
+class GroupFactory(factory.Factory):
+ FACTORY_FOR = Group
+
+ name = 'test_group'
+
+class CourseEnrollmentAllowedFactory(factory.Factory):
+ FACTORY_FOR = CourseEnrollmentAllowed
+
+ email = 'test@edx.org'
+ course_id = 'edX/test/2012_Fall'
diff --git a/lms/djangoapps/courseware/tests/test_access.py b/lms/djangoapps/courseware/tests/test_access.py
new file mode 100644
index 0000000000..ed9335d382
--- /dev/null
+++ b/lms/djangoapps/courseware/tests/test_access.py
@@ -0,0 +1,109 @@
+import unittest
+import time
+from mock import Mock
+from django.test import TestCase
+
+from xmodule.modulestore import Location
+from factories import CourseEnrollmentAllowedFactory
+import courseware.access as access
+
+class AccessTestCase(TestCase):
+ def test__has_global_staff_access(self):
+ u = Mock(is_staff=False)
+ self.assertFalse(access._has_global_staff_access(u))
+
+ u = Mock(is_staff=True)
+ self.assertTrue(access._has_global_staff_access(u))
+
+ def test__has_access_to_location(self):
+ location = Location('i4x://edX/toy/course/2012_Fall')
+
+ self.assertFalse(access._has_access_to_location(None, location,
+ 'staff', None))
+ u = Mock()
+ u.is_authenticated.return_value = False
+ self.assertFalse(access._has_access_to_location(u, location,
+ 'staff', None))
+ u = Mock(is_staff=True)
+ self.assertTrue(access._has_access_to_location(u, location,
+ 'instructor', None))
+ # A user has staff access if they are in the staff group
+ u = Mock(is_staff=False)
+ g = Mock()
+ g.name = 'staff_edX/toy/2012_Fall'
+ u.groups.all.return_value = [g]
+ self.assertTrue(access._has_access_to_location(u, location,
+ 'staff', None))
+ # A user has staff access if they are in the instructor group
+ g.name = 'instructor_edX/toy/2012_Fall'
+ self.assertTrue(access._has_access_to_location(u, location,
+ 'staff', None))
+
+ # A user has instructor access if they are in the instructor group
+ g.name = 'instructor_edX/toy/2012_Fall'
+ self.assertTrue(access._has_access_to_location(u, location,
+ 'instructor', None))
+
+ # A user does not have staff access if they are
+ # not in either the staff or the instructor group
+ g.name = 'student_only'
+ self.assertFalse(access._has_access_to_location(u, location,
+ 'staff', None))
+
+ # A user does not have instructor access if they are
+ # not in the instructor group
+ g.name = 'student_only'
+ self.assertFalse(access._has_access_to_location(u, location,
+ 'instructor', None))
+
+ def test__has_access_string(self):
+ u = Mock(is_staff=True)
+ self.assertFalse(access._has_access_string(u, 'not_global', 'staff', None))
+
+ u._has_global_staff_access.return_value = True
+ self.assertTrue(access._has_access_string(u, 'global', 'staff', None))
+
+ self.assertRaises(ValueError, access._has_access_string, u, 'global', 'not_staff', None)
+
+ def test__has_access_descriptor(self):
+ # TODO: override DISABLE_START_DATES and test the start date branch of the method
+ u = Mock()
+ d = Mock()
+ d.start = time.gmtime(time.time() - 86400) # make sure the start time is in the past
+
+ # Always returns true because DISABLE_START_DATES is set in test.py
+ self.assertTrue(access._has_access_descriptor(u, d, 'load'))
+ self.assertRaises(ValueError, access._has_access_descriptor, u, d, 'not_load_or_staff')
+
+ def test__has_access_course_desc_can_enroll(self):
+ u = Mock()
+ yesterday = time.gmtime(time.time() - 86400)
+ tomorrow = time.gmtime(time.time() + 86400)
+ c = Mock(enrollment_start=yesterday, enrollment_end=tomorrow)
+ c.metadata.get = 'is_public'
+
+ # User can enroll if it is between the start and end dates
+ self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
+
+ # User can enroll if authenticated and specifically allowed for that course
+ # even outside the open enrollment period
+ u = Mock(email='test@edx.org', is_staff=False)
+ u.is_authenticated.return_value = True
+
+ c = Mock(enrollment_start=tomorrow, enrollment_end=tomorrow, id='edX/test/2012_Fall')
+ c.metadata.get = 'is_public'
+
+ allowed = CourseEnrollmentAllowedFactory(email=u.email, course_id=c.id)
+
+ self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
+
+ # Staff can always enroll even outside the open enrollment period
+ u = Mock(email='test@edx.org', is_staff=True)
+ u.is_authenticated.return_value = True
+
+ c = Mock(enrollment_start=tomorrow, enrollment_end=tomorrow, id='edX/test/Whenever')
+ c.metadata.get = 'is_public'
+ self.assertTrue(access._has_access_course_desc(u, c, 'enroll'))
+
+ # TODO:
+ # Non-staff cannot enroll outside the open enrollment period if not specifically allowed
diff --git a/lms/djangoapps/open_ended_grading/controller_query_service.py b/lms/djangoapps/open_ended_grading/controller_query_service.py
new file mode 100644
index 0000000000..7d515e2475
--- /dev/null
+++ b/lms/djangoapps/open_ended_grading/controller_query_service.py
@@ -0,0 +1,59 @@
+import json
+import logging
+import requests
+from requests.exceptions import RequestException, ConnectionError, HTTPError
+import sys
+from grading_service import GradingService
+from grading_service import GradingServiceError
+
+from django.conf import settings
+from django.http import HttpResponse, Http404
+
+log = logging.getLogger(__name__)
+
+class ControllerQueryService(GradingService):
+ """
+ Interface to staff grading backend.
+ """
+ def __init__(self, config):
+ super(ControllerQueryService, self).__init__(config)
+ self.check_eta_url = self.url + '/get_submission_eta/'
+ self.is_unique_url = self.url + '/is_name_unique/'
+ self.combined_notifications_url = self.url + '/combined_notifications/'
+ self.grading_status_list_url = self.url + '/get_grading_status_list/'
+
+ def check_if_name_is_unique(self, location, problem_id, course_id):
+ params = {
+ 'course_id': course_id,
+ 'location' : location,
+ 'problem_id' : problem_id
+ }
+ response = self.get(self.is_unique_url, params)
+ return response
+
+ def check_for_eta(self, location):
+ params = {
+ 'location' : location,
+ }
+ response = self.get(self.check_eta_url, params)
+ return response
+
+ def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed):
+ params= {
+ 'student_id' : student_id,
+ 'course_id' : course_id,
+ 'user_is_staff' : user_is_staff,
+ 'last_time_viewed' : last_time_viewed,
+ }
+ log.debug(self.combined_notifications_url)
+ response = self.get(self.combined_notifications_url,params)
+ return response
+
+ def get_grading_status_list(self, course_id, student_id):
+ params = {
+ 'student_id' : student_id,
+ 'course_id' : course_id,
+ }
+
+ response = self.get(self.grading_status_list_url, params)
+ return response
diff --git a/lms/djangoapps/open_ended_grading/grading_service.py b/lms/djangoapps/open_ended_grading/grading_service.py
index f65554a9d6..63febb105f 100644
--- a/lms/djangoapps/open_ended_grading/grading_service.py
+++ b/lms/djangoapps/open_ended_grading/grading_service.py
@@ -116,7 +116,7 @@ class GradingService(object):
if 'rubric' in response_json:
rubric = response_json['rubric']
rubric_renderer = CombinedOpenEndedRubric(self.system, False)
- rubric_html = rubric_renderer.render_rubric(rubric)
+ success, rubric_html = rubric_renderer.render_rubric(rubric)
response_json['rubric'] = rubric_html
return response_json
# if we can't parse the rubric into HTML,
diff --git a/lms/djangoapps/open_ended_grading/open_ended_notifications.py b/lms/djangoapps/open_ended_grading/open_ended_notifications.py
new file mode 100644
index 0000000000..43259f3e1b
--- /dev/null
+++ b/lms/djangoapps/open_ended_grading/open_ended_notifications.py
@@ -0,0 +1,158 @@
+from django.conf import settings
+from staff_grading_service import StaffGradingService
+from peer_grading_service import PeerGradingService
+from open_ended_grading.controller_query_service import ControllerQueryService
+import json
+from student.models import unique_id_for_user
+import open_ended_util
+from courseware.models import StudentModule
+import logging
+from courseware.access import has_access
+from util.cache import cache
+import datetime
+
+log=logging.getLogger(__name__)
+
+NOTIFICATION_CACHE_TIME = 300
+KEY_PREFIX = "open_ended_"
+
+NOTIFICATION_TYPES = (
+ ('student_needs_to_peer_grade', 'peer_grading', 'Peer Grading'),
+ ('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
+ ('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted')
+ )
+
+def staff_grading_notifications(course, user):
+ staff_gs = StaffGradingService(settings.STAFF_GRADING_INTERFACE)
+ pending_grading=False
+ img_path= ""
+ course_id = course.id
+ student_id = unique_id_for_user(user)
+ notification_type = "staff"
+
+ success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
+ if success:
+ return notification_dict
+
+ try:
+ notifications = json.loads(staff_gs.get_notifications(course_id))
+ if notifications['success']:
+ if notifications['staff_needs_to_grade']:
+ pending_grading=True
+ except:
+ #Non catastrophic error, so no real action
+ notifications = {}
+ log.info("Problem with getting notifications from staff grading service.")
+
+ if pending_grading:
+ img_path = "/static/images/slider-handle.png"
+
+ notification_dict = {'pending_grading' : pending_grading, 'img_path' : img_path, 'response' : notifications}
+
+ set_value_in_cache(student_id, course_id, notification_type, notification_dict)
+
+ return notification_dict
+
+def peer_grading_notifications(course, user):
+ peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE)
+ pending_grading=False
+ img_path= ""
+ course_id = course.id
+ student_id = unique_id_for_user(user)
+ notification_type = "peer"
+
+ success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
+ if success:
+ return notification_dict
+
+ try:
+ notifications = json.loads(peer_gs.get_notifications(course_id,student_id))
+ if notifications['success']:
+ if notifications['student_needs_to_peer_grade']:
+ pending_grading=True
+ except:
+ #Non catastrophic error, so no real action
+ notifications = {}
+ log.info("Problem with getting notifications from peer grading service.")
+
+ if pending_grading:
+ img_path = "/static/images/slider-handle.png"
+
+ notification_dict = {'pending_grading' : pending_grading, 'img_path' : img_path, 'response' : notifications}
+
+ set_value_in_cache(student_id, course_id, notification_type, notification_dict)
+
+ return notification_dict
+
+def combined_notifications(course, user):
+ controller_url = open_ended_util.get_controller_url()
+ controller_qs = ControllerQueryService(controller_url)
+ student_id = unique_id_for_user(user)
+ user_is_staff = has_access(user, course, 'staff')
+ course_id = course.id
+ notification_type = "combined"
+
+ success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
+ if success:
+ return notification_dict
+
+ min_time_to_query = user.last_login
+ last_module_seen = StudentModule.objects.filter(student=user, course_id = course_id, modified__gt=min_time_to_query).values('modified').order_by('-modified')
+ last_module_seen_count = last_module_seen.count()
+
+ if last_module_seen_count>0:
+ last_time_viewed = last_module_seen[0]['modified'] - datetime.timedelta(seconds=(NOTIFICATION_CACHE_TIME + 60))
+ else:
+ last_time_viewed = user.last_login
+
+ pending_grading= False
+
+ img_path= ""
+ try:
+ controller_response = controller_qs.check_combined_notifications(course.id,student_id, user_is_staff, last_time_viewed)
+ log.debug(controller_response)
+ notifications = json.loads(controller_response)
+ if notifications['success']:
+ if notifications['overall_need_to_check']:
+ pending_grading=True
+ except:
+ #Non catastrophic error, so no real action
+ notifications = {}
+ log.exception("Problem with getting notifications from controller query service.")
+
+ if pending_grading:
+ img_path = "/static/images/slider-handle.png"
+
+ notification_dict = {'pending_grading' : pending_grading, 'img_path' : img_path, 'response' : notifications}
+
+ set_value_in_cache(student_id, course_id, notification_type, notification_dict)
+
+ return notification_dict
+
+def get_value_from_cache(student_id, course_id, notification_type):
+ key_name = create_key_name(student_id, course_id, notification_type)
+ success, value = _get_value_from_cache(key_name)
+ return success, value
+
+def set_value_in_cache(student_id, course_id, notification_type, value):
+ key_name = create_key_name(student_id, course_id, notification_type)
+ _set_value_in_cache(key_name, value)
+
+def create_key_name(student_id, course_id, notification_type):
+ key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id, student=student_id)
+ return key_name
+
+def _get_value_from_cache(key_name):
+ value = cache.get(key_name)
+ success = False
+ if value is None:
+ return success , value
+ try:
+ value = json.loads(value)
+ success = True
+ except:
+ pass
+ return success , value
+
+def _set_value_in_cache(key_name, value):
+ cache.set(key_name, json.dumps(value), NOTIFICATION_CACHE_TIME)
\ No newline at end of file
diff --git a/lms/djangoapps/open_ended_grading/open_ended_util.py b/lms/djangoapps/open_ended_grading/open_ended_util.py
new file mode 100644
index 0000000000..07744d7d2c
--- /dev/null
+++ b/lms/djangoapps/open_ended_grading/open_ended_util.py
@@ -0,0 +1,12 @@
+from django.conf import settings
+import logging
+
+log=logging.getLogger(__name__)
+
+def get_controller_url():
+ peer_grading_url = settings.PEER_GRADING_INTERFACE['url']
+ split_url = peer_grading_url.split("/")
+ controller_url = "http://" + split_url[2] + "/grading_controller"
+ controller_settings=settings.PEER_GRADING_INTERFACE.copy()
+ controller_settings['url'] = controller_url
+ return controller_settings
diff --git a/lms/djangoapps/open_ended_grading/peer_grading_service.py b/lms/djangoapps/open_ended_grading/peer_grading_service.py
index caa349125d..2e31dd02e0 100644
--- a/lms/djangoapps/open_ended_grading/peer_grading_service.py
+++ b/lms/djangoapps/open_ended_grading/peer_grading_service.py
@@ -31,6 +31,15 @@ This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
"""
class MockPeerGradingService(object):
+ # TODO: get this rubric parsed and working
+ rubric = """
+
+ Description
+
+
+
+ """
+
def get_next_submission(self, problem_location, grader_id):
return json.dumps({'success': True,
'submission_id':1,
@@ -41,7 +50,7 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_grade(self, location, grader_id, submission_id,
- score, feedback, submission_key):
+ score, feedback, submission_key, rubric_scores):
return json.dumps({'success': True})
def is_student_calibrated(self, problem_location, grader_id):
@@ -57,16 +66,16 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_calibration_essay(self, problem_location, grader_id,
- calibration_essay_id, submission_key, score, feedback):
- return {'success': True, 'actual_score': 2}
+ calibration_essay_id, submission_key, score, feedback, rubric_scores):
+ return json.dumps({'success': True, 'actual_score': 2})
def get_problem_list(self, course_id, grader_id):
return json.dumps({'success': True,
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
- 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5}),
+ 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'num_required': 7}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
- 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5})
+ 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'num_required': 8})
]})
class PeerGradingService(GradingService):
diff --git a/lms/djangoapps/open_ended_grading/tests.py b/lms/djangoapps/open_ended_grading/tests.py
index 059a8939a2..57ea4f319c 100644
--- a/lms/djangoapps/open_ended_grading/tests.py
+++ b/lms/djangoapps/open_ended_grading/tests.py
@@ -6,6 +6,7 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open
from django.test import TestCase
from open_ended_grading import staff_grading_service
+from open_ended_grading import peer_grading_service
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
@@ -17,9 +18,10 @@ from nose import SkipTest
from mock import patch, Mock
import json
+import logging
+log = logging.getLogger(__name__)
from override_settings import override_settings
-_mock_service = staff_grading_service.MockStaffGradingService()
@override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE)
class TestStaffGradingService(ct.PageLoader):
@@ -111,3 +113,144 @@ class TestStaffGradingService(ct.PageLoader):
d = json.loads(r.content)
self.assertTrue(d['success'], str(d))
self.assertIsNotNone(d['problem_list'])
+
+
+@override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE)
+class TestPeerGradingService(ct.PageLoader):
+ '''
+ Check that the peer grading service proxy works. Basically just checking the
+ access control and error handling logic -- all the actual work is on the
+ backend.
+ '''
+ def setUp(self):
+ xmodule.modulestore.django._MODULESTORES = {}
+
+ self.student = 'view@test.com'
+ self.instructor = 'view2@test.com'
+ self.password = 'foo'
+ self.location = 'TestLocation'
+ self.create_account('u1', self.student, self.password)
+ self.create_account('u2', self.instructor, self.password)
+ self.activate_user(self.student)
+ self.activate_user(self.instructor)
+
+ self.course_id = "edX/toy/2012_Fall"
+ self.toy = modulestore().get_course(self.course_id)
+
+ self.mock_service = peer_grading_service.peer_grading_service()
+
+ self.logout()
+
+ def test_get_next_submission_success(self):
+ self.login(self.student, self.password)
+
+ url = reverse('peer_grading_get_next_submission', kwargs={'course_id': self.course_id})
+ data = {'location': self.location}
+
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertTrue(d['success'])
+ self.assertIsNotNone(d['submission_id'])
+ self.assertIsNotNone(d['prompt'])
+ self.assertIsNotNone(d['submission_key'])
+ self.assertIsNotNone(d['max_score'])
+
+ def test_get_next_submission_missing_location(self):
+ self.login(self.student, self.password)
+ url = reverse('peer_grading_get_next_submission', kwargs={'course_id': self.course_id})
+ data = {}
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertFalse(d['success'])
+ self.assertEqual(d['error'], "Missing required keys: location")
+
+ def test_save_grade_success(self):
+ self.login(self.student, self.password)
+ url = reverse('peer_grading_save_grade', kwargs={'course_id': self.course_id})
+ data = {'location': self.location,
+ 'submission_id': '1',
+ 'submission_key': 'fake key',
+ 'score': '2',
+ 'feedback': 'This is feedback',
+ 'rubric_scores[]': [1, 2]}
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertTrue(d['success'])
+
+ def test_save_grade_missing_keys(self):
+ self.login(self.student, self.password)
+ url = reverse('peer_grading_save_grade', kwargs={'course_id': self.course_id})
+ data = {}
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertFalse(d['success'])
+ self.assertTrue(d['error'].find('Missing required keys:') > -1)
+
+ def test_is_calibrated_success(self):
+ self.login(self.student, self.password)
+ url = reverse('peer_grading_is_student_calibrated', kwargs={'course_id': self.course_id})
+ data = {'location': self.location}
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertTrue(d['success'])
+ self.assertTrue('calibrated' in d)
+
+ def test_is_calibrated_failure(self):
+ self.login(self.student, self.password)
+ url = reverse('peer_grading_is_student_calibrated', kwargs={'course_id': self.course_id})
+ data = {}
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertFalse(d['success'])
+ self.assertFalse('calibrated' in d)
+
+ def test_show_calibration_essay_success(self):
+ self.login(self.student, self.password)
+
+ url = reverse('peer_grading_show_calibration_essay', kwargs={'course_id': self.course_id})
+ data = {'location': self.location}
+
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertTrue(d['success'])
+ self.assertIsNotNone(d['submission_id'])
+ self.assertIsNotNone(d['prompt'])
+ self.assertIsNotNone(d['submission_key'])
+ self.assertIsNotNone(d['max_score'])
+
+ def test_show_calibration_essay_missing_key(self):
+ self.login(self.student, self.password)
+
+ url = reverse('peer_grading_show_calibration_essay', kwargs={'course_id': self.course_id})
+ data = {}
+
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+
+ self.assertFalse(d['success'])
+ self.assertEqual(d['error'], "Missing required keys: location")
+
+ def test_save_calibration_essay_success(self):
+ self.login(self.student, self.password)
+ url = reverse('peer_grading_save_calibration_essay', kwargs={'course_id': self.course_id})
+ data = {'location': self.location,
+ 'submission_id': '1',
+ 'submission_key': 'fake key',
+ 'score': '2',
+ 'feedback': 'This is feedback',
+ 'rubric_scores[]': [1, 2]}
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertTrue(d['success'])
+ self.assertTrue('actual_score' in d)
+
+ def test_save_calibration_essay_missing_keys(self):
+ self.login(self.student, self.password)
+ url = reverse('peer_grading_save_calibration_essay', kwargs={'course_id': self.course_id})
+ data = {}
+ r = self.check_for_post_code(200, url, data)
+ d = json.loads(r.content)
+ self.assertFalse(d['success'])
+ self.assertTrue(d['error'].find('Missing required keys:') > -1)
+ self.assertFalse('actual_score' in d)
+
diff --git a/lms/djangoapps/open_ended_grading/views.py b/lms/djangoapps/open_ended_grading/views.py
index 858c9a4fd5..2ebd8778e6 100644
--- a/lms/djangoapps/open_ended_grading/views.py
+++ b/lms/djangoapps/open_ended_grading/views.py
@@ -13,10 +13,17 @@ from courseware.courses import get_course_with_access
from peer_grading_service import PeerGradingService
from peer_grading_service import MockPeerGradingService
+from controller_query_service import ControllerQueryService
from grading_service import GradingServiceError
import json
from .staff_grading import StaffGrading
+from student.models import unique_id_for_user
+import open_ended_util
+import open_ended_notifications
+
+from xmodule.modulestore.django import modulestore
+from xmodule.modulestore import search
log = logging.getLogger(__name__)
@@ -26,18 +33,34 @@ if settings.MOCK_PEER_GRADING:
else:
peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE)
+controller_url = open_ended_util.get_controller_url()
+controller_qs = ControllerQueryService(controller_url)
+
"""
Reverses the URL from the name and the course id, and then adds a trailing slash if
it does not exist yet
"""
def _reverse_with_slash(url_name, course_id):
- ajax_url = reverse(url_name, kwargs={'course_id': course_id})
+ ajax_url = _reverse_without_slash(url_name, course_id)
if not ajax_url.endswith('/'):
ajax_url += '/'
return ajax_url
+def _reverse_without_slash(url_name, course_id):
+ ajax_url = reverse(url_name, kwargs={'course_id': course_id})
+ return ajax_url
+DESCRIPTION_DICT = {
+ 'Peer Grading': "View all problems that require peer assessment in this particular course.",
+ 'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
+ 'Problems you have submitted': "View open ended problems that you have previously submitted for grading."
+ }
+ALERT_DICT = {
+ 'Peer Grading': "New submissions to grade",
+ 'Staff Grading': "New submissions to grade",
+ 'Problems you have submitted': "New grades have been returned"
+ }
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
"""
@@ -114,5 +137,111 @@ def peer_grading_problem(request, course_id):
'ajax_url': ajax_url,
# Checked above
'staff_access': False, })
+
+@cache_control(no_cache=True, no_store=True, must_revalidate=True)
+def student_problem_list(request, course_id):
+ '''
+ Show a student problem list
+ '''
+ course = get_course_with_access(request.user, course_id, 'load')
+ student_id = unique_id_for_user(request.user)
+
+ # call problem list service
+ success = False
+ error_text = ""
+ problem_list = []
+ base_course_url = reverse('courses')
+
+ try:
+ problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user))
+ problem_list_dict = json.loads(problem_list_json)
+ success = problem_list_dict['success']
+ if 'error' in problem_list_dict:
+ error_text = problem_list_dict['error']
+
+ problem_list = problem_list_dict['problem_list']
+
+ for i in xrange(0,len(problem_list)):
+ problem_url_parts = search.path_to_location(modulestore(), course.id, problem_list[i]['location'])
+ problem_url = base_course_url + "/"
+ for z in xrange(0,len(problem_url_parts)):
+ part = problem_url_parts[z]
+ if part is not None:
+ if z==1:
+ problem_url += "courseware/"
+ problem_url += part + "/"
+
+ problem_list[i].update({'actual_url' : problem_url})
+
+ except GradingServiceError:
+ error_text = "Error occured while contacting the grading service"
+ success = False
+ # catch the error if the json load fails
+ except ValueError:
+ error_text = "Could not get problem list"
+ success = False
+
+ ajax_url = _reverse_with_slash('open_ended_problems', course_id)
+
+ return render_to_response('open_ended_problems/open_ended_problems.html', {
+ 'course': course,
+ 'course_id': course_id,
+ 'ajax_url': ajax_url,
+ 'success': success,
+ 'problem_list': problem_list,
+ 'error_text': error_text,
+ # Checked above
+ 'staff_access': False, })
+
+@cache_control(no_cache=True, no_store=True, must_revalidate=True)
+def combined_notifications(request, course_id):
+ course = get_course_with_access(request.user, course_id, 'load')
+ user = request.user
+ notifications = open_ended_notifications.combined_notifications(course, user)
+ log.debug(notifications)
+ response = notifications['response']
+ notification_tuples=open_ended_notifications.NOTIFICATION_TYPES
+
+ notification_list = []
+ for response_num in xrange(0,len(notification_tuples)):
+ tag=notification_tuples[response_num][0]
+ if tag in response:
+ url_name = notification_tuples[response_num][1]
+ human_name = notification_tuples[response_num][2]
+ url = _reverse_without_slash(url_name, course_id)
+ has_img = response[tag]
+
+ # check to make sure we have descriptions and alert messages
+ if human_name in DESCRIPTION_DICT:
+ description = DESCRIPTION_DICT[human_name]
+ else:
+ description = ""
+
+ if human_name in ALERT_DICT:
+ alert_message = ALERT_DICT[human_name]
+ else:
+ alert_message = ""
+
+ notification_item = {
+ 'url' : url,
+ 'name' : human_name,
+ 'alert' : has_img,
+ 'description': description,
+ 'alert_message': alert_message
+ }
+ notification_list.append(notification_item)
+
+ ajax_url = _reverse_with_slash('open_ended_notifications', course_id)
+ combined_dict = {
+ 'error_text' : "",
+ 'notification_list' : notification_list,
+ 'course' : course,
+ 'success' : True,
+ 'ajax_url' : ajax_url,
+ }
+
+ return render_to_response('open_ended_problems/combined_notifications.html',
+ combined_dict
+ )
diff --git a/lms/envs/aws.py b/lms/envs/aws.py
index 47bffac91e..0779f1f684 100644
--- a/lms/envs/aws.py
+++ b/lms/envs/aws.py
@@ -10,8 +10,23 @@ import json
from .common import *
from logsettings import get_logger_config
+import os
-############################### ALWAYS THE SAME ################################
+# The service variant is specified as an environment variable. Typically this is set
+# in the service's upstart script and corresponds exactly to the service name.
+# Service variants apply config differences via env and auth JSON files,
+# the names of which correspond to the variant.
+SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
+
+# when no variant is specified we attempt to load an unvaried
+# config set.
+CONFIG_PREFIX = ""
+
+if SERVICE_VARIANT:
+ CONFIG_PREFIX = SERVICE_VARIANT + "."
+
+
+################### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
@@ -25,14 +40,15 @@ MITX_FEATURES['ENABLE_DISCUSSION_SERVICE'] = True
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
-# See https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
+# See
+# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
-########################### NON-SECURE ENV CONFIG ##############################
+################# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
-with open(ENV_ROOT / "env.json") as env_file:
+with open(ENV_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
SITE_NAME = ENV_TOKENS['SITE_NAME']
@@ -55,18 +71,19 @@ LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
syslog_addr=(ENV_TOKENS['SYSLOG_SERVER'], 514),
local_loglevel=local_loglevel,
- debug=False)
+ debug=False,
+ service_variant=SERVICE_VARIANT)
COURSE_LISTINGS = ENV_TOKENS.get('COURSE_LISTINGS', {})
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
-COMMENTS_SERVICE_URL = ENV_TOKENS.get("COMMENTS_SERVICE_URL",'')
-COMMENTS_SERVICE_KEY = ENV_TOKENS.get("COMMENTS_SERVICE_KEY",'')
+COMMENTS_SERVICE_URL = ENV_TOKENS.get("COMMENTS_SERVICE_URL", '')
+COMMENTS_SERVICE_KEY = ENV_TOKENS.get("COMMENTS_SERVICE_KEY", '')
CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull')
-############################## SECURE AUTH ITEMS ###############################
+############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
-with open(ENV_ROOT / "auth.json") as auth_file:
+with open(ENV_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']
@@ -84,8 +101,10 @@ XQUEUE_INTERFACE = AUTH_TOKENS['XQUEUE_INTERFACE']
MODULESTORE = AUTH_TOKENS.get('MODULESTORE', MODULESTORE)
CONTENTSTORE = AUTH_TOKENS.get('CONTENTSTORE', CONTENTSTORE)
-STAFF_GRADING_INTERFACE = AUTH_TOKENS.get('STAFF_GRADING_INTERFACE', STAFF_GRADING_INTERFACE)
-PEER_GRADING_INTERFACE = AUTH_TOKENS.get('PEER_GRADING_INTERFACE', PEER_GRADING_INTERFACE)
+STAFF_GRADING_INTERFACE = AUTH_TOKENS.get('STAFF_GRADING_INTERFACE',
+ STAFF_GRADING_INTERFACE)
+PEER_GRADING_INTERFACE = AUTH_TOKENS.get('PEER_GRADING_INTERFACE',
+ PEER_GRADING_INTERFACE)
PEARSON_TEST_USER = "pearsontest"
PEARSON_TEST_PASSWORD = AUTH_TOKENS.get("PEARSON_TEST_PASSWORD")
diff --git a/lms/static/coffee/fixtures/staff_grading.html b/lms/static/coffee/fixtures/staff_grading.html
new file mode 100644
index 0000000000..2fe5a39a17
--- /dev/null
+++ b/lms/static/coffee/fixtures/staff_grading.html
@@ -0,0 +1,76 @@
+
+
+
+
Staff grading
+
+
+
+
+
+
+
+
+
+
Instructions
+
+
This is the list of problems that current need to be graded in order to train the machine learning models. Each problem needs to be trained separately, and we have indicated the number of student submissions that need to be graded in order for a model to be generated. You can grade more than the minimum required number of submissions--this will improve the accuracy of machine learning, though with diminishing returns. You can see the current accuracy of machine learning while grading.
@@ -41,12 +29,8 @@ describe 'Courseware', ->
"""
@courseware.render()
- it 'detect the video elements and convert them', ->
- expect(window.Video).toHaveBeenCalledWith('1', '1.0:abc1234')
- expect(window.Video).toHaveBeenCalledWith('2', '1.0:def5678')
-
- it 'detect the problem element and convert it', ->
- expect(window.Problem).toHaveBeenCalledWith(3, 'problem_3', '/example/url/')
+ it 'ensure that the XModules have been loaded', ->
+ expect(XModule.loadModules).toHaveBeenCalled()
it 'detect the histrogram element and convert it', ->
expect(window.Histogram).toHaveBeenCalledWith('3', [[0, 1]])
diff --git a/lms/static/coffee/spec/navigation_spec.coffee b/lms/static/coffee/spec/navigation_spec.coffee
index cb98c2b64c..1340984e52 100644
--- a/lms/static/coffee/spec/navigation_spec.coffee
+++ b/lms/static/coffee/spec/navigation_spec.coffee
@@ -16,6 +16,7 @@ describe 'Navigation', ->
active: 1
header: 'h3'
autoHeight: false
+ heightStyle: 'content'
describe 'when there is no active section', ->
beforeEach ->
@@ -23,11 +24,12 @@ describe 'Navigation', ->
$('#accordion').append('
Yes. Online learners who demonstrate mastery of subjects can earn a certificate of mastery. Certificates will be issued by edX under the name of the underlying "X University" from where the course originated, i.e. HarvardX, MITx or BerkeleyX. For the courses in Fall 2012, those certificates will be free. There is a plan to charge a modest fee for certificates in the future.
+
Yes. Online learners who demonstrate mastery of subjects can earn a certificate
+ of mastery. Certificates will be issued at the discretion of edX and the underlying
+ X University that offered the course under the name of the underlying "X
+ University" from where the course originated, i.e. HarvardX, MITx or BerkeleyX.
+ For the courses in Fall 2012, those certificates will be free. There is a plan to
+ charge a modest fee for certificates in the future. Note: At this time, edX is
+ holding certificates for learners connected with Cuba, Iran, Syria and Sudan
+ pending confirmation that the issuance is in compliance with U.S. embargoes.
What will the scope of the online courses be? How many? Which faculty?
Will I get a certificate for taking an edX course?
-
-
Online learners who receive a passing grade for a course will receive a certificate of mastery from edX and the underlying X University that offered the course. For example, a certificate of mastery for MITx’s 6.002x Circuits & Electronics will come from edX and MITx.
+
+
Online learners who receive a passing grade for a course will receive a certificate
+ of mastery at the discretion of edX and the underlying X University that offered
+ the course. For example, a certificate of mastery for MITx’s 6.002x Circuits &
+ Electronics will come from edX and MITx.
+
If you passed the course, your certificate of mastery will be delivered online
+ through edx.org. So be sure to check your email in the weeks following the final
+ grading – you will be able to download and print your certificate. Note: At this
+ time, edX is holding certificates for learners connected with Cuba, Iran, Syria
+ and Sudan pending confirmation that the issuance is in compliance with U.S.
+ embargoes.
diff --git a/lms/templates/static_templates/press_releases/eric_lander_secret_of_life.html b/lms/templates/static_templates/press_releases/eric_lander_secret_of_life.html
new file mode 100644
index 0000000000..d91c8091d7
--- /dev/null
+++ b/lms/templates/static_templates/press_releases/eric_lander_secret_of_life.html
@@ -0,0 +1,92 @@
+<%! from django.core.urlresolvers import reverse %>
+<%inherit file="../../main.html" />
+
+<%namespace name='static' file='../../static_content.html'/>
+
+<%block name="title">Human Genome Pioneer Eric Lander to reveal “the secret of life”%block>
+
+
+
+
+
+
Human Genome Pioneer Eric Lander to reveal “the secret of life”
+
+
+
Broad Institute Director shares his MIT introductory biology course, covering topics in biochemistry, genetics and genomics, through edX.
+
+
+
+
+
Eric Lander, the founding director of the Broad Institute and a professor at MIT and Harvard Medical School.
CAMBRIDGE, MA – January 30, 2013 –
+In the past 10 years, the ability to decode or “sequence” DNA has grown by a million-fold, a stunning rate of progress that is producing a flood of information about human biology and disease. Because of these advances, the scientific community — and the world as a whole — stands on the verge of a revolution in biology. In the coming decades scientists will be able to understand how cells are “wired” and how that wiring is disrupted in human diseases ranging from diabetes to cancer to schizophrenia. Now, with his free online course, 7.00x Introductory Biology: “The Secret of Life”, genome pioneer Eric Lander, the founding director of the Broad Institute and a professor at MIT and Harvard Medical School, will explain to students around the world the basics of biology – the secret of life, so to speak – so that they can understand today’s revolution in biology.
+
+
EdX, the not-for-profit online learning initiative founded by Harvard University and the Massachusetts Institute of Technology (MIT), brings the best courses from the best faculty at the best institutions to anyone with an Internet connection. For the past 20 years, legendary teacher Lander has taught Introductory Biology to more than half of all MIT students. He has now adapted his course for online education, creating the newest course on the edX platform. The course, 7.00X, is now open for enrollment, with the first class slated for March 5th. This course will include innovative technology including a 3D molecule viewer and gene explorer tool to transform the learning experience. It is open to all levels and types of learners.
+
+
“Introducing the freshman class of MIT to the basics of biology is exhilarating,” said Lander. “Now, with this edX course, I look forward to teaching people around the world. There are no prerequisites for this course – other than curiosity and an interest in understanding some of the greatest scientific challenges of our time.”
+
+
Those taking the course will learn the fundamental ideas that underlie modern biology and medicine, including genetics, biochemistry, molecular biology, recombinant DNA, genomics and genomic medicine. They will become familiar with the structure and function of macromolecules such as DNA, RNA and proteins and understand how information flows within cells. Students will explore how mutations affect biological function and cause human disease. They will learn about modern molecular biological techniques and their wide-ranging impact.
+
+
“Eric Lander has created this remarkable digitally enhanced introduction to genetics and biology,” said Anant Agarwal, President of edX. “With this unique online version, he has brought the introductory biology course to a new level. It has been completely rethought and retooled, incorporating cutting-edge online interactive tools as well as community-building contests and milestone-based prizes.”
+
+
With online courses through edX like 7.00x, what matters isn’t what people have achieved or their transcripts, but their desire to learn. Students only need to come with a real interest in science and the desire to understand what's going on at the forefront of biology, and to learn the fundamental principles on which an amazing biomedical revolution is based – from one of the top scientist in the world. 7.00x Introductory Biology: The Secret of Life is now available for enrollment. Classes will start on March 5, 2013.
+
+
Dr. Eric Lander is President and Founding Director of the Broad Institute of Harvard and MIT, a new kind of collaborative biomedical research institution focused on genomic medicine. Dr. Lander is also Professor of Biology at MIT and Professor of Systems Biology at the Harvard Medical School. In addition, Dr. Lander serves as Co-Chair of the President’s Council of Advisors on Science and Technology, which advises the White House on science and technology. A geneticist, molecular biologist and mathematician, Dr. Lander has played a pioneering role in all aspects of the reading, understanding and medical application of the human genome. He was a principal leader of the international Human Genome Project (HGP) from 1990-2003, with his group being the largest contributor to the mapping and sequencing of the human genetic blueprint. Dr. Lander was an early pioneer in the free availability of genomic tools and information. Finally, he has mentored an extraordinary cadre of young scientists who have become the next generation of leaders in medical genomics. The recipient of numerous awards and honorary degrees, Dr. Lander was elected a member of the U.S. National Academy of Sciences in 1997 and of the U.S. Institute of Medicine in 1999.
The Eli and Edythe L. Broad Institute of MIT and Harvard was founded in 2003 to empower this generation of creative scientists to transform medicine with new genome-based knowledge. The Broad Institute seeks to describe all the molecular components of life and their connections; discover the molecular basis of major human diseases; develop effective new approaches to diagnostics and therapeutics; and disseminate discoveries, tools, methods and data openly to the entire scientific community.
+
+
Founded by MIT, Harvard and its affiliated hospitals, and the visionary Los Angeles philanthropists Eli and Edythe L. Broad, the Broad Institute includes faculty, professional staff and students from throughout the MIT and Harvard biomedical research communities and beyond, with collaborations spanning over a hundred private and public institutions in more than 40 countries worldwide. For further information about the Broad Institute, go to www.broadinstitute.org.
+
+
About edX
+
+
EdX is a not-for-profit enterprise of its founding partners Harvard University and the Massachusetts Institute of Technology focused on transforming online and on-campus learning through groundbreaking methodologies, game-like experiences and cutting-edge research. EdX provides inspirational and transformative knowledge to students of all ages, social status, and income who form worldwide communities of learners. EdX uses its open source technology to transcend physical and social borders. We’re focused on people, not profit. EdX is based in Cambridge, Massachusetts in the USA.
EdX is a not-for-profit enterprise of its founding partners Harvard University and the Massachusetts Institute of Technology focused on transforming online and on-campus learning through groundbreaking methodologies, game-like experiences and cutting-edge research. EdX provides inspirational and transformative knowledge to students of all ages, social status, and income who form worldwide communities of learners. EdX uses its open source technology to transcend physical and social borders. We’re focused on people, not profit. EdX is based in Cambridge, Massachusetts in the USA.