From b5ab766092c340da375d6accee111c4478508a6c Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Wed, 6 Mar 2013 17:57:33 -0500
Subject: [PATCH 01/22] initial framework for htmlbook
---
common/lib/xmodule/xmodule/course_module.py | 7 ++++
lms/djangoapps/courseware/tabs.py | 11 ++++++
lms/djangoapps/staticbook/views.py | 40 +++++++++++++++++++--
lms/urls.py | 5 +++
4 files changed, 60 insertions(+), 3 deletions(-)
diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py
index 72196f92a2..3923d3f056 100644
--- a/common/lib/xmodule/xmodule/course_module.py
+++ b/common/lib/xmodule/xmodule/course_module.py
@@ -358,6 +358,13 @@ class CourseDescriptor(SequenceDescriptor):
"""
return self.metadata.get('pdf_textbooks')
+ @property
+ def html_textbooks(self):
+ """
+ Return the html_textbooks config, as a python object, or None if not specified.
+ """
+ return self.metadata.get('html_textbooks')
+
@tabs.setter
def tabs(self, value):
self.metadata['tabs'] = value
diff --git a/lms/djangoapps/courseware/tabs.py b/lms/djangoapps/courseware/tabs.py
index b31aeb6b5a..4a9de0792f 100644
--- a/lms/djangoapps/courseware/tabs.py
+++ b/lms/djangoapps/courseware/tabs.py
@@ -130,6 +130,17 @@ def _pdf_textbooks(tab, user, course, active_page):
for index, textbook in enumerate(course.pdf_textbooks)]
return []
+def _html_textbooks(tab, user, course, active_page):
+ """
+ Generates one tab per textbook. Only displays if user is authenticated.
+ """
+ if user.is_authenticated():
+ # since there can be more than one textbook, active_page is e.g. "book/0".
+ return [CourseTab(textbook['tab_title'], reverse('html_book', args=[course.id, index]),
+ active_page == "htmltextbook/{0}".format(index))
+ for index, textbook in enumerate(course.html_textbooks)]
+ return []
+
def _staff_grading(tab, user, course, active_page):
if has_access(user, course, 'staff'):
link = reverse('staff_grading', args=[course.id])
diff --git a/lms/djangoapps/staticbook/views.py b/lms/djangoapps/staticbook/views.py
index a391b1cb32..8e973ac97d 100644
--- a/lms/djangoapps/staticbook/views.py
+++ b/lms/djangoapps/staticbook/views.py
@@ -57,12 +57,46 @@ def pdf_index(request, course_id, book_index, chapter=None, page=None):
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
- entry['url'] = remap_static_url(entry['url'], course)
+ entry['url'] = remap_static_url(entry['url'], course)
return render_to_response('static_pdfbook.html',
- {'book_index': book_index,
- 'course': course,
+ {'book_index': book_index,
+ 'course': course,
+ 'textbook': textbook,
+ 'chapter': chapter,
+ 'page': page,
+ 'staff_access': staff_access})
+
+@login_required
+def html_index(request, course_id, book_index, chapter=None, page=None):
+ course = get_course_with_access(request.user, course_id, 'load')
+ staff_access = has_access(request.user, course, 'staff')
+
+ book_index = int(book_index)
+ textbook = course.html_textbooks[book_index]
+
+ def remap_static_url(original_url, course):
+ input_url = "'" + original_url + "'"
+ output_url = replace_static_urls(
+ input_url,
+ course.metadata['data_dir'],
+ course_namespace=course.location
+ )
+ # strip off the quotes again...
+ return output_url[1:-1]
+
+ if 'url' in textbook:
+ textbook['url'] = remap_static_url(textbook['url'], course)
+ # then remap all the chapter URLs as well, if they are provided.
+ if 'chapters' in textbook:
+ for entry in textbook['chapters']:
+ entry['url'] = remap_static_url(entry['url'], course)
+
+
+ return render_to_response('static_htmlbook.html',
+ {'book_index': book_index,
+ 'course': course,
'textbook': textbook,
'chapter': chapter,
'page': page,
diff --git a/lms/urls.py b/lms/urls.py
index 5e5ac9a7f2..b85bc3d458 100644
--- a/lms/urls.py
+++ b/lms/urls.py
@@ -285,6 +285,11 @@ if settings.COURSEWARE_ENABLED:
url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/pdfbook/(?P[^/]*)/chapter/(?P[^/]*)/(?P[^/]*)$',
'staticbook.views.pdf_index'),
+ url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/htmlbook/(?P[^/]*)/$',
+ 'staticbook.views.html_index', name="html_book"),
+ url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/htmlbook/(?P[^/]*)/(?P[^/]*)$',
+ 'staticbook.views.html_index'),
+
url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/courseware/?$',
'courseware.views.index', name="courseware"),
url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/courseware/(?P[^/]*)/$',
From aee7d856942b403b768f9a0a9781502777c99401 Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Thu, 7 Mar 2013 02:01:42 -0500
Subject: [PATCH 02/22] fix bug in tabs
---
lms/djangoapps/courseware/tabs.py | 1 +
lms/templates/static_htmlbook.html | 71 ++++++++++++++++++++++++++++++
2 files changed, 72 insertions(+)
create mode 100644 lms/templates/static_htmlbook.html
diff --git a/lms/djangoapps/courseware/tabs.py b/lms/djangoapps/courseware/tabs.py
index 4a9de0792f..a956c03eeb 100644
--- a/lms/djangoapps/courseware/tabs.py
+++ b/lms/djangoapps/courseware/tabs.py
@@ -220,6 +220,7 @@ VALID_TAB_TYPES = {
'external_link': TabImpl(key_checker(['name', 'link']), _external_link),
'textbooks': TabImpl(null_validator, _textbooks),
'pdf_textbooks': TabImpl(null_validator, _pdf_textbooks),
+ 'html_textbooks': TabImpl(null_validator, _html_textbooks),
'progress': TabImpl(need_name, _progress),
'static_tab': TabImpl(key_checker(['name', 'url_slug']), _static_tab),
'peer_grading': TabImpl(null_validator, _peer_grading),
diff --git a/lms/templates/static_htmlbook.html b/lms/templates/static_htmlbook.html
new file mode 100644
index 0000000000..e3caa9c0ce
--- /dev/null
+++ b/lms/templates/static_htmlbook.html
@@ -0,0 +1,71 @@
+<%inherit file="main.html" />
+<%namespace name='static' file='static_content.html'/>
+<%block name="title">
+
+
+ ${course.number} Textbook
+%block>
+
+<%block name="headextra">
+<%static:css group='course'/>
+<%static:js group='courseware'/>
+%block>
+
+<%block name="js_extra">
+
+%block>
+
+<%include file="/courseware/course_navigation.html" args="active_page='htmltextbook/{0}'.format(book_index)" />
+
+
+
+
+ %if 'chapters' in textbook:
+
+ %endif
+
+
+
+
+
+
From 49dee5080afd551862e248205c1489e94e4a0081 Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Thu, 7 Mar 2013 18:45:00 -0500
Subject: [PATCH 03/22] first pass at chapter navigation
---
lms/templates/static_htmlbook.html | 126 +++++++++++++++++++++++------
1 file changed, 100 insertions(+), 26 deletions(-)
diff --git a/lms/templates/static_htmlbook.html b/lms/templates/static_htmlbook.html
index e3caa9c0ce..2cdec8c109 100644
--- a/lms/templates/static_htmlbook.html
+++ b/lms/templates/static_htmlbook.html
@@ -1,9 +1,6 @@
<%inherit file="main.html" />
<%namespace name='static' file='static_content.html'/>
-<%block name="title">
-
-
- ${course.number} Textbook
+<%block name="title">${course.number} Textbook
%block>
<%block name="headextra">
@@ -13,6 +10,85 @@
<%block name="js_extra">
%block>
@@ -43,29 +117,29 @@
%if 'chapters' in textbook:
-
%endif
-
+
+
-
From bed7dbdb6bf3bae7f51d8c1012f4fb82cebd44ae Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Fri, 8 Mar 2013 18:25:33 -0500
Subject: [PATCH 04/22] fix plumbing for anchors and chapters.
---
lms/djangoapps/staticbook/views.py | 4 ++--
lms/templates/static_htmlbook.html | 34 +++++++++++-------------------
lms/urls.py | 6 +++++-
3 files changed, 19 insertions(+), 25 deletions(-)
diff --git a/lms/djangoapps/staticbook/views.py b/lms/djangoapps/staticbook/views.py
index 8e973ac97d..72c72e3154 100644
--- a/lms/djangoapps/staticbook/views.py
+++ b/lms/djangoapps/staticbook/views.py
@@ -69,7 +69,7 @@ def pdf_index(request, course_id, book_index, chapter=None, page=None):
'staff_access': staff_access})
@login_required
-def html_index(request, course_id, book_index, chapter=None, page=None):
+def html_index(request, course_id, book_index, chapter=None, anchor_id=None):
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
@@ -99,5 +99,5 @@ def html_index(request, course_id, book_index, chapter=None, page=None):
'course': course,
'textbook': textbook,
'chapter': chapter,
- 'page': page,
+ 'anchor_id': anchor_id,
'staff_access': staff_access})
diff --git a/lms/templates/static_htmlbook.html b/lms/templates/static_htmlbook.html
index 2cdec8c109..9500a379ac 100644
--- a/lms/templates/static_htmlbook.html
+++ b/lms/templates/static_htmlbook.html
@@ -27,35 +27,22 @@
chapterToLoad = options.chapterNum;
}
var anchorToLoad = null;
-
- loadUrlAsIframe = function htmlViewLoadUrlAsIframe(url, anchorId) {
- if (anchorId != null) {
- var newurl = url + "#" + anchorId;
- $("#bookpage").src = newurl;
- } else {
- // $("#bookpage").src = url;
- // $("#bookpage").append("")
- parentElement = document.getElementById('bookpage');
- while (parentElement.hasChildNodes())
- parentElement.removeChild(parentElement.lastChild);
-
- $('"')
- // .css({'height':'40px','width':'200px'})
- .appendTo('#bookpage');
- }
- };
+ if (options.chapters) {
+ anchorToLoad = options.anchor_id;
+ }
loadUrl = function htmlViewLoadUrl(url, anchorId) {
- var newurl = url;
- if (anchorId != null) {
- newurl = url + "#" + anchorId;
- }
// clear out previous load, if any:
parentElement = document.getElementById('bookpage');
while (parentElement.hasChildNodes())
parentElement.removeChild(parentElement.lastChild);
+ // load new URL in:
+ $('#bookpage').load(url);
- $('#bookpage').load(newurl);
+ // if there is an anchor set, then go to that location:
+ if (anchorId != null) {
+ // TODO: add implementation....
+ }
};
@@ -105,6 +92,9 @@
%if chapter is not None:
options.chapterNum = ${chapter};
%endif
+ %if anchor_id is not None:
+ options.anchor_id = ${anchor_id};
+ %endif
$('#outerContainer').myHTMLViewer(options);
});
diff --git a/lms/urls.py b/lms/urls.py
index b85bc3d458..3377fa54c0 100644
--- a/lms/urls.py
+++ b/lms/urls.py
@@ -287,7 +287,11 @@ if settings.COURSEWARE_ENABLED:
url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/htmlbook/(?P[^/]*)/$',
'staticbook.views.html_index', name="html_book"),
- url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/htmlbook/(?P[^/]*)/(?P[^/]*)$',
+ url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/htmlbook/(?P[^/]*)/chapter/(?P[^/]*)/$',
+ 'staticbook.views.html_index'),
+ url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/htmlbook/(?P[^/]*)/chapter/(?P[^/]*)/(?P[^/]*)/$',
+ 'staticbook.views.html_index'),
+ url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/htmlbook/(?P[^/]*)/(?P[^/]*)/$',
'staticbook.views.html_index'),
url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/courseware/?$',
From fcb618f04eedd65602914582546c57aed77ad30b Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Mon, 11 Mar 2013 10:38:32 -0400
Subject: [PATCH 05/22] fix basic alignment issue with html textbook display
---
lms/static/sass/course/_textbook.scss | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/lms/static/sass/course/_textbook.scss b/lms/static/sass/course/_textbook.scss
index af9c2493fd..aba076af5b 100644
--- a/lms/static/sass/course/_textbook.scss
+++ b/lms/static/sass/course/_textbook.scss
@@ -158,6 +158,10 @@ div.book-wrapper {
img {
max-width: 100%;
}
+
+ div {
+ text-align: left;
+ }
}
}
From 2199ae673b1895ae5a004b6dcc88bca67beecf61 Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Mon, 11 Mar 2013 11:10:08 -0400
Subject: [PATCH 06/22] add checks that book_index is in range
---
lms/djangoapps/staticbook/views.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/lms/djangoapps/staticbook/views.py b/lms/djangoapps/staticbook/views.py
index 72c72e3154..ec34683997 100644
--- a/lms/djangoapps/staticbook/views.py
+++ b/lms/djangoapps/staticbook/views.py
@@ -1,7 +1,7 @@
from lxml import etree
-# from django.conf import settings
from django.contrib.auth.decorators import login_required
+from django.http import Http404
from mitxmako.shortcuts import render_to_response
from courseware.access import has_access
@@ -15,6 +15,8 @@ def index(request, course_id, book_index, page=None):
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
+ if book_index < 0 or book_index >= len(course.textbooks):
+ raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.textbooks[book_index]
table_of_contents = textbook.table_of_contents
@@ -40,6 +42,8 @@ def pdf_index(request, course_id, book_index, chapter=None, page=None):
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
+ if book_index < 0 or book_index >= len(course.pdf_textbooks):
+ raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.pdf_textbooks[book_index]
def remap_static_url(original_url, course):
@@ -74,6 +78,8 @@ def html_index(request, course_id, book_index, chapter=None, anchor_id=None):
staff_access = has_access(request.user, course, 'staff')
book_index = int(book_index)
+ if book_index < 0 or book_index >= len(course.html_textbooks):
+ raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.html_textbooks[book_index]
def remap_static_url(original_url, course):
From 588b27c9dc6911e9b8e0728f274b155cf9674c46 Mon Sep 17 00:00:00 2001
From: Julian Arni
Date: Mon, 11 Mar 2013 11:17:12 -0400
Subject: [PATCH 07/22] Added Descriptor method, fixed order issue.
---
common/lib/xmodule/xmodule/foldit_module.py | 14 ++++++++++----
lms/djangoapps/foldit/models.py | 2 +-
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py
index 920a5aed6d..2bcdda5bc2 100644
--- a/common/lib/xmodule/xmodule/foldit_module.py
+++ b/common/lib/xmodule/xmodule/foldit_module.py
@@ -86,7 +86,10 @@ class FolditModule(XModule):
"""
from foldit.models import Score
- return [(e['username'], e['score']) for e in Score.get_tops_n(10)]
+ leaders = [(e['username'], e['score']) for e in Score.get_tops_n(10)]
+ leaders.sort(key=lambda x: x[1])
+
+ return leaders
def get_html(self):
"""
@@ -173,7 +176,10 @@ class FolditDescriptor(XmlDescriptor, EditingDescriptor):
@classmethod
def definition_from_xml(cls, xml_object, system):
- """
- Get the xml_object's attributes.
- """
+ """ Get the xml_object's attributes. """
+
return {'metadata': xml_object.attrib}
+
+ def definition_to_xml(self):
+ xml_object = etree.Element('foldit')
+ return xml_object
diff --git a/lms/djangoapps/foldit/models.py b/lms/djangoapps/foldit/models.py
index 7041be1446..0dce956756 100644
--- a/lms/djangoapps/foldit/models.py
+++ b/lms/djangoapps/foldit/models.py
@@ -59,7 +59,7 @@ class Score(models.Model):
scores = Score.objects \
.filter(puzzle_id__in=puzzles) \
.annotate(total_score=models.Sum('best_score')) \
- .order_by('-total_score')[:n]
+ .order_by('total_score')[:n]
num = len(puzzles)
return [{'username': s.user.username,
From cd95872b52fa1f6b7b5055f5312573049e37a4f3 Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Mon, 11 Mar 2013 11:56:48 -0400
Subject: [PATCH 08/22] htmlbook-specific styling to make Heroes text
presentable.
---
lms/static/sass/course/_textbook.scss | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lms/static/sass/course/_textbook.scss b/lms/static/sass/course/_textbook.scss
index aba076af5b..b1f3a863b8 100644
--- a/lms/static/sass/course/_textbook.scss
+++ b/lms/static/sass/course/_textbook.scss
@@ -161,6 +161,15 @@ div.book-wrapper {
div {
text-align: left;
+ line-height: 1.6em;
+ margin-left: 5px;
+ margin-right: 5px;
+ margin-top: 5px;
+ margin-bottom: 5px;
+
+ .Paragraph, h2 {
+ margin-top: 10px;
+ }
}
}
}
From 53f85f2a07438fe84cfc6cda77537419a619a7fd Mon Sep 17 00:00:00 2001
From: Brian Wilson
Date: Mon, 11 Mar 2013 13:52:43 -0400
Subject: [PATCH 09/22] return empty list if no html or pdf textbook appears in
policy.json
---
common/lib/xmodule/xmodule/course_module.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py
index 3923d3f056..1c9928a502 100644
--- a/common/lib/xmodule/xmodule/course_module.py
+++ b/common/lib/xmodule/xmodule/course_module.py
@@ -356,14 +356,14 @@ class CourseDescriptor(SequenceDescriptor):
"""
Return the pdf_textbooks config, as a python object, or None if not specified.
"""
- return self.metadata.get('pdf_textbooks')
+ return self.metadata.get('pdf_textbooks', [])
@property
def html_textbooks(self):
"""
Return the html_textbooks config, as a python object, or None if not specified.
"""
- return self.metadata.get('html_textbooks')
+ return self.metadata.get('html_textbooks', [])
@tabs.setter
def tabs(self, value):
From 78c8358957d29b59a0972511f9c56db3ebfd0fac Mon Sep 17 00:00:00 2001
From: Chris Dodge
Date: Mon, 11 Mar 2013 14:51:44 -0400
Subject: [PATCH 10/22] clean up pylint violations from views.py
---
cms/djangoapps/contentstore/views.py | 28 +++++++++++++++++++++++++---
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/cms/djangoapps/contentstore/views.py b/cms/djangoapps/contentstore/views.py
index 34003d71a4..a4a3b620c4 100644
--- a/cms/djangoapps/contentstore/views.py
+++ b/cms/djangoapps/contentstore/views.py
@@ -86,12 +86,14 @@ def signup(request):
csrf_token = csrf(request)['csrf_token']
return render_to_response('signup.html', {'csrf': csrf_token})
+
def old_login_redirect(request):
'''
Redirect to the active login url.
'''
return redirect('login', permanent=True)
+
@ssl_login_shortcut
@ensure_csrf_cookie
def login_page(request):
@@ -104,6 +106,7 @@ def login_page(request):
'forgot_password_link': "//{base}/#forgot-password-modal".format(base=settings.LMS_BASE),
})
+
def howitworks(request):
if request.user.is_authenticated():
return index(request)
@@ -112,6 +115,7 @@ def howitworks(request):
# ==== Views for any logged-in user ==================================
+
@login_required
@ensure_csrf_cookie
def index(request):
@@ -145,6 +149,7 @@ def index(request):
# ==== Views with per-item permissions================================
+
def has_access(user, location, role=STAFF_ROLE_NAME):
'''
Return True if user allowed to access this piece of data
@@ -393,6 +398,7 @@ def preview_component(request, location):
'editor': wrap_xmodule(component.get_html, component, 'xmodule_edit.html')(),
})
+
@expect_json
@login_required
@ensure_csrf_cookie
@@ -709,6 +715,7 @@ def create_draft(request):
return HttpResponse()
+
@login_required
@expect_json
def publish_draft(request):
@@ -738,6 +745,7 @@ def unpublish_unit(request):
return HttpResponse()
+
@login_required
@expect_json
def clone_item(request):
@@ -768,8 +776,7 @@ def clone_item(request):
return HttpResponse(json.dumps({'id': dest_location.url()}))
-#@login_required
-#@ensure_csrf_cookie
+
def upload_asset(request, org, course, coursename):
'''
cdodge: this method allows for POST uploading of files into the course asset library, which will
@@ -831,6 +838,7 @@ def upload_asset(request, org, course, coursename):
response['asset_url'] = StaticContent.get_url_path_from_location(content.location)
return response
+
'''
This view will return all CMS users who are editors for the specified course
'''
@@ -863,6 +871,7 @@ def create_json_response(errmsg = None):
return resp
+
'''
This POST-back view will add a user - specified by email - to the list of editors for
the specified course
@@ -895,6 +904,7 @@ def add_user(request, location):
return create_json_response()
+
'''
This POST-back view will remove a user - specified by email - from the list of editors for
the specified course
@@ -926,6 +936,7 @@ def remove_user(request, location):
def landing(request, org, course, coursename):
return render_to_response('temp-course-landing.html', {})
+
@login_required
@ensure_csrf_cookie
def static_pages(request, org, course, coursename):
@@ -1029,6 +1040,7 @@ def edit_tabs(request, org, course, coursename):
'components': components
})
+
def not_found(request):
return render_to_response('error.html', {'error': '404'})
@@ -1064,6 +1076,7 @@ def course_info(request, org, course, name, provided_id=None):
'handouts_location': Location(['i4x', org, course, 'course_info', 'handouts']).url()
})
+
@expect_json
@login_required
@ensure_csrf_cookie
@@ -1161,6 +1174,7 @@ def get_course_settings(request, org, course, name):
"section": "details"})
})
+
@login_required
@ensure_csrf_cookie
def course_config_graders_page(request, org, course, name):
@@ -1184,6 +1198,7 @@ def course_config_graders_page(request, org, course, name):
'course_details': json.dumps(course_details, cls=CourseSettingsEncoder)
})
+
@login_required
@ensure_csrf_cookie
def course_config_advanced_page(request, org, course, name):
@@ -1207,6 +1222,7 @@ def course_config_advanced_page(request, org, course, name):
'advanced_dict' : json.dumps(CourseMetadata.fetch(location)),
})
+
@expect_json
@login_required
@ensure_csrf_cookie
@@ -1238,6 +1254,7 @@ def course_settings_updates(request, org, course, name, section):
return HttpResponse(json.dumps(manager.update_from_json(request.POST), cls=CourseSettingsEncoder),
mimetype="application/json")
+
@expect_json
@login_required
@ensure_csrf_cookie
@@ -1272,7 +1289,7 @@ def course_grader_updates(request, org, course, name, grader_index=None):
return HttpResponse(json.dumps(CourseGradingModel.update_grader_from_json(Location(['i4x', org, course, 'course', name]), request.POST)),
mimetype="application/json")
-
+
## NB: expect_json failed on ["key", "key2"] and json payload
@login_required
@ensure_csrf_cookie
@@ -1363,6 +1380,7 @@ def asset_index(request, org, course, name):
def edge(request):
return render_to_response('university_profiles/edge.html', {})
+
@login_required
@expect_json
def create_new_course(request):
@@ -1418,6 +1436,7 @@ def create_new_course(request):
return HttpResponse(json.dumps({'id': new_course.location.url()}))
+
def initialize_course_tabs(course):
# set up the default tabs
# I've added this because when we add static tabs, the LMS either expects a None for the tabs list or
@@ -1435,6 +1454,7 @@ def initialize_course_tabs(course):
modulestore('direct').update_metadata(course.location.url(), course.own_metadata)
+
@ensure_csrf_cookie
@login_required
def import_course(request, org, course, name):
@@ -1512,6 +1532,7 @@ def import_course(request, org, course, name):
course_module.location.name])
})
+
@ensure_csrf_cookie
@login_required
def generate_export_course(request, org, course, name):
@@ -1563,6 +1584,7 @@ def export_course(request, org, course, name):
'successful_import_redirect_url': ''
})
+
def event(request):
'''
A noop to swallow the analytics call so that cms methods don't spook and poor developers looking at
From 26f442458168d1b1247046fc0357a4fdf56d3861 Mon Sep 17 00:00:00 2001
From: Chris Dodge
Date: Mon, 11 Mar 2013 14:55:37 -0400
Subject: [PATCH 11/22] clean up some pylint violations
---
cms/djangoapps/contentstore/tests/test_contentstore.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py
index c0ab9ec60e..b62f691818 100644
--- a/cms/djangoapps/contentstore/tests/test_contentstore.py
+++ b/cms/djangoapps/contentstore/tests/test_contentstore.py
@@ -63,7 +63,6 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.client = Client()
self.client.login(username=uname, password=password)
-
def check_edit_unit(self, test_course_name):
import_from_xml(modulestore(), 'common/test/data/', [test_course_name])
From 4ec3683c3c0bf159bf64a0b7b8c992e4febb8a47 Mon Sep 17 00:00:00 2001
From: Chris Dodge
Date: Mon, 11 Mar 2013 14:57:04 -0400
Subject: [PATCH 12/22] clean up some pylint violations
---
.../models/settings/course_metadata.py | 26 +++++++++----------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/cms/djangoapps/models/settings/course_metadata.py b/cms/djangoapps/models/settings/course_metadata.py
index d088d75665..24245a39d5 100644
--- a/cms/djangoapps/models/settings/course_metadata.py
+++ b/cms/djangoapps/models/settings/course_metadata.py
@@ -10,7 +10,7 @@ class CourseMetadata(object):
'''
# __new_advanced_key__ is used by client not server; so, could argue against it being here
FILTERED_LIST = XModuleDescriptor.system_metadata_fields + ['start', 'end', 'enrollment_start', 'enrollment_end', 'tabs', 'graceperiod', '__new_advanced_key__']
-
+
@classmethod
def fetch(cls, course_location):
"""
@@ -18,17 +18,17 @@ class CourseMetadata(object):
"""
if not isinstance(course_location, Location):
course_location = Location(course_location)
-
+
course = {}
-
+
descriptor = get_modulestore(course_location).get_item(course_location)
-
+
for k, v in descriptor.metadata.iteritems():
if k not in cls.FILTERED_LIST:
course[k] = v
-
+
return course
-
+
@classmethod
def update_from_json(cls, course_location, jsondict):
"""
@@ -37,7 +37,7 @@ class CourseMetadata(object):
Ensures none of the fields are in the blacklist.
"""
descriptor = get_modulestore(course_location).get_item(course_location)
-
+
dirty = False
for k, v in jsondict.iteritems():
@@ -45,26 +45,26 @@ class CourseMetadata(object):
if k not in cls.FILTERED_LIST and (k not in descriptor.metadata or descriptor.metadata[k] != v):
dirty = True
descriptor.metadata[k] = v
-
+
if dirty:
get_modulestore(course_location).update_metadata(course_location, descriptor.metadata)
-
+
# Could just generate and return a course obj w/o doing any db reads, but I put the reads in as a means to confirm
# it persisted correctly
return cls.fetch(course_location)
-
+
@classmethod
def delete_key(cls, course_location, payload):
'''
Remove the given metadata key(s) from the course. payload can be a single key or [key..]
'''
descriptor = get_modulestore(course_location).get_item(course_location)
-
+
for key in payload['deleteKeys']:
if key in descriptor.metadata:
del descriptor.metadata[key]
-
+
get_modulestore(course_location).update_metadata(course_location, descriptor.metadata)
-
+
return cls.fetch(course_location)
\ No newline at end of file
From 6e92666a203ed9ec149a44458c24e6d3d1a6be97 Mon Sep 17 00:00:00 2001
From: Julian Arni
Date: Mon, 11 Mar 2013 14:57:19 -0400
Subject: [PATCH 13/22] Fixed score order.
Less is more.
---
lms/djangoapps/foldit/tests.py | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/lms/djangoapps/foldit/tests.py b/lms/djangoapps/foldit/tests.py
index 7127651601..afdd678f06 100644
--- a/lms/djangoapps/foldit/tests.py
+++ b/lms/djangoapps/foldit/tests.py
@@ -143,11 +143,12 @@ class FolditTestCase(TestCase):
def test_SetPlayerPuzzleScores_manyplayers(self):
"""
Check that when we send scores from multiple users, the correct order
- of scores is displayed.
+ of scores is displayed. Note that, before being processed by
+ display_score, lower scores are better.
"""
puzzle_id = ['1']
- player1_score = 0.07
- player2_score = 0.08
+ player1_score = 0.08
+ player2_score = 0.02
response1 = self.make_puzzle_score_request(puzzle_id, player1_score,
self.user)
@@ -164,8 +165,12 @@ class FolditTestCase(TestCase):
self.assertEqual(len(top_10), 2)
# Top score should be player2_score. Second should be player1_score
- self.assertEqual(top_10[0]['score'], Score.display_score(player2_score))
- self.assertEqual(top_10[1]['score'], Score.display_score(player1_score))
+ self.assertAlmostEqual(top_10[0]['score'],
+ Score.display_score(player2_score),
+ delta=0.5)
+ self.assertAlmostEqual(top_10[1]['score'],
+ Score.display_score(player1_score),
+ delta=0.5)
# Top score user should be self.user2.username
self.assertEqual(top_10[0]['username'], self.user2.username)
From 1c98d5fc8d095d3b0ef9594cbacf20225e76ec1c Mon Sep 17 00:00:00 2001
From: Jay Zoldak
Date: Mon, 11 Mar 2013 15:16:09 -0400
Subject: [PATCH 14/22] Fix modulestore tests for pep8 violations
---
.../xmodule/modulestore/tests/factories.py | 8 +++----
.../modulestore/tests/test_location.py | 22 +++++++++----------
.../modulestore/tests/test_modulestore.py | 4 ++--
3 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/common/lib/xmodule/xmodule/modulestore/tests/factories.py b/common/lib/xmodule/xmodule/modulestore/tests/factories.py
index 1259da2690..f2a291d680 100644
--- a/common/lib/xmodule/xmodule/modulestore/tests/factories.py
+++ b/common/lib/xmodule/xmodule/modulestore/tests/factories.py
@@ -46,10 +46,10 @@ class XModuleCourseFactory(Factory):
new_course.metadata['start'] = stringify_time(gmtime())
new_course.tabs = [{"type": "courseware"},
- {"type": "course_info", "name": "Course Info"},
- {"type": "discussion", "name": "Discussion"},
- {"type": "wiki", "name": "Wiki"},
- {"type": "progress", "name": "Progress"}]
+ {"type": "course_info", "name": "Course Info"},
+ {"type": "discussion", "name": "Discussion"},
+ {"type": "wiki", "name": "Wiki"},
+ {"type": "progress", "name": "Progress"}]
# Update the data in the mongo datastore
store.update_metadata(new_course.location.url(), new_course.own_metadata)
diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_location.py b/common/lib/xmodule/xmodule/modulestore/tests/test_location.py
index 0772951884..f0f0e8bf48 100644
--- a/common/lib/xmodule/xmodule/modulestore/tests/test_location.py
+++ b/common/lib/xmodule/xmodule/modulestore/tests/test_location.py
@@ -119,11 +119,11 @@ def test_equality():
# All the cleaning functions should do the same thing with these
general_pairs = [('', ''),
- (' ', '_'),
- ('abc,', 'abc_'),
- ('ab fg!@//\\aj', 'ab_fg_aj'),
- (u"ab\xA9", "ab_"), # no unicode allowed for now
- ]
+ (' ', '_'),
+ ('abc,', 'abc_'),
+ ('ab fg!@//\\aj', 'ab_fg_aj'),
+ (u"ab\xA9", "ab_"), # no unicode allowed for now
+ ]
def test_clean():
@@ -131,7 +131,7 @@ def test_clean():
('a:b', 'a_b'), # no colons in non-name components
('a-b', 'a-b'), # dashes ok
('a.b', 'a.b'), # dot ok
- ]
+ ]
for input, output in pairs:
assert_equals(Location.clean(input), output)
@@ -141,17 +141,17 @@ def test_clean_for_url_name():
('a:b', 'a:b'), # colons ok in names
('a-b', 'a-b'), # dashes ok in names
('a.b', 'a.b'), # dot ok in names
- ]
+ ]
for input, output in pairs:
assert_equals(Location.clean_for_url_name(input), output)
def test_clean_for_html():
pairs = general_pairs + [
- ("a:b", "a_b"), # no colons for html use
- ("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.)
- ('a.b', 'a_b'), # no dots.
- ]
+ ("a:b", "a_b"), # no colons for html use
+ ("a-b", "a-b"), # dashes ok (though need to be replaced in various use locations. ugh.)
+ ('a.b', 'a_b'), # no dots.
+ ]
for input, output in pairs:
assert_equals(Location.clean_for_html(input), output)
diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py
index 94ea622907..469eedac05 100644
--- a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py
+++ b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py
@@ -12,7 +12,7 @@ def check_path_to_location(modulestore):
("edX/toy/2012_Fall", "Overview", "Welcome", None)),
("i4x://edX/toy/chapter/Overview",
("edX/toy/2012_Fall", "Overview", None, None)),
- )
+ )
course_id = "edX/toy/2012_Fall"
for location, expected in should_work:
@@ -20,6 +20,6 @@ def check_path_to_location(modulestore):
not_found = (
"i4x://edX/toy/video/WelcomeX", "i4x://edX/toy/course/NotHome"
- )
+ )
for location in not_found:
assert_raises(ItemNotFoundError, path_to_location, modulestore, course_id, location)
From cc2c26b9240271eb79e334b4c2768ab7684dc592 Mon Sep 17 00:00:00 2001
From: John Hess
Date: Mon, 11 Mar 2013 18:04:21 -0400
Subject: [PATCH 15/22] reverted change to definition to/from xml
---
common/lib/xmodule/xmodule/foldit_module.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py
index 2bcdda5bc2..65632fa031 100644
--- a/common/lib/xmodule/xmodule/foldit_module.py
+++ b/common/lib/xmodule/xmodule/foldit_module.py
@@ -176,10 +176,9 @@ class FolditDescriptor(XmlDescriptor, EditingDescriptor):
@classmethod
def definition_from_xml(cls, xml_object, system):
- """ Get the xml_object's attributes. """
+ """
+ Get the xml_object's attributes.
+ """
return {'metadata': xml_object.attrib}
- def definition_to_xml(self):
- xml_object = etree.Element('foldit')
- return xml_object
From 00c35c95a4140741da9669c7d01965cba98a7170 Mon Sep 17 00:00:00 2001
From: John Hess
Date: Mon, 11 Mar 2013 18:05:10 -0400
Subject: [PATCH 16/22] whitespace back to original
---
common/lib/xmodule/xmodule/foldit_module.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py
index 65632fa031..152930876d 100644
--- a/common/lib/xmodule/xmodule/foldit_module.py
+++ b/common/lib/xmodule/xmodule/foldit_module.py
@@ -176,9 +176,7 @@ class FolditDescriptor(XmlDescriptor, EditingDescriptor):
@classmethod
def definition_from_xml(cls, xml_object, system):
- """
+ """
Get the xml_object's attributes.
"""
-
return {'metadata': xml_object.attrib}
-
From ad92eb4d022da3c7aba6df37929905621b2d4610 Mon Sep 17 00:00:00 2001
From: John Hess
Date: Mon, 11 Mar 2013 18:05:34 -0400
Subject: [PATCH 17/22] whitespace back to original. for real though.
---
common/lib/xmodule/xmodule/foldit_module.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py
index 152930876d..37255bd5cb 100644
--- a/common/lib/xmodule/xmodule/foldit_module.py
+++ b/common/lib/xmodule/xmodule/foldit_module.py
@@ -177,6 +177,6 @@ class FolditDescriptor(XmlDescriptor, EditingDescriptor):
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
- Get the xml_object's attributes.
+ Get the xml_object's attributes.
"""
return {'metadata': xml_object.attrib}
From b06615298fa71ce6e70d0b756ba1ab6530e8d82f Mon Sep 17 00:00:00 2001
From: Victor Shnayder
Date: Mon, 11 Mar 2013 15:24:14 -0700
Subject: [PATCH 18/22] Add logging to debug strange cohort behavior on prod.
Strange behavior:
- There are 20 cohorts in HeroesX that users should be randomly
assigned to
- Almost all, but not all (e.g. ~3300 / 3400) users end up in the
same group.
- testing manually locally and on prod in a django shell shows
nothing wrong
---
common/djangoapps/course_groups/cohorts.py | 24 ++++++++-----
.../djangoapps/course_groups/tests/tests.py | 35 +++++++++++++++++--
2 files changed, 49 insertions(+), 10 deletions(-)
diff --git a/common/djangoapps/course_groups/cohorts.py b/common/djangoapps/course_groups/cohorts.py
index f0234ec71a..c362ed4e89 100644
--- a/common/djangoapps/course_groups/cohorts.py
+++ b/common/djangoapps/course_groups/cohorts.py
@@ -65,23 +65,23 @@ def is_commentable_cohorted(course_id, commentable_id):
ans))
return ans
-
+
def get_cohorted_commentables(course_id):
"""
Given a course_id return a list of strings representing cohorted commentables
"""
course = courses.get_course_by_id(course_id)
-
+
if not course.is_cohorted:
# this is the easy case :)
ans = []
- else:
+ else:
ans = course.cohorted_discussions
return ans
-
-
+
+
def get_cohort(user, course_id):
"""
Given a django User and a course_id, return the user's cohort in that
@@ -120,7 +120,8 @@ def get_cohort(user, course_id):
return None
choices = course.auto_cohort_groups
- if len(choices) == 0:
+ n = len(choices)
+ if n == 0:
# Nowhere to put user
log.warning("Course %s is auto-cohorted, but there are no"
" auto_cohort_groups specified",
@@ -128,12 +129,19 @@ def get_cohort(user, course_id):
return None
# Put user in a random group, creating it if needed
- group_name = random.choice(choices)
+ choice = random.randrange(0, n)
+ group_name = choices[choice]
+
+ # Victor: we are seeing very strange behavior on prod, where almost all users
+ # end up in the same group. Log at INFO to try to figure out what's going on.
+ log.info("DEBUG: adding user {0} to cohort {1}. choice={2}".format(
+ user, group_name,choice))
+
group, created = CourseUserGroup.objects.get_or_create(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=group_name)
-
+
user.course_groups.add(group)
return group
diff --git a/common/djangoapps/course_groups/tests/tests.py b/common/djangoapps/course_groups/tests/tests.py
index efed39d536..88d9c1f508 100644
--- a/common/djangoapps/course_groups/tests/tests.py
+++ b/common/djangoapps/course_groups/tests/tests.py
@@ -6,7 +6,7 @@ from django.test.utils import override_settings
from course_groups.models import CourseUserGroup
from course_groups.cohorts import (get_cohort, get_course_cohorts,
- is_commentable_cohorted)
+ is_commentable_cohorted, get_cohort_by_name)
from xmodule.modulestore.django import modulestore, _MODULESTORES
@@ -168,7 +168,7 @@ class TestCohorts(django.test.TestCase):
self.assertEquals(get_cohort(user3, course.id), None,
"No groups->no auto-cohorting")
-
+
# Now make it different
self.config_course_cohorts(course, [], cohorted=True,
auto_cohort=True,
@@ -180,6 +180,37 @@ class TestCohorts(django.test.TestCase):
"user2 should still be in originally placed cohort")
+ def test_auto_cohorting_randomization(self):
+ """
+ Make sure get_cohort() randomizes properly.
+ """
+ course = modulestore().get_course("edX/toy/2012_Fall")
+ self.assertEqual(course.id, "edX/toy/2012_Fall")
+ self.assertFalse(course.is_cohorted)
+
+ groups = ["group_{0}".format(n) for n in range(5)]
+ self.config_course_cohorts(course, [], cohorted=True,
+ auto_cohort=True,
+ auto_cohort_groups=groups)
+
+ # Assign 100 users to cohorts
+ for i in range(100):
+ user = User.objects.create(username="test_{0}".format(i),
+ email="a@b{0}.com".format(i))
+ get_cohort(user, course.id)
+
+ # Now make sure that the assignment was at least vaguely random:
+ # each cohort should have at least 1, and fewer than 50 students.
+ # (with 5 groups, probability of 0 users in any group is about
+ # .8**100= 2.0e-10)
+ for cohort_name in groups:
+ cohort = get_cohort_by_name(course.id, cohort_name)
+ num_users = cohort.users.count()
+ self.assertGreater(num_users, 1)
+ self.assertLess(num_users, 50)
+
+
+
def test_get_course_cohorts(self):
course1_id = 'a/b/c'
course2_id = 'e/f/g'
From bf0e5a270183b90becc56ec7f709deef7bb26fd6 Mon Sep 17 00:00:00 2001
From: Chris Dodge
Date: Mon, 11 Mar 2013 21:14:24 -0400
Subject: [PATCH 19/22] do a bunch of pylint improvements
---
.../contentstore/tests/test_contentstore.py | 159 +++++++++---------
1 file changed, 78 insertions(+), 81 deletions(-)
diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py
index 58197b9762..99ef1169b1 100644
--- a/cms/djangoapps/contentstore/tests/test_contentstore.py
+++ b/cms/djangoapps/contentstore/tests/test_contentstore.py
@@ -9,10 +9,8 @@ from tempdir import mkdtemp_clean
import json
from fs.osfs import OSFS
import copy
-from mock import Mock
-from json import dumps, loads
+from json import loads
-from student.models import Registration
from django.contrib.auth.models import User
from cms.djangoapps.contentstore.utils import get_modulestore
@@ -22,12 +20,11 @@ from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore import Location
from xmodule.modulestore.store_utilities import clone_course
from xmodule.modulestore.store_utilities import delete_course
-from xmodule.modulestore.django import modulestore, _MODULESTORES
+from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.templates import update_templates
from xmodule.modulestore.xml_exporter import export_to_xml
from xmodule.modulestore.xml_importer import import_from_xml
-from xmodule.templates import update_templates
from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor
@@ -81,8 +78,8 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
def test_static_tab_reordering(self):
import_from_xml(modulestore(), 'common/test/data/', ['full'])
- ms = modulestore('direct')
- course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
+ module_store = modulestore('direct')
+ course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
# reverse the ordering
reverse_tabs = []
@@ -90,9 +87,9 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
if tab['type'] == 'static_tab':
reverse_tabs.insert(0, 'i4x://edX/full/static_tab/{0}'.format(tab['url_slug']))
- resp = self.client.post(reverse('reorder_static_tabs'), json.dumps({'tabs': reverse_tabs}), "application/json")
+ self.client.post(reverse('reorder_static_tabs'), json.dumps({'tabs': reverse_tabs}), "application/json")
- course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
+ course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
# compare to make sure that the tabs information is in the expected order after the server call
course_tabs = []
@@ -105,28 +102,29 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
def test_delete(self):
import_from_xml(modulestore(), 'common/test/data/', ['full'])
- ms = modulestore('direct')
- course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
+ module_store = modulestore('direct')
- sequential = ms.get_item(Location(['i4x', 'edX', 'full', 'sequential','Administrivia_and_Circuit_Elements', None]))
+ sequential = module_store.get_item(Location(['i4x', 'edX', 'full', 'sequential', 'Administrivia_and_Circuit_Elements', None]))
- chapter = ms.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None]))
+ chapter = module_store.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None]))
# make sure the parent no longer points to the child object which was deleted
self.assertTrue(sequential.location.url() in chapter.definition['children'])
- resp = self.client.post(reverse('delete_item'), json.dumps({'id': sequential.location.url(), 'delete_children':'true'}), "application/json")
+ self.client.post(reverse('delete_item'),
+ json.dumps({'id': sequential.location.url(), 'delete_children':'true'}),
+ "application/json")
- bFound = False
+ found = False
try:
- sequential = ms.get_item(Location(['i4x', 'edX', 'full', 'sequential','Administrivia_and_Circuit_Elements', None]))
- bFound = True
+ module_store.get_item(Location(['i4x', 'edX', 'full', 'sequential', 'Administrivia_and_Circuit_Elements', None]))
+ found = True
except ItemNotFoundError:
pass
- self.assertFalse(bFound)
+ self.assertFalse(found)
- chapter = ms.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None]))
+ chapter = module_store.get_item(Location(['i4x', 'edX', 'full', 'chapter','Week_1', None]))
# make sure the parent no longer points to the child object which was deleted
self.assertFalse(sequential.location.url() in chapter.definition['children'])
@@ -139,22 +137,22 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
while there is a base definition in /about/effort.html
'''
import_from_xml(modulestore(), 'common/test/data/', ['full'])
- ms = modulestore('direct')
- effort = ms.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None]))
+ module_store = modulestore('direct')
+ effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None]))
self.assertEqual(effort.definition['data'], '6 hours')
# this one should be in a non-override folder
- effort = ms.get_item(Location(['i4x', 'edX', 'full', 'about', 'end_date', None]))
+ effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'end_date', None]))
self.assertEqual(effort.definition['data'], 'TBD')
def test_remove_hide_progress_tab(self):
import_from_xml(modulestore(), 'common/test/data/', ['full'])
- ms = modulestore('direct')
- cs = contentstore()
+ module_store = modulestore('direct')
+ content_store = contentstore()
source_location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
- course = ms.get_item(source_location)
+ course = module_store.get_item(source_location)
self.assertNotIn('hide_progress_tab', course.metadata)
def test_clone_course(self):
@@ -173,19 +171,19 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
data = parse_json(resp)
self.assertEqual(data['id'], 'i4x://MITx/999/course/Robot_Super_Course')
- ms = modulestore('direct')
- cs = contentstore()
+ module_store = modulestore('direct')
+ content_store = contentstore()
source_location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
dest_location = CourseDescriptor.id_to_location('MITx/999/Robot_Super_Course')
- clone_course(ms, cs, source_location, dest_location)
+ clone_course(module_store, content_store, source_location, dest_location)
# now loop through all the units in the course and verify that the clone can render them, which
# means the objects are at least present
- items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
+ items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
self.assertGreater(len(items), 0)
- clone_items = ms.get_items(Location(['i4x', 'MITx', '999', 'vertical', None]))
+ clone_items = module_store.get_items(Location(['i4x', 'MITx', '999', 'vertical', None]))
self.assertGreater(len(clone_items), 0)
for descriptor in items:
new_loc = descriptor.location._replace(org='MITx', course='999')
@@ -196,14 +194,14 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
def test_delete_course(self):
import_from_xml(modulestore(), 'common/test/data/', ['full'])
- ms = modulestore('direct')
- cs = contentstore()
+ module_store = modulestore('direct')
+ content_store = contentstore()
location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
- delete_course(ms, cs, location, commit=True)
+ delete_course(module_store, content_store, location, commit=True)
- items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
+ items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
self.assertEqual(len(items), 0)
def verify_content_existence(self, modulestore, root_dir, location, dirname, category_name, filename_suffix=''):
@@ -218,10 +216,10 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.assertTrue(fs.exists(item.location.name + filename_suffix))
def test_export_course(self):
- ms = modulestore('direct')
- cs = contentstore()
+ module_store = modulestore('direct')
+ content_store = contentstore()
- import_from_xml(ms, 'common/test/data/', ['full'])
+ import_from_xml(module_store, 'common/test/data/', ['full'])
location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
root_dir = path(mkdtemp_clean())
@@ -229,43 +227,43 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
- export_to_xml(ms, cs, location, root_dir, 'test_export')
+ export_to_xml(module_store, content_store, location, root_dir, 'test_export')
# check for static tabs
- self.verify_content_existence(ms, root_dir, location, 'tabs', 'static_tab', '.html')
+ self.verify_content_existence(module_store, root_dir, location, 'tabs', 'static_tab', '.html')
# check for custom_tags
- self.verify_content_existence(ms, root_dir, location, 'info', 'course_info', '.html')
+ self.verify_content_existence(module_store, root_dir, location, 'info', 'course_info', '.html')
# check for custom_tags
- self.verify_content_existence(ms, root_dir, location, 'custom_tags', 'custom_tag_template')
+ self.verify_content_existence(module_store, root_dir, location, 'custom_tags', 'custom_tag_template')
# check for graiding_policy.json
fs = OSFS(root_dir / 'test_export/policies/6.002_Spring_2012')
self.assertTrue(fs.exists('grading_policy.json'))
- course = ms.get_item(location)
+ course = module_store.get_item(location)
# compare what's on disk compared to what we have in our course
- with fs.open('grading_policy.json','r') as grading_policy:
- on_disk = loads(grading_policy.read())
+ with fs.open('grading_policy.json', 'r') as grading_policy:
+ on_disk = loads(grading_policy.read())
self.assertEqual(on_disk, course.definition['data']['grading_policy'])
#check for policy.json
self.assertTrue(fs.exists('policy.json'))
# compare what's on disk to what we have in the course module
- with fs.open('policy.json','r') as course_policy:
+ with fs.open('policy.json', 'r') as course_policy:
on_disk = loads(course_policy.read())
self.assertIn('course/6.002_Spring_2012', on_disk)
self.assertEqual(on_disk['course/6.002_Spring_2012'], course.metadata)
# remove old course
- delete_course(ms, cs, location)
+ delete_course(module_store, content_store, location)
# reimport
- import_from_xml(ms, root_dir, ['test_export'])
+ import_from_xml(module_store, root_dir, ['test_export'])
- items = ms.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
+ items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
self.assertGreater(len(items), 0)
for descriptor in items:
print "Checking {0}....".format(descriptor.location.url())
@@ -275,11 +273,11 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
shutil.rmtree(root_dir)
def test_course_handouts_rewrites(self):
- ms = modulestore('direct')
- cs = contentstore()
+ module_store = modulestore('direct')
+ content_store = contentstore()
# import a test course
- import_from_xml(ms, 'common/test/data/', ['full'])
+ import_from_xml(module_store, 'common/test/data/', ['full'])
handout_location = Location(['i4x', 'edX', 'full', 'course_info', 'handouts'])
@@ -294,32 +292,32 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.assertContains(resp, '/c4x/edX/full/asset/handouts_schematic_tutorial.pdf')
def test_export_course_with_unknown_metadata(self):
- ms = modulestore('direct')
- cs = contentstore()
+ module_store = modulestore('direct')
+ content_store = contentstore()
- import_from_xml(ms, 'common/test/data/', ['full'])
+ import_from_xml(module_store, 'common/test/data/', ['full'])
location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
root_dir = path(mkdtemp_clean())
- course = ms.get_item(location)
+ course = module_store.get_item(location)
# add a bool piece of unknown metadata so we can verify we don't throw an exception
course.metadata['new_metadata'] = True
- ms.update_metadata(location, course.metadata)
+ module_store.update_metadata(location, course.metadata)
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
- bExported = False
+ exported = False
try:
- export_to_xml(ms, cs, location, root_dir, 'test_export')
- bExported = True
+ export_to_xml(module_store, content_store, location, root_dir, 'test_export')
+ exported = True
except Exception:
pass
- self.assertTrue(bExported)
+ self.assertTrue(exported)
class ContentStoreTest(ModuleStoreTestCase):
"""
@@ -458,7 +456,7 @@ class ContentStoreTest(ModuleStoreTestCase):
def test_capa_module(self):
"""Test that a problem treats markdown specially."""
- course = CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
+ CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
problem_data = {
'parent_location': 'i4x://MITx/999/course/Robot_Super_Course',
@@ -480,10 +478,10 @@ class ContentStoreTest(ModuleStoreTestCase):
def test_import_metadata_with_attempts_empty_string(self):
import_from_xml(modulestore(), 'common/test/data/', ['simple'])
- ms = modulestore('direct')
+ module_store = modulestore('direct')
did_load_item = False
try:
- ms.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None]))
+ module_store.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None]))
did_load_item = True
except ItemNotFoundError:
pass
@@ -494,10 +492,10 @@ class ContentStoreTest(ModuleStoreTestCase):
def test_metadata_inheritance(self):
import_from_xml(modulestore(), 'common/test/data/', ['full'])
- ms = modulestore('direct')
- course = ms.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
+ module_store = modulestore('direct')
+ course = module_store.get_item(Location(['i4x', 'edX', 'full', 'course', '6.002_Spring_2012', None]))
- verticals = ms.get_items(['i4x', 'edX', 'full', 'vertical', None, None])
+ verticals = module_store.get_items(['i4x', 'edX', 'full', 'vertical', None, None])
# let's assert on the metadata_inheritance on an existing vertical
for vertical in verticals:
@@ -508,15 +506,15 @@ class ContentStoreTest(ModuleStoreTestCase):
new_component_location = Location('i4x', 'edX', 'full', 'html', 'new_component')
source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Blank_HTML_Page')
-
+
# crate a new module and add it as a child to a vertical
- ms.clone_item(source_template_location, new_component_location)
+ module_store.clone_item(source_template_location, new_component_location)
parent = verticals[0]
- ms.update_children(parent.location, parent.definition.get('children', []) + [new_component_location.url()])
+ module_store.update_children(parent.location, parent.definition.get('children', []) + [new_component_location.url()])
# flush the cache
- ms.get_cached_metadata_inheritance_tree(new_component_location, -1)
- new_module = ms.get_item(new_component_location)
+ module_store.get_cached_metadata_inheritance_tree(new_component_location, -1)
+ new_module = module_store.get_item(new_component_location)
# check for grace period definition which should be defined at the course level
self.assertIn('graceperiod', new_module.metadata)
@@ -529,11 +527,11 @@ class ContentStoreTest(ModuleStoreTestCase):
# now let's define an override at the leaf node level
#
new_module.metadata['graceperiod'] = '1 day'
- ms.update_metadata(new_module.location, new_module.metadata)
+ module_store.update_metadata(new_module.location, new_module.metadata)
# flush the cache and refetch
- ms.get_cached_metadata_inheritance_tree(new_component_location, -1)
- new_module = ms.get_item(new_component_location)
+ module_store.get_cached_metadata_inheritance_tree(new_component_location, -1)
+ new_module = module_store.get_item(new_component_location)
self.assertIn('graceperiod', new_module.metadata)
self.assertEqual('1 day', new_module.metadata['graceperiod'])
@@ -542,15 +540,15 @@ class ContentStoreTest(ModuleStoreTestCase):
class TemplateTestCase(ModuleStoreTestCase):
def test_template_cleanup(self):
- ms = modulestore('direct')
+ module_store = modulestore('direct')
# insert a bogus template in the store
bogus_template_location = Location('i4x', 'edx', 'templates', 'html', 'bogus')
source_template_location = Location('i4x', 'edx', 'templates', 'html', 'Blank_HTML_Page')
-
- ms.clone_item(source_template_location, bogus_template_location)
- verify_create = ms.get_item(bogus_template_location)
+ module_store.clone_item(source_template_location, bogus_template_location)
+
+ verify_create = module_store.get_item(bogus_template_location)
self.assertIsNotNone(verify_create)
# now run cleanup
@@ -559,10 +557,9 @@ class TemplateTestCase(ModuleStoreTestCase):
# now try to find dangling template, it should not be in DB any longer
asserted = False
try:
- verify_create = ms.get_item(bogus_template_location)
+ verify_create = module_store.get_item(bogus_template_location)
except ItemNotFoundError:
asserted = True
- self.assertTrue(asserted)
-
+ self.assertTrue(asserted)
From 554cb752fa0981aa527d23a1c565b2308692d37f Mon Sep 17 00:00:00 2001
From: Vik Paruchuri
Date: Tue, 12 Mar 2013 12:20:11 -0400
Subject: [PATCH 20/22] Pep8 cleanup
---
.../xmodule/combined_open_ended_module.py | 12 +-
.../combined_open_ended_modulev1.py | 161 ++++++++-------
.../combined_open_ended_rubric.py | 145 ++++++++------
.../controller_query_service.py | 14 +-
.../grading_service_module.py | 13 +-
.../open_ended_image_submission.py | 10 +-
.../open_ended_module.py | 61 +++---
.../openendedchild.py | 43 ++--
.../peer_grading_service.py | 9 +-
.../self_assessment_module.py | 10 +-
.../xmodule/xmodule/peer_grading_module.py | 64 +++---
.../xmodule/tests/test_combined_open_ended.py | 183 +++++++++---------
.../xmodule/tests/test_self_assessment.py | 46 ++---
.../xmodule/tests/test_util_open_ended.py | 18 +-
.../open_ended_notifications.py | 24 ++-
.../open_ended_grading/staff_grading.py | 1 +
.../staff_grading_service.py | 65 ++++---
lms/djangoapps/open_ended_grading/tests.py | 49 +++--
lms/djangoapps/open_ended_grading/views.py | 61 +++---
19 files changed, 539 insertions(+), 450 deletions(-)
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py
index 659590b5b4..0cc69a4a24 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_module.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -10,7 +10,6 @@ from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import Comb
log = logging.getLogger("mitx.courseware")
-
VERSION_TUPLES = (
('1', CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module),
)
@@ -18,6 +17,7 @@ VERSION_TUPLES = (
DEFAULT_VERSION = 1
DEFAULT_VERSION = str(DEFAULT_VERSION)
+
class CombinedOpenEndedModule(XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -60,7 +60,7 @@ class CombinedOpenEndedModule(XModule):
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor,
- instance_state, shared_state, **kwargs)
+ instance_state, shared_state, **kwargs)
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -129,13 +129,15 @@ class CombinedOpenEndedModule(XModule):
version_index = versions.index(self.version)
static_data = {
- 'rewrite_content_links' : self.rewrite_content_links,
+ 'rewrite_content_links': self.rewrite_content_links,
}
self.child_descriptor = descriptors[version_index](self.system)
- self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']), self.system)
+ self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']),
+ self.system)
self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor,
- instance_state = json.dumps(instance_state), metadata = self.metadata, static_data= static_data)
+ instance_state=json.dumps(instance_state), metadata=self.metadata,
+ static_data=static_data)
def get_html(self):
return self.child_module.get_html()
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
index 5c3bfa5b2a..20cedaab75 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
@@ -40,14 +40,15 @@ ACCEPT_FILE_UPLOAD = False
TRUE_DICT = ["True", True, "TRUE", "true"]
HUMAN_TASK_TYPE = {
- 'selfassessment' : "Self Assessment",
- 'openended' : "edX Assessment",
- }
+ 'selfassessment': "Self Assessment",
+ 'openended': "edX Assessment",
+}
#Default value that controls whether or not to skip basic spelling checks in the controller
#Metadata overrides this
SKIP_BASIC_CHECKS = False
+
class CombinedOpenEndedV1Module():
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -83,7 +84,7 @@ class CombinedOpenEndedV1Module():
TEMPLATE_DIR = "combinedopenended"
def __init__(self, system, location, definition, descriptor,
- instance_state=None, shared_state=None, metadata = None, static_data = None, **kwargs):
+ instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -122,7 +123,7 @@ class CombinedOpenEndedV1Module():
self.metadata = metadata
self.display_name = metadata.get('display_name', "Open Ended")
- self.rewrite_content_links = static_data.get('rewrite_content_links',"")
+ self.rewrite_content_links = static_data.get('rewrite_content_links', "")
# Load instance state
@@ -152,10 +153,10 @@ class CombinedOpenEndedV1Module():
self.skip_basic_checks = self.metadata.get('skip_spelling_checks', SKIP_BASIC_CHECKS)
display_due_date_string = self.metadata.get('due', None)
-
+
grace_period_string = self.metadata.get('graceperiod', None)
try:
- self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
+ self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
except:
log.error("Error parsing due date information in location {0}".format(location))
raise
@@ -177,10 +178,10 @@ class CombinedOpenEndedV1Module():
'rubric': definition['rubric'],
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
- 'close_date' : self.timeinfo.close_date,
- 's3_interface' : self.system.s3_interface,
- 'skip_basic_checks' : self.skip_basic_checks,
- }
+ 'close_date': self.timeinfo.close_date,
+ 's3_interface': self.system.s3_interface,
+ 'skip_basic_checks': self.skip_basic_checks,
+ }
self.task_xml = definition['task_xml']
self.location = location
@@ -223,15 +224,15 @@ class CombinedOpenEndedV1Module():
child_modules = {
'openended': open_ended_module.OpenEndedModule,
'selfassessment': self_assessment_module.SelfAssessmentModule,
- }
+ }
child_descriptors = {
'openended': open_ended_module.OpenEndedDescriptor,
'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
- }
+ }
children = {
'modules': child_modules,
'descriptors': child_descriptors,
- }
+ }
return children
def setup_next_task(self, reset=False):
@@ -267,7 +268,8 @@ class CombinedOpenEndedV1Module():
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0:
self.current_task = child_task_module(self.system, self.location,
- self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
+ self.current_task_parsed_xml, self.current_task_descriptor,
+ self.static_data)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
elif current_task_state is None and self.current_task_number > 0:
@@ -280,18 +282,20 @@ class CombinedOpenEndedV1Module():
'attempts': 0,
'created': True,
'history': [{'answer': last_response}],
- })
+ })
self.current_task = child_task_module(self.system, self.location,
- self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
- instance_state=current_task_state)
+ self.current_task_parsed_xml, self.current_task_descriptor,
+ self.static_data,
+ instance_state=current_task_state)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
else:
if self.current_task_number > 0 and not reset:
current_task_state = self.overwrite_state(current_task_state)
self.current_task = child_task_module(self.system, self.location,
- self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
- instance_state=current_task_state)
+ self.current_task_parsed_xml, self.current_task_descriptor,
+ self.static_data,
+ instance_state=current_task_state)
return True
@@ -307,8 +311,8 @@ class CombinedOpenEndedV1Module():
last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number)
- if(current_response_data['min_score_to_attempt'] > last_response_data['score']
- or current_response_data['max_score_to_attempt'] < last_response_data['score']):
+ if (current_response_data['min_score_to_attempt'] > last_response_data['score']
+ or current_response_data['max_score_to_attempt'] < last_response_data['score']):
self.state = self.DONE
self.allow_reset = True
@@ -334,8 +338,8 @@ class CombinedOpenEndedV1Module():
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
'location': self.location,
- 'legend_list' : LEGEND_LIST,
- }
+ 'legend_list': LEGEND_LIST,
+ }
return context
@@ -404,7 +408,7 @@ class CombinedOpenEndedV1Module():
task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)
task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,
- self.static_data, instance_state=task_state)
+ self.static_data, instance_state=task_state)
last_response = task.latest_answer()
last_score = task.latest_score()
last_post_assessment = task.latest_post_assessment(self.system)
@@ -426,10 +430,10 @@ class CombinedOpenEndedV1Module():
rubric_scores = rubric_data['rubric_scores']
grader_types = rubric_data['grader_types']
feedback_items = rubric_data['feedback_items']
- feedback_dicts = rubric_data['feedback_dicts']
+ feedback_dicts = rubric_data['feedback_dicts']
grader_ids = rubric_data['grader_ids']
- submission_ids = rubric_data['submission_ids']
- elif task_type== "selfassessment":
+ submission_ids = rubric_data['submission_ids']
+ elif task_type == "selfassessment":
rubric_scores = last_post_assessment
grader_types = ['SA']
feedback_items = ['']
@@ -446,7 +450,7 @@ class CombinedOpenEndedV1Module():
human_state = task.HUMAN_NAMES[state]
else:
human_state = state
- if len(grader_types)>0:
+ if len(grader_types) > 0:
grader_type = grader_types[0]
else:
grader_type = "IN"
@@ -468,15 +472,15 @@ class CombinedOpenEndedV1Module():
'correct': last_correctness,
'min_score_to_attempt': min_score_to_attempt,
'max_score_to_attempt': max_score_to_attempt,
- 'rubric_scores' : rubric_scores,
- 'grader_types' : grader_types,
- 'feedback_items' : feedback_items,
- 'grader_type' : grader_type,
- 'human_grader_type' : human_grader_name,
- 'feedback_dicts' : feedback_dicts,
- 'grader_ids' : grader_ids,
- 'submission_ids' : submission_ids,
- }
+ 'rubric_scores': rubric_scores,
+ 'grader_types': grader_types,
+ 'feedback_items': feedback_items,
+ 'grader_type': grader_type,
+ 'human_grader_type': human_grader_name,
+ 'feedback_dicts': feedback_dicts,
+ 'grader_ids': grader_ids,
+ 'submission_ids': submission_ids,
+ }
return last_response_dict
def update_task_states(self):
@@ -519,20 +523,27 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
all_responses = []
- loop_up_to_task = self.current_task_number+1
- for i in xrange(0,loop_up_to_task):
+ loop_up_to_task = self.current_task_number + 1
+ for i in xrange(0, loop_up_to_task):
all_responses.append(self.get_last_response(i))
- rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['rubric_scores'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
- grader_types = [all_responses[i]['grader_types'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['grader_types'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
- feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['feedback_items'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
- rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores,
- grader_types, feedback_items)
+ rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if
+ len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][
+ 0] in HUMAN_GRADER_TYPE.keys()]
+ grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if
+ len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][
+ 0] in HUMAN_GRADER_TYPE.keys()]
+ feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if
+ len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][
+ 0] in HUMAN_GRADER_TYPE.keys()]
+ rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']),
+ rubric_scores,
+ grader_types, feedback_items)
response_dict = all_responses[-1]
context = {
'results': rubric_html,
- 'task_name' : 'Scored Rubric',
- 'class_name' : 'combined-rubric-container'
+ 'task_name': 'Scored Rubric',
+ 'class_name': 'combined-rubric-container'
}
html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
@@ -544,8 +555,8 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
context = {
- 'legend_list' : LEGEND_LIST,
- }
+ 'legend_list': LEGEND_LIST,
+ }
html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
@@ -556,15 +567,16 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
self.update_task_states()
- loop_up_to_task = self.current_task_number+1
- all_responses =[]
- for i in xrange(0,loop_up_to_task):
+ loop_up_to_task = self.current_task_number + 1
+ all_responses = []
+ for i in xrange(0, loop_up_to_task):
all_responses.append(self.get_last_response(i))
context_list = []
for ri in all_responses:
- for i in xrange(0,len(ri['rubric_scores'])):
- feedback = ri['feedback_dicts'][i].get('feedback','')
- rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i])
+ for i in xrange(0, len(ri['rubric_scores'])):
+ feedback = ri['feedback_dicts'][i].get('feedback', '')
+ rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']),
+ ri['rubric_scores'][i])
if rubric_data['success']:
rubric_html = rubric_data['html']
else:
@@ -572,23 +584,23 @@ class CombinedOpenEndedV1Module():
context = {
'rubric_html': rubric_html,
'grader_type': ri['grader_type'],
- 'feedback' : feedback,
- 'grader_id' : ri['grader_ids'][i],
- 'submission_id' : ri['submission_ids'][i],
+ 'feedback': feedback,
+ 'grader_id': ri['grader_ids'][i],
+ 'submission_id': ri['submission_ids'][i],
}
context_list.append(context)
feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), {
- 'context_list' : context_list,
- 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
- 'human_grader_types' : HUMAN_GRADER_TYPE,
+ 'context_list': context_list,
+ 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+ 'human_grader_types': HUMAN_GRADER_TYPE,
'rows': 50,
'cols': 50,
})
context = {
'results': feedback_table,
- 'task_name' : "Feedback",
- 'class_name' : "result-container",
- }
+ 'task_name': "Feedback",
+ 'class_name': "result-container",
+ }
html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
@@ -617,8 +629,8 @@ class CombinedOpenEndedV1Module():
'reset': self.reset,
'get_results': self.get_results,
'get_combined_rubric': self.get_rubric,
- 'get_status' : self.get_status_ajax,
- 'get_legend' : self.get_legend,
+ 'get_status': self.get_status_ajax,
+ 'get_legend': self.get_legend,
}
if dispatch not in handlers:
@@ -681,7 +693,7 @@ class CombinedOpenEndedV1Module():
'task_states': self.task_states,
'attempts': self.attempts,
'ready_to_reset': self.allow_reset,
- }
+ }
return json.dumps(state)
@@ -699,11 +711,12 @@ class CombinedOpenEndedV1Module():
context = {
'status_list': status,
- 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
- 'legend_list' : LEGEND_LIST,
- 'render_via_ajax' : render_via_ajax,
+ 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+ 'legend_list': LEGEND_LIST,
+ 'render_via_ajax': render_via_ajax,
}
- status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context)
+ status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR),
+ context)
return status_html
@@ -736,7 +749,7 @@ class CombinedOpenEndedV1Module():
score_dict = {
'score': score,
'total': max_score,
- }
+ }
return score_dict
@@ -793,7 +806,9 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor):
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error
- raise ValueError("Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+ raise ValueError(
+ "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
+ child))
def parse_task(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
index 8d1bd376fb..f4ea7648a1 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
@@ -4,24 +4,26 @@ from lxml import etree
log = logging.getLogger(__name__)
GRADER_TYPE_IMAGE_DICT = {
- 'SA' : '/static/images/self_assessment_icon.png',
- 'PE' : '/static/images/peer_grading_icon.png',
- 'ML' : '/static/images/ml_grading_icon.png',
- 'IN' : '/static/images/peer_grading_icon.png',
- 'BC' : '/static/images/ml_grading_icon.png',
- }
+ 'SA': '/static/images/self_assessment_icon.png',
+ 'PE': '/static/images/peer_grading_icon.png',
+ 'ML': '/static/images/ml_grading_icon.png',
+ 'IN': '/static/images/peer_grading_icon.png',
+ 'BC': '/static/images/ml_grading_icon.png',
+}
HUMAN_GRADER_TYPE = {
- 'SA' : 'Self-Assessment',
- 'PE' : 'Peer-Assessment',
- 'IN' : 'Instructor-Assessment',
- 'ML' : 'AI-Assessment',
- 'BC' : 'AI-Assessment',
- }
+ 'SA': 'Self-Assessment',
+ 'PE': 'Peer-Assessment',
+ 'IN': 'Instructor-Assessment',
+ 'ML': 'AI-Assessment',
+ 'BC': 'AI-Assessment',
+}
DO_NOT_DISPLAY = ['BC', 'IN']
-LEGEND_LIST = [{'name' : HUMAN_GRADER_TYPE[k], 'image' : GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() if k not in DO_NOT_DISPLAY ]
+LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys()
+ if k not in DO_NOT_DISPLAY]
+
class RubricParsingError(Exception):
def __init__(self, msg):
@@ -29,15 +31,14 @@ class RubricParsingError(Exception):
class CombinedOpenEndedRubric(object):
-
TEMPLATE_DIR = "combinedopenended/openended"
- def __init__ (self, system, view_only = False):
+ def __init__(self, system, view_only=False):
self.has_score = False
self.view_only = view_only
self.system = system
- def render_rubric(self, rubric_xml, score_list = None):
+ def render_rubric(self, rubric_xml, score_list=None):
'''
render_rubric: takes in an xml string and outputs the corresponding
html for that xml, given the type of rubric we're generating
@@ -50,11 +51,11 @@ class CombinedOpenEndedRubric(object):
success = False
try:
rubric_categories = self.extract_categories(rubric_xml)
- if score_list and len(score_list)==len(rubric_categories):
- for i in xrange(0,len(rubric_categories)):
+ if score_list and len(score_list) == len(rubric_categories):
+ for i in xrange(0, len(rubric_categories)):
category = rubric_categories[i]
- for j in xrange(0,len(category['options'])):
- if score_list[i]==j:
+ for j in xrange(0, len(category['options'])):
+ if score_list[i] == j:
rubric_categories[i]['options'][j]['selected'] = True
rubric_scores = [cat['score'] for cat in rubric_categories]
max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
@@ -63,19 +64,20 @@ class CombinedOpenEndedRubric(object):
if self.view_only:
rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR)
html = self.system.render_template(rubric_template,
- {'categories': rubric_categories,
- 'has_score': self.has_score,
- 'view_only': self.view_only,
- 'max_score': max_score,
- 'combined_rubric' : False
- })
+ {'categories': rubric_categories,
+ 'has_score': self.has_score,
+ 'view_only': self.view_only,
+ 'max_score': max_score,
+ 'combined_rubric': False
+ })
success = True
except:
#This is a staff_facing_error
- error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(rubric_xml)
+ error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(
+ rubric_xml)
log.exception(error_message)
raise RubricParsingError(error_message)
- return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores}
+ return {'success': success, 'html': html, 'rubric_scores': rubric_scores}
def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
rubric_dict = self.render_rubric(rubric_string)
@@ -83,7 +85,8 @@ class CombinedOpenEndedRubric(object):
rubric_feedback = rubric_dict['html']
if not success:
#This is a staff_facing_error
- error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(rubric_string, location.url())
+ error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(
+ rubric_string, location.url())
log.error(error_message)
raise RubricParsingError(error_message)
@@ -101,7 +104,7 @@ class CombinedOpenEndedRubric(object):
if total != max_score:
#This is a staff_facing_error
error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}. Contact the learning sciences group for assistance.".format(
- max_score, location, total)
+ max_score, location, total)
log.error(error_msg)
raise RubricParsingError(error_msg)
@@ -123,7 +126,9 @@ class CombinedOpenEndedRubric(object):
for category in element:
if category.tag != 'category':
#This is a staff_facing_error
- raise RubricParsingError("[extract_categories] Expected a tag: got {0} instead. Contact the learning sciences group for assistance.".format(category.tag))
+ raise RubricParsingError(
+ "[extract_categories] Expected a tag: got {0} instead. Contact the learning sciences group for assistance.".format(
+ category.tag))
else:
categories.append(self.extract_category(category))
return categories
@@ -150,13 +155,17 @@ class CombinedOpenEndedRubric(object):
# if we are missing the score tag and we are expecting one
elif self.has_score:
#This is a staff_facing_error
- raise RubricParsingError("[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(descriptionxml.text))
+ raise RubricParsingError(
+ "[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(
+ descriptionxml.text))
# parse description
if descriptionxml.tag != 'description':
#This is a staff_facing_error
- raise RubricParsingError("[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(descriptionxml.tag))
+ raise RubricParsingError(
+ "[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(
+ descriptionxml.tag))
description = descriptionxml.text
@@ -167,7 +176,9 @@ class CombinedOpenEndedRubric(object):
for option in optionsxml:
if option.tag != 'option':
#This is a staff_facing_error
- raise RubricParsingError("[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(option.tag))
+ raise RubricParsingError(
+ "[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(
+ option.tag))
else:
pointstr = option.get("points")
if pointstr:
@@ -177,13 +188,16 @@ class CombinedOpenEndedRubric(object):
points = int(pointstr)
except ValueError:
#This is a staff_facing_error
- raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(pointstr))
+ raise RubricParsingError(
+ "[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(
+ pointstr))
elif autonumbering:
# use the generated one if we're in the right mode
points = cur_points
cur_points = cur_points + 1
else:
- raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
+ raise Exception(
+ "[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
selected = score == points
optiontext = option.text
@@ -193,31 +207,32 @@ class CombinedOpenEndedRubric(object):
options = sorted(options, key=lambda option: option['points'])
CombinedOpenEndedRubric.validate_options(options)
- return {'description': description, 'options': options, 'score' : score}
+ return {'description': description, 'options': options, 'score': score}
- def render_combined_rubric(self,rubric_xml,scores,score_types,feedback_types):
- success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores,score_types,feedback_types)
+ def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types):
+ success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types,
+ feedback_types)
rubric_categories = self.extract_categories(rubric_xml)
max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
max_score = max(max_scores)
- for i in xrange(0,len(rubric_categories)):
+ for i in xrange(0, len(rubric_categories)):
category = rubric_categories[i]
- for j in xrange(0,len(category['options'])):
+ for j in xrange(0, len(category['options'])):
rubric_categories[i]['options'][j]['grader_types'] = []
for tuple in score_tuples:
- if tuple[1] == i and tuple[2] ==j:
+ if tuple[1] == i and tuple[2] == j:
for grader_type in tuple[3]:
rubric_categories[i]['options'][j]['grader_types'].append(grader_type)
html = self.system.render_template('{0}/open_ended_combined_rubric.html'.format(self.TEMPLATE_DIR),
- {'categories': rubric_categories,
- 'has_score': True,
- 'view_only': True,
- 'max_score': max_score,
- 'combined_rubric' : True,
- 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
- 'human_grader_types' : HUMAN_GRADER_TYPE,
- })
+ {'categories': rubric_categories,
+ 'has_score': True,
+ 'view_only': True,
+ 'max_score': max_score,
+ 'combined_rubric': True,
+ 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+ 'human_grader_types': HUMAN_GRADER_TYPE,
+ })
return html
@@ -228,14 +243,16 @@ class CombinedOpenEndedRubric(object):
'''
if len(options) == 0:
#This is a staff_facing_error
- raise RubricParsingError("[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
+ raise RubricParsingError(
+ "[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
if len(options) == 1:
return
prev = options[0]['points']
for option in options[1:]:
if prev == option['points']:
#This is a staff_facing_error
- raise RubricParsingError("[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
+ raise RubricParsingError(
+ "[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
else:
prev = option['points']
@@ -250,7 +267,7 @@ class CombinedOpenEndedRubric(object):
@return:
"""
success = False
- if len(scores)==0:
+ if len(scores) == 0:
#This is a dev_facing_error
log.error("Score length is 0 when trying to reformat rubric scores for rendering.")
return success, ""
@@ -264,25 +281,25 @@ class CombinedOpenEndedRubric(object):
score_lists = []
score_type_list = []
feedback_type_list = []
- for i in xrange(0,len(scores)):
+ for i in xrange(0, len(scores)):
score_cont_list = scores[i]
- for j in xrange(0,len(score_cont_list)):
+ for j in xrange(0, len(score_cont_list)):
score_list = score_cont_list[j]
score_lists.append(score_list)
score_type_list.append(score_types[i][j])
feedback_type_list.append(feedback_types[i][j])
score_list_len = len(score_lists[0])
- for i in xrange(0,len(score_lists)):
+ for i in xrange(0, len(score_lists)):
score_list = score_lists[i]
- if len(score_list)!=score_list_len:
+ if len(score_list) != score_list_len:
return success, ""
score_tuples = []
- for i in xrange(0,len(score_lists)):
- for j in xrange(0,len(score_lists[i])):
- tuple = [1,j,score_lists[i][j],[],[]]
- score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples,tuple)
+ for i in xrange(0, len(score_lists)):
+ for j in xrange(0, len(score_lists[i])):
+ tuple = [1, j, score_lists[i][j], [], []]
+ score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, tuple)
score_tuples[tup_ind][0] += 1
score_tuples[tup_ind][3].append(score_type_list[i])
score_tuples[tup_ind][4].append(feedback_type_list[i])
@@ -302,14 +319,14 @@ class CombinedOpenEndedRubric(object):
category = tuple[1]
score = tuple[2]
tup_ind = -1
- for t in xrange(0,len(tuples)):
+ for t in xrange(0, len(tuples)):
if tuples[t][1] == category and tuples[t][2] == score:
tup_ind = t
break
if tup_ind == -1:
- tuples.append([0,category,score,[],[]])
- tup_ind = len(tuples)-1
+ tuples.append([0, category, score, [], []])
+ tup_ind = len(tuples) - 1
return tuples, tup_ind
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py
index 1dd5c57ad4..21715c8e57 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py
@@ -8,6 +8,7 @@ class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
+
def __init__(self, config, system):
config['system'] = system
super(ControllerQueryService, self).__init__(config)
@@ -59,7 +60,7 @@ class ControllerQueryService(GradingService):
def get_flagged_problem_list(self, course_id):
params = {
'course_id': course_id,
- }
+ }
response = self.get(self.flagged_problem_list_url, params)
return response
@@ -70,20 +71,21 @@ class ControllerQueryService(GradingService):
'student_id': student_id,
'submission_id': submission_id,
'action_type': action_type
- }
+ }
response = self.post(self.take_action_on_flags_url, params)
return response
+
def convert_seconds_to_human_readable(seconds):
if seconds < 60:
human_string = "{0} seconds".format(seconds)
elif seconds < 60 * 60:
- human_string = "{0} minutes".format(round(seconds/60,1))
- elif seconds < (24*60*60):
- human_string = "{0} hours".format(round(seconds/(60*60),1))
+ human_string = "{0} minutes".format(round(seconds / 60, 1))
+ elif seconds < (24 * 60 * 60):
+ human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
else:
- human_string = "{0} days".format(round(seconds/(60*60*24),1))
+ human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))
eta_string = "{0}".format(human_string)
return eta_string
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
index 8a4caa1291..0f961794d5 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py
@@ -19,6 +19,7 @@ class GradingService(object):
"""
Interface to staff grading backend.
"""
+
def __init__(self, config):
self.username = config['username']
self.password = config['password']
@@ -34,8 +35,8 @@ class GradingService(object):
Returns the decoded json dict of the response.
"""
response = self.session.post(self.login_url,
- {'username': self.username,
- 'password': self.password, })
+ {'username': self.username,
+ 'password': self.password, })
response.raise_for_status()
@@ -47,7 +48,7 @@ class GradingService(object):
"""
try:
op = lambda: self.session.post(url, data=data,
- allow_redirects=allow_redirects)
+ allow_redirects=allow_redirects)
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
@@ -63,8 +64,8 @@ class GradingService(object):
"""
log.debug(params)
op = lambda: self.session.get(url,
- allow_redirects=allow_redirects,
- params=params)
+ allow_redirects=allow_redirects,
+ params=params)
try:
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
@@ -92,7 +93,7 @@ class GradingService(object):
r = self._login()
if r and not r.get('success'):
log.warning("Couldn't log into staff_grading backend. Response: %s",
- r)
+ r)
# try again
response = operation()
response.raise_for_status()
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
index edae69854f..6956f336a5 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_image_submission.py
@@ -5,6 +5,7 @@ to send them to S3.
try:
from PIL import Image
+
ENABLE_PIL = True
except:
ENABLE_PIL = False
@@ -51,6 +52,7 @@ class ImageProperties(object):
"""
Class to check properties of an image and to validate if they are allowed.
"""
+
def __init__(self, image_data):
"""
Initializes class variables
@@ -92,7 +94,7 @@ class ImageProperties(object):
g = rgb[1]
b = rgb[2]
check_r = (r > 60)
- check_g = (r * 0.4) < g < (r * 0.85)
+ check_g = (r * 0.4) < g < (r * 0.85)
check_b = (r * 0.2) < b < (r * 0.7)
colors_okay = check_r and check_b and check_g
except:
@@ -141,6 +143,7 @@ class URLProperties(object):
Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
links to the peer grading image functionality of the external grading service.
"""
+
def __init__(self, url_string):
self.url_string = url_string
@@ -212,7 +215,7 @@ def run_image_tests(image):
success = image_properties.run_tests()
except:
log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image,"
- "or an issue with the deployment configuration of PIL/Pillow")
+ "or an issue with the deployment configuration of PIL/Pillow")
return success
@@ -252,7 +255,8 @@ def upload_to_s3(file_to_upload, keyname, s3_interface):
return True, public_url
except:
#This is a dev_facing_error
- error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(bucketname.lower())
+ error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(
+ bucketname.lower())
log.error(error_message)
return False, error_message
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
index aa805a5290..b16e7f5313 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
@@ -10,7 +10,7 @@ import logging
from lxml import etree
import capa.xqueue_interface as xqueue_interface
-from xmodule.capa_module import ComplexEncoder
+from xmodule.capa_module import ComplexEncoder
from xmodule.editing_module import EditingDescriptor
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
@@ -104,7 +104,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# response types)
except TypeError, ValueError:
#This is a dev_facing_error
- log.exception("Grader payload from external open ended grading server is not a json object! Object: {0}".format(grader_payload))
+ log.exception(
+ "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
+ grader_payload))
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
@@ -148,7 +150,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
#This is a student_facing_error
- return {'success': False, 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(tag)}
+ return {'success': False,
+ 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
+ tag)}
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
@@ -188,7 +192,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
}
(error, msg) = qinterface.send_to_queue(header=xheader,
- body=json.dumps(contents))
+ body=json.dumps(contents))
#Convert error to a success value
success = True
@@ -222,8 +226,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
str(len(self.history)))
xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'],
- lms_key=queuekey,
- queue_name=self.queue_name)
+ lms_key=queuekey,
+ queue_name=self.queue_name)
contents = self.payload.copy()
@@ -241,7 +245,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# Submit request. When successful, 'msg' is the prior length of the queue
(error, msg) = qinterface.send_to_queue(header=xheader,
- body=json.dumps(contents))
+ body=json.dumps(contents))
# State associated with the queueing request
queuestate = {'key': queuekey,
@@ -300,7 +304,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# We want to display available feedback in a particular order.
# This dictionary specifies which goes first--lower first.
- priorities = { # These go at the start of the feedback
+ priorities = {# These go at the start of the feedback
'spelling': 0,
'grammar': 1,
# needs to be after all the other feedback
@@ -400,7 +404,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
if not response_items['success']:
return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
- {'errors': feedback})
+ {'errors': feedback})
feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), {
'grader_type': response_items['grader_type'],
@@ -437,13 +441,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'valid': False,
'score': 0,
'feedback': '',
- 'rubric_scores' : [[0]],
- 'grader_types' : [''],
- 'feedback_items' : [''],
- 'feedback_dicts' : [{}],
- 'grader_ids' : [0],
- 'submission_ids' : [0],
- }
+ 'rubric_scores': [[0]],
+ 'grader_types': [''],
+ 'feedback_items': [''],
+ 'feedback_dicts': [{}],
+ 'grader_ids': [0],
+ 'submission_ids': [0],
+ }
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
@@ -470,7 +474,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
log.error(error_message)
fail['feedback'] = error_message
return fail
- #This is to support peer grading
+ #This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
rubric_scores = []
@@ -527,12 +531,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'valid': True,
'score': score,
'feedback': feedback,
- 'rubric_scores' : rubric_scores,
- 'grader_types' : grader_types,
- 'feedback_items' : feedback_items,
- 'feedback_dicts' : feedback_dicts,
- 'grader_ids' : grader_ids,
- 'submission_ids' : submission_ids,
+ 'rubric_scores': rubric_scores,
+ 'grader_types': grader_types,
+ 'feedback_items': feedback_items,
+ 'feedback_dicts': feedback_dicts,
+ 'grader_ids': grader_ids,
+ 'submission_ids': submission_ids,
}
def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
@@ -545,7 +549,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return ""
feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system,
- join_feedback=join_feedback)
+ join_feedback=join_feedback)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
@@ -585,7 +589,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
- return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](get, system)
@@ -679,7 +683,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
correct = ""
previous_answer = self.initial_display
-
context = {
'prompt': self.prompt,
'previous_answer': previous_answer,
@@ -692,7 +695,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'child_type': 'openended',
'correct': correct,
'accept_file_upload': self.accept_file_upload,
- 'eta_message' : eta_string,
+ 'eta_message': eta_string,
}
html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
return html
@@ -723,7 +726,9 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
- raise ValueError("Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+ raise ValueError(
+ "Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
+ child))
def parse(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
index 31d26fd3c3..91dec5943d 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
@@ -74,7 +74,7 @@ class OpenEndedChild(object):
'done': 'Done',
}
- def __init__(self, system, location, definition, descriptor, static_data,
+ def __init__(self, system, location, definition, descriptor, static_data,
instance_state=None, shared_state=None, **kwargs):
# Load instance state
if instance_state is not None:
@@ -108,15 +108,14 @@ class OpenEndedChild(object):
self._max_score = static_data['max_score']
if system.open_ended_grading_interface:
self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
- self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,system)
+ self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,
+ system)
else:
self.peer_gs = MockPeerGradingService()
- self.controller_qs = None
-
-
+ self.controller_qs = None
self.system = system
-
+
self.location_string = location
try:
self.location_string = self.location_string.url()
@@ -152,7 +151,8 @@ class OpenEndedChild(object):
return True, {
'success': False,
#This is a student_facing_error
- 'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(self.attempts, self.max_attempts)
+ 'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(
+ self.attempts, self.max_attempts)
}
else:
return False, {}
@@ -180,8 +180,8 @@ class OpenEndedChild(object):
try:
answer = autolink_html(answer)
cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True,
- host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
- whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
+ host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
+ whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
clean_html = cleaner.clean_html(answer)
clean_html = re.sub(r'
$', '', re.sub(r'^', '', clean_html))
except:
@@ -282,7 +282,7 @@ class OpenEndedChild(object):
"""
#This is a dev_facing_error
log.warning("Open ended child state out sync. state: %r, get: %r. %s",
- self.state, get, msg)
+ self.state, get, msg)
#This is a student_facing_error
return {'success': False,
'error': 'The problem state got out-of-sync. Please try reloading the page.'}
@@ -308,7 +308,7 @@ class OpenEndedChild(object):
@return: Boolean correct.
"""
correct = False
- if(isinstance(score, (int, long, float, complex))):
+ if (isinstance(score, (int, long, float, complex))):
score_ratio = int(score) / float(self.max_score())
correct = (score_ratio >= 0.66)
return correct
@@ -342,7 +342,8 @@ class OpenEndedChild(object):
try:
image_data.seek(0)
- success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, self.s3_interface)
+ success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key,
+ self.s3_interface)
except:
log.exception("Could not upload image to S3.")
@@ -404,9 +405,9 @@ class OpenEndedChild(object):
#In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely
#a config issue (development vs deployment). For now, just treat this as a "success"
log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, "
- "but the image was not able to be uploaded to S3. This could indicate a config"
- "issue with this deployment, but it could also indicate a problem with S3 or with the"
- "student image itself.")
+ "but the image was not able to be uploaded to S3. This could indicate a config"
+ "issue with this deployment, but it could also indicate a problem with S3 or with the"
+ "student image itself.")
overall_success = True
elif not has_file_to_upload:
#If there is no file to upload, probably the student has embedded the link in the answer text
@@ -445,7 +446,7 @@ class OpenEndedChild(object):
response = {}
#This is a student_facing_error
error_string = ("You need to peer grade {0} more in order to make another submission. "
- "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
+ "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
try:
response = self.peer_gs.get_data_for_location(self.location_string, student_id)
count_graded = response['count_graded']
@@ -454,16 +455,18 @@ class OpenEndedChild(object):
success = True
except:
#This is a dev_facing_error
- log.error("Could not contact external open ended graders for location {0} and student {1}".format(self.location_string,student_id))
+ log.error("Could not contact external open ended graders for location {0} and student {1}".format(
+ self.location_string, student_id))
#This is a student_facing_error
error_message = "Could not contact the graders. Please notify course staff."
return success, allowed_to_submit, error_message
- if count_graded>=count_required:
+ if count_graded >= count_required:
return success, allowed_to_submit, ""
else:
allowed_to_submit = False
#This is a student_facing_error
- error_message = error_string.format(count_required-count_graded, count_graded, count_required, student_sub_count)
+ error_message = error_string.format(count_required - count_graded, count_graded, count_required,
+ student_sub_count)
return success, allowed_to_submit, error_message
def get_eta(self):
@@ -478,7 +481,7 @@ class OpenEndedChild(object):
success = response['success']
if isinstance(success, basestring):
- success = (success.lower()=="true")
+ success = (success.lower() == "true")
if success:
eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
index 42c54f0463..5daf1b83b5 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
@@ -14,6 +14,7 @@ class PeerGradingService(GradingService):
"""
Interface with the grading controller for peer grading
"""
+
def __init__(self, config, system):
config['system'] = system
super(PeerGradingService, self).__init__(config)
@@ -36,10 +37,11 @@ class PeerGradingService(GradingService):
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url,
- {'location': problem_location, 'grader_id': grader_id})
+ {'location': problem_location, 'grader_id': grader_id})
return self.try_to_decode(self._render_rubric(response))
- def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
+ def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
+ submission_flagged):
data = {'grader_id': grader_id,
'submission_id': submission_id,
'score': score,
@@ -89,6 +91,7 @@ class PeerGradingService(GradingService):
pass
return text
+
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
@@ -122,7 +125,7 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_calibration_essay(self, problem_location, grader_id,
- calibration_essay_id, submission_key, score,
+ calibration_essay_id, submission_key, score,
feedback, rubric_scores):
return {'success': True, 'actual_score': 2}
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
index f4be426667..a25d81eeae 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
@@ -95,7 +95,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
- return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](get, system)
@@ -224,7 +224,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
try:
score = int(get['assessment'])
score_list = get.getlist('score_list[]')
- for i in xrange(0,len(score_list)):
+ for i in xrange(0, len(score_list)):
score_list[i] = int(score_list[i])
except ValueError:
#This is a dev_facing_error
@@ -268,7 +268,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'allow_reset': self._allow_reset()}
def latest_post_assessment(self, system):
- latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
+ latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
try:
rubric_scores = json.loads(latest_post_assessment)
except:
@@ -305,7 +305,9 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
- raise ValueError("Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+ raise ValueError(
+ "Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
+ child))
def parse(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py
index 1e52dcf070..2ea8ab0db5 100644
--- a/common/lib/xmodule/xmodule/peer_grading_module.py
+++ b/common/lib/xmodule/xmodule/peer_grading_module.py
@@ -5,7 +5,7 @@ from lxml import etree
from datetime import datetime
from pkg_resources import resource_string
-from .capa_module import ComplexEncoder
+from .capa_module import ComplexEncoder
from .editing_module import EditingDescriptor
from .stringify import stringify_children
from .x_module import XModule
@@ -34,7 +34,7 @@ class PeerGradingModule(XModule):
resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
- ]}
+ ]}
js_module_name = "PeerGrading"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
@@ -42,7 +42,7 @@ class PeerGradingModule(XModule):
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor,
- instance_state, shared_state, **kwargs)
+ instance_state, shared_state, **kwargs)
# Load instance state
if instance_state is not None:
@@ -53,12 +53,11 @@ class PeerGradingModule(XModule):
#We need to set the location here so the child modules can use it
system.set('location', location)
self.system = system
- if(self.system.open_ended_grading_interface):
+ if (self.system.open_ended_grading_interface):
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system)
else:
self.peer_gs = MockPeerGradingService()
-
self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION)
if isinstance(self.use_for_single_location, basestring):
self.use_for_single_location = (self.use_for_single_location in TRUE_DICT)
@@ -83,14 +82,13 @@ class PeerGradingModule(XModule):
grace_period_string = self.metadata.get('graceperiod', None)
try:
- self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
+ self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
except:
log.error("Error parsing due date information in location {0}".format(location))
raise
self.display_due_date = self.timeinfo.display_due_date
-
self.ajax_url = self.system.ajax_url
if not self.ajax_url.endswith("/"):
self.ajax_url = self.ajax_url + "/"
@@ -148,13 +146,13 @@ class PeerGradingModule(XModule):
'save_grade': self.save_grade,
'save_calibration_essay': self.save_calibration_essay,
'problem': self.peer_grading_problem,
- }
+ }
if dispatch not in handlers:
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
- return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
d = handlers[dispatch](get)
@@ -191,9 +189,10 @@ class PeerGradingModule(XModule):
except:
success, response = self.query_data_for_location()
if not success:
- log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format(
- self.system.location.url(), self.system.anonymous_student_id
- ))
+ log.exception(
+ "No instance data found and could not get data from controller for loc {0} student {1}".format(
+ self.system.location.url(), self.system.anonymous_student_id
+ ))
return None
count_graded = response['count_graded']
count_required = response['count_required']
@@ -204,7 +203,7 @@ class PeerGradingModule(XModule):
score_dict = {
'score': int(count_graded >= count_required),
'total': self.max_grade,
- }
+ }
return score_dict
@@ -253,7 +252,7 @@ class PeerGradingModule(XModule):
.format(self.peer_gs.url, location, grader_id))
#This is a student_facing_error
return {'success': False,
- 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
def save_grade(self, get):
"""
@@ -271,7 +270,8 @@ class PeerGradingModule(XModule):
error: if there was an error in the submission, this is the error message
"""
- required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged'])
+ required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]',
+ 'submission_flagged'])
success, message = self._check_required(get, required)
if not success:
return self._err_response(message)
@@ -287,14 +287,14 @@ class PeerGradingModule(XModule):
try:
response = self.peer_gs.save_grade(location, grader_id, submission_id,
- score, feedback, submission_key, rubric_scores, submission_flagged)
+ score, feedback, submission_key, rubric_scores, submission_flagged)
return response
except GradingServiceError:
#This is a dev_facing_error
log.exception("""Error saving grade to open ended grading service. server url: {0}, location: {1}, submission_id:{2},
submission_key: {3}, score: {4}"""
.format(self.peer_gs.url,
- location, submission_id, submission_key, score)
+ location, submission_id, submission_key, score)
)
#This is a student_facing_error
return {
@@ -382,7 +382,7 @@ class PeerGradingModule(XModule):
.format(self.peer_gs.url, location))
#This is a student_facing_error
return {'success': False,
- 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
#This is a dev_facing_error
@@ -390,7 +390,7 @@ class PeerGradingModule(XModule):
.format(rubric))
#This is a student_facing_error
return {'success': False,
- 'error': 'Error displaying submission. Please notify course staff.'}
+ 'error': 'Error displaying submission. Please notify course staff.'}
def save_calibration_essay(self, get):
@@ -426,11 +426,13 @@ class PeerGradingModule(XModule):
try:
response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id,
- submission_key, score, feedback, rubric_scores)
+ submission_key, score, feedback, rubric_scores)
return response
except GradingServiceError:
#This is a dev_facing_error
- log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
+ log.exception(
+ "Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(
+ location, submission_id, submission_key, grader_id))
#This is a student_facing_error
return self._err_response('There was an error saving your score. Please notify course staff.')
@@ -440,7 +442,7 @@ class PeerGradingModule(XModule):
'''
html = self.system.render_template('peer_grading/peer_grading_closed.html', {
'use_for_single_location': self.use_for_single_location
- })
+ })
return html
@@ -503,12 +505,11 @@ class PeerGradingModule(XModule):
problem['closed'] = True
else:
problem['closed'] = False
- else:
- # if we can't find the due date, assume that it doesn't have one
+ else:
+ # if we can't find the due date, assume that it doesn't have one
problem['due'] = None
problem['closed'] = False
-
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading.html', {
'course_id': self.system.course_id,
@@ -519,7 +520,7 @@ class PeerGradingModule(XModule):
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location,
- })
+ })
return html
@@ -531,7 +532,8 @@ class PeerGradingModule(XModule):
if not self.use_for_single_location:
#This is an error case, because it must be set to use a single location to be called without get parameters
#This is a dev_facing_error
- log.error("Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
+ log.error(
+ "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
return {'html': "", 'success': False}
problem_location = self.link_to_location
@@ -547,7 +549,7 @@ class PeerGradingModule(XModule):
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location,
- })
+ })
return {'html': html, 'success': True}
@@ -560,7 +562,7 @@ class PeerGradingModule(XModule):
state = {
'student_data_for_location': self.student_data_for_location,
- }
+ }
return json.dumps(state)
@@ -596,7 +598,9 @@ class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor):
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error
- raise ValueError("Peer grading definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+ raise ValueError(
+ "Peer grading definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
+ child))
def parse_task(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
index a524ac2fd9..8a14e03ded 100644
--- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -14,6 +14,7 @@ from datetime import datetime
from . import test_system
import test_util_open_ended
+
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
@@ -39,41 +40,37 @@ class OpenEndedChildTest(unittest.TestCase):
max_score = 1
static_data = {
- 'max_attempts': 20,
- 'prompt': prompt,
- 'rubric': rubric,
- 'max_score': max_score,
- 'display_name': 'Name',
- 'accept_file_upload': False,
- 'close_date': None,
- 's3_interface' : "",
- 'open_ended_grading_interface' : {},
- 'skip_basic_checks' : False,
- }
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name',
+ 'accept_file_upload': False,
+ 'close_date': None,
+ 's3_interface': "",
+ 'open_ended_grading_interface': {},
+ 'skip_basic_checks': False,
+ }
definition = Mock()
descriptor = Mock()
def setUp(self):
self.test_system = test_system()
self.openendedchild = OpenEndedChild(self.test_system, self.location,
- self.definition, self.descriptor, self.static_data, self.metadata)
-
+ self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, "")
-
def test_latest_score_empty(self):
answer = self.openendedchild.latest_score()
self.assertEqual(answer, None)
-
def test_latest_post_assessment_empty(self):
answer = self.openendedchild.latest_post_assessment(self.test_system)
self.assertEqual(answer, "")
-
def test_new_history_entry(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
@@ -99,7 +96,6 @@ class OpenEndedChildTest(unittest.TestCase):
score = self.openendedchild.latest_score()
self.assertEqual(score, 4)
-
def test_record_latest_post_assessment(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
@@ -107,7 +103,7 @@ class OpenEndedChildTest(unittest.TestCase):
post_assessment = "Post assessment"
self.openendedchild.record_latest_post_assessment(post_assessment)
self.assertEqual(post_assessment,
- self.openendedchild.latest_post_assessment(self.test_system))
+ self.openendedchild.latest_post_assessment(self.test_system))
def test_get_score(self):
new_answer = "New Answer"
@@ -124,24 +120,22 @@ class OpenEndedChildTest(unittest.TestCase):
self.assertEqual(score['score'], new_score)
self.assertEqual(score['total'], self.static_data['max_score'])
-
def test_reset(self):
self.openendedchild.reset(self.test_system)
state = json.loads(self.openendedchild.get_instance_state())
self.assertEqual(state['state'], OpenEndedChild.INITIAL)
-
def test_is_last_response_correct(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(self.static_data['max_score'])
self.assertEqual(self.openendedchild.is_last_response_correct(),
- 'correct')
+ 'correct')
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(0)
self.assertEqual(self.openendedchild.is_last_response_correct(),
- 'incorrect')
+ 'incorrect')
class OpenEndedModuleTest(unittest.TestCase):
@@ -159,18 +153,18 @@ class OpenEndedModuleTest(unittest.TestCase):
max_score = 4
static_data = {
- 'max_attempts': 20,
- 'prompt': prompt,
- 'rubric': rubric,
- 'max_score': max_score,
- 'display_name': 'Name',
- 'accept_file_upload': False,
- 'rewrite_content_links' : "",
- 'close_date': None,
- 's3_interface' : test_util_open_ended.S3_INTERFACE,
- 'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
- 'skip_basic_checks' : False,
- }
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name',
+ 'accept_file_upload': False,
+ 'rewrite_content_links': "",
+ 'close_date': None,
+ 's3_interface': test_util_open_ended.S3_INTERFACE,
+ 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+ 'skip_basic_checks': False,
+ }
oeparam = etree.XML('''
@@ -188,25 +182,26 @@ class OpenEndedModuleTest(unittest.TestCase):
self.test_system.location = self.location
self.mock_xqueue = MagicMock()
self.mock_xqueue.send_to_queue.return_value = (None, "Message")
- self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 1}
+ self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue',
+ 'waittime': 1}
self.openendedmodule = OpenEndedModule(self.test_system, self.location,
- self.definition, self.descriptor, self.static_data, self.metadata)
+ self.definition, self.descriptor, self.static_data, self.metadata)
def test_message_post(self):
get = {'feedback': 'feedback text',
- 'submission_id': '1',
- 'grader_id': '1',
- 'score': 3}
+ 'submission_id': '1',
+ 'grader_id': '1',
+ 'score': 3}
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
student_info = {'anonymous_student_id': self.test_system.anonymous_student_id,
- 'submission_time': qtime}
+ 'submission_time': qtime}
contents = {
- 'feedback': get['feedback'],
- 'submission_id': int(get['submission_id']),
- 'grader_id': int(get['grader_id']),
- 'score': get['score'],
- 'student_info': json.dumps(student_info)
- }
+ 'feedback': get['feedback'],
+ 'submission_id': int(get['submission_id']),
+ 'grader_id': int(get['grader_id']),
+ 'score': get['score'],
+ 'student_info': json.dumps(student_info)
+ }
result = self.openendedmodule.message_post(get, self.test_system)
self.assertTrue(result['success'])
@@ -220,13 +215,13 @@ class OpenEndedModuleTest(unittest.TestCase):
submission = "This is a student submission"
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
student_info = {'anonymous_student_id': self.test_system.anonymous_student_id,
- 'submission_time': qtime}
+ 'submission_time': qtime}
contents = self.openendedmodule.payload.copy()
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
'max_score': self.max_score
- })
+ })
result = self.openendedmodule.send_to_grader(submission, self.test_system)
self.assertTrue(result)
self.mock_xqueue.send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY)
@@ -234,36 +229,36 @@ class OpenEndedModuleTest(unittest.TestCase):
def update_score_single(self):
self.openendedmodule.new_history_entry("New Entry")
score_msg = {
- 'correct': True,
- 'score': 4,
- 'msg': 'Grader Message',
- 'feedback': "Grader Feedback"
- }
+ 'correct': True,
+ 'score': 4,
+ 'msg': 'Grader Message',
+ 'feedback': "Grader Feedback"
+ }
get = {'queuekey': "abcd",
- 'xqueue_body': score_msg}
+ 'xqueue_body': score_msg}
self.openendedmodule.update_score(get, self.test_system)
def update_score_single(self):
self.openendedmodule.new_history_entry("New Entry")
feedback = {
- "success": True,
- "feedback": "Grader Feedback"
- }
+ "success": True,
+ "feedback": "Grader Feedback"
+ }
score_msg = {
- 'correct': True,
- 'score': 4,
- 'msg': 'Grader Message',
- 'feedback': json.dumps(feedback),
- 'grader_type': 'IN',
- 'grader_id': '1',
- 'submission_id': '1',
- 'success': True,
- 'rubric_scores': [0],
- 'rubric_scores_complete': True,
- 'rubric_xml': etree.tostring(self.rubric)
- }
+ 'correct': True,
+ 'score': 4,
+ 'msg': 'Grader Message',
+ 'feedback': json.dumps(feedback),
+ 'grader_type': 'IN',
+ 'grader_id': '1',
+ 'submission_id': '1',
+ 'success': True,
+ 'rubric_scores': [0],
+ 'rubric_scores_complete': True,
+ 'rubric_xml': etree.tostring(self.rubric)
+ }
get = {'queuekey': "abcd",
- 'xqueue_body': json.dumps(score_msg)}
+ 'xqueue_body': json.dumps(score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def test_latest_post_assessment(self):
@@ -296,18 +291,18 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
metadata = {'attempts': '10', 'max_score': max_score}
static_data = {
- 'max_attempts': 20,
- 'prompt': prompt,
- 'rubric': rubric,
- 'max_score': max_score,
- 'display_name': 'Name',
- 'accept_file_upload' : False,
- 'rewrite_content_links' : "",
- 'close_date' : "",
- 's3_interface' : test_util_open_ended.S3_INTERFACE,
- 'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
- 'skip_basic_checks' : False,
- }
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name',
+ 'accept_file_upload': False,
+ 'rewrite_content_links': "",
+ 'close_date': "",
+ 's3_interface': test_util_open_ended.S3_INTERFACE,
+ 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+ 'skip_basic_checks': False,
+ }
oeparam = etree.XML('''
@@ -329,23 +324,23 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
'''
task_xml2 = '''
-
- Enter essay here.
- This is the answer.
- {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
-
- '''
+
+ Enter essay here.
+ This is the answer.
+ {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}
+
+ '''
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
descriptor = Mock()
def setUp(self):
self.test_system = test_system()
- self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
- self.location,
- self.definition,
- self.descriptor,
- static_data = self.static_data,
- metadata=self.metadata)
+ self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
+ self.location,
+ self.definition,
+ self.descriptor,
+ static_data=self.static_data,
+ metadata=self.metadata)
def test_get_tag_name(self):
name = self.combinedoe.get_tag_name("Tag")
diff --git a/common/lib/xmodule/xmodule/tests/test_self_assessment.py b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
index 362b73df67..a7f2a9fdfe 100644
--- a/common/lib/xmodule/xmodule/tests/test_self_assessment.py
+++ b/common/lib/xmodule/xmodule/tests/test_self_assessment.py
@@ -10,8 +10,8 @@ from . import test_system
import test_util_open_ended
-class SelfAssessmentTest(unittest.TestCase):
+class SelfAssessmentTest(unittest.TestCase):
rubric = '''
Response Quality
@@ -24,7 +24,7 @@ class SelfAssessmentTest(unittest.TestCase):
'prompt': prompt,
'submitmessage': 'Shall we submit now?',
'hintprompt': 'Consider this...',
- }
+ }
location = Location(["i4x", "edX", "sa_test", "selfassessment",
"SampleQuestion"])
@@ -41,22 +41,22 @@ class SelfAssessmentTest(unittest.TestCase):
'attempts': 2})
static_data = {
- 'max_attempts': 10,
- 'rubric': etree.XML(self.rubric),
- 'prompt': self.prompt,
- 'max_score': 1,
- 'display_name': "Name",
- 'accept_file_upload': False,
- 'close_date': None,
- 's3_interface' : test_util_open_ended.S3_INTERFACE,
- 'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
- 'skip_basic_checks' : False,
- }
+ 'max_attempts': 10,
+ 'rubric': etree.XML(self.rubric),
+ 'prompt': self.prompt,
+ 'max_score': 1,
+ 'display_name': "Name",
+ 'accept_file_upload': False,
+ 'close_date': None,
+ 's3_interface': test_util_open_ended.S3_INTERFACE,
+ 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+ 'skip_basic_checks': False,
+ }
self.module = SelfAssessmentModule(test_system(), self.location,
- self.definition, self.descriptor,
- static_data,
- state, metadata=self.metadata)
+ self.definition, self.descriptor,
+ static_data,
+ state, metadata=self.metadata)
def test_get_html(self):
html = self.module.get_html(self.module.system)
@@ -64,14 +64,15 @@ class SelfAssessmentTest(unittest.TestCase):
def test_self_assessment_flow(self):
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
+
def get_fake_item(name):
return responses[name]
- def get_data_for_location(self,location,student):
+ def get_data_for_location(self, location, student):
return {
- 'count_graded' : 0,
- 'count_required' : 0,
- 'student_sub_count': 0,
+ 'count_graded': 0,
+ 'count_required': 0,
+ 'student_sub_count': 0,
}
mock_query_dict = MagicMock()
@@ -82,20 +83,19 @@ class SelfAssessmentTest(unittest.TestCase):
self.assertEqual(self.module.get_score()['score'], 0)
- self.module.save_answer({'student_answer': "I am an answer"},
+ self.module.save_answer({'student_answer': "I am an answer"},
self.module.system)
self.assertEqual(self.module.state, self.module.ASSESSING)
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.state, self.module.DONE)
-
d = self.module.reset({})
self.assertTrue(d['success'])
self.assertEqual(self.module.state, self.module.INITIAL)
# if we now assess as right, skip the REQUEST_HINT state
- self.module.save_answer({'student_answer': 'answer 4'},
+ self.module.save_answer({'student_answer': 'answer 4'},
self.module.system)
responses['assessment'] = '1'
self.module.save_assessment(mock_query_dict, self.module.system)
diff --git a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py
index 8d1fcd30ce..db580f1e0e 100644
--- a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py
@@ -1,14 +1,14 @@
OPEN_ENDED_GRADING_INTERFACE = {
- 'url' : 'http://127.0.0.1:3033/',
- 'username' : 'incorrect',
- 'password' : 'incorrect',
- 'staff_grading' : 'staff_grading',
- 'peer_grading' : 'peer_grading',
- 'grading_controller' : 'grading_controller'
+ 'url': 'http://127.0.0.1:3033/',
+ 'username': 'incorrect',
+ 'password': 'incorrect',
+ 'staff_grading': 'staff_grading',
+ 'peer_grading': 'peer_grading',
+ 'grading_controller': 'grading_controller'
}
S3_INTERFACE = {
- 'aws_access_key' : "",
- 'aws_secret_key' : "",
- "aws_bucket_name" : "",
+ 'aws_access_key': "",
+ 'aws_secret_key': "",
+ "aws_bucket_name": "",
}
\ No newline at end of file
diff --git a/lms/djangoapps/open_ended_grading/open_ended_notifications.py b/lms/djangoapps/open_ended_grading/open_ended_notifications.py
index b4ca20079f..d67e2816d5 100644
--- a/lms/djangoapps/open_ended_grading/open_ended_notifications.py
+++ b/lms/djangoapps/open_ended_grading/open_ended_notifications.py
@@ -22,7 +22,7 @@ NOTIFICATION_TYPES = (
('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
- )
+)
def staff_grading_notifications(course, user):
@@ -46,7 +46,9 @@ def staff_grading_notifications(course, user):
#Non catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
- log.info("Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id, student_id))
+ log.info(
+ "Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
+ student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
@@ -80,7 +82,9 @@ def peer_grading_notifications(course, user):
#Non catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
- log.info("Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id, student_id))
+ log.info(
+ "Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
+ student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
@@ -105,7 +109,9 @@ def combined_notifications(course, user):
return notification_dict
min_time_to_query = user.last_login
- last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id, modified__gt=min_time_to_query).values('modified').order_by('-modified')
+ last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id,
+ modified__gt=min_time_to_query).values('modified').order_by(
+ '-modified')
last_module_seen_count = last_module_seen.count()
if last_module_seen_count > 0:
@@ -117,7 +123,8 @@ def combined_notifications(course, user):
img_path = ""
try:
- controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff, last_time_viewed)
+ controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff,
+ last_time_viewed)
log.debug(controller_response)
notifications = json.loads(controller_response)
if notifications['success']:
@@ -127,7 +134,9 @@ def combined_notifications(course, user):
#Non catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
- log.exception("Problem with getting notifications from controller query service for course {0} user {1}.".format(course_id, student_id))
+ log.exception(
+ "Problem with getting notifications from controller query service for course {0} user {1}.".format(
+ course_id, student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
@@ -151,7 +160,8 @@ def set_value_in_cache(student_id, course_id, notification_type, value):
def create_key_name(student_id, course_id, notification_type):
- key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id, student=student_id)
+ key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id,
+ student=student_id)
return key_name
diff --git a/lms/djangoapps/open_ended_grading/staff_grading.py b/lms/djangoapps/open_ended_grading/staff_grading.py
index e39b26da56..fad5268294 100644
--- a/lms/djangoapps/open_ended_grading/staff_grading.py
+++ b/lms/djangoapps/open_ended_grading/staff_grading.py
@@ -15,6 +15,7 @@ class StaffGrading(object):
"""
Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
"""
+
def __init__(self, course):
self.course = course
diff --git a/lms/djangoapps/open_ended_grading/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py
index 79b92dffba..91138bf685 100644
--- a/lms/djangoapps/open_ended_grading/staff_grading_service.py
+++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py
@@ -20,10 +20,12 @@ log = logging.getLogger(__name__)
STAFF_ERROR_MESSAGE = 'Could not contact the external grading server. Please contact the development team. If you do not have a point of contact, you can contact Vik at vik@edx.org.'
+
class MockStaffGradingService(object):
"""
A simple mockup of a staff grading service, testing.
"""
+
def __init__(self):
self.cnt = 0
@@ -43,15 +45,18 @@ class MockStaffGradingService(object):
def get_problem_list(self, course_id, grader_id):
self.cnt += 1
return json.dumps({'success': True,
- 'problem_list': [
- json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
- 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}),
- json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
- 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10})
- ]})
+ 'problem_list': [
+ json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
+ 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5,
+ 'min_for_ml': 10}),
+ json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
+ 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5,
+ 'min_for_ml': 10})
+ ]})
- def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
+ def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
+ submission_flagged):
return self.get_next(course_id, 'fake location', grader_id)
@@ -59,6 +64,7 @@ class StaffGradingService(GradingService):
"""
Interface to staff grading backend.
"""
+
def __init__(self, config):
config['system'] = ModuleSystem(None, None, None, render_to_string, None)
super(StaffGradingService, self).__init__(config)
@@ -109,12 +115,13 @@ class StaffGradingService(GradingService):
GradingServiceError: something went wrong with the connection.
"""
response = self.get(self.get_next_url,
- params={'location': location,
- 'grader_id': grader_id})
+ params={'location': location,
+ 'grader_id': grader_id})
return json.dumps(self._render_rubric(response))
- def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
+ def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
+ submission_flagged):
"""
Save a score and feedback for a submission.
@@ -253,14 +260,14 @@ def get_problem_list(request, course_id):
try:
response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user))
return HttpResponse(response,
- mimetype="application/json")
+ mimetype="application/json")
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error from staff grading service in open ended grading. server url: {0}"
- .format(staff_grading_service().url))
+ .format(staff_grading_service().url))
#This is a staff_facing_error
return HttpResponse(json.dumps({'success': False,
- 'error': STAFF_ERROR_MESSAGE}))
+ 'error': STAFF_ERROR_MESSAGE}))
def _get_next(course_id, grader_id, location):
@@ -272,7 +279,7 @@ def _get_next(course_id, grader_id, location):
except GradingServiceError:
#This is a dev facing error
log.exception("Error from staff grading service in open ended grading. server url: {0}"
- .format(staff_grading_service().url))
+ .format(staff_grading_service().url))
#This is a staff_facing_error
return json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE})
@@ -297,7 +304,7 @@ def save_grade(request, course_id):
if request.method != 'POST':
raise Http404
- required = set(['score', 'feedback', 'submission_id', 'location','submission_flagged', 'rubric_scores[]'])
+ required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged', 'rubric_scores[]'])
actual = set(request.POST.keys())
missing = required - actual
if len(missing) > 0:
@@ -307,22 +314,23 @@ def save_grade(request, course_id):
grader_id = unique_id_for_user(request.user)
p = request.POST
-
location = p['location']
- skipped = 'skipped' in p
+ skipped = 'skipped' in p
try:
result_json = staff_grading_service().save_grade(course_id,
- grader_id,
- p['submission_id'],
- p['score'],
- p['feedback'],
- skipped,
- p.getlist('rubric_scores[]'),
- p['submission_flagged'])
+ grader_id,
+ p['submission_id'],
+ p['score'],
+ p['feedback'],
+ skipped,
+ p.getlist('rubric_scores[]'),
+ p['submission_flagged'])
except GradingServiceError:
#This is a dev_facing_error
- log.exception("Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(request, course_id))
+ log.exception(
+ "Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
+ request, course_id))
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
@@ -330,13 +338,16 @@ def save_grade(request, course_id):
result = json.loads(result_json)
except ValueError:
#This is a dev_facing_error
- log.exception("save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(result_json))
+ log.exception(
+ "save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(
+ result_json))
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
if not result.get('success', False):
#This is a dev_facing_error
- log.warning('Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
+ log.warning(
+ 'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
return _err_response(STAFF_ERROR_MESSAGE)
# Ok, save_grade seemed to work. Get the next submission to grade.
diff --git a/lms/djangoapps/open_ended_grading/tests.py b/lms/djangoapps/open_ended_grading/tests.py
index d452883ebb..64123605ce 100644
--- a/lms/djangoapps/open_ended_grading/tests.py
+++ b/lms/djangoapps/open_ended_grading/tests.py
@@ -7,7 +7,7 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open
from django.test import TestCase
from open_ended_grading import staff_grading_service
from xmodule.open_ended_grading_classes import peer_grading_service
-from xmodule import peer_grading_module
+from xmodule import peer_grading_module
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
import logging
+
log = logging.getLogger(__name__)
from django.test.utils import override_settings
from django.http import QueryDict
@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the
backend.
'''
+
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader):
self.course_id = "edX/toy/2012_Fall"
self.toy = modulestore().get_course(self.course_id)
+
def make_instructor(course):
group_name = _course_staff_group_name(course.location)
g = Group.objects.create(name=group_name)
@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the
backend.
'''
+
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
@@ -148,11 +152,12 @@ class TestPeerGradingService(ct.PageLoader):
self.mock_service = peer_grading_service.MockPeerGradingService()
self.system = ModuleSystem(location, None, None, render_to_string, None,
- s3_interface = test_util_open_ended.S3_INTERFACE,
- open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
+ s3_interface=test_util_open_ended.S3_INTERFACE,
+ open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
)
self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system)
- self.peer_module = peer_grading_module.PeerGradingModule(self.system, location, "", self.descriptor)
+ self.peer_module = peer_grading_module.PeerGradingModule(self.system, location, "",
+ self.descriptor)
self.peer_module.peer_gs = self.mock_service
self.logout()
@@ -175,18 +180,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_grade_success(self):
data = {
- 'rubric_scores[]': [0, 0],
- 'location': self.location,
- 'submission_id': 1,
- 'submission_key': 'fake key',
- 'score': 2,
- 'feedback': 'feedback',
- 'submission_flagged': 'false'
- }
+ 'rubric_scores[]': [0, 0],
+ 'location': self.location,
+ 'submission_id': 1,
+ 'submission_key': 'fake key',
+ 'score': 2,
+ 'feedback': 'feedback',
+ 'submission_flagged': 'false'
+ }
qdict = MagicMock()
+
def fake_get_item(key):
return data[key]
+
qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item
qdict.keys = data.keys
@@ -237,18 +244,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_calibration_essay_success(self):
data = {
- 'rubric_scores[]': [0, 0],
- 'location': self.location,
- 'submission_id': 1,
- 'submission_key': 'fake key',
- 'score': 2,
- 'feedback': 'feedback',
- 'submission_flagged': 'false'
- }
+ 'rubric_scores[]': [0, 0],
+ 'location': self.location,
+ 'submission_id': 1,
+ 'submission_key': 'fake key',
+ 'score': 2,
+ 'feedback': 'feedback',
+ 'submission_flagged': 'false'
+ }
qdict = MagicMock()
+
def fake_get_item(key):
return data[key]
+
qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item
qdict.keys = data.keys
diff --git a/lms/djangoapps/open_ended_grading/views.py b/lms/djangoapps/open_ended_grading/views.py
index 55e8088c3f..2e7f429429 100644
--- a/lms/djangoapps/open_ended_grading/views.py
+++ b/lms/djangoapps/open_ended_grading/views.py
@@ -50,22 +50,24 @@ def _reverse_without_slash(url_name, course_id):
ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url
+
DESCRIPTION_DICT = {
- 'Peer Grading': "View all problems that require peer assessment in this particular course.",
- 'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
- 'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
- 'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
- }
+ 'Peer Grading': "View all problems that require peer assessment in this particular course.",
+ 'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
+ 'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
+ 'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
+}
ALERT_DICT = {
- 'Peer Grading': "New submissions to grade",
- 'Staff Grading': "New submissions to grade",
- 'Problems you have submitted': "New grades have been returned",
- 'Flagged Submissions': "Submissions have been flagged for review"
- }
+ 'Peer Grading': "New submissions to grade",
+ 'Staff Grading': "New submissions to grade",
+ 'Problems you have submitted': "New grades have been returned",
+ 'Flagged Submissions': "Submissions have been flagged for review"
+}
STUDENT_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify course staff."
STAFF_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org"
+
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
"""
@@ -92,10 +94,10 @@ def peer_grading(request, course_id):
#Get the current course
course = get_course_with_access(request.user, course_id, 'load')
course_id_parts = course.id.split("/")
- false_dict = [False,"False", "false", "FALSE"]
+ false_dict = [False, "False", "false", "FALSE"]
#Reverse the base course url
- base_course_url = reverse('courses')
+ base_course_url = reverse('courses')
try:
#TODO: This will not work with multiple runs of a course. Make it work. The last key in the Location passed
#to get_items is called revision. Is this the same as run?
@@ -147,7 +149,7 @@ def student_problem_list(request, course_id):
success = False
error_text = ""
problem_list = []
- base_course_url = reverse('courses')
+ base_course_url = reverse('courses')
try:
problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user))
@@ -174,7 +176,7 @@ def student_problem_list(request, course_id):
except:
#This is a student_facing_error
eta_string = "Error getting ETA."
- problem_list[i].update({'eta_string' : eta_string})
+ problem_list[i].update({'eta_string': eta_string})
except GradingServiceError:
#This is a student_facing_error
@@ -215,7 +217,7 @@ def flagged_problem_list(request, course_id):
success = False
error_text = ""
problem_list = []
- base_course_url = reverse('courses')
+ base_course_url = reverse('courses')
try:
problem_list_json = controller_qs.get_flagged_problem_list(course_id)
@@ -243,14 +245,14 @@ def flagged_problem_list(request, course_id):
ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_id)
context = {
- 'course': course,
- 'course_id': course_id,
- 'ajax_url': ajax_url,
- 'success': success,
- 'problem_list': problem_list,
- 'error_text': error_text,
- # Checked above
- 'staff_access': True,
+ 'course': course,
+ 'course_id': course_id,
+ 'ajax_url': ajax_url,
+ 'success': success,
+ 'problem_list': problem_list,
+ 'error_text': error_text,
+ # Checked above
+ 'staff_access': True,
}
return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
@@ -305,7 +307,7 @@ def combined_notifications(request, course_id):
}
return render_to_response('open_ended_problems/combined_notifications.html',
- combined_dict
+ combined_dict
)
@@ -318,13 +320,14 @@ def take_action_on_flags(request, course_id):
if request.method != 'POST':
raise Http404
-
required = ['submission_id', 'action_type', 'student_id']
for key in required:
if key not in request.POST:
#This is a staff_facing_error
- return HttpResponse(json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(key)}),
- mimetype="application/json")
+ return HttpResponse(json.dumps({'success': False,
+ 'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(
+ key)}),
+ mimetype="application/json")
p = request.POST
submission_id = p['submission_id']
@@ -338,5 +341,7 @@ def take_action_on_flags(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
#This is a dev_facing_error
- log.exception("Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(submission_id, action_type, grader_id))
+ log.exception(
+ "Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(
+ submission_id, action_type, grader_id))
return _err_response(STAFF_ERROR_MESSAGE)
From c40ab310c3310a0be8e718afa3e913dcc0212082 Mon Sep 17 00:00:00 2001
From: Vik Paruchuri
Date: Tue, 12 Mar 2013 14:24:42 -0400
Subject: [PATCH 21/22] Blank line fixes
---
.../combined_open_ended_rubric.py | 8 --------
.../open_ended_grading_classes/open_ended_module.py | 3 ---
.../xmodule/open_ended_grading_classes/openendedchild.py | 3 ---
.../open_ended_grading_classes/self_assessment_module.py | 2 --
4 files changed, 16 deletions(-)
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
index f4ea7648a1..287aeb5c24 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
@@ -133,7 +133,6 @@ class CombinedOpenEndedRubric(object):
categories.append(self.extract_category(category))
return categories
-
def extract_category(self, category):
'''
construct an individual category
@@ -235,7 +234,6 @@ class CombinedOpenEndedRubric(object):
})
return html
-
@staticmethod
def validate_options(options):
'''
@@ -328,9 +326,3 @@ class CombinedOpenEndedRubric(object):
tuples.append([0, category, score, [], []])
tup_ind = len(tuples) - 1
return tuples, tup_ind
-
-
-
-
-
-
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
index b16e7f5313..fc53a62c06 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
@@ -77,7 +77,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.send_to_grader(self.latest_answer(), system)
self.created = False
-
def _parse(self, oeparam, prompt, rubric, system):
'''
Parse OpenEndedResponse XML:
@@ -270,7 +269,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return True
-
def get_answers(self):
"""
Gets and shows the answer for this problem.
@@ -415,7 +413,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return feedback_template, rubric_scores
-
def _parse_score_msg(self, score_msg, system, join_feedback=True):
"""
Grader reply is a JSON-dump of the following dict
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
index 91dec5943d..922a4f9b77 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
@@ -490,6 +490,3 @@ class OpenEndedChild(object):
eta_string = ""
return eta_string
-
-
-
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
index a25d81eeae..8911e2890f 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
@@ -73,7 +73,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context)
return html
-
def handle_ajax(self, dispatch, get, system):
"""
This is called by courseware.module_render, to handle an AJAX call.
@@ -159,7 +158,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
-
def save_answer(self, get, system):
"""
After the answer is submitted, show the rubric.
From 70af75e8525f6d0e9cdfe1ec39396c0852528d86 Mon Sep 17 00:00:00 2001
From: Ned Batchelder
Date: Tue, 12 Mar 2013 14:34:39 -0400
Subject: [PATCH 22/22] Don't run pylint on migrations, and it's ok to have a
global name 'log'
---
.pylintrc | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index ce2f2e3b87..4357885133 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -12,7 +12,7 @@ profile=no
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS
+ignore=CVS, migrations
# Pickle collected data for later comparisons.
persistent=yes
@@ -43,7 +43,7 @@ disable=E1102,W0142
output-format=text
# Include message's id in output
-include-ids=no
+include-ids=yes
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
@@ -97,7 +97,7 @@ bad-functions=map,filter,apply,input
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression which should only match correct module level names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|log)$
# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$