diff --git a/.pylintrc b/.pylintrc
index ce2f2e3b87..d1cdbb4780 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -12,7 +12,7 @@ profile=no
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS
+ignore=CVS, migrations
# Pickle collected data for later comparisons.
persistent=yes
@@ -33,7 +33,11 @@ load-plugins=
# can either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once).
-disable=E1102,W0142
+disable=
+# W0141: Used builtin function 'map'
+# W0142: Used * or ** magic
+# R0903: Too few public methods (1/2)
+ W0141,W0142,R0903
[REPORTS]
@@ -43,7 +47,7 @@ disable=E1102,W0142
output-format=text
# Include message's id in output
-include-ids=no
+include-ids=yes
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
@@ -97,7 +101,7 @@ bad-functions=map,filter,apply,input
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression which should only match correct module level names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|log|urlpatterns)$
# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
@@ -106,7 +110,7 @@ class-rgx=[A-Z_][a-zA-Z0-9]+$
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
+method-rgx=([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
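The widened method-rgx above is meant to admit unittest-style names (setUp, tearDownClass, custom assert helpers) alongside snake_case. A quick offline sanity check of that pattern, as a sketch; it assumes Python's re semantics, which is what pylint uses to compile these expressions:

    import re

    # Pattern copied verbatim from the method-rgx line above.
    METHOD_RGX = re.compile(
        r'([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$')

    # Names the relaxed pattern should now accept.
    for name in ('setUp', 'setUpClass', 'tearDown', 'tearDownClass',
                 'assertWidgetRenders', 'save_grade'):
        assert METHOD_RGX.match(name), name

    # A camelCase helper that is not a unittest hook should still be rejected.
    assert METHOD_RGX.match('renderWidget') is None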
diff --git a/cms/djangoapps/contentstore/tests/test_contentstore.py b/cms/djangoapps/contentstore/tests/test_contentstore.py
index 0893a4dd12..983b7babd8 100644
--- a/cms/djangoapps/contentstore/tests/test_contentstore.py
+++ b/cms/djangoapps/contentstore/tests/test_contentstore.py
@@ -114,7 +114,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
self.assertTrue(sequential.location.url() in chapter.children)
self.client.post(reverse('delete_item'),
- json.dumps({'id': sequential.location.url(), 'delete_children': 'true'}),
+ json.dumps({'id': sequential.location.url(), 'delete_children': 'true', 'delete_all_versions': 'true'}),
"application/json")
found = False
diff --git a/cms/djangoapps/contentstore/views.py b/cms/djangoapps/contentstore/views.py
index 2d9a1eaf61..0350c9ed1a 100644
--- a/cms/djangoapps/contentstore/views.py
+++ b/cms/djangoapps/contentstore/views.py
@@ -643,17 +643,17 @@ def delete_item(request):
modulestore('direct').delete_item(item.location)
# cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
+ if delete_all_versions:
+ parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
- parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
-
- for parent_loc in parent_locs:
- parent = modulestore('direct').get_item(parent_loc)
- item_url = item_loc.url()
- if item_url in parent.children:
- children = parent.children
- children.remove(item_url)
- parent.children = children
- modulestore('direct').update_children(parent.location, parent.children)
+ for parent_loc in parent_locs:
+ parent = modulestore('direct').get_item(parent_loc)
+ item_url = item_loc.url()
+ if item_url in parent.children:
+ children = parent.children
+ children.remove(item_url)
+ parent.children = children
+ modulestore('direct').update_children(parent.location, parent.children)
return HttpResponse()
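For context on the new delete_all_versions guard: as the updated test above shows, the flag is posted as the string 'true' (like delete_children), not a JSON boolean. A hypothetical sketch of the request parsing that happens earlier in delete_item, outside the lines shown in this hunk:

    import json

    def parse_delete_flags(raw_body):
        # Hypothetical helper; the real parsing lives in delete_item above
        # the hunk shown here. Flags arrive as the string 'true', so compare
        # string forms rather than truthiness.
        payload = json.loads(raw_body)
        delete_children = payload.get('delete_children', 'false') == 'true'
        delete_all_versions = payload.get('delete_all_versions', 'false') == 'true'
        return payload['id'], delete_children, delete_all_versions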
diff --git a/common/djangoapps/course_groups/cohorts.py b/common/djangoapps/course_groups/cohorts.py
index f0234ec71a..c362ed4e89 100644
--- a/common/djangoapps/course_groups/cohorts.py
+++ b/common/djangoapps/course_groups/cohorts.py
@@ -65,23 +65,23 @@ def is_commentable_cohorted(course_id, commentable_id):
ans))
return ans
-
+
def get_cohorted_commentables(course_id):
"""
Given a course_id return a list of strings representing cohorted commentables
"""
course = courses.get_course_by_id(course_id)
-
+
if not course.is_cohorted:
# this is the easy case :)
ans = []
- else:
+ else:
ans = course.cohorted_discussions
return ans
-
-
+
+
def get_cohort(user, course_id):
"""
Given a django User and a course_id, return the user's cohort in that
@@ -120,7 +120,8 @@ def get_cohort(user, course_id):
return None
choices = course.auto_cohort_groups
- if len(choices) == 0:
+ n = len(choices)
+ if n == 0:
# Nowhere to put user
log.warning("Course %s is auto-cohorted, but there are no"
" auto_cohort_groups specified",
@@ -128,12 +129,19 @@ def get_cohort(user, course_id):
return None
# Put user in a random group, creating it if needed
- group_name = random.choice(choices)
+ choice = random.randrange(0, n)
+ group_name = choices[choice]
+
+ # Victor: we are seeing very strange behavior on prod, where almost all users
+ # end up in the same group. Log at INFO to try to figure out what's going on.
+ log.info("DEBUG: adding user {0} to cohort {1}. choice={2}".format(
+        user, group_name, choice))
+
group, created = CourseUserGroup.objects.get_or_create(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=group_name)
-
+
user.course_groups.add(group)
return group
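The switch from random.choice to an explicit randrange index does not change the distribution; both pick uniformly at random. A quick standalone simulation (assuming plain CPython random, nothing edX-specific) showing that the prod skew described in the comment cannot come from this call in isolation:

    import random
    from collections import Counter

    choices = ["group_{0}".format(n) for n in range(5)]
    counts = Counter(choices[random.randrange(0, len(choices))]
                     for _ in range(100000))
    print(counts)  # each of the 5 groups should land near 20000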
diff --git a/common/djangoapps/course_groups/tests/tests.py b/common/djangoapps/course_groups/tests/tests.py
index 0c1a3de61c..94d52ff6df 100644
--- a/common/djangoapps/course_groups/tests/tests.py
+++ b/common/djangoapps/course_groups/tests/tests.py
@@ -6,7 +6,7 @@ from django.test.utils import override_settings
from course_groups.models import CourseUserGroup
from course_groups.cohorts import (get_cohort, get_course_cohorts,
- is_commentable_cohorted)
+ is_commentable_cohorted, get_cohort_by_name)
from xmodule.modulestore.django import modulestore, _MODULESTORES
@@ -180,6 +180,37 @@ class TestCohorts(django.test.TestCase):
"user2 should still be in originally placed cohort")
+ def test_auto_cohorting_randomization(self):
+ """
+ Make sure get_cohort() randomizes properly.
+ """
+ course = modulestore().get_course("edX/toy/2012_Fall")
+ self.assertEqual(course.id, "edX/toy/2012_Fall")
+ self.assertFalse(course.is_cohorted)
+
+ groups = ["group_{0}".format(n) for n in range(5)]
+ self.config_course_cohorts(course, [], cohorted=True,
+ auto_cohort=True,
+ auto_cohort_groups=groups)
+
+ # Assign 100 users to cohorts
+ for i in range(100):
+ user = User.objects.create(username="test_{0}".format(i),
+ email="a@b{0}.com".format(i))
+ get_cohort(user, course.id)
+
+ # Now make sure that the assignment was at least vaguely random:
+ # each cohort should have at least 1, and fewer than 50 students.
+        # (with 5 groups, the probability that a given group gets 0 of the
+        # 100 users is about 0.8**100 = 2.0e-10)
+ for cohort_name in groups:
+ cohort = get_cohort_by_name(course.id, cohort_name)
+ num_users = cohort.users.count()
+            self.assertGreater(num_users, 0)
+ self.assertLess(num_users, 50)
+
+
+
def test_get_course_cohorts(self):
course1_id = 'a/b/c'
course2_id = 'e/f/g'
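On the flakiness bound the new test's comment quotes: with 5 groups and 100 independent uniform assignments, the chance that a given group ends up empty is 0.8**100, and a union bound over the 5 groups keeps the whole test effectively deterministic. Checking the arithmetic:

    p_given_group_empty = 0.8 ** 100
    print(p_given_group_empty)      # ~2.04e-10, matches the comment in the test
    print(5 * p_given_group_empty)  # ~1.02e-09, union bound over all 5 groups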
diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py
index fd527f88d7..f05f419a03 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_module.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -89,7 +89,6 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
def __init__(self, system, location, descriptor, model_data):
XModule.__init__(self, system, location, descriptor, model_data)
-
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -152,13 +151,13 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
attributes = self.student_attributes + self.settings_attributes
static_data = {
- 'rewrite_content_links' : self.rewrite_content_links,
+ 'rewrite_content_links': self.rewrite_content_links,
}
- instance_state = { k: getattr(self,k) for k in attributes}
+ instance_state = {k: getattr(self, k) for k in attributes}
self.child_descriptor = descriptors[version_index](self.system)
self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(self.data), self.system)
self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor,
- instance_state = instance_state, static_data= static_data, attributes=attributes)
+ instance_state=instance_state, static_data=static_data, attributes=attributes)
self.save_instance_data()
def get_html(self):
@@ -190,9 +189,9 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
def save_instance_data(self):
for attribute in self.student_attributes:
- child_attr = getattr(self.child_module,attribute)
+ child_attr = getattr(self.child_module, attribute)
if child_attr != getattr(self, attribute):
- setattr(self,attribute, getattr(self.child_module,attribute))
+ setattr(self, attribute, getattr(self.child_module, attribute))
class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
diff --git a/common/lib/xmodule/xmodule/foldit_module.py b/common/lib/xmodule/xmodule/foldit_module.py
index d8111eecac..884f9e2df2 100644
--- a/common/lib/xmodule/xmodule/foldit_module.py
+++ b/common/lib/xmodule/xmodule/foldit_module.py
@@ -89,7 +89,7 @@ class FolditModule(FolditFields, XModule):
from foldit.models import Score
leaders = [(e['username'], e['score']) for e in Score.get_tops_n(10)]
- leaders.sort(key=lambda x: x[1])
+ leaders.sort(key=lambda x: -x[1])
return leaders
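Negating the key flips the leaderboard to descending order, which works for the numeric scores here. An equivalent spelling that reads a little more directly (a style note, not part of the patch; the data is illustrative):

    leaders = [("alice", 120), ("bob", 300), ("carol", 220)]
    leaders.sort(key=lambda entry: entry[1], reverse=True)
    print(leaders)  # [('bob', 300), ('carol', 220), ('alice', 120)]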
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
index 2a106ba63b..c75bbae962 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
@@ -38,14 +38,15 @@ ACCEPT_FILE_UPLOAD = False
TRUE_DICT = ["True", True, "TRUE", "true"]
HUMAN_TASK_TYPE = {
- 'selfassessment' : "Self Assessment",
- 'openended' : "edX Assessment",
- }
+ 'selfassessment': "Self Assessment",
+ 'openended': "edX Assessment",
+}
#Default value that controls whether or not to skip basic spelling checks in the controller
#Metadata overrides this
SKIP_BASIC_CHECKS = False
+
class CombinedOpenEndedV1Module():
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -81,7 +82,7 @@ class CombinedOpenEndedV1Module():
TEMPLATE_DIR = "combinedopenended"
def __init__(self, system, location, definition, descriptor,
- instance_state=None, shared_state=None, metadata = None, static_data = None, **kwargs):
+ instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -120,7 +121,7 @@ class CombinedOpenEndedV1Module():
self.instance_state = instance_state
self.display_name = instance_state.get('display_name', "Open Ended")
- self.rewrite_content_links = static_data.get('rewrite_content_links',"")
+ self.rewrite_content_links = static_data.get('rewrite_content_links', "")
#We need to set the location here so the child modules can use it
system.set('location', location)
@@ -168,10 +169,10 @@ class CombinedOpenEndedV1Module():
'rubric': definition['rubric'],
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
- 'close_date' : self.timeinfo.close_date,
- 's3_interface' : self.system.s3_interface,
- 'skip_basic_checks' : self.skip_basic_checks,
- }
+ 'close_date': self.timeinfo.close_date,
+ 's3_interface': self.system.s3_interface,
+ 'skip_basic_checks': self.skip_basic_checks,
+ }
self.task_xml = definition['task_xml']
self.location = location
@@ -214,15 +215,15 @@ class CombinedOpenEndedV1Module():
child_modules = {
'openended': open_ended_module.OpenEndedModule,
'selfassessment': self_assessment_module.SelfAssessmentModule,
- }
+ }
child_descriptors = {
'openended': open_ended_module.OpenEndedDescriptor,
'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
- }
+ }
children = {
'modules': child_modules,
'descriptors': child_descriptors,
- }
+ }
return children
def setup_next_task(self, reset=False):
@@ -258,7 +259,8 @@ class CombinedOpenEndedV1Module():
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0:
self.current_task = child_task_module(self.system, self.location,
- self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
+ self.current_task_parsed_xml, self.current_task_descriptor,
+ self.static_data)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
elif current_task_state is None and self.current_task_number > 0:
@@ -271,18 +273,20 @@ class CombinedOpenEndedV1Module():
'child_attempts': 0,
'child_created': True,
'child_history': [{'answer': last_response}],
- })
+ })
self.current_task = child_task_module(self.system, self.location,
- self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
- instance_state=current_task_state)
+ self.current_task_parsed_xml, self.current_task_descriptor,
+ self.static_data,
+ instance_state=current_task_state)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
else:
if self.current_task_number > 0 and not reset:
current_task_state = self.overwrite_state(current_task_state)
self.current_task = child_task_module(self.system, self.location,
- self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
- instance_state=current_task_state)
+ self.current_task_parsed_xml, self.current_task_descriptor,
+ self.static_data,
+ instance_state=current_task_state)
return True
@@ -298,8 +302,8 @@ class CombinedOpenEndedV1Module():
last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number)
- if(current_response_data['min_score_to_attempt'] > last_response_data['score']
- or current_response_data['max_score_to_attempt'] < last_response_data['score']):
+ if (current_response_data['min_score_to_attempt'] > last_response_data['score']
+ or current_response_data['max_score_to_attempt'] < last_response_data['score']):
self.state = self.DONE
self.ready_to_reset = True
@@ -325,8 +329,8 @@ class CombinedOpenEndedV1Module():
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
'location': self.location,
- 'legend_list' : LEGEND_LIST,
- }
+ 'legend_list': LEGEND_LIST,
+ }
return context
@@ -395,7 +399,7 @@ class CombinedOpenEndedV1Module():
task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)
task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,
- self.static_data, instance_state=task_state)
+ self.static_data, instance_state=task_state)
last_response = task.latest_answer()
last_score = task.latest_score()
last_post_assessment = task.latest_post_assessment(self.system)
@@ -417,10 +421,10 @@ class CombinedOpenEndedV1Module():
rubric_scores = rubric_data['rubric_scores']
grader_types = rubric_data['grader_types']
feedback_items = rubric_data['feedback_items']
- feedback_dicts = rubric_data['feedback_dicts']
+ feedback_dicts = rubric_data['feedback_dicts']
grader_ids = rubric_data['grader_ids']
- submission_ids = rubric_data['submission_ids']
- elif task_type== "selfassessment":
+ submission_ids = rubric_data['submission_ids']
+ elif task_type == "selfassessment":
rubric_scores = last_post_assessment
grader_types = ['SA']
feedback_items = ['']
@@ -437,7 +441,7 @@ class CombinedOpenEndedV1Module():
human_state = task.HUMAN_NAMES[state]
else:
human_state = state
- if len(grader_types)>0:
+ if len(grader_types) > 0:
grader_type = grader_types[0]
else:
grader_type = "IN"
@@ -459,15 +463,15 @@ class CombinedOpenEndedV1Module():
'correct': last_correctness,
'min_score_to_attempt': min_score_to_attempt,
'max_score_to_attempt': max_score_to_attempt,
- 'rubric_scores' : rubric_scores,
- 'grader_types' : grader_types,
- 'feedback_items' : feedback_items,
- 'grader_type' : grader_type,
- 'human_grader_type' : human_grader_name,
- 'feedback_dicts' : feedback_dicts,
- 'grader_ids' : grader_ids,
- 'submission_ids' : submission_ids,
- }
+ 'rubric_scores': rubric_scores,
+ 'grader_types': grader_types,
+ 'feedback_items': feedback_items,
+ 'grader_type': grader_type,
+ 'human_grader_type': human_grader_name,
+ 'feedback_dicts': feedback_dicts,
+ 'grader_ids': grader_ids,
+ 'submission_ids': submission_ids,
+ }
return last_response_dict
def update_task_states(self):
@@ -510,20 +514,27 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
all_responses = []
- loop_up_to_task = self.current_task_number+1
- for i in xrange(0,loop_up_to_task):
+ loop_up_to_task = self.current_task_number + 1
+ for i in xrange(0, loop_up_to_task):
all_responses.append(self.get_last_response(i))
- rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['rubric_scores'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
- grader_types = [all_responses[i]['grader_types'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['grader_types'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
- feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['feedback_items'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
- rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores,
- grader_types, feedback_items)
+        rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses))
+                         if len(all_responses[i]['rubric_scores']) > 0
+                         and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
+        grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses))
+                        if len(all_responses[i]['grader_types']) > 0
+                        and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
+        feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses))
+                          if len(all_responses[i]['feedback_items']) > 0
+                          and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
+ rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']),
+ rubric_scores,
+ grader_types, feedback_items)
response_dict = all_responses[-1]
context = {
'results': rubric_html,
- 'task_name' : 'Scored Rubric',
- 'class_name' : 'combined-rubric-container'
+ 'task_name': 'Scored Rubric',
+ 'class_name': 'combined-rubric-container'
}
html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
@@ -535,8 +546,8 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
context = {
- 'legend_list' : LEGEND_LIST,
- }
+ 'legend_list': LEGEND_LIST,
+ }
html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
@@ -547,15 +558,16 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
self.update_task_states()
- loop_up_to_task = self.current_task_number+1
- all_responses =[]
- for i in xrange(0,loop_up_to_task):
+ loop_up_to_task = self.current_task_number + 1
+ all_responses = []
+ for i in xrange(0, loop_up_to_task):
all_responses.append(self.get_last_response(i))
context_list = []
for ri in all_responses:
- for i in xrange(0,len(ri['rubric_scores'])):
- feedback = ri['feedback_dicts'][i].get('feedback','')
- rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i])
+ for i in xrange(0, len(ri['rubric_scores'])):
+ feedback = ri['feedback_dicts'][i].get('feedback', '')
+ rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']),
+ ri['rubric_scores'][i])
if rubric_data['success']:
rubric_html = rubric_data['html']
else:
@@ -563,23 +575,23 @@ class CombinedOpenEndedV1Module():
context = {
'rubric_html': rubric_html,
'grader_type': ri['grader_type'],
- 'feedback' : feedback,
- 'grader_id' : ri['grader_ids'][i],
- 'submission_id' : ri['submission_ids'][i],
+ 'feedback': feedback,
+ 'grader_id': ri['grader_ids'][i],
+ 'submission_id': ri['submission_ids'][i],
}
context_list.append(context)
feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), {
- 'context_list' : context_list,
- 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
- 'human_grader_types' : HUMAN_GRADER_TYPE,
+ 'context_list': context_list,
+ 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+ 'human_grader_types': HUMAN_GRADER_TYPE,
'rows': 50,
'cols': 50,
})
context = {
'results': feedback_table,
- 'task_name' : "Feedback",
- 'class_name' : "result-container",
- }
+ 'task_name': "Feedback",
+ 'class_name': "result-container",
+ }
html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
@@ -608,8 +620,8 @@ class CombinedOpenEndedV1Module():
'reset': self.reset,
'get_results': self.get_results,
'get_combined_rubric': self.get_rubric,
- 'get_status' : self.get_status_ajax,
- 'get_legend' : self.get_legend,
+ 'get_status': self.get_status_ajax,
+ 'get_legend': self.get_legend,
}
if dispatch not in handlers:
@@ -672,7 +684,7 @@ class CombinedOpenEndedV1Module():
'task_states': self.task_states,
'student_attempts': self.student_attempts,
'ready_to_reset': self.ready_to_reset,
- }
+ }
return json.dumps(state)
@@ -690,11 +702,12 @@ class CombinedOpenEndedV1Module():
context = {
'status_list': status,
- 'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
- 'legend_list' : LEGEND_LIST,
- 'render_via_ajax' : render_via_ajax,
+ 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+ 'legend_list': LEGEND_LIST,
+ 'render_via_ajax': render_via_ajax,
}
- status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context)
+ status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR),
+ context)
return status_html
@@ -727,7 +740,7 @@ class CombinedOpenEndedV1Module():
score_dict = {
'score': score,
'total': max_score,
- }
+ }
return score_dict
@@ -787,7 +800,9 @@ class CombinedOpenEndedV1Descriptor():
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error
- raise ValueError("Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+ raise ValueError(
+ "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
+ child))
def parse_task(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
index 94c3318dd5..bceb12e444 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_rubric.py
@@ -4,24 +4,26 @@ from lxml import etree
log = logging.getLogger(__name__)
GRADER_TYPE_IMAGE_DICT = {
- 'SA' : '/static/images/self_assessment_icon.png',
- 'PE' : '/static/images/peer_grading_icon.png',
- 'ML' : '/static/images/ml_grading_icon.png',
- 'IN' : '/static/images/peer_grading_icon.png',
- 'BC' : '/static/images/ml_grading_icon.png',
- }
+ 'SA': '/static/images/self_assessment_icon.png',
+ 'PE': '/static/images/peer_grading_icon.png',
+ 'ML': '/static/images/ml_grading_icon.png',
+ 'IN': '/static/images/peer_grading_icon.png',
+ 'BC': '/static/images/ml_grading_icon.png',
+}
HUMAN_GRADER_TYPE = {
- 'SA' : 'Self-Assessment',
- 'PE' : 'Peer-Assessment',
- 'IN' : 'Instructor-Assessment',
- 'ML' : 'AI-Assessment',
- 'BC' : 'AI-Assessment',
- }
+ 'SA': 'Self-Assessment',
+ 'PE': 'Peer-Assessment',
+ 'IN': 'Instructor-Assessment',
+ 'ML': 'AI-Assessment',
+ 'BC': 'AI-Assessment',
+}
DO_NOT_DISPLAY = ['BC', 'IN']
-LEGEND_LIST = [{'name' : HUMAN_GRADER_TYPE[k], 'image' : GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() if k not in DO_NOT_DISPLAY ]
+LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys()
+ if k not in DO_NOT_DISPLAY]
+
class RubricParsingError(Exception):
def __init__(self, msg):
@@ -29,15 +31,14 @@ class RubricParsingError(Exception):
class CombinedOpenEndedRubric(object):
-
TEMPLATE_DIR = "combinedopenended/openended"
- def __init__ (self, system, view_only = False):
+ def __init__(self, system, view_only=False):
self.has_score = False
self.view_only = view_only
self.system = system
- def render_rubric(self, rubric_xml, score_list = None):
+ def render_rubric(self, rubric_xml, score_list=None):
'''
render_rubric: takes in an xml string and outputs the corresponding
html for that xml, given the type of rubric we're generating
@@ -50,11 +51,11 @@ class CombinedOpenEndedRubric(object):
success = False
try:
rubric_categories = self.extract_categories(rubric_xml)
- if score_list and len(score_list)==len(rubric_categories):
- for i in xrange(0,len(rubric_categories)):
+ if score_list and len(score_list) == len(rubric_categories):
+ for i in xrange(0, len(rubric_categories)):
category = rubric_categories[i]
- for j in xrange(0,len(category['options'])):
- if score_list[i]==j:
+ for j in xrange(0, len(category['options'])):
+ if score_list[i] == j:
rubric_categories[i]['options'][j]['selected'] = True
rubric_scores = [cat['score'] for cat in rubric_categories]
max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
@@ -63,19 +64,20 @@ class CombinedOpenEndedRubric(object):
if self.view_only:
rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR)
html = self.system.render_template(rubric_template,
- {'categories': rubric_categories,
- 'has_score': self.has_score,
- 'view_only': self.view_only,
- 'max_score': max_score,
- 'combined_rubric' : False
- })
+ {'categories': rubric_categories,
+ 'has_score': self.has_score,
+ 'view_only': self.view_only,
+ 'max_score': max_score,
+ 'combined_rubric': False
+ })
success = True
except:
#This is a staff_facing_error
- error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(rubric_xml)
+ error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(
+ rubric_xml)
log.exception(error_message)
raise RubricParsingError(error_message)
- return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores}
+ return {'success': success, 'html': html, 'rubric_scores': rubric_scores}
def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
rubric_dict = self.render_rubric(rubric_string)
@@ -83,7 +85,8 @@ class CombinedOpenEndedRubric(object):
rubric_feedback = rubric_dict['html']
if not success:
#This is a staff_facing_error
- error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(rubric_string, location.url())
+ error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(
+ rubric_string, location.url())
log.error(error_message)
raise RubricParsingError(error_message)
@@ -101,7 +104,7 @@ class CombinedOpenEndedRubric(object):
if int(total) != int(max_score):
#This is a staff_facing_error
error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}. Contact the learning sciences group for assistance.".format(
- max_score, location, total)
+ max_score, location, total)
log.error(error_msg)
raise RubricParsingError(error_msg)
@@ -123,12 +126,13 @@ class CombinedOpenEndedRubric(object):
for category in element:
if category.tag != 'category':
#This is a staff_facing_error
- raise RubricParsingError("[extract_categories] Expected a
', '', clean_html))
except:
@@ -272,7 +275,7 @@ class OpenEndedChild(object):
"""
#This is a dev_facing_error
log.warning("Open ended child state out sync. state: %r, get: %r. %s",
- self.child_state, get, msg)
+ self.child_state, get, msg)
#This is a student_facing_error
return {'success': False,
'error': 'The problem state got out-of-sync. Please try reloading the page.'}
@@ -298,7 +301,7 @@ class OpenEndedChild(object):
@return: Boolean correct.
"""
correct = False
- if(isinstance(score, (int, long, float, complex))):
+        if isinstance(score, (int, long, float, complex)):
score_ratio = int(score) / float(self.max_score())
correct = (score_ratio >= 0.66)
return correct
@@ -332,7 +335,8 @@ class OpenEndedChild(object):
try:
image_data.seek(0)
- success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, self.s3_interface)
+ success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key,
+ self.s3_interface)
except:
log.exception("Could not upload image to S3.")
@@ -394,9 +398,9 @@ class OpenEndedChild(object):
#In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely
#a config issue (development vs deployment). For now, just treat this as a "success"
log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, "
- "but the image was not able to be uploaded to S3. This could indicate a config"
- "issue with this deployment, but it could also indicate a problem with S3 or with the"
- "student image itself.")
+ "but the image was not able to be uploaded to S3. This could indicate a config"
+ "issue with this deployment, but it could also indicate a problem with S3 or with the"
+ "student image itself.")
overall_success = True
elif not has_file_to_upload:
#If there is no file to upload, probably the student has embedded the link in the answer text
@@ -435,7 +439,7 @@ class OpenEndedChild(object):
response = {}
#This is a student_facing_error
error_string = ("You need to peer grade {0} more in order to make another submission. "
- "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
+ "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
try:
response = self.peer_gs.get_data_for_location(self.location_string, student_id)
count_graded = response['count_graded']
@@ -444,16 +448,18 @@ class OpenEndedChild(object):
success = True
except:
#This is a dev_facing_error
- log.error("Could not contact external open ended graders for location {0} and student {1}".format(self.location_string,student_id))
+ log.error("Could not contact external open ended graders for location {0} and student {1}".format(
+ self.location_string, student_id))
#This is a student_facing_error
error_message = "Could not contact the graders. Please notify course staff."
return success, allowed_to_submit, error_message
- if count_graded>=count_required:
+ if count_graded >= count_required:
return success, allowed_to_submit, ""
else:
allowed_to_submit = False
#This is a student_facing_error
- error_message = error_string.format(count_required-count_graded, count_graded, count_required, student_sub_count)
+ error_message = error_string.format(count_required - count_graded, count_graded, count_required,
+ student_sub_count)
return success, allowed_to_submit, error_message
def get_eta(self):
@@ -468,7 +474,7 @@ class OpenEndedChild(object):
success = response['success']
if isinstance(success, basestring):
- success = (success.lower()=="true")
+ success = (success.lower() == "true")
if success:
eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
@@ -477,6 +483,3 @@ class OpenEndedChild(object):
eta_string = ""
return eta_string
-
-
-
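One behavioral detail worth noting in is_last_response_correct above: correctness is a fixed ratio cut-off, not an exact-score check. A minimal standalone restatement (the Python 2 long/complex types are omitted here for brevity):

    def is_correct(score, max_score):
        # Mirrors OpenEndedChild.is_last_response_correct: "correct" means
        # the score reaches at least 66% of the maximum.
        if isinstance(score, (int, float)):
            return (int(score) / float(max_score)) >= 0.66
        return False

    print(is_correct(3, 4))  # True:  0.75 >= 0.66
    print(is_correct(2, 4))  # False: 0.50 <  0.66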
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
index 42c54f0463..5daf1b83b5 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py
@@ -14,6 +14,7 @@ class PeerGradingService(GradingService):
"""
Interface with the grading controller for peer grading
"""
+
def __init__(self, config, system):
config['system'] = system
super(PeerGradingService, self).__init__(config)
@@ -36,10 +37,11 @@ class PeerGradingService(GradingService):
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url,
- {'location': problem_location, 'grader_id': grader_id})
+ {'location': problem_location, 'grader_id': grader_id})
return self.try_to_decode(self._render_rubric(response))
- def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
+ def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
+ submission_flagged):
data = {'grader_id': grader_id,
'submission_id': submission_id,
'score': score,
@@ -89,6 +91,7 @@ class PeerGradingService(GradingService):
pass
return text
+
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
@@ -122,7 +125,7 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_calibration_essay(self, problem_location, grader_id,
- calibration_essay_id, submission_key, score,
+ calibration_essay_id, submission_key, score,
feedback, rubric_scores):
return {'success': True, 'actual_score': 2}
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
index 709cd98263..f597b76723 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/self_assessment_module.py
@@ -75,7 +75,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context)
return html
-
def handle_ajax(self, dispatch, get, system):
"""
This is called by courseware.module_render, to handle an AJAX call.
@@ -97,7 +96,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
- return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](get, system)
@@ -161,7 +160,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
-
def save_answer(self, get, system):
"""
After the answer is submitted, show the rubric.
@@ -226,7 +224,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
try:
score = int(get['assessment'])
score_list = get.getlist('score_list[]')
- for i in xrange(0,len(score_list)):
+ for i in xrange(0, len(score_list)):
score_list[i] = int(score_list[i])
except ValueError:
#This is a dev_facing_error
@@ -270,7 +268,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'allow_reset': self._allow_reset()}
def latest_post_assessment(self, system):
- latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
+ latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
try:
rubric_scores = json.loads(latest_post_assessment)
except:
@@ -310,7 +308,9 @@ class SelfAssessmentDescriptor():
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
- raise ValueError("Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+ raise ValueError(
+ "Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
+ child))
def parse(k):
"""Assumes that xml_object has child k"""
diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py
index 347a121614..89290b7c91 100644
--- a/common/lib/xmodule/xmodule/peer_grading_module.py
+++ b/common/lib/xmodule/xmodule/peer_grading_module.py
@@ -44,7 +44,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
- ]}
+ ]}
js_module_name = "PeerGrading"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
@@ -55,7 +55,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
#We need to set the location here so the child modules can use it
system.set('location', location)
self.system = system
- if(self.system.open_ended_grading_interface):
+        if self.system.open_ended_grading_interface:
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system)
else:
self.peer_gs = MockPeerGradingService()
@@ -139,13 +139,13 @@ class PeerGradingModule(PeerGradingFields, XModule):
'save_grade': self.save_grade,
'save_calibration_essay': self.save_calibration_essay,
'problem': self.peer_grading_problem,
- }
+ }
if dispatch not in handlers:
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
- return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+ return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
d = handlers[dispatch](get)
@@ -182,9 +182,10 @@ class PeerGradingModule(PeerGradingFields, XModule):
except:
success, response = self.query_data_for_location()
if not success:
- log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format(
- self.system.location.url(), self.system.anonymous_student_id
- ))
+ log.exception(
+ "No instance data found and could not get data from controller for loc {0} student {1}".format(
+ self.system.location.url(), self.system.anonymous_student_id
+ ))
return None
count_graded = response['count_graded']
count_required = response['count_required']
@@ -195,7 +196,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
score_dict = {
'score': int(count_graded >= count_required),
'total': self.max_grade,
- }
+ }
return score_dict
@@ -244,7 +245,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
.format(self.peer_gs.url, location, grader_id))
#This is a student_facing_error
return {'success': False,
- 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
def save_grade(self, get):
"""
@@ -262,7 +263,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
error: if there was an error in the submission, this is the error message
"""
- required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged'])
+ required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]',
+ 'submission_flagged'])
success, message = self._check_required(get, required)
if not success:
return self._err_response(message)
@@ -278,14 +280,14 @@ class PeerGradingModule(PeerGradingFields, XModule):
try:
response = self.peer_gs.save_grade(location, grader_id, submission_id,
- score, feedback, submission_key, rubric_scores, submission_flagged)
+ score, feedback, submission_key, rubric_scores, submission_flagged)
return response
except GradingServiceError:
#This is a dev_facing_error
log.exception("""Error saving grade to open ended grading service. server url: {0}, location: {1}, submission_id:{2},
submission_key: {3}, score: {4}"""
.format(self.peer_gs.url,
- location, submission_id, submission_key, score)
+ location, submission_id, submission_key, score)
)
#This is a student_facing_error
return {
@@ -373,7 +375,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
.format(self.peer_gs.url, location))
#This is a student_facing_error
return {'success': False,
- 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
+ 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
#This is a dev_facing_error
@@ -381,7 +383,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
.format(rubric))
#This is a student_facing_error
return {'success': False,
- 'error': 'Error displaying submission. Please notify course staff.'}
+ 'error': 'Error displaying submission. Please notify course staff.'}
def save_calibration_essay(self, get):
@@ -417,11 +419,13 @@ class PeerGradingModule(PeerGradingFields, XModule):
try:
response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id,
- submission_key, score, feedback, rubric_scores)
+ submission_key, score, feedback, rubric_scores)
return response
except GradingServiceError:
#This is a dev_facing_error
- log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
+ log.exception(
+ "Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(
+ location, submission_id, submission_key, grader_id))
#This is a student_facing_error
return self._err_response('There was an error saving your score. Please notify course staff.')
@@ -431,7 +435,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
'''
html = self.system.render_template('peer_grading/peer_grading_closed.html', {
'use_for_single_location': self.use_for_single_location
- })
+ })
return html
@@ -501,7 +505,6 @@ class PeerGradingModule(PeerGradingFields, XModule):
problem['due'] = None
problem['closed'] = False
-
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading.html', {
'course_id': self.system.course_id,
@@ -512,7 +515,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location,
- })
+ })
return html
@@ -524,7 +527,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
if self.use_for_single_location not in TRUE_DICT:
#This is an error case, because it must be set to use a single location to be called without get parameters
#This is a dev_facing_error
- log.error("Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
+ log.error(
+ "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
return {'html': "", 'success': False}
problem_location = self.link_to_location
@@ -540,7 +544,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location,
- })
+ })
return {'html': html, 'success': True}
@@ -553,7 +557,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
state = {
'student_data_for_location': self.student_data_for_location,
- }
+ }
return json.dumps(state)
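The score_dict built in this module (see the hunk around get_score above) makes peer-grading credit all-or-nothing: one point once count_graded reaches count_required. Restated as a standalone sketch:

    def peer_grading_score(count_graded, count_required, max_grade=1):
        # Mirrors the score_dict in PeerGradingModule: binary credit, awarded
        # only once the required number of peer gradings is done.
        return {'score': int(count_graded >= count_required), 'total': max_grade}

    print(peer_grading_score(2, 3))  # {'score': 0, 'total': 1}
    print(peer_grading_score(3, 3))  # {'score': 1, 'total': 1}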
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
index d3b869a802..09c86baf27 100644
--- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -14,6 +14,7 @@ from datetime import datetime
from . import test_system
import test_util_open_ended
+
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
@@ -39,41 +40,38 @@ class OpenEndedChildTest(unittest.TestCase):
max_score = 1
static_data = {
- 'max_attempts': 20,
- 'prompt': prompt,
- 'rubric': rubric,
- 'max_score': max_score,
- 'display_name': 'Name',
- 'accept_file_upload': False,
- 'close_date': None,
- 's3_interface' : "",
- 'open_ended_grading_interface' : {},
- 'skip_basic_checks' : False,
- }
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name',
+ 'accept_file_upload': False,
+ 'close_date': None,
+ 's3_interface': "",
+ 'open_ended_grading_interface': {},
+ 'skip_basic_checks': False,
+ }
definition = Mock()
descriptor = Mock()
def setUp(self):
self.test_system = test_system()
self.openendedchild = OpenEndedChild(self.test_system, self.location,
- self.definition, self.descriptor, self.static_data, self.metadata)
+ self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, "")
-
def test_latest_score_empty(self):
answer = self.openendedchild.latest_score()
self.assertEqual(answer, None)
-
def test_latest_post_assessment_empty(self):
answer = self.openendedchild.latest_post_assessment(self.test_system)
self.assertEqual(answer, "")
-
def test_new_history_entry(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
@@ -99,7 +97,6 @@ class OpenEndedChildTest(unittest.TestCase):
score = self.openendedchild.latest_score()
self.assertEqual(score, 4)
-
def test_record_latest_post_assessment(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
@@ -107,7 +104,7 @@ class OpenEndedChildTest(unittest.TestCase):
post_assessment = "Post assessment"
self.openendedchild.record_latest_post_assessment(post_assessment)
self.assertEqual(post_assessment,
- self.openendedchild.latest_post_assessment(self.test_system))
+ self.openendedchild.latest_post_assessment(self.test_system))
def test_get_score(self):
new_answer = "New Answer"
@@ -124,24 +121,22 @@ class OpenEndedChildTest(unittest.TestCase):
self.assertEqual(score['score'], new_score)
self.assertEqual(score['total'], self.static_data['max_score'])
-
def test_reset(self):
self.openendedchild.reset(self.test_system)
state = json.loads(self.openendedchild.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedChild.INITIAL)
-
def test_is_last_response_correct(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(self.static_data['max_score'])
self.assertEqual(self.openendedchild.is_last_response_correct(),
- 'correct')
+ 'correct')
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(0)
self.assertEqual(self.openendedchild.is_last_response_correct(),
- 'incorrect')
+ 'incorrect')
class OpenEndedModuleTest(unittest.TestCase):
@@ -159,18 +154,18 @@ class OpenEndedModuleTest(unittest.TestCase):
max_score = 4
static_data = {
- 'max_attempts': 20,
- 'prompt': prompt,
- 'rubric': rubric,
- 'max_score': max_score,
- 'display_name': 'Name',
- 'accept_file_upload': False,
- 'rewrite_content_links' : "",
- 'close_date': None,
- 's3_interface' : test_util_open_ended.S3_INTERFACE,
- 'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
- 'skip_basic_checks' : False,
- }
+ 'max_attempts': 20,
+ 'prompt': prompt,
+ 'rubric': rubric,
+ 'max_score': max_score,
+ 'display_name': 'Name',
+ 'accept_file_upload': False,
+ 'rewrite_content_links': "",
+ 'close_date': None,
+ 's3_interface': test_util_open_ended.S3_INTERFACE,
+ 'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+ 'skip_basic_checks': False,
+ }
oeparam = etree.XML('''