Ran pyupgrade on lms/djangoapps

Ran pyupgrade on lms/djangoapps/instructor_analytics
Ran pyupgrade on lms/djangoapps/instructor_task
Ran pyupgrade on lms/djangoapps/learner_dashboard
This commit is contained in:
usamasadiq
2021-02-16 12:55:05 +05:00
parent ba16e05899
commit 3f1df8eb2a
39 changed files with 518 additions and 592 deletions

View File

@@ -9,7 +9,6 @@ import datetime
import json
import logging
import six
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ObjectDoesNotExist
@@ -18,16 +17,15 @@ from django.db.models import Count, Q # lint-amnesty, pylint: disable=unused-im
from django.urls import reverse
from edx_proctoring.api import get_exam_violation_report
from opaque_keys.edx.keys import CourseKey, UsageKey
from six import text_type
import xmodule.graders as xmgraders
from lms.djangoapps.courseware.models import StudentModule
from common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAllowed
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate
from lms.djangoapps.courseware.models import StudentModule
from lms.djangoapps.grades.api import context as grades_context
from lms.djangoapps.verify_student.services import IDVerificationService
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
from common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAllowed
log = logging.getLogger(__name__)
@@ -68,7 +66,7 @@ def issued_certificates(course_key, features):
]
"""
report_run_date = datetime.date.today().strftime(u"%B %d, %Y")
report_run_date = datetime.date.today().strftime("%B %d, %Y")
certificate_features = [x for x in CERTIFICATE_FEATURES if x in features]
generated_certificates = list(GeneratedCertificate.eligible_certificates.filter(
course_id=course_key,
@@ -117,7 +115,7 @@ def enrolled_students_features(course_key, features):
DjangoJSONEncoder().default(attr)
return attr
except TypeError:
return six.text_type(attr)
return str(attr)
def extract_student(student, features):
""" convert student to dictionary """
@@ -133,12 +131,10 @@ def enrolled_students_features(course_key, features):
meta_key = feature.split('.')[1]
meta_features.append((feature, meta_key))
student_dict = dict((feature, extract_attr(student, feature))
for feature in student_features)
student_dict = {feature: extract_attr(student, feature) for feature in student_features}
profile = student.profile
if profile is not None:
profile_dict = dict((feature, extract_attr(profile, feature))
for feature in profile_features)
profile_dict = {feature: extract_attr(profile, feature) for feature in profile_features}
student_dict.update(profile_dict)
# now fetch the requested meta fields
@@ -196,7 +192,7 @@ def list_may_enroll(course_key, features):
"""
Build dict containing information about a single student.
"""
return dict((feature, getattr(student, feature)) for feature in features)
return {feature: getattr(student, feature) for feature in features}
return [extract_student(student, features) for student in may_enroll_and_unenrolled]
@@ -211,18 +207,18 @@ def get_proctored_exam_results(course_key, features):
"""
Build dict containing information about a single student exam_attempt.
"""
proctored_exam = dict(
(feature, exam_attempt.get(feature)) for feature in features if feature in exam_attempt
)
proctored_exam = {
feature: exam_attempt.get(feature) for feature in features if feature in exam_attempt
}
for status in comment_statuses:
comment_list = exam_attempt.get(
u'{status} Comments'.format(status=status),
f'{status} Comments',
[]
)
proctored_exam.update({
u'{status} Count'.format(status=status): len(comment_list),
u'{status} Comments'.format(status=status): '; '.join(comment_list),
f'{status} Count': len(comment_list),
f'{status} Comments': '; '.join(comment_list),
})
try:
proctored_exam['track'] = course_enrollments[exam_attempt['user_id']]
@@ -267,7 +263,7 @@ def coupon_codes_features(features, coupons_list, course_id):
"""
coupon_features = [x for x in COUPON_FEATURES if x in features]
coupon_dict = dict((feature, getattr(coupon, feature)) for feature in coupon_features)
coupon_dict = {feature: getattr(coupon, feature) for feature in coupon_features}
coupon_redemptions = coupon.couponredemption_set.filter(
order__status="purchased"
)
@@ -297,7 +293,7 @@ def coupon_codes_features(features, coupons_list, course_id):
# They have not been redeemed yet
coupon_dict['expiration_date'] = coupon.display_expiry_date
coupon_dict['course_id'] = text_type(coupon_dict['course_id'])
coupon_dict['course_id'] = str(coupon_dict['course_id'])
return coupon_dict
return [extract_coupon(coupon, features) for coupon in coupons_list]
@@ -371,8 +367,8 @@ def get_response_state(response):
except TypeError:
username = response.student.username
err_msg = (
u'Error occurred while attempting to load learner state '
u'{username} for state {state}.'.format(
'Error occurred while attempting to load learner state '
'{username} for state {state}.'.format(
username=username,
state=problem_state
)
@@ -426,7 +422,7 @@ def course_registration_features(features, registration_codes, csv_type):
site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
registration_features = [x for x in COURSE_REGISTRATION_FEATURES if x in features]
course_registration_dict = dict((feature, getattr(registration_code, feature)) for feature in registration_features) # lint-amnesty, pylint: disable=line-too-long
course_registration_dict = {feature: getattr(registration_code, feature) for feature in registration_features} # lint-amnesty, pylint: disable=line-too-long
course_registration_dict['company_name'] = None
if registration_code.invoice_item:
course_registration_dict['company_name'] = registration_code.invoice_item.invoice.company_name
@@ -454,7 +450,7 @@ def course_registration_features(features, registration_codes, csv_type):
except ObjectDoesNotExist:
pass
course_registration_dict['course_id'] = text_type(course_registration_dict['course_id'])
course_registration_dict['course_id'] = str(course_registration_dict['course_id'])
return course_registration_dict
return [extract_course_registration(code, features, csv_type) for code in registration_codes]
@@ -477,26 +473,26 @@ def dump_grading_context(course):
msg += '\n'
msg += "Graded sections:\n"
for subgrader, category, weight in course.grader.subgraders:
msg += u" subgrader=%s, type=%s, category=%s, weight=%s\n"\
msg += " subgrader=%s, type=%s, category=%s, weight=%s\n"\
% (subgrader.__class__, subgrader.type, category, weight)
subgrader.index = 1
graders[subgrader.type] = subgrader
msg += hbar
msg += u"Listing grading context for course %s\n" % text_type(course.id)
msg += "Listing grading context for course %s\n" % str(course.id)
gcontext = grades_context.grading_context_for_course(course)
msg += "graded sections:\n"
msg += '%s\n' % list(gcontext['all_graded_subsections_by_type'].keys())
for (gsomething, gsvals) in gcontext['all_graded_subsections_by_type'].items():
msg += u"--> Section %s:\n" % (gsomething)
msg += "--> Section %s:\n" % (gsomething)
for sec in gsvals:
sdesc = sec['subsection_block']
frmat = getattr(sdesc, 'format', None)
aname = ''
if frmat in graders:
gform = graders[frmat]
aname = u'%s %02d' % (gform.short_label, gform.index)
aname = '%s %02d' % (gform.short_label, gform.index)
gform.index += 1
elif sdesc.display_name in graders:
gform = graders[sdesc.display_name]
@@ -504,7 +500,7 @@ def dump_grading_context(course):
notes = ''
if getattr(sdesc, 'score_by_attempt', False):
notes = ', score by attempt!'
msg += u" %s (format=%s, Assignment=%s%s)\n"\
msg += " %s (format=%s, Assignment=%s%s)\n"\
% (sdesc.display_name, frmat, aname, notes)
msg += "all graded blocks:\n"
msg += "length=%d\n" % gcontext['count_all_graded_blocks']

View File

@@ -7,8 +7,6 @@ Format and create csv responses
import csv
import six
from six.moves import map
from django.http import HttpResponse
@@ -24,18 +22,18 @@ def create_csv_response(filename, header, datarows):
"""
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = u'attachment; filename={0}'.format(filename)
response['Content-Disposition'] = f'attachment; filename={filename}'
csvwriter = csv.writer(
response,
dialect='excel',
quotechar='"',
quoting=csv.QUOTE_ALL)
encoded_header = [six.text_type(s) for s in header]
encoded_header = [str(s) for s in header]
csvwriter.writerow(encoded_header)
for datarow in datarows:
encoded_row = [six.text_type(s) for s in datarow]
encoded_row = [str(s) for s in datarow]
csvwriter.writerow(encoded_row)
return response

View File

@@ -1,4 +1,4 @@
u"""
"""
Profile Distributions
Aggregate sums for values of fields in students profiles.
@@ -39,7 +39,7 @@ DISPLAY_NAMES = {
}
class ProfileDistribution(object):
class ProfileDistribution:
"""
Container for profile distribution data
@@ -96,7 +96,7 @@ def profile_distribution(course_id, feature):
if feature not in AVAILABLE_PROFILE_FEATURES:
raise ValueError(
u"unsupported feature requested for distribution u'{}'".format(
"unsupported feature requested for distribution u'{}'".format(
feature)
)
@@ -152,8 +152,7 @@ def profile_distribution(course_id, feature):
# query_distribution is of the form [{'featureval': 'value1', 'featureval__count': 4},
# {'featureval': 'value2', 'featureval__count': 2}, ...]
distribution = dict((vald[feature], vald[feature + '__count'])
for vald in query_distribution)
distribution = {vald[feature]: vald[feature + '__count'] for vald in query_distribution}
# distribution is of the form {'value1': 4, 'value2': 2, ...}
# change none to no_data for valid json key

View File

@@ -4,22 +4,24 @@ Tests for instructor.basic
"""
import ddt
import datetime # lint-amnesty, pylint: disable=unused-import, wrong-import-order
import json # lint-amnesty, pylint: disable=wrong-import-order
import json
from unittest.mock import MagicMock, Mock, patch
import ddt
import pytz # lint-amnesty, pylint: disable=unused-import
from django.db.models import Q # lint-amnesty, pylint: disable=unused-import
from django.urls import reverse # lint-amnesty, pylint: disable=unused-import
from edx_proctoring.api import create_exam
from edx_proctoring.models import ProctoredExamStudentAttempt
from mock import MagicMock, Mock, patch
from opaque_keys.edx.locator import UsageKey
from six import text_type # lint-amnesty, pylint: disable=unused-import
from six.moves import range, zip
from common.djangoapps.course_modes.models import CourseMode # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.course_modes.tests.factories import CourseModeFactory # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.course_modes.tests.factories import \
CourseModeFactory # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAllowed
from common.djangoapps.student.roles import CourseSalesAdminRole # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.courseware.tests.factories import InstructorFactory
from lms.djangoapps.instructor_analytics.basic import ( # lint-amnesty, pylint: disable=unused-import
AVAILABLE_FEATURES,
@@ -32,12 +34,9 @@ from lms.djangoapps.instructor_analytics.basic import ( # lint-amnesty, pylint:
get_proctored_exam_results,
get_response_state,
list_may_enroll,
list_problem_responses,
list_problem_responses
)
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAllowed
from common.djangoapps.student.roles import CourseSalesAdminRole # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@@ -47,7 +46,7 @@ class TestAnalyticsBasic(ModuleStoreTestCase):
""" Test basic analytics functions. """
def setUp(self):
super(TestAnalyticsBasic, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.course_key = self.store.make_course_key('robot', 'course', 'id')
self.users = tuple(UserFactory() for _ in range(30))
self.ces = tuple(CourseEnrollment.enroll(user, self.course_key)
@@ -55,8 +54,8 @@ class TestAnalyticsBasic(ModuleStoreTestCase):
self.instructor = InstructorFactory(course_key=self.course_key)
for user in self.users:
user.profile.meta = json.dumps({
"position": u"edX expert {}".format(user.id),
"company": u"Open edX Inc {}".format(user.id),
"position": f"edX expert {user.id}",
"company": f"Open edX Inc {user.id}",
})
user.profile.save()
self.students_who_may_enroll = list(self.users) + [UserFactory() for _ in range(5)]
@@ -66,8 +65,8 @@ class TestAnalyticsBasic(ModuleStoreTestCase):
)
@ddt.data(
(u'あなた', u'スの中'),
(u"ГЂіи lіиэ ъэтшээи", u"Ђэаvэи аиↁ Ђэѓэ")
('あなた', 'スの中'),
("ГЂіи lіиэ ъэтшээи", "Ђэаvэи аиↁ Ђэѓэ")
)
@ddt.unpack
def test_get_response_state_with_ora(self, files_descriptions, saved_response):
@@ -92,13 +91,13 @@ class TestAnalyticsBasic(ModuleStoreTestCase):
relevant info (student.username and state).
"""
result = Mock(spec=['student', 'state'])
result.student.username.return_value = u'user{}'.format(result_id)
result.state.return_value = u'state{}'.format(result_id)
result.student.username.return_value = f'user{result_id}'
result.state.return_value = f'state{result_id}'
return result
# Ensure that UsageKey.from_string returns a problem key that list_problem_responses can work with
# (even when called with a dummy location):
mock_problem_key = Mock(return_value=u'')
mock_problem_key = Mock(return_value='')
mock_problem_key.course_key = self.course_key
with patch.object(UsageKey, 'from_string') as patched_from_string:
patched_from_string.return_value = mock_problem_key
@@ -135,8 +134,8 @@ class TestAnalyticsBasic(ModuleStoreTestCase):
def test_enrolled_students_features_keys(self):
query_features = ('username', 'name', 'email', 'city', 'country',)
for user in self.users:
user.profile.city = u"Mos Eisley {}".format(user.id)
user.profile.country = u"Tatooine {}".format(user.id)
user.profile.city = f"Mos Eisley {user.id}"
user.profile.country = f"Tatooine {user.id}"
user.profile.save()
for feature in query_features:
assert feature in AVAILABLE_FEATURES
@@ -173,8 +172,8 @@ class TestAnalyticsBasic(ModuleStoreTestCase):
assert len(userreports) == len(self.users)
for userreport in userreports:
assert set(userreport.keys()) == set(query_features)
assert userreport['meta.position'] in [u'edX expert {}'.format(user.id) for user in self.users]
assert userreport['meta.company'] in [u'Open edX Inc {}'.format(user.id) for user in self.users]
assert userreport['meta.position'] in [f"edX expert {user.id}" for user in self.users]
assert userreport['meta.company'] in [f"Open edX Inc {user.id}" for user in self.users]
def test_enrolled_students_enrollment_verification(self):
"""

View File

@@ -3,7 +3,6 @@
import pytest
from django.test import TestCase
from six.moves import range
from lms.djangoapps.instructor_analytics.csvs import create_csv_response, format_dictlist, format_instances
@@ -17,7 +16,7 @@ class TestAnalyticsCSVS(TestCase):
res = create_csv_response('robot.csv', header, datarows)
assert res['Content-Type'] == 'text/csv'
assert res['Content-Disposition'] == u'attachment; filename={0}'.format('robot.csv')
assert res['Content-Disposition'] == 'attachment; filename={}'.format('robot.csv')
assert res.content.strip().decode('utf-8') == '"Name","Email"'
def test_create_csv_response(self):
@@ -26,7 +25,7 @@ class TestAnalyticsCSVS(TestCase):
res = create_csv_response('robot.csv', header, datarows)
assert res['Content-Type'] == 'text/csv'
assert res['Content-Disposition'] == u'attachment; filename={0}'.format('robot.csv')
assert res['Content-Disposition'] == 'attachment; filename={}'.format('robot.csv')
assert res.content.strip().decode('utf-8') ==\
'"Name","Email"\r\n"Jim","jim@edy.org"\r\n"Jake","jake@edy.org"\r\n"Jeeves","jeeves@edy.org"'
@@ -36,7 +35,7 @@ class TestAnalyticsCSVS(TestCase):
res = create_csv_response('robot.csv', header, datarows)
assert res['Content-Type'] == 'text/csv'
assert res['Content-Disposition'] == u'attachment; filename={0}'.format('robot.csv')
assert res['Content-Disposition'] == 'attachment; filename={}'.format('robot.csv')
assert res.content.strip().decode('utf-8') == ''
@@ -80,7 +79,7 @@ class TestAnalyticsFormatDictlist(TestCase):
res = create_csv_response('robot.csv', header, datarows)
assert res['Content-Type'] == 'text/csv'
assert res['Content-Disposition'] == u'attachment; filename={0}'.format('robot.csv')
assert res['Content-Disposition'] == 'attachment; filename={}'.format('robot.csv')
assert res.content.strip().decode('utf-8') ==\
'"Name","Email"\r\n"Jim","jim@edy.org"\r\n"Jake","jake@edy.org"\r\n"Jeeves","jeeves@edy.org"'
@@ -88,7 +87,7 @@ class TestAnalyticsFormatDictlist(TestCase):
class TestAnalyticsFormatInstances(TestCase):
""" test format_instances method """
class TestDataClass(object):
class TestDataClass:
""" Test class to generate objects for format_instances """
def __init__(self):
self.a_var = 'aval'
@@ -101,7 +100,7 @@ class TestAnalyticsFormatInstances(TestCase):
return 'dval'
def setUp(self):
super(TestAnalyticsFormatInstances, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.instances = [self.TestDataClass() for _ in range(5)]
def test_format_instances_response(self):

View File

@@ -4,18 +4,17 @@
import pytest
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator
from six.moves import range
from lms.djangoapps.instructor_analytics.distributions import AVAILABLE_PROFILE_FEATURES, profile_distribution
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.instructor_analytics.distributions import AVAILABLE_PROFILE_FEATURES, profile_distribution
class TestAnalyticsDistributions(TestCase):
'''Test analytics distribution gathering.'''
def setUp(self):
super(TestAnalyticsDistributions, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.course_id = CourseLocator('robot', 'course', 'id')
self.users = [UserFactory(
@@ -78,7 +77,7 @@ class TestAnalyticsDistributionsNoData(TestCase):
'''Test analytics distribution gathering.'''
def setUp(self):
super(TestAnalyticsDistributionsNoData, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.course_id = CourseLocator('robot', 'course', 'id')
self.users = [UserFactory(

View File

@@ -11,9 +11,9 @@ arguments.
import hashlib
from collections import Counter
import six
from celery.states import READY_STATES
from common.djangoapps.util import milestones_helpers
from lms.djangoapps.bulk_email.models import CourseEmail
from lms.djangoapps.certificates.models import CertificateGenerationHistory
from lms.djangoapps.instructor_task.api_helper import (
@@ -43,7 +43,6 @@ from lms.djangoapps.instructor_task.tasks import (
reset_problem_attempts,
send_bulk_course_email
)
from common.djangoapps.util import milestones_helpers
from xmodule.modulestore.django import modulestore
@@ -310,8 +309,8 @@ def submit_bulk_course_email(request, course_key, email_id):
targets = Counter([target.target_type for target in email_obj.targets.all()])
targets = [
target if count <= 1 else
u"{} {}".format(count, target)
for target, count in six.iteritems(targets)
f"{count} {target}"
for target, count in targets.items()
]
task_type = 'bulk_course_email'

View File

@@ -10,17 +10,16 @@ import hashlib
import json
import logging
import six
from celery.result import AsyncResult
from celery.states import FAILURE, READY_STATES, REVOKED, SUCCESS
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import UsageKey
import six
from six import text_type
from common.djangoapps.util.db import outer_atomic
from lms.djangoapps.courseware.courses import get_problems_in_section
from lms.djangoapps.courseware.module_render import get_xqueue_callback_url_prefix
from lms.djangoapps.instructor_task.models import PROGRESS, InstructorTask
from common.djangoapps.util.db import outer_atomic
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
@@ -34,8 +33,8 @@ class AlreadyRunningError(Exception):
def __init__(self, message=None):
if not message:
message = self.message
super(AlreadyRunningError, self).__init__(message) # lint-amnesty, pylint: disable=super-with-arguments
message = self.message # pylint: disable=exception-message-attribute
super().__init__(message)
class QueueConnectionError(Exception):
@@ -46,8 +45,8 @@ class QueueConnectionError(Exception):
def __init__(self, message=None):
if not message:
message = self.message
super(QueueConnectionError, self).__init__(message) # lint-amnesty, pylint: disable=super-with-arguments
message = self.message # pylint: disable=exception-message-attribute
super().__init__(message)
def _task_is_running(course_id, task_type, task_key):
@@ -78,7 +77,7 @@ def _reserve_task(course_id, task_type, task_key, task_input, requester):
"""
if _task_is_running(course_id, task_type, task_key):
log.warning(u"Duplicate task found for task_type %s and task_key %s", task_type, task_key)
log.warning("Duplicate task found for task_type %s and task_key %s", task_type, task_key)
error_message = generate_already_running_error_message(task_type)
raise AlreadyRunningError(error_message)
@@ -88,7 +87,7 @@ def _reserve_task(course_id, task_type, task_key, task_input, requester):
most_recent_id = "None found"
finally:
log.warning(
u"No duplicate tasks found: task_type %s, task_key %s, and most recent task_id = %s",
"No duplicate tasks found: task_type %s, task_key %s, and most recent task_id = %s",
task_type,
task_key,
most_recent_id
@@ -120,7 +119,7 @@ def generate_already_running_error_message(task_type):
if report_types.get(task_type):
message = _(
u"The {report_type} report is being created. "
"The {report_type} report is being created. "
"To view the status of the report, see Pending Tasks below. "
"You will be able to download the report when it is complete."
).format(report_type=report_types.get(task_type))
@@ -215,20 +214,20 @@ def _update_instructor_task(instructor_task, task_result):
elif result_state in [PROGRESS, SUCCESS]:
# construct a status message directly from the task result's result:
# it needs to go back with the entry passed in.
log.info(u"background task (%s), state %s: result: %s", task_id, result_state, returned_result)
log.info("background task (%s), state %s: result: %s", task_id, result_state, returned_result)
task_output = InstructorTask.create_output_for_success(returned_result)
elif result_state == FAILURE:
# on failure, the result's result contains the exception that caused the failure
exception = returned_result
traceback = result_traceback if result_traceback is not None else ''
log.warning(u"background task (%s) failed: %s %s", task_id, returned_result, traceback)
log.warning("background task (%s) failed: %s %s", task_id, returned_result, traceback)
task_output = InstructorTask.create_output_for_failure(exception, result_traceback)
elif result_state == REVOKED:
# on revocation, the result's result doesn't contain anything
# but we cannot rely on the worker thread to set this status,
# so we set it here.
entry_needs_saving = True
log.warning(u"background task (%s) revoked.", task_id)
log.warning("background task (%s) revoked.", task_id)
task_output = InstructorTask.create_output_for_revoked()
# save progress and state into the entry, even if it's not being saved:
@@ -258,8 +257,8 @@ def _handle_instructor_task_failure(instructor_task, error):
"""
Do required operations if task creation was not complete.
"""
log.info(u"instructor task (%s) failed, result: %s", instructor_task.task_id, text_type(error))
_update_instructor_task_state(instructor_task, FAILURE, text_type(error))
log.info("instructor task (%s) failed, result: %s", instructor_task.task_id, str(error))
_update_instructor_task_state(instructor_task, FAILURE, str(error))
raise QueueConnectionError()
@@ -282,7 +281,7 @@ def get_updated_instructor_task(task_id):
try:
instructor_task = InstructorTask.objects.get(task_id=task_id)
except InstructorTask.DoesNotExist:
log.warning(u"query for InstructorTask status failed: task_id=(%s) not found", task_id)
log.warning("query for InstructorTask status failed: task_id=(%s) not found", task_id)
return None
# if the task is not already known to be done, then we need to query
@@ -390,11 +389,11 @@ def encode_problem_and_student_input(usage_key, student=None):
assert isinstance(usage_key, UsageKey)
if student is not None:
task_input = {'problem_url': text_type(usage_key), 'student': student.username}
task_key_stub = "{student}_{problem}".format(student=student.id, problem=text_type(usage_key))
task_input = {'problem_url': str(usage_key), 'student': student.username}
task_key_stub = "{student}_{problem}".format(student=student.id, problem=str(usage_key))
else:
task_input = {'problem_url': text_type(usage_key)}
task_key_stub = "_{problem}".format(problem=text_type(usage_key))
task_input = {'problem_url': str(usage_key)}
task_key_stub = "_{problem}".format(problem=str(usage_key))
# create the key value by using MD5 hash:
task_key = hashlib.md5(six.b(task_key_stub)).hexdigest()
@@ -412,11 +411,11 @@ def encode_entrance_exam_and_student_input(usage_key, student=None):
"""
assert isinstance(usage_key, UsageKey)
if student is not None:
task_input = {'entrance_exam_url': text_type(usage_key), 'student': student.username}
task_key_stub = "{student}_{entranceexam}".format(student=student.id, entranceexam=text_type(usage_key))
task_input = {'entrance_exam_url': str(usage_key), 'student': student.username}
task_key_stub = "{student}_{entranceexam}".format(student=student.id, entranceexam=str(usage_key))
else:
task_input = {'entrance_exam_url': text_type(usage_key)}
task_key_stub = "_{entranceexam}".format(entranceexam=text_type(usage_key))
task_input = {'entrance_exam_url': str(usage_key)}
task_key_stub = "_{entranceexam}".format(entranceexam=str(usage_key))
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub.encode('utf-8')).hexdigest()

View File

@@ -9,7 +9,7 @@ class InstructorTaskConfig(AppConfig):
"""
Application Configuration for Instructor Task
"""
name = u'lms.djangoapps.instructor_task'
name = 'lms.djangoapps.instructor_task'
def ready(self):
pass

View File

@@ -4,6 +4,7 @@ waffle switches for the instructor_task app.
"""
from edx_toggles.toggles import LegacyWaffleFlagNamespace, LegacyWaffleSwitchNamespace
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag
WAFFLE_NAMESPACE = 'instructor_task'

View File

@@ -23,7 +23,7 @@ class TestFailOldQueueingTasksCommand(InstructorTaskTestCase):
"""
def setUp(self):
super(TestFailOldQueueingTasksCommand, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
type_1_queueing = InstructorTaskFactory.create(
task_state=QUEUING,

View File

@@ -1,6 +1,3 @@
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField

View File

@@ -1,6 +1,3 @@
# -*- coding: utf-8 -*-
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-07-01 12:48

View File

@@ -12,9 +12,6 @@ file and check it in at the same time as your model changes. To do that,
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
import codecs
import csv
import hashlib
import json
@@ -22,7 +19,6 @@ import logging
import os.path
from uuid import uuid4
import six
from boto.exception import BotoServerError
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
@@ -31,7 +27,6 @@ from django.db import models, transaction
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from opaque_keys.edx.django.models import CourseKeyField
from six import text_type
from openedx.core.storage import get_storage
@@ -68,7 +63,7 @@ class InstructorTask(models.Model):
.. no_pii:
"""
class Meta(object):
class Meta:
app_label = "instructor_task"
task_type = models.CharField(max_length=50, db_index=True)
@@ -84,17 +79,17 @@ class InstructorTask(models.Model):
subtasks = models.TextField(blank=True) # JSON dictionary
def __repr__(self):
return 'InstructorTask<%r>' % ({
return 'InstructorTask<{!r}>'.format({
'task_type': self.task_type,
'course_id': self.course_id,
'task_input': self.task_input,
'task_id': self.task_id,
'task_state': self.task_state,
'task_output': self.task_output,
},)
})
def __str__(self):
return six.text_type(repr(self))
return str(repr(self))
@classmethod
def create(cls, course_id, task_type, task_key, task_input, requester):
@@ -108,7 +103,7 @@ class InstructorTask(models.Model):
# check length of task_input, and return an exception if it's too long
if len(json_task_input) > TASK_INPUT_LENGTH:
logger.error(
u'Task input longer than: `%s` for `%s` of course: `%s`',
'Task input longer than: `%s` for `%s` of course: `%s`',
TASK_INPUT_LENGTH,
task_type,
course_id
@@ -148,7 +143,7 @@ class InstructorTask(models.Model):
# will fit in the column. In the meantime, just return an exception.
json_output = json.dumps(returned_result)
if len(json_output) > 1023:
raise ValueError(u"Length of task output is too long: {0}".format(json_output))
raise ValueError(f"Length of task output is too long: {json_output}")
return json_output
@staticmethod
@@ -163,7 +158,7 @@ class InstructorTask(models.Model):
Truncation is indicated by adding "..." to the end of the value.
"""
tag = '...'
task_progress = {'exception': type(exception).__name__, 'message': text_type(exception)}
task_progress = {'exception': type(exception).__name__, 'message': str(exception)}
if traceback_string is not None:
# truncate any traceback that goes into the InstructorTask model:
task_progress['traceback'] = traceback_string
@@ -193,7 +188,7 @@ class InstructorTask(models.Model):
return json.dumps({'message': 'Task revoked before running'})
class ReportStore(object):
class ReportStore:
"""
Simple abstraction layer that can fetch and store CSV files for reports
download. Should probably refactor later to create a ReportFile object that
@@ -237,10 +232,7 @@ class ReportStore(object):
compatibility.
"""
for row in rows:
if six.PY2:
yield [six.text_type(item).encode('utf-8') for item in row]
else:
yield [six.text_type(item) for item in row]
yield [str(item) for item in row]
class DjangoStorageReportStore(ReportStore):
@@ -281,13 +273,12 @@ class DjangoStorageReportStore(ReportStore):
path = self.path_to(course_id, filename)
# See https://github.com/boto/boto/issues/2868
# Boto doesn't play nice with unicode in python3
if not six.PY2:
buff_contents = buff.read()
buff_contents = buff.read()
if not isinstance(buff_contents, bytes):
buff_contents = buff_contents.encode('utf-8')
if not isinstance(buff_contents, bytes):
buff_contents = buff_contents.encode('utf-8')
buff = ContentFile(buff_contents)
buff = ContentFile(buff_contents)
self.storage.save(path, buff)
@@ -297,9 +288,6 @@ class DjangoStorageReportStore(ReportStore):
strings), write the rows to the storage backend in csv format.
"""
output_buffer = ContentFile('')
# Adding unicode signature (BOM) for MS Excel 2013 compatibility
if six.PY2:
output_buffer.write(codecs.BOM_UTF8)
csvwriter = csv.writer(output_buffer)
csvwriter.writerows(self._get_utf8_encoded_rows(rows))
output_buffer.seek(0)
@@ -320,7 +308,7 @@ class DjangoStorageReportStore(ReportStore):
return []
except BotoServerError as ex:
logger.error(
u'Fetching files failed for course: %s, status: %s, reason: %s',
'Fetching files failed for course: %s, status: %s, reason: %s',
course_id,
ex.status,
ex.reason
@@ -337,5 +325,5 @@ class DjangoStorageReportStore(ReportStore):
"""
Return the full path to a given file for a given course.
"""
hashed_course_id = hashlib.sha1(text_type(course_id).encode('utf-8')).hexdigest()
hashed_course_id = hashlib.sha1(str(course_id).encode('utf-8')).hexdigest()
return os.path.join(hashed_course_id, filename)

View File

@@ -11,12 +11,10 @@ from time import time
from uuid import uuid4
import psutil
import six
from celery.states import READY_STATES, RETRY, SUCCESS
from django.core.cache import cache
from django.db import DatabaseError, transaction
from django.utils.encoding import python_2_unicode_compatible
from six.moves import range, zip
from common.djangoapps.util.db import outer_atomic
@@ -119,11 +117,11 @@ def _generate_items_for_subtask(
# more items than items_per_task allows. We expect this to be a small enough
# number as to be negligible.
if num_items_queued != total_num_items:
TASK_LOG.info(u"Number of items generated by chunking %s not equal to original total %s", num_items_queued, total_num_items) # lint-amnesty, pylint: disable=line-too-long
TASK_LOG.info("Number of items generated by chunking %s not equal to original total %s", num_items_queued, total_num_items) # lint-amnesty, pylint: disable=line-too-long
@python_2_unicode_compatible
class SubtaskStatus(object):
class SubtaskStatus:
"""
Create and return a dict for tracking the status of a subtask.
@@ -205,11 +203,11 @@ class SubtaskStatus(object):
def __repr__(self):
"""Return print representation of a SubtaskStatus object."""
return 'SubtaskStatus<%r>' % (self.to_dict(),)
return f'SubtaskStatus<{self.to_dict()!r}>'
def __str__(self):
"""Return unicode version of a SubtaskStatus object representation."""
return six.text_type(repr(self))
return str(repr(self))
def initialize_subtask_info(entry, action_name, total_num, subtask_id_list):
@@ -309,7 +307,7 @@ def queue_subtasks_for_query(
# Update the InstructorTask with information about the subtasks we've defined.
TASK_LOG.info(
u"Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.",
"Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.",
task_id,
entry.id,
total_num_subtasks,
@@ -332,7 +330,7 @@ def queue_subtasks_for_query(
# Now create the subtasks, and start them running.
TASK_LOG.info(
u"Task %s: creating %s subtasks to process %s items.",
"Task %s: creating %s subtasks to process %s items.",
task_id,
total_num_subtasks,
total_num_items,
@@ -344,7 +342,7 @@ def queue_subtasks_for_query(
subtask_status = SubtaskStatus.create(subtask_id)
new_subtask = create_subtask_fcn(item_list, subtask_status)
TASK_LOG.info(
u"Queueing BulkEmail Task: %s Subtask: %s at timestamp: %s",
"Queueing BulkEmail Task: %s Subtask: %s at timestamp: %s",
task_id, subtask_id, datetime.now()
)
new_subtask.apply_async()
@@ -367,10 +365,10 @@ def _acquire_subtask_lock(task_id):
Returns true if the task_id was not already locked; false if it was.
"""
# cache.add fails if the key already exists
key = "subtask-{}".format(task_id)
key = f"subtask-{task_id}"
succeeded = cache.add(key, 'true', SUBTASK_LOCK_EXPIRE)
if not succeeded:
TASK_LOG.warning(u"task_id '%s': already locked. Contains value '%s'", task_id, cache.get(key))
TASK_LOG.warning("task_id '%s': already locked. Contains value '%s'", task_id, cache.get(key))
return succeeded
@@ -382,7 +380,7 @@ def _release_subtask_lock(task_id):
"""
# According to Celery task cookbook, "Memcache delete is very slow, but we have
# to use it to take advantage of using add() for atomic locking."
key = "subtask-{}".format(task_id)
key = f"subtask-{task_id}"
cache.delete(key)
@@ -412,7 +410,7 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
# Confirm that the InstructorTask actually defines subtasks.
entry = InstructorTask.objects.get(pk=entry_id)
if len(entry.subtasks) == 0:
format_str = u"Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
@@ -421,7 +419,7 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
subtask_dict = json.loads(entry.subtasks)
subtask_status_info = subtask_dict['status']
if current_task_id not in subtask_status_info:
format_str = u"Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
@@ -431,7 +429,7 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
subtask_state = subtask_status.state
if subtask_state in READY_STATES:
format_str = u"Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
@@ -444,7 +442,7 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
new_retry_count = new_subtask_status.get_retry_count()
current_retry_count = subtask_status.get_retry_count()
if new_retry_count < current_retry_count:
format_str = u"Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
@@ -453,7 +451,7 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
# If it fails, then it means that another worker is already in the
# middle of working on this.
if not _acquire_subtask_lock(current_task_id):
format_str = u"Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
msg = format_str.format(current_task_id, entry)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
@@ -477,11 +475,11 @@ def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_c
# If we fail, try again recursively.
retry_count += 1
if retry_count < MAX_DATABASE_LOCK_RETRIES:
TASK_LOG.info(u"Retrying to update status for subtask %s of instructor task %d with status %s: retry %d",
TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s: retry %d",
current_task_id, entry_id, new_subtask_status, retry_count)
update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
else:
TASK_LOG.info(u"Failed to update status after %d retries for subtask %s of instructor task %d with status %s", # lint-amnesty, pylint: disable=line-too-long
TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s", # lint-amnesty, pylint: disable=line-too-long
retry_count, current_task_id, entry_id, new_subtask_status)
raise
finally:
@@ -519,7 +517,7 @@ def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information
about failure messages, progress made, etc.
"""
TASK_LOG.info(u"Preparing to update status for subtask %s for instructor task %d with status %s",
TASK_LOG.info("Preparing to update status for subtask %s for instructor task %d with status %s",
current_task_id, entry_id, new_subtask_status)
try:
@@ -528,7 +526,7 @@ def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
subtask_status_info = subtask_dict['status']
if current_task_id not in subtask_status_info:
# unexpected error -- raise an exception
format_str = u"Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'"
format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'"
msg = format_str.format(current_task_id, entry_id)
TASK_LOG.warning(msg)
raise ValueError(msg)
@@ -575,7 +573,7 @@ def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
TASK_LOG.debug("about to save....")
entry.save()
TASK_LOG.info(u"Task output updated to %s for subtask %s of instructor task %d",
TASK_LOG.info("Task output updated to %s for subtask %s of instructor task %d",
entry.task_output, current_task_id, entry_id)
except Exception:
TASK_LOG.exception("Unexpected error while updating InstructorTask.")

View File

@@ -31,10 +31,7 @@ from edx_django_utils.monitoring import set_code_owner_attribute
from lms.djangoapps.bulk_email.tasks import perform_delegate_email_batches
from lms.djangoapps.instructor_task.tasks_base import BaseInstructorTask
from lms.djangoapps.instructor_task.tasks_helper.certs import generate_students_certificates
from lms.djangoapps.instructor_task.tasks_helper.enrollments import (
upload_may_enroll_csv,
upload_students_csv
)
from lms.djangoapps.instructor_task.tasks_helper.enrollments import upload_may_enroll_csv, upload_students_csv
from lms.djangoapps.instructor_task.tasks_helper.grades import CourseGradeReport, ProblemGradeReport, ProblemResponses
from lms.djangoapps.instructor_task.tasks_helper.misc import (
cohort_students_and_upload,
@@ -190,7 +187,7 @@ def calculate_grades_csv(entry_id, xmodule_instance_args):
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
@@ -208,7 +205,7 @@ def calculate_problem_grade_report(entry_id, xmodule_instance_args):
# Translators: This is a past-tense phrase that is inserted into task progress messages as {action}.
action_name = ugettext_noop('problem distribution graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
@@ -277,7 +274,7 @@ def generate_certificates(entry_id, xmodule_instance_args):
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('certificates generated')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)

View File

@@ -51,7 +51,7 @@ class BaseInstructorTask(Task): # lint-amnesty, pylint: disable=abstract-method
This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
"""
TASK_LOG.debug(u'Task %s: success returned with progress: %s', task_id, task_progress)
TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
# We should be able to find the InstructorTask object to update
# based on the task_id here, without having to dig into the
# original args to the task. On the other hand, the entry_id
@@ -84,16 +84,16 @@ class BaseInstructorTask(Task): # lint-amnesty, pylint: disable=abstract-method
Note that there is no way to record progress made within the task (e.g. attempted,
succeeded, etc.) when such failures occur.
"""
TASK_LOG.debug(u'Task %s: failure returned', task_id)
TASK_LOG.debug('Task %s: failure returned', task_id)
entry_id = args[0]
try:
entry = InstructorTask.objects.get(pk=entry_id)
except InstructorTask.DoesNotExist:
# if the InstructorTask object does not exist, then there's no point
# trying to update it.
TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
TASK_LOG.error("Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
else:
TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
TASK_LOG.warning("Task (%s) failed", task_id, exc_info=True)
entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
entry.task_state = FAILURE
entry.save_now()

View File

@@ -7,12 +7,14 @@ from time import time
from django.contrib.auth import get_user_model
from django.db.models import Q
from xmodule.modulestore.django import modulestore
from common.djangoapps.student.models import CourseEnrollment
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate
from xmodule.modulestore.django import modulestore
from .runner import TaskProgress
User = get_user_model()

View File

@@ -10,15 +10,17 @@ from time import time
from django.conf import settings # lint-amnesty, pylint: disable=unused-import
from django.utils.translation import ugettext as _ # lint-amnesty, pylint: disable=unused-import
from pytz import UTC
from six import StringIO # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.edxmako.shortcuts import render_to_string # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.student.models import ( # lint-amnesty, pylint: disable=unused-import
CourseAccessRole,
CourseEnrollment
)
from common.djangoapps.util.file import course_filename_prefix_generator # lint-amnesty, pylint: disable=unused-import
from lms.djangoapps.courseware.courses import get_course_by_id # lint-amnesty, pylint: disable=unused-import
from lms.djangoapps.instructor_analytics.basic import enrolled_students_features, list_may_enroll
from lms.djangoapps.instructor_analytics.csvs import format_dictlist
from lms.djangoapps.instructor_task.models import ReportStore # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.student.models import CourseAccessRole, CourseEnrollment # lint-amnesty, pylint: disable=unused-import
from common.djangoapps.util.file import course_filename_prefix_generator # lint-amnesty, pylint: disable=unused-import
from .runner import TaskProgress
from .utils import tracker_emit, upload_csv_to_report_store # lint-amnesty, pylint: disable=unused-import

View File

@@ -3,37 +3,35 @@ Functionality for generating grade reports.
"""
import logging
import re
from collections import OrderedDict, defaultdict
from datetime import datetime
from itertools import chain
from time import time
import re
import six
from lms.djangoapps.course_blocks.api import get_course_blocks
from django.conf import settings # lint-amnesty, pylint: disable=wrong-import-order
from django.contrib.auth import get_user_model # lint-amnesty, pylint: disable=wrong-import-order
from lazy import lazy # lint-amnesty, pylint: disable=wrong-import-order
from opaque_keys.edx.keys import UsageKey # lint-amnesty, pylint: disable=wrong-import-order
from pytz import UTC # lint-amnesty, pylint: disable=wrong-import-order
from six import text_type # lint-amnesty, pylint: disable=wrong-import-order
from six.moves import zip, zip_longest # lint-amnesty, pylint: disable=wrong-import-order
from django.conf import settings
from django.contrib.auth import get_user_model
from lazy import lazy
from opaque_keys.edx.keys import UsageKey
from pytz import UTC
from six.moves import zip_longest
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import BulkRoleCache
from lms.djangoapps.certificates.models import CertificateWhitelist, GeneratedCertificate, certificate_info_for_user
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.courseware.courses import get_course_by_id
from lms.djangoapps.courseware.user_state_client import DjangoXBlockUserStateClient
from lms.djangoapps.grades.api import (
CourseGradeFactory,
context as grades_context,
prefetch_course_and_subsection_grades,
)
from lms.djangoapps.grades.api import CourseGradeFactory
from lms.djangoapps.grades.api import context as grades_context
from lms.djangoapps.grades.api import prefetch_course_and_subsection_grades
from lms.djangoapps.instructor_analytics.basic import list_problem_responses
from lms.djangoapps.instructor_analytics.csvs import format_dictlist
from lms.djangoapps.instructor_task.config.waffle import (
course_grade_report_verified_only,
optimize_get_learners_switch_enabled,
problem_grade_report_verified_only,
problem_grade_report_verified_only
)
from lms.djangoapps.teams.models import CourseTeamMembership
from lms.djangoapps.verify_student.services import IDVerificationService
@@ -41,11 +39,10 @@ from openedx.core.djangoapps.content.block_structure.api import get_course_in_ca
from openedx.core.djangoapps.course_groups.cohorts import bulk_cache_cohorts, get_cohort, is_course_cohorted
from openedx.core.djangoapps.user_api.course_tag.api import BulkCourseTags
from openedx.core.lib.cache_utils import get_cache
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import BulkRoleCache
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions_service import PartitionService
from xmodule.split_test_module import get_split_user_partitions
from .runner import TaskProgress
from .utils import upload_csv_to_report_store
@@ -71,7 +68,7 @@ def _flatten(iterable):
return list(chain.from_iterable(iterable))
class GradeReportBase(object):
class GradeReportBase:
"""
Base class for grade reports (ProblemGradeReport and CourseGradeReport).
"""
@@ -91,14 +88,14 @@ class GradeReportBase(object):
Updates the status on the celery task to the given message.
Also logs the update.
"""
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
fmt = 'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=context.task_id,
entry_id=context.entry_id,
course_id=context.course_id,
task_input=context.task_input
)
TASK_LOG.info(u'%s, Task type: %s, %s, %s', task_info_string, context.action_name,
TASK_LOG.info('%s, Task type: %s, %s, %s', task_info_string, context.action_name,
message, context.task_progress.state)
def _handle_empty_generator(self, generator, default):
@@ -119,8 +116,7 @@ class GradeReportBase(object):
else:
TASK_LOG.info('GradeReport: Generator is not empty')
yield first_iteration_output
for element in generator:
yield element
yield from generator
def _batch_users(self, context):
"""
@@ -200,7 +196,7 @@ class GradeReportBase(object):
context.update_status(message)
class _CourseGradeReportContext(object):
class _CourseGradeReportContext:
"""
Internal class that provides a common context to use for a single grade
report. When a report is parallelized across multiple processes,
@@ -210,10 +206,10 @@ class _CourseGradeReportContext(object):
def __init__(self, _xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
self.task_info_string = (
u'Task: {task_id}, '
u'InstructorTask ID: {entry_id}, '
u'Course: {course_id}, '
u'Input: {task_input}'
'Task: {task_id}, '
'InstructorTask ID: {entry_id}, '
'Course: {course_id}, '
'Input: {task_input}'
).format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
@@ -253,24 +249,24 @@ class _CourseGradeReportContext(object):
"""
grading_cxt = grades_context.grading_context(self.course, self.course_structure)
graded_assignments_map = OrderedDict()
for assignment_type_name, subsection_infos in six.iteritems(grading_cxt['all_graded_subsections_by_type']):
for assignment_type_name, subsection_infos in grading_cxt['all_graded_subsections_by_type'].items():
graded_subsections_map = OrderedDict()
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
subsection = subsection_info['subsection_block']
header_name = u"{assignment_type} {subsection_index}: {subsection_name}".format(
header_name = "{assignment_type} {subsection_index}: {subsection_name}".format(
assignment_type=assignment_type_name,
subsection_index=subsection_index,
subsection_name=subsection.display_name,
)
graded_subsections_map[subsection.location] = header_name
average_header = u"{assignment_type}".format(assignment_type=assignment_type_name)
average_header = f"{assignment_type_name}"
# Use separate subsection and average columns only if
# there's more than one subsection.
separate_subsection_avg_headers = len(subsection_infos) > 1
if separate_subsection_avg_headers:
average_header += u" (Avg)"
average_header += " (Avg)"
graded_assignments_map[assignment_type_name] = {
'subsection_headers': graded_subsections_map,
@@ -285,11 +281,11 @@ class _CourseGradeReportContext(object):
Updates the status on the celery task to the given message.
Also logs the update.
"""
TASK_LOG.info(u'%s, Task type: %s, %s', self.task_info_string, self.action_name, message)
TASK_LOG.info('%s, Task type: %s, %s', self.task_info_string, self.action_name, message)
return self.task_progress.update_task_state(extra_meta={'step': message})
class _ProblemGradeReportContext(object):
class _ProblemGradeReportContext:
"""
Internal class that provides a common context to use for a single problem
grade report. When a report is parallelized across multiple processes,
@@ -331,7 +327,7 @@ class _ProblemGradeReportContext(object):
"""
scorable_blocks_map = OrderedDict()
grading_context = grades_context.grading_context_for_course(self.course)
for assignment_type_name, subsection_infos in six.iteritems(grading_context['all_graded_subsections_by_type']):
for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].items():
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
for scorable_block in subsection_info['scored_descendants']:
header_name = (
@@ -360,7 +356,7 @@ class _ProblemGradeReportContext(object):
return self.task_progress.update_task_state(extra_meta={'step': message})
class _CertificateBulkContext(object):
class _CertificateBulkContext:
def __init__(self, context, users):
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=context.course_id, whitelist=True)
self.whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
@@ -371,7 +367,7 @@ class _CertificateBulkContext(object):
}
class _TeamBulkContext(object): # lint-amnesty, pylint: disable=missing-class-docstring
class _TeamBulkContext: # lint-amnesty, pylint: disable=missing-class-docstring
def __init__(self, context, users):
self.enabled = context.teams_enabled
if self.enabled:
@@ -384,13 +380,13 @@ class _TeamBulkContext(object): # lint-amnesty, pylint: disable=missing-class-d
self.teams_by_user = {}
class _EnrollmentBulkContext(object):
class _EnrollmentBulkContext:
def __init__(self, context, users):
CourseEnrollment.bulk_fetch_enrollment_states(users, context.course_id)
self.verified_users = set(IDVerificationService.get_verified_user_ids(users))
class _CourseGradeBulkContext(object): # lint-amnesty, pylint: disable=missing-class-docstring
class _CourseGradeBulkContext: # lint-amnesty, pylint: disable=missing-class-docstring
def __init__(self, context, users):
self.certs = _CertificateBulkContext(context, users)
self.teams = _TeamBulkContext(context, users)
@@ -401,7 +397,7 @@ class _CourseGradeBulkContext(object): # lint-amnesty, pylint: disable=missing-
BulkCourseTags.prefetch(context.course_id, users)
class CourseGradeReport(object):
class CourseGradeReport:
"""
Class to encapsulate functionality related to generating Grade Reports.
"""
@@ -421,18 +417,18 @@ class CourseGradeReport(object):
"""
Internal method for generating a grade report for the given context.
"""
context.update_status(u'Starting grades')
context.update_status('Starting grades')
success_headers = self._success_headers(context)
error_headers = self._error_headers()
batched_rows = self._batched_rows(context)
context.update_status(u'Compiling grades')
context.update_status('Compiling grades')
success_rows, error_rows = self._compile(context, batched_rows)
context.update_status(u'Uploading grades')
context.update_status('Uploading grades')
self._upload(context, success_headers, success_rows, error_headers, error_rows)
return context.update_status(u'Completed grades')
return context.update_status('Completed grades')
def _success_headers(self, context):
"""
@@ -442,7 +438,7 @@ class CourseGradeReport(object):
["Student ID", "Email", "Username"] +
self._grades_header(context) +
(['Cohort Name'] if context.cohorts_enabled else []) +
[u'Experiment Group ({})'.format(partition.name) for partition in context.course_experiments] +
[f'Experiment Group ({partition.name})' for partition in context.course_experiments] +
(['Team Name'] if context.teams_enabled else []) +
['Enrollment Track', 'Verification Status'] +
['Certificate Eligible', 'Certificate Delivered', 'Certificate Type'] +
@@ -496,9 +492,9 @@ class CourseGradeReport(object):
"""
graded_assignments = context.graded_assignments
grades_header = ["Grade"]
for assignment_info in six.itervalues(graded_assignments):
for assignment_info in graded_assignments.values():
if assignment_info['separate_subsection_avg_headers']:
grades_header.extend(six.itervalues(assignment_info['subsection_headers']))
grades_header.extend(assignment_info['subsection_headers'].values())
grades_header.append(assignment_info['average_header'])
return grades_header
@@ -519,10 +515,10 @@ class CourseGradeReport(object):
verified_only (boolean): is a boolean when True, returns only verified enrollees.
"""
if optimize_get_learners_switch_enabled():
TASK_LOG.info(u'%s, Creating Course Grade with optimization', task_log_message)
TASK_LOG.info('%s, Creating Course Grade with optimization', task_log_message)
return users_for_course_v2(course_id, verified_only=verified_only)
TASK_LOG.info(u'%s, Creating Course Grade without optimization', task_log_message)
TASK_LOG.info('%s, Creating Course Grade without optimization', task_log_message)
return users_for_course(course_id, verified_only=verified_only)
def users_for_course(course_id, verified_only=False):
@@ -565,7 +561,7 @@ class CourseGradeReport(object):
).select_related('profile')
yield users
course_id = context.course_id
task_log_message = u'{}, Task type: {}'.format(context.task_info_string, context.action_name)
task_log_message = f'{context.task_info_string}, Task type: {context.action_name}'
return get_enrolled_learners_for_course(course_id=course_id, verified_only=context.report_for_verified_only)
def _user_grades(self, course_grade, context):
@@ -574,7 +570,7 @@ class CourseGradeReport(object):
to the headers for this report.
"""
grade_results = []
for _, assignment_info in six.iteritems(context.graded_assignments):
for _, assignment_info in context.graded_assignments.items():
subsection_grades, subsection_grades_results = self._user_subsection_grades(
course_grade,
assignment_info['subsection_headers'],
@@ -599,7 +595,7 @@ class CourseGradeReport(object):
if subsection_grade.attempted_graded or subsection_grade.override:
grade_result = subsection_grade.percent_graded
else:
grade_result = u'Not Attempted'
grade_result = 'Not Attempted'
grade_results.append([grade_result])
subsection_grades.append(subsection_grade)
return subsection_grades, grade_results
@@ -691,7 +687,7 @@ class CourseGradeReport(object):
):
if not course_grade:
# An empty gradeset means we failed to grade a student.
error_rows.append([user.id, user.username, text_type(error)])
error_rows.append([user.id, user.username, str(error)])
else:
success_rows.append(
[user.id, user.email, user.username] +
@@ -774,7 +770,7 @@ class ProblemGradeReport(GradeReportBase):
):
context.task_progress.attempted += 1
if not course_grade:
err_msg = text_type(error)
err_msg = str(error)
# There was an error grading this student.
if not err_msg:
err_msg = 'Unknown error'
@@ -818,7 +814,7 @@ class ProblemGradeReport(GradeReportBase):
get_cache(CourseEnrollment.MODE_CACHE_NAMESPACE).clear()
class ProblemResponses(object):
class ProblemResponses:
"""
Class to encapsulate functionality related to generating Problem Responses Reports.
"""
@@ -862,8 +858,7 @@ class ProblemResponses(object):
for block in course_blocks.get_children(root):
name = course_blocks.get_xblock_field(block, 'display_name') or block.block_type
for result in cls._build_problem_list(course_blocks, block, path + [name]):
yield result
yield from cls._build_problem_list(course_blocks, block, path + [name])
@classmethod
def _build_student_data(

View File

@@ -5,7 +5,9 @@ running state of a course.
"""
import csv
import logging
import os # lint-amnesty, pylint: disable=unused-import
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime
@@ -13,10 +15,6 @@ from io import StringIO # lint-amnesty, pylint: disable=unused-import
from tempfile import TemporaryFile
from time import time
from zipfile import ZipFile # lint-amnesty, pylint: disable=unused-import
import csv
import os # lint-amnesty, pylint: disable=unused-import
import unicodecsv
import six
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ValidationError
@@ -26,18 +24,12 @@ from pytz import UTC
from lms.djangoapps.instructor_analytics.basic import get_proctored_exam_results
from lms.djangoapps.instructor_analytics.csvs import format_dictlist
from lms.djangoapps.survey.models import SurveyAnswer
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from lms.djangoapps.survey.models import SurveyAnswer
from common.djangoapps.util.file import UniversalNewlineIterator
from .runner import TaskProgress
from .utils import (
UPDATE_STATUS_FAILED,
UPDATE_STATUS_SUCCEEDED,
upload_csv_to_report_store,
upload_zip_to_report_store,
)
from .utils import UPDATE_STATUS_FAILED, UPDATE_STATUS_SUCCEEDED, upload_csv_to_report_store, upload_zip_to_report_store
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
@@ -156,7 +148,7 @@ def _get_csv_file_content(csv_file):
returns appropriate csv file content based on input and output is
compatible with python versions
"""
if (not isinstance(csv_file, str)) and six.PY3:
if not isinstance(csv_file, str):
content = csv_file.read()
else:
content = csv_file
@@ -166,10 +158,7 @@ def _get_csv_file_content(csv_file):
else:
csv_content = content
if six.PY3:
return csv_content
else:
return UniversalNewlineIterator(csv_content)
return csv_content
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name): # lint-amnesty, pylint: disable=too-many-statements
@@ -183,10 +172,7 @@ def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, tas
# Iterate through rows to get total assignments for task progress
with DefaultStorage().open(task_input['file_name']) as f:
total_assignments = 0
if six.PY3:
reader = csv.DictReader(_get_csv_file_content(f).splitlines())
else:
reader = unicodecsv.DictReader(_get_csv_file_content(f), encoding='utf-8')
reader = csv.DictReader(_get_csv_file_content(f).splitlines())
for _line in reader:
total_assignments += 1
@@ -204,10 +190,7 @@ def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, tas
with DefaultStorage().open(task_input['file_name']) as f:
if six.PY3:
reader = csv.DictReader(_get_csv_file_content(f).splitlines())
else:
reader = unicodecsv.DictReader(_get_csv_file_content(f), encoding='utf-8')
reader = csv.DictReader(_get_csv_file_content(f).splitlines())
for row in reader:
# Try to use the 'email' field to identify the user. If it's not present, use 'username'.
@@ -277,7 +260,7 @@ def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, tas
else status_dict[column_name]
for column_name in output_header
]
for _cohort_name, status_dict in six.iteritems(cohorts_status)
for _cohort_name, status_dict in cohorts_status.items()
]
output_rows.insert(0, output_header)
upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
@@ -298,21 +281,21 @@ def upload_ora2_data(
num_attempted = 1
num_total = 1
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
fmt = 'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
TASK_LOG.info('%s, Task type: %s, Starting task execution', task_info_string, action_name)
task_progress = TaskProgress(action_name, num_total, start_time)
task_progress.attempted = num_attempted
curr_step = {'step': "Collecting responses"}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s for all submissions',
'%s, Task type: %s, Current step: %s for all submissions',
task_info_string,
action_name,
curr_step,
@@ -336,7 +319,7 @@ def upload_ora2_data(
task_progress.succeeded = 1
curr_step = {'step': "Uploading CSV"}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s',
'%s, Task type: %s, Current step: %s',
task_info_string,
action_name,
curr_step,
@@ -347,7 +330,7 @@ def upload_ora2_data(
curr_step = {'step': 'Finalizing ORA data report'}
task_progress.update_task_state(extra_meta=curr_step)
TASK_LOG.info(u'%s, Task type: %s, Upload complete.', task_info_string, action_name)
TASK_LOG.info('%s, Task type: %s, Upload complete.', task_info_string, action_name)
return UPDATE_STATUS_SUCCEEDED
@@ -408,7 +391,7 @@ def upload_ora2_submission_files(
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
TASK_LOG.info('%s, Task type: %s, Starting task execution', task_info_string, action_name)
task_progress = TaskProgress(action_name, num_total, start_time)
task_progress.attempted = num_attempted
@@ -452,6 +435,6 @@ def upload_ora2_submission_files(
task_progress.succeeded = 1
curr_step = {'step': 'Finalizing attachments extracting'}
task_progress.update_task_state(extra_meta=curr_step)
TASK_LOG.info(u'%s, Task type: %s, Upload complete.', task_info_string, action_name)
TASK_LOG.info('%s, Task type: %s, Upload complete.', task_info_string, action_name)
return UPDATE_STATUS_SUCCEEDED

View File

@@ -7,22 +7,21 @@ import json
import logging
from time import time
import six
from django.utils.translation import ugettext_noop
from opaque_keys.edx.keys import UsageKey
from xblock.runtime import KvsFieldData
from xblock.scorable import Score
from capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError
from common.djangoapps.student.models import get_user_by_username_or_email
from common.djangoapps.track.event_transaction_utils import create_new_event_transaction_id, set_event_transaction_type
from common.djangoapps.track.views import task_track
from common.djangoapps.util.db import outer_atomic
from lms.djangoapps.courseware.courses import get_course_by_id, get_problems_in_section
from lms.djangoapps.courseware.model_data import DjangoKeyValueStore, FieldDataCache
from lms.djangoapps.courseware.models import StudentModule
from lms.djangoapps.courseware.module_render import get_module_for_descriptor_internal
from lms.djangoapps.grades.api import events as grades_events
from common.djangoapps.student.models import get_user_by_username_or_email
from common.djangoapps.track.event_transaction_utils import create_new_event_transaction_id, set_event_transaction_type
from common.djangoapps.track.views import task_track
from common.djangoapps.util.db import outer_atomic
from xmodule.modulestore.django import modulestore
from ..exceptions import UpdateProblemModuleStateError
@@ -75,7 +74,7 @@ def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, ta
# find the problem descriptor:
problem_descriptor = modulestore().get_item(usage_key)
problems[six.text_type(usage_key)] = problem_descriptor
problems[str(usage_key)] = problem_descriptor
# if entrance_exam is present grab all problems in it
if entrance_exam_url:
@@ -91,7 +90,7 @@ def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, ta
for module_to_update in modules_to_update:
task_progress.attempted += 1
module_descriptor = problems[six.text_type(module_to_update.module_state_key)]
module_descriptor = problems[str(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
update_status = update_fcn(module_descriptor, module_to_update, task_input)
@@ -104,7 +103,7 @@ def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, ta
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError(u"Unexpected update_status returned: {}".format(update_status))
raise UpdateProblemModuleStateError(f"Unexpected update_status returned: {update_status}")
return task_progress.update_task_state()
@@ -144,7 +143,7 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = u"No module {location} for student {student}--access denied?".format(
msg = "No module {location} for student {student}--access denied?".format(
location=usage_key,
student=student
)
@@ -154,7 +153,7 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
if not hasattr(instance, 'rescore'):
# This should not happen, since it should be already checked in the
# caller, but check here to be sure.
msg = u"Specified module {0} of type {1} does not support rescoring.".format(usage_key, instance.__class__)
msg = f"Specified module {usage_key} of type {instance.__class__} does not support rescoring."
raise UpdateProblemModuleStateError(msg)
# We check here to see if the problem has any submissions. If it does not, we don't want to rescore it
@@ -172,8 +171,8 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
instance.rescore(only_if_higher=task_input['only_if_higher'])
except (LoncapaProblemError, StudentInputError, ResponseError):
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s",
"error processing rescore call for course %(course)s, problem %(loc)s "
"and student %(student)s",
dict(
course=course_id,
loc=usage_key,
@@ -184,8 +183,8 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
instance.save()
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s",
"successfully processed rescore call for course %(course)s, problem %(loc)s "
"and student %(student)s",
dict(
course=course_id,
loc=usage_key,
@@ -229,7 +228,7 @@ def override_score_module_state(xmodule_instance_args, module_descriptor, studen
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = u"No module {location} for student {student}--access denied?".format(
msg = "No module {location} for student {student}--access denied?".format(
location=usage_key,
student=student
)
@@ -264,8 +263,8 @@ def override_score_module_state(xmodule_instance_args, module_descriptor, studen
instance.publish_grade()
instance.save()
TASK_LOG.debug(
u"successfully processed score override for course %(course)s, problem %(loc)s "
u"and student %(student)s",
"successfully processed score override for course %(course)s, problem %(loc)s "
"and student %(student)s",
dict(
course=course_id,
loc=usage_key,

View File

@@ -10,13 +10,13 @@ from time import time
from celery import current_task
from django.db import reset_queries
from lms.djangoapps.instructor_task.models import PROGRESS, InstructorTask
from common.djangoapps.util.db import outer_atomic
from lms.djangoapps.instructor_task.models import PROGRESS, InstructorTask
TASK_LOG = logging.getLogger('edx.celery.task')
class TaskProgress(object):
class TaskProgress:
"""
Encapsulates the current task's progress by keeping track of
'attempted', 'succeeded', 'skipped', 'failed', 'total',
@@ -103,15 +103,15 @@ def run_main_task(entry_id, task_fcn, action_name):
task_input = json.loads(entry.task_input)
# Construct log message
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
fmt = 'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)
TASK_LOG.info('%s, Starting update (nothing %s yet)', task_info_string, action_name)
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
fmt = '{task_info}, Requested task did not match actual task "{actual_id}"'
message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
TASK_LOG.error(message)
raise ValueError(message)
@@ -123,7 +123,7 @@ def run_main_task(entry_id, task_fcn, action_name):
reset_queries()
# Log and exit, returning task_progress info as task result
TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
TASK_LOG.info('%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
return task_progress

View File

@@ -5,10 +5,10 @@ Utility methods for instructor tasks
from eventtracking import tracker
from lms.djangoapps.instructor_task.models import ReportStore
from common.djangoapps.util.file import course_filename_prefix_generator
from lms.djangoapps.instructor_task.models import ReportStore
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
REPORT_REQUESTED_EVENT_NAME = 'edx.instructor.report.requested'
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
@@ -37,7 +37,7 @@ def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name
report_name: string - Name of the generated report
"""
report_store = ReportStore.from_config(config_name)
report_name = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
report_name = "{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
course_prefix=course_filename_prefix_generator(course_id),
csv_name=csv_name,
timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
@@ -54,7 +54,7 @@ def upload_zip_to_report_store(file, zip_name, course_id, timestamp, config_name
"""
report_store = ReportStore.from_config(config_name)
report_name = u"{course_prefix}_{zip_name}_{timestamp_str}.zip".format(
report_name = "{course_prefix}_{zip_name}_{timestamp_str}.zip".format(
course_prefix=course_filename_prefix_generator(course_id),
zip_name=zip_name,
timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")

View File

@@ -10,12 +10,12 @@ from celery.states import PENDING
from factory.django import DjangoModelFactory
from opaque_keys.edx.locator import CourseLocator
from lms.djangoapps.instructor_task.models import InstructorTask
from common.djangoapps.student.tests.factories import UserFactory as StudentUserFactory
from lms.djangoapps.instructor_task.models import InstructorTask
class InstructorTaskFactory(DjangoModelFactory): # lint-amnesty, pylint: disable=missing-class-docstring
class Meta(object):
class Meta:
model = InstructorTask
task_type = 'rescore_problem'

View File

@@ -2,16 +2,16 @@
Test for LMS instructor background task queue management
"""
from unittest.mock import MagicMock, Mock, patch
import pytest
import ddt
from celery.states import FAILURE
from mock import MagicMock, Mock, patch
from six.moves import range
from lms.djangoapps.bulk_email.models import SEND_TO_LEARNERS, SEND_TO_MYSELF, SEND_TO_STAFF, CourseEmail
from common.test.utils import normalize_repr
from lms.djangoapps.courseware.tests.factories import UserFactory
from lms.djangoapps.bulk_email.models import SEND_TO_LEARNERS, SEND_TO_MYSELF, SEND_TO_STAFF, CourseEmail
from lms.djangoapps.certificates.models import CertificateGenerationHistory, CertificateStatuses
from lms.djangoapps.courseware.tests.factories import UserFactory
from lms.djangoapps.instructor_task.api import (
SpecificStudentIdMissingError,
generate_certificates_for_students,
@@ -95,7 +95,7 @@ class InstructorTaskModuleSubmitTest(InstructorTaskModuleTestCase):
"""Tests API methods that involve the submission of module-based background tasks."""
def setUp(self):
super(InstructorTaskModuleSubmitTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.initialize_course()
self.student = UserFactory.create(username="student", email="student@edx.org")
@@ -194,7 +194,7 @@ class InstructorTaskCourseSubmitTest(TestReportMixin, InstructorTaskCourseTestCa
"""Tests API methods that involve the submission of course-based background tasks."""
def setUp(self):
super(InstructorTaskCourseSubmitTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.initialize_course()
self.student = UserFactory.create(username="student", email="student@edx.org")
@@ -269,7 +269,7 @@ class InstructorTaskCourseSubmitTest(TestReportMixin, InstructorTaskCourseTestCa
api_call = lambda: submit_cohort_students(
self.create_task_request(self.instructor),
self.course.id,
file_name=u'filename.csv'
file_name='filename.csv'
)
self._test_resubmission(api_call)

View File

@@ -9,19 +9,18 @@ import json
import os
import shutil
from tempfile import mkdtemp
from unittest.mock import Mock, patch
from uuid import uuid4
import six
import unicodecsv
from celery.states import FAILURE, SUCCESS
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.urls import reverse
from mock import Mock, patch
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import Location
from six import text_type
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from lms.djangoapps.courseware.model_data import StudentModule
from lms.djangoapps.courseware.tests.tests import LoginEnrollmentTestCase
from lms.djangoapps.instructor_task.api_helper import encode_problem_and_student_input
@@ -30,7 +29,6 @@ from lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory
from lms.djangoapps.instructor_task.views import instructor_task_status
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from openedx.core.lib.url_utils import quote_slashes
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@@ -55,7 +53,7 @@ class InstructorTaskTestCase(CacheIsolationTestCase):
Tests API and view methods that involve the reporting of status for background tasks.
"""
def setUp(self):
super(InstructorTaskTestCase, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
@@ -155,7 +153,7 @@ class InstructorTaskCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase)
@staticmethod
def get_user_email(username):
"""Generate email address based on username"""
return u'{0}@test.com'.format(username)
return f'{username}@test.com'
def login_username(self, username):
"""Login the user, given the `username`."""
@@ -222,7 +220,7 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase):
Returns the factory args for the option problem type.
"""
return {
'question_text': u'The correct answer is {0}'.format(correct_answer),
'question_text': f'The correct answer is {correct_answer}',
'options': [OPTION_1, OPTION_2],
'correct_option': correct_answer,
'num_responses': num_responses,
@@ -275,9 +273,9 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase):
# URL, modified so that it can be easily stored in html, prepended with "input-" and
# appended with a sequence identifier for the particular response the input goes to.
course_key = self.course.id
return u'input_i4x-{0}-{1}-problem-{2}_{3}'.format(
course_key.org.replace(u'.', u'_'),
course_key.course.replace(u'.', u'_'),
return 'input_i4x-{}-{}-problem-{}_{}'.format(
course_key.org.replace('.', '_'),
course_key.course.replace('.', '_'),
problem_url_name,
response_id
)
@@ -287,9 +285,9 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase):
self.login_username(username)
# make ajax call:
modx_url = reverse('xblock_handler', kwargs={
'course_id': text_type(self.course.id),
'course_id': str(self.course.id),
'usage_id': quote_slashes(
text_type(InstructorTaskModuleTestCase.problem_location(problem_url_name, self.course.id))
str(InstructorTaskModuleTestCase.problem_location(problem_url_name, self.course.id))
),
'handler': 'xmodule_handler',
'suffix': 'problem_check',
@@ -297,12 +295,12 @@ class InstructorTaskModuleTestCase(InstructorTaskCourseTestCase):
# assign correct identifier to each response.
resp = self.client.post(modx_url, {
get_input_id(u'{}_1').format(index): response for index, response in enumerate(responses, 2)
get_input_id('{}_1').format(index): response for index, response in enumerate(responses, 2)
})
return resp
class TestReportMixin(object):
class TestReportMixin:
"""
Cleans up after tests that place files in the reports directory.
"""
@@ -314,7 +312,7 @@ class TestReportMixin(object):
if os.path.exists(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
super(TestReportMixin, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
# Ensure that working with the temp directories in tests is thread safe
# by creating a unique temporary directory for each testcase.
@@ -370,8 +368,8 @@ class TestReportMixin(object):
assert csv_rows == expected_rows
assert numeric_csv_rows == numeric_expected_rows
else:
six.assertCountEqual(self, csv_rows, expected_rows)
six.assertCountEqual(self, numeric_csv_rows, numeric_expected_rows)
self.assertCountEqual(csv_rows, expected_rows)
self.assertCountEqual(numeric_csv_rows, numeric_expected_rows)
@staticmethod
def _extract_and_round_numeric_items(dictionary):

View File

@@ -11,16 +11,15 @@ import json
import logging
import textwrap
from collections import namedtuple
from unittest.mock import patch
import pytest
import ddt
import six
from celery.states import FAILURE, SUCCESS
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.test.utils import override_settings
from django.urls import reverse
from mock import patch
from six import text_type
from six.moves import range
from capa.responsetypes import StudentInputError
from capa.tests.response_xml_factory import CodeResponseXMLFactory, CustomResponseXMLFactory
@@ -61,8 +60,7 @@ class TestIntegrationTask(InstructorTaskModuleTestCase):
assert instructor_task.task_type == task_type
task_input = json.loads(instructor_task.task_input)
assert 'student' not in task_input
assert task_input['problem_url'] == text_type(InstructorTaskModuleTestCase.problem_location(problem_url_name))
# lint-amnesty, pylint: disable=line-too-long
assert task_input['problem_url'] == str(InstructorTaskModuleTestCase.problem_location(problem_url_name))
status = json.loads(instructor_task.task_output)
assert status['exception'] == 'ZeroDivisionError'
assert status['message'] == expected_message
@@ -81,7 +79,7 @@ class TestRescoringTask(TestIntegrationTask):
"""
def setUp(self):
super(TestRescoringTask, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.initialize_course()
self.create_instructor('instructor')
@@ -104,8 +102,8 @@ class TestRescoringTask(TestIntegrationTask):
self.login_username(username)
# make ajax call:
modx_url = reverse('xblock_handler', kwargs={
'course_id': text_type(self.course.id),
'usage_id': quote_slashes(text_type(InstructorTaskModuleTestCase.problem_location(problem_url_name))),
'course_id': str(self.course.id),
'usage_id': quote_slashes(str(InstructorTaskModuleTestCase.problem_location(problem_url_name))),
'handler': 'xmodule_handler',
'suffix': 'problem_get',
})
@@ -292,7 +290,7 @@ class TestRescoringTask(TestIntegrationTask):
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
# return an input error as if it were a numerical response, with an embedded unicode character:
expected_message = u"Could not interpret '2/3\u03a9' as a number"
expected_message = "Could not interpret '2/3\u03a9' as a number"
with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
mock_rescore.side_effect = StudentInputError(expected_message)
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
@@ -304,8 +302,7 @@ class TestRescoringTask(TestIntegrationTask):
assert instructor_task.task_type == 'rescore_problem'
task_input = json.loads(instructor_task.task_input)
assert 'student' not in task_input
assert task_input['problem_url'] == text_type(InstructorTaskModuleTestCase.problem_location(problem_url_name))
# lint-amnesty, pylint: disable=line-too-long
assert task_input['problem_url'] == str(InstructorTaskModuleTestCase.problem_location(problem_url_name))
status = json.loads(instructor_task.task_output)
assert status['attempted'] == 1
assert status['succeeded'] == 0
@@ -359,7 +356,7 @@ class TestRescoringTask(TestIntegrationTask):
to not-equals).
"""
factory = CustomResponseXMLFactory()
script = textwrap.dedent(u"""
script = textwrap.dedent("""
def check_func(expect, answer_given):
expected = str(random.randint(0, 100))
return {'ok': answer_given %s expected, 'msg': expected}
@@ -405,7 +402,7 @@ class TestRescoringTask(TestIntegrationTask):
module = self.get_student_module(user.username, descriptor)
state = json.loads(module.state)
correct_map = state['correct_map']
log.info(u"Correct Map: %s", correct_map)
log.info("Correct Map: %s", correct_map)
# only one response, so pull it out:
answer = list(correct_map.values())[0]['msg']
self.submit_student_answer(user.username, problem_url_name, [answer, answer])
@@ -444,7 +441,7 @@ class TestResetAttemptsTask(TestIntegrationTask):
userlist = ['u1', 'u2', 'u3', 'u4']
def setUp(self):
super(TestResetAttemptsTask, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.initialize_course()
self.create_instructor('instructor')
for username in self.userlist:
@@ -513,7 +510,7 @@ class TestDeleteProblemTask(TestIntegrationTask):
userlist = ['u1', 'u2', 'u3', 'u4']
def setUp(self):
super(TestDeleteProblemTask, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.initialize_course()
self.create_instructor('instructor')
@@ -602,7 +599,7 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
def user_partition_group(user):
"""Return a dict having single key with value equals to students group in partition"""
group_config_hdr_tpl = u'Experiment Group ({})'
group_config_hdr_tpl = 'Experiment Group ({})'
return {
group_config_hdr_tpl.format(self.partition.name): self.partition.scheme.get_group_for_user(
self.course.id, user, self.partition
@@ -616,7 +613,7 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
grades,
user_partition_group(student)
)
for student_grades in students_grades for student, grades in six.iteritems(student_grades)
for student_grades in students_grades for student, grades in student_grades.items()
],
ignore_other_columns=ignore_other_columns,
)
@@ -644,14 +641,14 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
[
{
self.student_a: {
u'Grade': '1.0',
u'Homework': '1.0',
'Grade': '1.0',
'Homework': '1.0',
}
},
{
self.student_b: {
u'Grade': '0.5',
u'Homework': '0.5',
'Grade': '0.5',
'Homework': '0.5',
}
},
],
@@ -677,14 +674,14 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
[
{
self.student_a: {
u'Grade': '1.0',
u'Homework': '1.0',
'Grade': '1.0',
'Homework': '1.0',
},
},
{
self.student_b: {
u'Grade': '0.0',
u'Homework': u'Not Attempted',
'Grade': '0.0',
'Homework': 'Not Attempted',
}
},
],

View File

@@ -5,14 +5,14 @@ Tests for instructor_task/models.py.
import copy
import time
from six import StringIO
from io import StringIO
import pytest
from django.conf import settings
from django.test import SimpleTestCase, TestCase, override_settings
from opaque_keys.edx.locator import CourseLocator
from common.test.utils import MockS3BotoMixin
from lms.djangoapps.instructor_task.models import InstructorTask, ReportStore, TASK_INPUT_LENGTH
from lms.djangoapps.instructor_task.models import TASK_INPUT_LENGTH, InstructorTask, ReportStore
from lms.djangoapps.instructor_task.tests.test_base import TestReportMixin
@@ -35,13 +35,13 @@ class TestInstructorTasksModel(TestCase):
)
class ReportStoreTestMixin(object):
class ReportStoreTestMixin:
"""
Mixin for report store tests.
"""
def setUp(self):
super(ReportStoreTestMixin, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.course_id = CourseLocator(org="testx", course="coursex", run="runx")
def create_report_store(self):

View File

@@ -3,22 +3,20 @@ Unit tests for instructor_task subtasks.
"""
from unittest.mock import Mock, patch
from uuid import uuid4
from mock import Mock, patch
from six.moves import range
from common.djangoapps.student.models import CourseEnrollment
from lms.djangoapps.instructor_task.subtasks import queue_subtasks_for_query
from lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory
from lms.djangoapps.instructor_task.tests.test_base import InstructorTaskCourseTestCase
from common.djangoapps.student.models import CourseEnrollment
class TestSubtasks(InstructorTaskCourseTestCase):
"""Tests for subtasks."""
def setUp(self):
super(TestSubtasks, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.initialize_course()
def _enroll_students_in_course(self, course_id, num_students): # lint-amnesty, pylint: disable=unused-argument
@@ -26,7 +24,7 @@ class TestSubtasks(InstructorTaskCourseTestCase):
for _ in range(num_students):
random_id = uuid4().hex[:8]
self.create_student(username='student{0}'.format(random_id))
self.create_student(username=f'student{random_id}')
def _queue_subtasks(self, create_subtask_fcn, items_per_task, initial_count, extra_count):
"""Queue subtasks while enrolling more students into course in the middle of the process."""

View File

@@ -8,14 +8,13 @@ paths actually work.
import json
from functools import partial # lint-amnesty, pylint: disable=unused-import
from unittest.mock import MagicMock, Mock, patch
from uuid import uuid4
import pytest
import ddt
from celery.states import FAILURE, SUCCESS
from django.utils.translation import ugettext_noop
from mock import MagicMock, Mock, patch
from opaque_keys.edx.keys import i4xEncoder
from six.moves import range
from common.djangoapps.course_modes.models import CourseMode
from lms.djangoapps.courseware.models import StudentModule
@@ -31,7 +30,8 @@ from lms.djangoapps.instructor_task.tasks import (
rescore_problem,
reset_problem_attempts
)
from lms.djangoapps.instructor_task.tasks_helper.misc import upload_ora2_data # lint-amnesty, pylint: disable=unused-import
from lms.djangoapps.instructor_task.tasks_helper.misc import \
upload_ora2_data # lint-amnesty, pylint: disable=unused-import
from lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory
from lms.djangoapps.instructor_task.tests.test_base import InstructorTaskModuleTestCase
from xmodule.modulestore.exceptions import ItemNotFoundError
@@ -52,7 +52,7 @@ class TestInstructorTasks(InstructorTaskModuleTestCase):
"""
def setUp(self):
super(TestInstructorTasks, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.initialize_course()
self.instructor = self.create_instructor('instructor')
self.location = self.problem_location(PROBLEM_URL_NAME)
@@ -436,8 +436,10 @@ class TestRescoreInstructorTask(TestInstructorTasks):
entry = InstructorTask.objects.get(id=task_entry.id)
output = json.loads(entry.task_output)
assert output['exception'] == 'UpdateProblemModuleStateError'
assert output['message'] == u'Specified module {0} of type {1} does not support rescoring.'\
.format(self.location, mock_instance.__class__)
assert output['message'] == 'Specified module {} of type {} does not support rescoring.'.format(
self.location,
mock_instance.__class__,
)
assert len(output['traceback']) > 0
def test_rescoring_unaccessable(self):

View File

@@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-
"""
Unit tests for LMS instructor-initiated background tasks helper functions.
@@ -13,32 +11,23 @@ import os
import shutil
import tempfile
from collections import OrderedDict
from contextlib import contextmanager, ExitStack
from contextlib import ExitStack, contextmanager
from datetime import datetime, timedelta
from unittest.mock import ANY, MagicMock, Mock, patch
from urllib.parse import quote
import ddt
import unicodecsv
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from django.conf import settings # lint-amnesty, pylint: disable=wrong-import-order
from django.test.utils import override_settings # lint-amnesty, pylint: disable=wrong-import-order
from edx_django_utils.cache import RequestCache # lint-amnesty, pylint: disable=wrong-import-order
from freezegun import freeze_time # lint-amnesty, pylint: disable=wrong-import-order
from mock import ANY, MagicMock, Mock, patch # lint-amnesty, pylint: disable=wrong-import-order
from pytz import UTC # lint-amnesty, pylint: disable=wrong-import-order
from six import text_type # lint-amnesty, pylint: disable=wrong-import-order
from six.moves import range, zip # lint-amnesty, pylint: disable=wrong-import-order
from six.moves.urllib.parse import quote # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from xmodule.partitions.partitions import Group, UserPartition
from django.conf import settings
from django.test.utils import override_settings
from edx_django_utils.cache import RequestCache
from freezegun import freeze_time
from pytz import UTC
import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.models import (
CourseEnrollment,
CourseEnrollmentAllowed
)
from common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAllowed
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate
from lms.djangoapps.certificates.tests.factories import CertificateWhitelistFactory, GeneratedCertificateFactory
@@ -49,10 +38,7 @@ from lms.djangoapps.grades.subsection_grade import CreateSubsectionGrade
from lms.djangoapps.grades.transformer import GradesTransformer
from lms.djangoapps.instructor_analytics.basic import UNAVAILABLE, list_problem_responses
from lms.djangoapps.instructor_task.tasks_helper.certs import generate_students_certificates
from lms.djangoapps.instructor_task.tasks_helper.enrollments import (
upload_may_enroll_csv,
upload_students_csv
)
from lms.djangoapps.instructor_task.tasks_helper.enrollments import upload_may_enroll_csv, upload_students_csv
from lms.djangoapps.instructor_task.tasks_helper.grades import (
ENROLLED_IN_COURSE,
NOT_ENROLLED_IN_COURSE,
@@ -80,6 +66,11 @@ from openedx.core.djangoapps.credit.tests.factories import CreditCourseFactory
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from openedx.core.djangoapps.util.testing import ContentGroupTestCase, TestConditionalContent
from openedx.core.lib.teams_config import TeamsConfig
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from xmodule.partitions.partitions import Group, UserPartition
from ..models import ReportStore
from ..tasks_helper.utils import UPDATE_STATUS_FAILED, UPDATE_STATUS_SUCCEEDED
@@ -120,13 +111,13 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
super().setUp()
self.course = CourseFactory.create()
@ddt.data([u'student@example.com', u'ni\xf1o@example.com'])
@ddt.data(['student@example.com', 'ni\xf1o@example.com'])
def test_unicode_emails(self, emails):
"""
Test that students with unicode characters in emails is handled.
"""
for i, email in enumerate(emails):
self.create_student('student{0}'.format(i), email)
self.create_student(f'student{i}', email)
self.current_task = Mock() # pylint: disable=attribute-defined-outside-init
self.current_task.update_state = Mock()
@@ -182,8 +173,8 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
user2 = UserFactory.create(username='user2')
CourseEnrollment.enroll(user1, course.id)
CourseEnrollment.enroll(user2, course.id)
professor_x = u'ÞrÖfessÖr X'
magneto = u'MàgnëtÖ'
professor_x = 'ÞrÖfessÖr X'
magneto = 'MàgnëtÖ'
cohort1 = CohortFactory(course_id=course.id, name=professor_x)
cohort2 = CohortFactory(course_id=course.id, name=magneto)
membership1 = CohortMembership(course_user_group=cohort1, user=user1)
@@ -198,7 +189,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
"""
Test that user partition groups can contain unicode characters.
"""
user_groups = [u'ÞrÖfessÖr X', u'MàgnëtÖ']
user_groups = ['ÞrÖfessÖr X', 'MàgnëtÖ']
user_partition = UserPartition(
0,
'x_man',
@@ -232,12 +223,12 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
[Group(0, 'Group A'), Group(1, 'Group B')],
scheme_id='cohort'
)
experiment_group_a = Group(2, u'Expériment Group A')
experiment_group_b = Group(3, u'Expériment Group B')
experiment_group_a = Group(2, 'Expériment Group A')
experiment_group_b = Group(3, 'Expériment Group B')
experiment_partition = UserPartition(
1,
u'Content Expériment Configuration',
u'Group Configuration for Content Expériments',
'Content Expériment Configuration',
'Group Configuration for Content Expériments',
[experiment_group_a, experiment_group_b],
scheme_id='random'
)
@@ -270,7 +261,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
# partition (by way of a cohort) to verify that the user
# partition group does not show up in the "Experiment Group"
# cell.
cohort_a = CohortFactory.create(course_id=course.id, name=u'Cohørt A', users=[user_a])
cohort_a = CohortFactory.create(course_id=course.id, name='Cohørt A', users=[user_a])
CourseUserGroupPartitionGroup(
course_user_group=cohort_a,
partition_id=cohort_scheme_partition.id,
@@ -280,7 +271,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
# Verify that we see user_a and user_b in their respective
# content experiment groups, and that we do not see any
# content groups.
experiment_group_message = u'Experiment Group ({content_experiment})'
experiment_group_message = 'Experiment Group ({content_experiment})'
self._verify_cell_data_for_user(
user_a.username,
course.id,
@@ -310,7 +301,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
user_b.username,
course.id,
cohort_name_header,
u'',
'',
)
@patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task')
@@ -320,7 +311,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
Tests that CSV grade report works if unicode in headers.
"""
mock_course_grade = MagicMock()
mock_course_grade.summary = {'section_breakdown': [{'label': u'\u8282\u540e\u9898 01'}]}
mock_course_grade.summary = {'section_breakdown': [{'label': '\u8282\u540e\u9898 01'}]}
mock_course_grade.letter_grade = None
mock_course_grade.percent = 0
mock_grades_iter.return_value = [
@@ -384,12 +375,12 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
@ddt.unpack
def test_query_counts(self, store_type, mongo_count):
with self.store.default_store(store_type):
experiment_group_a = Group(2, u'Expériment Group A')
experiment_group_b = Group(3, u'Expériment Group B')
experiment_group_a = Group(2, 'Expériment Group A')
experiment_group_b = Group(3, 'Expériment Group B')
experiment_partition = UserPartition(
1,
u'Content Expériment Configuration',
u'Group Configuration for Content Expériments',
'Content Expériment Configuration',
'Group Configuration for Content Expériments',
[experiment_group_a, experiment_group_b],
scheme_id='random'
)
@@ -506,10 +497,10 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
Ensure that the _build_student_data method respects the global setting for
maximum responses to return in a report.
"""
self.define_option_problem(u'Problem1')
self.define_option_problem('Problem1')
for ctr in range(5):
student = self.create_student('student{}'.format(ctr))
self.submit_student_answer(student.username, u'Problem1', ['Option 1'])
student = self.create_student(f'student{ctr}')
self.submit_student_answer(student.username, 'Problem1', ['Option 1'])
student_data, _ = ProblemResponses._build_student_data(
user_id=self.instructor.id,
@@ -528,8 +519,8 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
Ensure that building student data for a block the doesn't have the
``generate_report_data`` method works as expected.
"""
problem = self.define_option_problem(u'Problem1')
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
problem = self.define_option_problem('Problem1')
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
with self._remove_capa_report_generator():
student_data, student_data_keys_list = ProblemResponses._build_student_data(
user_id=self.instructor.id,
@@ -553,8 +544,8 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
Ensure that building student data for a block that supports the
``generate_report_data`` method works as expected.
"""
self.define_option_problem(u'Problem1')
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
self.define_option_problem('Problem1')
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
state1 = {'some': 'state1', 'more': 'state1!'}
state2 = {'some': 'state2', 'more': 'state2!'}
mock_generate_report_data.return_value = iter([
@@ -592,8 +583,8 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
Ensure that building student data for a block that returns OrderedDicts from the
``generate_report_data`` sorts the columns as expected.
"""
self.define_option_problem(u'Problem1')
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
self.define_option_problem('Problem1')
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
state1 = OrderedDict()
state1['some'] = 'state1'
state1['more'] = 'state1!'
@@ -632,8 +623,8 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
Ensure that building student data for a block that supports the
``generate_report_data`` method works as expected.
"""
self.define_option_problem(u'Problem1')
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
self.define_option_problem('Problem1')
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
student_data, student_data_keys_list = ProblemResponses._build_student_data(
user_id=self.instructor.id,
course_key=self.course.id,
@@ -647,8 +638,8 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
'title': 'Problem1',
'Answer ID': 'i4x-edx-1_23x-problem-Problem1_2_1',
'Answer': 'Option 1',
'Correct Answer': u'Option 1',
'Question': u'The correct answer is Option 1',
'Correct Answer': 'Option 1',
'Question': 'The correct answer is Option 1',
}, student_data[0])
assert 'state' in student_data[0]
assert student_data_keys_list == ['username', 'title', 'location', 'Answer', 'Answer ID', 'Correct Answer',
@@ -658,10 +649,10 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
"""
Ensure that building student data works when supplied multiple usage keys.
"""
problem1 = self.define_option_problem(u'Problem1')
problem2 = self.define_option_problem(u'Problem2')
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
self.submit_student_answer(self.student.username, u'Problem2', ['Option 1'])
problem1 = self.define_option_problem('Problem1')
problem2 = self.define_option_problem('Problem2')
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
self.submit_student_answer(self.student.username, 'Problem2', ['Option 1'])
student_data, _ = ProblemResponses._build_student_data(
user_id=self.instructor.id,
course_key=self.course.id,
@@ -671,13 +662,13 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
for idx in range(1, 3):
self.assertDictContainsSubset({
'username': 'student',
'location': u'test_course > Section > Subsection > Problem{}'.format(idx),
'block_key': 'i4x://edx/1.23x/problem/Problem{}'.format(idx),
'title': u'Problem{}'.format(idx),
'Answer ID': 'i4x-edx-1_23x-problem-Problem{}_2_1'.format(idx),
'Answer': u'Option 1',
'Correct Answer': u'Option 1',
'Question': u'The correct answer is Option 1',
'location': f'test_course > Section > Subsection > Problem{idx}',
'block_key': f'i4x://edx/1.23x/problem/Problem{idx}',
'title': f'Problem{idx}',
'Answer ID': f'i4x-edx-1_23x-problem-Problem{idx}_2_1',
'Answer': 'Option 1',
'Correct Answer': 'Option 1',
'Question': 'The correct answer is Option 1',
}, student_data[idx - 1])
assert 'state' in student_data[(idx - 1)]
@@ -693,18 +684,18 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
Ensure that building student data works when supplied multiple usage keys.
"""
for idx in range(1, 6):
self.define_option_problem(u'Problem{}'.format(idx))
self.define_option_problem(f'Problem{idx}')
item = ItemFactory.create(
parent_location=self.problem_section.location,
parent=self.problem_section,
category="test-category",
display_name=u"Item{}".format(idx),
display_name=f"Item{idx}",
data=''
)
StudentModule.save_state(self.student, self.course.id, item.location, {})
for idx in range(1, 6):
self.submit_student_answer(self.student.username, u'Problem{}'.format(idx), ['Option 1'])
self.submit_student_answer(self.student.username, f'Problem{idx}', ['Option 1'])
student_data, _ = ProblemResponses._build_student_data(
user_id=self.instructor.id,
@@ -725,7 +716,7 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
Ensure that if ``generate_report_data`` raises a NotImplementedError,
the report falls back to the alternative method.
"""
problem = self.define_option_problem(u'Problem1')
problem = self.define_option_problem('Problem1')
mock_generate_report_data.side_effect = NotImplementedError
ProblemResponses._build_student_data(
user_id=self.instructor.id,
@@ -745,9 +736,9 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
'.ProblemResponses._build_student_data') as mock_build_student_data:
mock_build_student_data.return_value = (
[
{'username': 'user0', 'state': u'state0'},
{'username': 'user1', 'state': u'state1'},
{'username': 'user2', 'state': u'state2'},
{'username': 'user0', 'state': 'state0'},
{'username': 'user1', 'state': 'state1'},
{'username': 'user2', 'state': 'state2'},
],
['username', 'state']
)
@@ -784,9 +775,9 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskModuleTestCase):
'.ProblemResponses._build_student_data') as mock_build_student_data:
mock_build_student_data.return_value = (
[
{'username': 'user0', 'state': u'state0'},
{'username': 'user1', 'state': u'state1'},
{'username': 'user2', 'state': u'state2'},
{'username': 'user0', 'state': 'state0'},
{'username': 'user1', 'state': 'state1'},
{'username': 'user2', 'state': 'state2'},
],
['username', 'state']
)
@@ -806,9 +797,9 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
self.initialize_course()
# Add unicode data to CSV even though unicode usernames aren't
# technically possible in openedx.
self.student_1 = self.create_student(u'üser_1')
self.student_2 = self.create_student(u'üser_2')
self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Enrollment Status', u'Grade']
self.student_1 = self.create_student('üser_1')
self.student_2 = self.create_student('üser_2')
self.csv_header_row = ['Student ID', 'Email', 'Username', 'Enrollment Status', 'Grade']
@patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task')
def test_no_problems(self, _get_current_task):
@@ -821,11 +812,11 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
self.verify_rows_in_csv([
dict(list(zip(
self.csv_header_row,
[text_type(self.student_1.id), self.student_1.email, self.student_1.username, ENROLLED_IN_COURSE, '0.0']
[str(self.student_1.id), self.student_1.email, self.student_1.username, ENROLLED_IN_COURSE, '0.0']
))),
dict(list(zip(
self.csv_header_row,
[text_type(self.student_2.id), self.student_2.email, self.student_2.username, ENROLLED_IN_COURSE, '0.0']
[str(self.student_2.id), self.student_2.email, self.student_2.username, ENROLLED_IN_COURSE, '0.0']
)))
])
@@ -837,18 +828,18 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(u'Problem1', parent=vertical)
self.define_option_problem('Problem1', parent=vertical)
self.submit_student_answer(self.student_1.username, u'Problem1', ['Option 1'])
self.submit_student_answer(self.student_1.username, 'Problem1', ['Option 1'])
result = ProblemGradeReport.generate(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
problem_name = u'Homework 1: Subsection - Problem1'
problem_name = 'Homework 1: Subsection - Problem1'
header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
self.verify_rows_in_csv([
dict(list(zip(
header_row,
[
text_type(self.student_1.id),
str(self.student_1.id),
self.student_1.email,
self.student_1.username,
ENROLLED_IN_COURSE,
@@ -858,11 +849,11 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
dict(list(zip(
header_row,
[
text_type(self.student_2.id),
str(self.student_2.id),
self.student_2.email,
self.student_2.username,
ENROLLED_IN_COURSE,
'0.0', u'Not Attempted', '2.0',
'0.0', 'Not Attempted', '2.0',
]
)))
])
@@ -873,17 +864,17 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
'lms.djangoapps.instructor_task.tasks_helper.grades.problem_grade_report_verified_only',
return_value=True,
):
student_verified = self.create_student(u'user_verified', mode='verified')
student_verified = self.create_student('user_verified', mode='verified')
vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(u'Problem1', parent=vertical)
self.define_option_problem('Problem1', parent=vertical)
self.submit_student_answer(self.student_1.username, u'Problem1', ['Option 1'])
self.submit_student_answer(student_verified.username, u'Problem1', ['Option 1'])
self.submit_student_answer(self.student_1.username, 'Problem1', ['Option 1'])
self.submit_student_answer(student_verified.username, 'Problem1', ['Option 1'])
result = ProblemGradeReport.generate(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 1, 'succeeded': 1, 'failed': 0}, result
@@ -901,18 +892,18 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(u'Problem1', parent=vertical)
self.define_option_problem('Problem1', parent=vertical)
self.submit_student_answer(self.student_1.username, u'Problem1', ['Option 1'])
self.submit_student_answer(self.student_1.username, 'Problem1', ['Option 1'])
result = ProblemGradeReport.generate(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 3, 'succeeded': 3, 'failed': 0}, result)
problem_name = u'Homework 1: Subsection - Problem1'
problem_name = 'Homework 1: Subsection - Problem1'
header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
self.verify_rows_in_csv([
dict(list(zip(
header_row,
[
text_type(self.student_1.id),
str(self.student_1.id),
self.student_1.email,
self.student_1.username,
ENROLLED_IN_COURSE,
@@ -922,21 +913,21 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
dict(list(zip(
header_row,
[
text_type(self.student_2.id),
str(self.student_2.id),
self.student_2.email,
self.student_2.username,
ENROLLED_IN_COURSE,
'0.0', u'Not Attempted', '2.0',
'0.0', 'Not Attempted', '2.0',
]
))),
dict(list(zip(
header_row,
[
text_type(inactive_student.id),
str(inactive_student.id),
inactive_student.email,
inactive_student.username,
NOT_ENROLLED_IN_COURSE,
'0.0', u'Not Attempted', '2.0',
'0.0', 'Not Attempted', '2.0',
]
)))
])
@@ -951,8 +942,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
def setUp(self):
super().setUp()
self.problem_a_url = u'problem_a_url'
self.problem_b_url = u'problem_b_url'
self.problem_a_url = 'problem_a_url'
self.problem_b_url = 'problem_b_url'
self.define_option_problem(self.problem_a_url, parent=self.vertical_a)
self.define_option_problem(self.problem_b_url, parent=self.vertical_b)
@@ -980,8 +971,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
{'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
)
problem_names = [u'Homework 1: Subsection - problem_a_url', u'Homework 1: Subsection - problem_b_url']
header_row = [u'Student ID', u'Email', u'Username', u'Enrollment Status', u'Grade']
problem_names = ['Homework 1: Subsection - problem_a_url', 'Homework 1: Subsection - problem_b_url']
header_row = ['Student ID', 'Email', 'Username', 'Enrollment Status', 'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
@@ -989,21 +980,21 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
dict(list(zip(
header_row,
[
text_type(self.student_a.id),
str(self.student_a.id),
self.student_a.email,
self.student_a.username,
ENROLLED_IN_COURSE,
u'1.0', u'2.0', u'2.0', u'Not Available', u'Not Available'
'1.0', '2.0', '2.0', 'Not Available', 'Not Available'
]
))),
dict(list(zip(
header_row,
[
text_type(self.student_b.id),
str(self.student_b.id),
self.student_b.email,
self.student_b.username,
ENROLLED_IN_COURSE,
u'0.5', u'Not Available', u'Not Available', u'1.0', u'2.0'
'0.5', 'Not Available', 'Not Available', '1.0', '2.0'
]
)))
])
@@ -1017,10 +1008,10 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
self.course = CourseFactory.create(
grading_policy={
"GRADER": [{
"type": u"Homework %d" % i,
"type": "Homework %d" % i,
"min_count": 1,
"drop_count": 0,
"short_label": u"HW %d" % i,
"short_label": "HW %d" % i,
"weight": 1.0
} for i in range(1, grader_num)]
}
@@ -1035,10 +1026,10 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
problem_vertical_list = []
for i in range(1, grader_num):
chapter_name = u'Chapter %d' % i
problem_section_name = u'Problem section %d' % i
problem_section_format = u'Homework %d' % i
problem_vertical_name = u'Problem Unit %d' % i
chapter_name = 'Chapter %d' % i
problem_section_name = 'Problem section %d' % i
problem_section_format = 'Homework %d' % i
problem_vertical_name = 'Problem Unit %d' % i
chapter = ItemFactory.create(parent_location=self.course.location,
display_name=chapter_name)
@@ -1062,10 +1053,10 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
for i in range(1, grader_num):
problem_url = 'test_problem_%d' % i
self.define_option_problem(problem_url, parent=problem_vertical_list[i - 1])
title = u'Homework %d 1: Problem section %d - %s' % (i, i, problem_url)
title = 'Homework %d 1: Problem section %d - %s' % (i, i, problem_url)
problem_names.append(title)
header_row = [u'Student ID', u'Email', u'Username', u'Enrollment Status', u'Grade']
header_row = ['Student ID', 'Email', 'Username', 'Enrollment Status', 'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
@@ -1089,12 +1080,12 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In
display_name='Problem Vertical'
)
self.define_option_problem(
u"Problem0",
"Problem0",
parent=vertical,
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
)
self.define_option_problem(
u"Problem1",
"Problem1",
parent=vertical,
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
)
@@ -1110,7 +1101,7 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In
return dict(list(zip(
header_row,
[
text_type(user.id),
str(user.id),
user.email,
user.username,
enrollment_status,
@@ -1118,21 +1109,21 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In
)))
def test_cohort_content(self):
self.submit_student_answer(self.alpha_user.username, u'Problem0', ['Option 1', 'Option 1'])
resp = self.submit_student_answer(self.alpha_user.username, u'Problem1', ['Option 1', 'Option 1'])
self.submit_student_answer(self.alpha_user.username, 'Problem0', ['Option 1', 'Option 1'])
resp = self.submit_student_answer(self.alpha_user.username, 'Problem1', ['Option 1', 'Option 1'])
assert resp.status_code == 404
resp = self.submit_student_answer(self.beta_user.username, u'Problem0', ['Option 1', 'Option 2'])
resp = self.submit_student_answer(self.beta_user.username, 'Problem0', ['Option 1', 'Option 2'])
assert resp.status_code == 404
self.submit_student_answer(self.beta_user.username, u'Problem1', ['Option 1', 'Option 2'])
self.submit_student_answer(self.beta_user.username, 'Problem1', ['Option 1', 'Option 2'])
with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
result = ProblemGradeReport.generate(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 5, 'succeeded': 5, 'failed': 0}, result
)
problem_names = [u'Homework 1: Subsection - Problem0', u'Homework 1: Subsection - Problem1']
header_row = [u'Student ID', u'Email', u'Username', u'Enrollment Status', u'Grade']
problem_names = ['Homework 1: Subsection - Problem0', 'Homework 1: Subsection - Problem1']
header_row = ['Student ID', 'Email', 'Username', 'Enrollment Status', 'Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
@@ -1140,27 +1131,27 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In
{
'user': self.staff_user,
'enrollment_status': ENROLLED_IN_COURSE,
'grade': [u'0.0', u'Not Attempted', u'2.0', u'Not Attempted', u'2.0'],
'grade': ['0.0', 'Not Attempted', '2.0', 'Not Attempted', '2.0'],
},
{
'user': self.alpha_user,
'enrollment_status': ENROLLED_IN_COURSE,
'grade': [u'1.0', u'2.0', u'2.0', u'Not Available', u'Not Available'],
'grade': ['1.0', '2.0', '2.0', 'Not Available', 'Not Available'],
},
{
'user': self.beta_user,
'enrollment_status': ENROLLED_IN_COURSE,
'grade': [u'0.5', u'Not Available', u'Not Available', u'1.0', u'2.0'],
'grade': ['0.5', 'Not Available', 'Not Available', '1.0', '2.0'],
},
{
'user': self.non_cohorted_user,
'enrollment_status': ENROLLED_IN_COURSE,
'grade': [u'0.0', u'Not Available', u'Not Available', u'Not Available', u'Not Available'],
'grade': ['0.0', 'Not Available', 'Not Available', 'Not Available', 'Not Available'],
},
{
'user': self.community_ta,
'enrollment_status': ENROLLED_IN_COURSE,
'grade': [u'0.0', u'Not Attempted', u'2.0', u'Not Available', u'Not Available'],
'grade': ['0.0', 'Not Attempted', '2.0', 'Not Available', 'Not Available'],
},
]
@@ -1284,14 +1275,14 @@ class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
assert len(links) == 1
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
@ddt.data([u'student', u'student\xec'])
@ddt.data(['student', 'student\xec'])
def test_unicode_usernames(self, students):
"""
Test that students with unicode characters in their usernames
are handled.
"""
for i, student in enumerate(students):
self.create_student(username=student, email='student{0}@example.com'.format(i))
self.create_student(username=student, email=f'student{i}@example.com')
self.current_task = Mock() # pylint: disable=attribute-defined-outside-init
self.current_task.update_state = Mock()
@@ -1407,7 +1398,7 @@ class TestListMayEnroll(TestReportMixin, InstructorTaskCourseTestCase):
Test handling of unicode characters in email addresses of students
who may enroll in a course.
"""
enrollments = [u'student@example.com', u'ni\xf1o@example.com']
enrollments = ['student@example.com', 'ni\xf1o@example.com']
for email in enrollments:
self._create_enrollment(email)
@@ -1419,7 +1410,7 @@ class TestListMayEnroll(TestReportMixin, InstructorTaskCourseTestCase):
self.assertDictContainsSubset({'attempted': num_enrollments, 'succeeded': num_enrollments, 'failed': 0}, result)
class MockDefaultStorage(object):
class MockDefaultStorage:
"""Mock django's DefaultStorage"""
def __init__(self):
pass
@@ -1440,7 +1431,7 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
self.course = CourseFactory.create()
self.cohort_1 = CohortFactory(course_id=self.course.id, name='Cohort 1')
self.cohort_2 = CohortFactory(course_id=self.course.id, name='Cohort 2')
self.student_1 = self.create_student(username=u'student_1\xec', email='student_1@example.com')
self.student_1 = self.create_student(username='student_1\xec', email='student_1@example.com')
self.student_2 = self.create_student(username='student_2', email='student_2@example.com')
self.csv_header_row = [
'Cohort Name', 'Exists', 'Learners Added', 'Learners Not Found',
@@ -1459,9 +1450,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
def test_username(self):
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 1\n'
u'student_2,,Cohort 2'
'username,email,cohort\n'
'student_1\xec,,Cohort 1\n'
'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
@@ -1489,9 +1480,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
def test_username_and_email(self):
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,student_1@example.com,Cohort 1\n'
u'student_2,student_2@example.com,Cohort 2'
'username,email,cohort\n'
'student_1\xec,student_1@example.com,Cohort 1\n'
'student_2,student_2@example.com,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
@@ -1510,9 +1501,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
ignored.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,student_1@example.com,Cohort 1\n' # valid username and email
u'Invalid,student_2@example.com,Cohort 2' # invalid username, valid email
'username,email,cohort\n'
'student_1\xec,student_1@example.com,Cohort 1\n' # valid username and email
'Invalid,student_2@example.com,Cohort 2' # invalid username, valid email
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
@@ -1591,9 +1582,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
val_1 <- bad row; no trailing commas to indicate empty rows
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,\n'
u'student_2'
'username,email,cohort\n'
'student_1\xec,\n'
'student_2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 0, 'failed': 2}, result)
self.verify_rows_in_csv(
@@ -1605,7 +1596,7 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
def test_only_header_row(self):
result = self._cohort_students_and_upload(
u'username,email,cohort'
'username,email,cohort'
)
self.assertDictContainsSubset({'total': 0, 'attempted': 0, 'succeeded': 0, 'failed': 0}, result)
self.verify_rows_in_csv([])
@@ -1615,9 +1606,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
Test that we can handle carriage returns in our file.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\r'
u'student_1\xec,,Cohort 1\r'
u'student_2,,Cohort 2'
'username,email,cohort\r'
'student_1\xec,,Cohort 1\r'
'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
@@ -1633,9 +1624,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
Test that we can handle carriage returns and line feeds in our file.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\r\n'
u'student_1\xec,,Cohort 1\r\n'
u'student_2,,Cohort 2'
'username,email,cohort\r\n'
'student_1\xec,,Cohort 1\r\n'
'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
@@ -1653,9 +1644,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
membership2.save()
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 2\n'
u'student_2,,Cohort 1'
'username,email,cohort\n'
'student_1\xec,,Cohort 2\n'
'student_2,,Cohort 1'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
@@ -1673,9 +1664,9 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
membership2.save()
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 1\n'
u'student_2,,Cohort 2'
'username,email,cohort\n'
'student_1\xec,,Cohort 1\n'
'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'skipped': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
@@ -1696,7 +1687,7 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
def setUp(self):
super().setUp()
self.create_course()
self.student = self.create_student(u'üser_1')
self.student = self.create_student('üser_1')
def create_course(self):
"""
@@ -1722,7 +1713,7 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
metadata={'graded': True, 'format': 'Homework'},
display_name='Subsection'
)
self.define_option_problem(u'Problem1', parent=self.problem_section, num_responses=1)
self.define_option_problem('Problem1', parent=self.problem_section, num_responses=1)
self.hidden_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
@@ -1730,14 +1721,14 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
visible_to_staff_only=True,
display_name='Hidden',
)
self.define_option_problem(u'Problem2', parent=self.hidden_section)
self.define_option_problem('Problem2', parent=self.hidden_section)
self.unattempted_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name='Unattempted',
)
self.define_option_problem(u'Problem3', parent=self.unattempted_section)
self.define_option_problem('Problem3', parent=self.unattempted_section)
self.empty_section = ItemFactory.create(
parent=self.chapter,
category='sequential',
@@ -1746,7 +1737,7 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
)
def test_grade_report(self):
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
result = CourseGradeReport.generate(None, None, self.course.id, None, 'graded')
@@ -1757,14 +1748,14 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
self.verify_rows_in_csv(
[
{
u'Student ID': text_type(self.student.id),
u'Email': self.student.email,
u'Username': self.student.username,
u'Grade': '0.13',
u'Homework 1: Subsection': '0.5',
u'Homework 2: Unattempted': 'Not Attempted',
u'Homework 3: Empty': 'Not Attempted',
u'Homework (Avg)': text_type(1.0 / 6.0),
'Student ID': str(self.student.id),
'Email': self.student.email,
'Username': self.student.username,
'Grade': '0.13',
'Homework 1: Subsection': '0.5',
'Homework 2: Unattempted': 'Not Attempted',
'Homework 3: Empty': 'Not Attempted',
'Homework (Avg)': str(1.0 / 6.0),
},
],
ignore_other_columns=True,
@@ -1783,7 +1774,7 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
self.addCleanup(grade_model.delete)
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
result = CourseGradeReport.generate(None, None, self.course.id, None, 'graded')
@@ -1794,14 +1785,14 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
self.verify_rows_in_csv(
[
{
u'Student ID': text_type(self.student.id),
u'Email': self.student.email,
u'Username': self.student.username,
u'Grade': '0.38',
u'Homework 1: Subsection': '0.5',
u'Homework 2: Unattempted': '1.0',
u'Homework 3: Empty': 'Not Attempted',
u'Homework (Avg)': text_type(3.0 / 6.0),
'Student ID': str(self.student.id),
'Email': self.student.email,
'Username': self.student.username,
'Grade': '0.38',
'Homework 1: Subsection': '0.5',
'Homework 2: Unattempted': '1.0',
'Homework 3: Empty': 'Not Attempted',
'Homework (Avg)': str(3.0 / 6.0),
},
],
ignore_other_columns=True,
@@ -1817,18 +1808,18 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
'lms.djangoapps.instructor_task.tasks_helper.grades.course_grade_report_verified_only',
return_value=True,
):
student_1 = self.create_student(u'user_honor')
student_verified = self.create_student(u'user_verified', mode='verified')
student_1 = self.create_student('user_honor')
student_verified = self.create_student('user_verified', mode='verified')
vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(u'Problem1', parent=vertical)
self.define_option_problem('Problem1', parent=vertical)
self.submit_student_answer(student_1.username, u'Problem1', ['Option 1'])
self.submit_student_answer(student_verified.username, u'Problem1', ['Option 1'])
self.submit_student_answer(student_1.username, 'Problem1', ['Option 1'])
self.submit_student_answer(student_verified.username, 'Problem1', ['Option 1'])
result = CourseGradeReport.generate(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 1, 'succeeded': 1, 'failed': 0}, result
@@ -1837,7 +1828,7 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
@ddt.data(True, False)
def test_fast_generation(self, create_non_zero_grade):
if create_non_zero_grade:
self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])
self.submit_student_answer(self.student.username, 'Problem1', ['Option 1'])
with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
with patch('lms.djangoapps.grades.course_data.get_course_blocks') as mock_course_blocks:
with patch('lms.djangoapps.grades.subsection_grade.get_score') as mock_get_score:
@@ -2539,8 +2530,8 @@ class TestCertificateGeneration(InstructorTaskModuleTestCase):
"""
return [
self.create_student(
username='student_{}'.format(index),
email='student_{}@example.com'.format(index)
username=f'student_{index}',
email=f'student_{index}@example.com'
)
for index in range(number_of_students)
]
@@ -2552,7 +2543,7 @@ class TestInstructorOra2Report(SharedModuleStoreTestCase):
"""
@classmethod
def setUpClass(cls):
super(TestInstructorOra2Report, cls).setUpClass()
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
@@ -2602,8 +2593,8 @@ class TestInstructorOra2Report(SharedModuleStoreTestCase):
return_val = upload_ora2_data(None, None, self.course.id, None, 'generated')
timestamp_str = datetime.now(UTC).strftime('%Y-%m-%d-%H%M')
course_id_string = quote(text_type(self.course.id).replace('/', '_'))
filename = u'{}_ORA_data_{}.csv'.format(course_id_string, timestamp_str)
course_id_string = quote(str(self.course.id).replace('/', '_'))
filename = f'{course_id_string}_ORA_data_{timestamp_str}.csv'
assert return_val == UPDATE_STATUS_SUCCEEDED
mock_store_rows.assert_called_once_with(self.course.id, filename, [test_header] + test_rows)

View File

@@ -1,15 +1,13 @@
"""
Test for LMS instructor background task views.
"""
import json
from unittest.mock import Mock, patch
from celery.states import FAILURE, PENDING, REVOKED, SUCCESS
from django.http import QueryDict
from mock import Mock, patch
from six.moves import range
from lms.djangoapps.instructor_task.models import PROGRESS
from lms.djangoapps.instructor_task.tests.test_base import (
@@ -368,7 +366,7 @@ class InstructorTaskReportTest(InstructorTaskTestCase):
def test_get_info_for_broken_output(self):
# check for non-JSON task_output
instructor_task = self._create_success_entry()
instructor_task.task_output = u"{ bad"
instructor_task.task_output = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
assert not succeeded
assert message == 'No parsable status information available'
@@ -384,7 +382,7 @@ class InstructorTaskReportTest(InstructorTaskTestCase):
def test_get_info_for_broken_input(self):
# check for non-JSON task_input, but then just ignore it
instructor_task = self._create_success_entry()
instructor_task.task_input = u"{ bad"
instructor_task.task_input = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
assert not succeeded
assert message == 'Status: rescored 2 of 3 (out of 5)'

View File

@@ -107,13 +107,13 @@ def get_task_completion_info(instructor_task): # lint-amnesty, pylint: disable=
# we're more surprised if there is no output for a completed task, but just warn:
if instructor_task.task_output is None:
log.warning(_(u"No task_output information found for instructor_task {0}").format(instructor_task.task_id))
log.warning(_("No task_output information found for instructor_task {0}").format(instructor_task.task_id))
return (succeeded, _("No status information available"))
try:
task_output = json.loads(instructor_task.task_output)
except ValueError:
fmt = _(u"No parsable task_output information found for instructor_task {0}: {1}")
fmt = _("No parsable task_output information found for instructor_task {0}: {1}")
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
return (succeeded, _("No parsable status information available"))
@@ -121,7 +121,7 @@ def get_task_completion_info(instructor_task): # lint-amnesty, pylint: disable=
return (succeeded, task_output.get('message', _('No message provided')))
if any([key not in task_output for key in ['action_name', 'attempted', 'total']]):
fmt = _(u"Invalid task_output information found for instructor_task {0}: {1}")
fmt = _("Invalid task_output information found for instructor_task {0}: {1}")
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
return (succeeded, _("No progress status information available"))
@@ -142,7 +142,7 @@ def get_task_completion_info(instructor_task): # lint-amnesty, pylint: disable=
try:
task_input = json.loads(instructor_task.task_input)
except ValueError:
fmt = _(u"No parsable task_input information found for instructor_task {0}: {1}")
fmt = _("No parsable task_input information found for instructor_task {0}: {1}")
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input))
else:
student = task_input.get('student')
@@ -153,72 +153,72 @@ def get_task_completion_info(instructor_task): # lint-amnesty, pylint: disable=
if instructor_task.task_state == PROGRESS:
# special message for providing progress updates:
# Translators: {action} is a past-tense verb that is localized separately. {attempted} and {succeeded} are counts. # lint-amnesty, pylint: disable=line-too-long
msg_format = _(u"Progress: {action} {succeeded} of {attempted} so far")
msg_format = _("Progress: {action} {succeeded} of {attempted} so far")
elif student is not None and problem_url is not None:
# this reports on actions on problems for a particular student:
if num_attempted == 0:
# Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. # lint-amnesty, pylint: disable=line-too-long
msg_format = _(u"Unable to find submission to be {action} for student '{student}'")
msg_format = _("Unable to find submission to be {action} for student '{student}'")
elif num_succeeded == 0:
# Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. # lint-amnesty, pylint: disable=line-too-long
msg_format = _(u"Problem failed to be {action} for student '{student}'")
msg_format = _("Problem failed to be {action} for student '{student}'")
else:
succeeded = True
# Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. # lint-amnesty, pylint: disable=line-too-long
msg_format = _(u"Problem successfully {action} for student '{student}'")
msg_format = _("Problem successfully {action} for student '{student}'")
elif student is not None and entrance_exam_url is not None:
# this reports on actions on entrance exam for a particular student:
if num_attempted == 0:
# Translators: {action} is a past-tense verb that is localized separately.
# {student} is a student identifier.
msg_format = _(u"Unable to find entrance exam submission to be {action} for student '{student}'")
msg_format = _("Unable to find entrance exam submission to be {action} for student '{student}'")
else:
succeeded = True
# Translators: {action} is a past-tense verb that is localized separately.
# {student} is a student identifier.
msg_format = _(u"Entrance exam successfully {action} for student '{student}'")
msg_format = _("Entrance exam successfully {action} for student '{student}'")
elif student is None and problem_url is not None:
# this reports on actions on problems for all students:
if num_attempted == 0:
# Translators: {action} is a past-tense verb that is localized separately.
msg_format = _(u"Unable to find any students with submissions to be {action}")
msg_format = _("Unable to find any students with submissions to be {action}")
elif num_succeeded == 0:
# Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
msg_format = _(u"Problem failed to be {action} for any of {attempted} students")
msg_format = _("Problem failed to be {action} for any of {attempted} students")
elif num_succeeded == num_attempted:
succeeded = True
# Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
msg_format = _(u"Problem successfully {action} for {attempted} students")
msg_format = _("Problem successfully {action} for {attempted} students")
else: # num_succeeded < num_attempted
# Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. # lint-amnesty, pylint: disable=line-too-long
msg_format = _(u"Problem {action} for {succeeded} of {attempted} students")
msg_format = _("Problem {action} for {succeeded} of {attempted} students")
elif email_id is not None:
# this reports on actions on bulk emails
if num_attempted == 0:
# Translators: {action} is a past-tense verb that is localized separately.
msg_format = _(u"Unable to find any recipients to be {action}")
msg_format = _("Unable to find any recipients to be {action}")
elif num_succeeded == 0:
# Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
msg_format = _(u"Message failed to be {action} for any of {attempted} recipients ")
msg_format = _("Message failed to be {action} for any of {attempted} recipients ")
elif num_succeeded == num_attempted:
succeeded = True
# Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
msg_format = _(u"Message successfully {action} for {attempted} recipients")
msg_format = _("Message successfully {action} for {attempted} recipients")
else: # num_succeeded < num_attempted
# Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. # lint-amnesty, pylint: disable=line-too-long
msg_format = _(u"Message {action} for {succeeded} of {attempted} recipients")
msg_format = _("Message {action} for {succeeded} of {attempted} recipients")
else:
# provide a default:
# Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. # lint-amnesty, pylint: disable=line-too-long
msg_format = _(u"Status: {action} {succeeded} of {attempted}")
msg_format = _("Status: {action} {succeeded} of {attempted}")
if num_skipped > 0:
# Translators: {skipped} is a count. This message is appended to task progress status messages.
msg_format += _(u" (skipping {skipped})")
msg_format += _(" (skipping {skipped})")
if student is None and num_attempted != num_total:
# Translators: {total} is a count. This message is appended to task progress status messages.
msg_format += _(u" (out of {total})")
msg_format += _(" (out of {total})")
# Update status in task result object itself:
message = msg_format.format(

View File

@@ -8,7 +8,7 @@ import json
from django.http import Http404
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import get_language_bidi, ugettext_lazy as _ # lint-amnesty, pylint: disable=unused-import
from django.utils.translation import ugettext_lazy as _ # lint-amnesty, pylint: disable=unused-import
from web_fragments.fragment import Fragment
from lms.djangoapps.commerce.utils import EcommerceService

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Unit tests covering the program listing and detail pages.
"""
@@ -6,16 +5,16 @@ Unit tests covering the program listing and detail pages.
import json
import re
from unittest import mock
from urllib.parse import urljoin
from uuid import uuid4
import mock
import six
from six.moves.urllib.parse import urljoin
from bs4 import BeautifulSoup
from django.conf import settings
from django.test import override_settings
from django.urls import reverse, reverse_lazy
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from lms.envs.test import CREDENTIALS_PUBLIC_SERVICE_URL
from openedx.core.djangoapps.catalog.constants import PathwayType
from openedx.core.djangoapps.catalog.tests.factories import (
@@ -27,7 +26,6 @@ from openedx.core.djangoapps.catalog.tests.factories import (
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangolib.testing.utils import skip_unless_lms
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory as ModuleStoreCourseFactory
@@ -39,7 +37,7 @@ def load_serialized_data(response, key):
"""
Extract and deserialize serialized data from the response.
"""
pattern = re.compile(u'{key}: (?P<data>\\[.*\\])'.format(key=key))
pattern = re.compile(f'{key}: (?P<data>\\[.*\\])')
match = pattern.search(response.content.decode('utf-8'))
serialized = match.group('data')
@@ -57,10 +55,10 @@ class TestProgramListing(ProgramsApiConfigMixin, SharedModuleStoreTestCase):
@classmethod
def setUpClass(cls):
super(TestProgramListing, cls).setUpClass()
super().setUpClass()
cls.course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=six.text_type(cls.course.id)) # lint-amnesty, pylint: disable=no-member
course_run = CourseRunFactory(key=str(cls.course.id)) # lint-amnesty, pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.first_program = ProgramFactory(courses=[course])
@@ -69,7 +67,7 @@ class TestProgramListing(ProgramsApiConfigMixin, SharedModuleStoreTestCase):
cls.data = sorted([cls.first_program, cls.second_program], key=cls.program_sort_key)
def setUp(self):
super(TestProgramListing, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
@@ -205,10 +203,10 @@ class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, Shared
@classmethod
def setUpClass(cls):
super(TestProgramDetails, cls).setUpClass()
super().setUpClass()
modulestore_course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=six.text_type(modulestore_course.id)) # lint-amnesty, pylint: disable=no-member
course_run = CourseRunFactory(key=str(modulestore_course.id)) # lint-amnesty, pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.program_data = ProgramFactory(uuid=cls.program_uuid, courses=[course])
@@ -218,7 +216,7 @@ class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, Shared
del cls.pathway_data['programs'] # lint-amnesty, pylint: disable=unsupported-delete-operation
def setUp(self):
super(TestProgramDetails, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
super().setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
@@ -228,7 +226,7 @@ class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, Shared
self.assertContains(response, 'programData')
self.assertContains(response, 'urls')
self.assertContains(response,
u'"program_record_url": "{}/records/programs/'.format(CREDENTIALS_PUBLIC_SERVICE_URL))
f'"program_record_url": "{CREDENTIALS_PUBLIC_SERVICE_URL}/records/programs/')
self.assertContains(response, 'program_listing_url')
self.assertContains(response, self.program_data['title'])
self.assert_programs_tab_present(response)

View File

@@ -4,7 +4,6 @@ Unit test module covering utils module
import ddt
import six
from django.test import TestCase
from lms.djangoapps.learner_dashboard import utils
@@ -22,5 +21,5 @@ class TestUtils(TestCase):
Test to make sure the function 'strip_course_id'
handles various url input
"""
actual = utils.strip_course_id(path + six.text_type(utils.FAKE_COURSE_KEY))
actual = utils.strip_course_id(path + str(utils.FAKE_COURSE_KEY))
assert actual == path

View File

@@ -3,7 +3,6 @@ The utility methods and functions to help the djangoapp logic
"""
import six
from opaque_keys.edx.keys import CourseKey
FAKE_COURSE_KEY = CourseKey.from_string('course-v1:fake+course+run')
@@ -14,5 +13,5 @@ def strip_course_id(path):
The utility function to help remove the fake
course ID from the url path
"""
course_id = six.text_type(FAKE_COURSE_KEY)
course_id = str(FAKE_COURSE_KEY)
return path.split(course_id)[0]