Refactor Instructor Tasks - in prep for Grade report work
@@ -17,13 +17,13 @@ from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError
from bulk_email.models import CourseEmail, SEND_TO_MYSELF, BulkEmailFlag
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from lms.djangoapps.instructor_task.exceptions import DuplicateTaskException
from lms.djangoapps.instructor_task.models import InstructorTask
from lms.djangoapps.instructor_task.subtasks import (
    initialize_subtask_info,
    SubtaskStatus,
    check_subtask_is_valid,
    update_subtask_status,
    DuplicateTaskException,
    MAX_DATABASE_LOCK_RETRIES,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
@@ -9,7 +9,7 @@ from .scores import possibly_scored
def grading_context_for_course(course_key):
    """
    Same as grading_context, but takes in a course object.
    Same as grading_context, but takes in a course key.
    """
    course_structure = get_course_in_cache(course_key)
    return grading_context(course_structure)
@@ -160,7 +160,7 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
        }
    )

    @patch('lms.djangoapps.instructor_task.tasks_helper.tracker')
    @patch('lms.djangoapps.instructor_task.tasks_helper.module_state.tracker')
    @patch('lms.djangoapps.grades.signals.handlers.tracker')
    @patch('lms.djangoapps.grades.models.tracker')
    def test_rescoring_events(self, models_tracker, handlers_tracker, instructor_task_tracker):
18
lms/djangoapps/instructor_task/exceptions.py
Normal file
@@ -0,0 +1,18 @@
"""
Exception classes used by Instructor tasks.
"""


class UpdateProblemModuleStateError(Exception):
    """
    Error signaling a fatal condition while updating problem modules.

    Used when the current module cannot be processed and no more
    modules should be attempted.
    """
    pass


class DuplicateTaskException(Exception):
    """Exception indicating that a task already exists or has already completed."""
    pass
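
For orientation, a hedged sketch of how these two exceptions are meant to be consumed (the loop and process_module below are hypothetical, not part of this commit): DuplicateTaskException is caught by the subtask machinery to skip work that already ran, while UpdateProblemModuleStateError is left to propagate and abort the whole run.

    # Hypothetical consumer sketch; process_module is not a real function here.
    def visit_modules(modules):
        for module in modules:
            try:
                process_module(module)
            except UpdateProblemModuleStateError:
                # Fatal: stop visiting further modules and let the task fail.
                raise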
@@ -14,9 +14,12 @@ import dogstats_wrapper as dog_stats_api
from django.db import transaction, DatabaseError
from django.core.cache import cache

from lms.djangoapps.instructor_task.models import InstructorTask, PROGRESS, QUEUING
from util.db import outer_atomic

from .exceptions import DuplicateTaskException
from .models import InstructorTask, PROGRESS, QUEUING


TASK_LOG = logging.getLogger('edx.celery.task')

# Lock expiration should be long enough to allow a subtask to complete.
@@ -26,11 +29,6 @@ SUBTASK_LOCK_EXPIRE = 60 * 10  # Lock expires in 10 minutes
MAX_DATABASE_LOCK_RETRIES = 5


class DuplicateTaskException(Exception):
    """Exception indicating that a task already exists or has already completed."""
    pass


def _get_number_of_subtasks(total_num_items, items_per_task):
    """
    Determines number of subtasks that would be generated by _generate_items_for_subtask.
@@ -27,25 +27,33 @@ from django.utils.translation import ugettext_noop
from celery import task
from bulk_email.tasks import perform_delegate_email_batches
from lms.djangoapps.instructor_task.tasks_helper import (
    run_main_task,
    BaseInstructorTask,
from lms.djangoapps.instructor_task.tasks_base import BaseInstructorTask
from lms.djangoapps.instructor_task.tasks_helper.runner import run_main_task
from lms.djangoapps.instructor_task.tasks_helper.certs import (
    generate_students_certificates,
)
from lms.djangoapps.instructor_task.tasks_helper.enrollments import (
    upload_enrollment_report,
    upload_may_enroll_csv,
    upload_exec_summary_report,
    upload_students_csv,
)
from lms.djangoapps.instructor_task.tasks_helper.grades import (
    generate_course_grade_report,
    generate_problem_grade_report,
    upload_problem_responses_csv,
)
from lms.djangoapps.instructor_task.tasks_helper.misc import (
    cohort_students_and_upload,
    upload_course_survey_report,
    upload_proctored_exam_results_report,
    upload_ora2_data,
)
from lms.djangoapps.instructor_task.tasks_helper.module_state import (
    perform_module_state_update,
    rescore_problem_module_state,
    reset_attempts_module_state,
    delete_problem_module_state,
    upload_problem_responses_csv,
    upload_grades_csv,
    upload_problem_grade_report,
    upload_students_csv,
    cohort_students_and_upload,
    upload_enrollment_report,
    upload_may_enroll_csv,
    upload_exec_summary_report,
    upload_course_survey_report,
    generate_students_certificates,
    upload_proctored_exam_results_report,
    upload_ora2_data,
)
@@ -168,7 +176,7 @@ def calculate_grades_csv(entry_id, xmodule_instance_args):
        xmodule_instance_args.get('task_id'), entry_id, action_name
    )

    task_fn = partial(upload_grades_csv, xmodule_instance_args)
    task_fn = partial(generate_course_grade_report, xmodule_instance_args)
    return run_main_task(entry_id, task_fn, action_name)
@@ -185,7 +193,7 @@ def calculate_problem_grade_report(entry_id, xmodule_instance_args):
        xmodule_instance_args.get('task_id'), entry_id, action_name
    )

    task_fn = partial(upload_problem_grade_report, xmodule_instance_args)
    task_fn = partial(generate_problem_grade_report, xmodule_instance_args)
    return run_main_task(entry_id, task_fn, action_name)
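
Both hunks above follow the same refactoring pattern: the task body in tasks.py shrinks to a partial over a report generator that now lives in a tasks_helper submodule, delegated through run_main_task. A hedged sketch of the shape, with hypothetical names except run_main_task, BaseInstructorTask, and task:

    from functools import partial

    @task(base=BaseInstructorTask)
    def calculate_example_report(entry_id, xmodule_instance_args):
        # generate_example_report is hypothetical; the real generators live
        # in lms/djangoapps/instructor_task/tasks_helper/ after this refactor.
        action_name = 'generated'
        task_fn = partial(generate_example_report, xmodule_instance_args)
        return run_main_task(entry_id, task_fn, action_name)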
96
lms/djangoapps/instructor_task/tasks_base.py
Normal file
@@ -0,0 +1,96 @@
"""
Base class for Instructor celery tasks.
"""
from celery import Task
from celery.states import SUCCESS, FAILURE
import logging
from lms.djangoapps.instructor_task.models import InstructorTask


# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')


class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.

    Permits updating information about task in corresponding InstructorTask for monitoring purposes.

    Assumes that the entry_id of the InstructorTask model is the first argument to the task.

    The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
    updates the entry on success and failure of the task it wraps. It is setting the entry's value
    for task_state based on what Celery would set it to once the task returns to Celery:
    FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and documented there.
    """
    abstract = True

    def on_success(self, task_progress, task_id, args, kwargs):
        """
        Update InstructorTask object corresponding to this task with info about success.

        Updates task_output and task_state. But it shouldn't actually do anything
        if the task is only creating subtasks to actually do the work.

        Assumes `task_progress` is a dict containing the task's result, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages. Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

        This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
        """
        TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
        # We should be able to find the InstructorTask object to update
        # based on the task_id here, without having to dig into the
        # original args to the task. On the other hand, the entry_id
        # is the first value passed to all such args, so we'll use that.
        # And we assume that it exists, else we would already have had a failure.
        entry_id = args[0]
        entry = InstructorTask.objects.get(pk=entry_id)
        # Check to see if any subtasks had been defined as part of this task.
        # If not, then we know that we're done. (If so, let the subtasks
        # handle updating task_state themselves.)
        if len(entry.subtasks) == 0:
            entry.task_output = InstructorTask.create_output_for_success(task_progress)
            entry.task_state = SUCCESS
            entry.save_now()

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Update InstructorTask object corresponding to this task with info about failure.

        Fetches and updates exception and traceback information on failure.

        If an exception is raised internal to the task, it is caught by celery and provided here.
        The information is recorded in the InstructorTask object as a JSON-serialized dict
        stored in the task_output column. It contains the following keys:

          'exception': type of exception object
          'message': error message from exception object
          'traceback': traceback information (truncated if necessary)

        Note that there is no way to record progress made within the task (e.g. attempted,
        succeeded, etc.) when such failures occur.
        """
        TASK_LOG.debug(u'Task %s: failure returned', task_id)
        entry_id = args[0]
        try:
            entry = InstructorTask.objects.get(pk=entry_id)
        except InstructorTask.DoesNotExist:
            # if the InstructorTask object does not exist, then there's no point
            # trying to update it.
            TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
        else:
            TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
            entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
            entry.task_state = FAILURE
            entry.save_now()
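
A reading aid, not part of the commit: any task bound to this base class must accept the InstructorTask primary key as its first positional argument, because on_success and on_failure both recover the entry via args[0]. A minimal hypothetical task:

    @task(base=BaseInstructorTask)
    def noop_instructor_task(entry_id, _xmodule_instance_args):
        # The returned dict becomes task_progress in on_success and is
        # JSON-serialized into InstructorTask.task_output.
        return {'attempted': 0, 'succeeded': 0, 'skipped': 0,
                'failed': 0, 'total': 0, 'action_name': 'noop', 'duration_ms': 0}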
lms/djangoapps/instructor_task/tasks_helper.py
@@ -1,1759 +0,0 @@
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.

"""
import json
import logging
from StringIO import StringIO
from collections import OrderedDict
from datetime import datetime
from itertools import chain
from time import time

import dogstats_wrapper as dog_stats_api
import re
import unicodecsv
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import reset_queries
from django.db.models import Q
from django.utils.translation import ugettext as _
from eventtracking import tracker
from lms.djangoapps.grades.scores import weighted_score
from lms.djangoapps.instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from lms.djangoapps.teams.models import CourseTeamMembership
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from pytz import UTC
from track import contexts
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions_service import PartitionService
from xmodule.split_test_module import get_split_user_partitions

from certificates.api import generate_user_certificates
from certificates.models import (
    CertificateWhitelist,
    certificate_info_for_user,
    CertificateStatuses,
    GeneratedCertificate
)
from courseware.courses import get_course_by_id, get_problems_in_section
from lms.djangoapps.grades.context import grading_context_for_course
from lms.djangoapps.grades.new.course_grade_factory import CourseGradeFactory
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.models import StudentModule
from courseware.module_render import get_module_for_descriptor_internal
from edxmako.shortcuts import render_to_string
from instructor_analytics.basic import (
    enrolled_students_features,
    get_proctored_exam_results,
    list_may_enroll,
    list_problem_responses
)
from instructor_analytics.csvs import format_dictlist
from shoppingcart.models import (
    PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
    Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from openassessment.data import OraAggregateData
from lms.djangoapps.instructor_task.models import ReportStore, InstructorTask, PROGRESS
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from survey.models import SurveyAnswer
from track.event_transaction_utils import set_event_transaction_type, create_new_event_transaction_id
from track.views import task_track
from util.db import outer_atomic
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xblock.runtime import KvsFieldData

# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')

# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'

# define value to be used in grading events
GRADES_RESCORE_EVENT_TYPE = 'edx.grades.problem.rescored'

# The event name used when an instructor report is requested.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'

class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.

    Permits updating information about task in corresponding InstructorTask for monitoring purposes.

    Assumes that the entry_id of the InstructorTask model is the first argument to the task.

    The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
    updates the entry on success and failure of the task it wraps. It is setting the entry's value
    for task_state based on what Celery would set it to once the task returns to Celery:
    FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and documented there.
    """
    abstract = True

    def on_success(self, task_progress, task_id, args, kwargs):
        """
        Update InstructorTask object corresponding to this task with info about success.

        Updates task_output and task_state. But it shouldn't actually do anything
        if the task is only creating subtasks to actually do the work.

        Assumes `task_progress` is a dict containing the task's result, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages. Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

        This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
        """
        TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
        # We should be able to find the InstructorTask object to update
        # based on the task_id here, without having to dig into the
        # original args to the task. On the other hand, the entry_id
        # is the first value passed to all such args, so we'll use that.
        # And we assume that it exists, else we would already have had a failure.
        entry_id = args[0]
        entry = InstructorTask.objects.get(pk=entry_id)
        # Check to see if any subtasks had been defined as part of this task.
        # If not, then we know that we're done. (If so, let the subtasks
        # handle updating task_state themselves.)
        if len(entry.subtasks) == 0:
            entry.task_output = InstructorTask.create_output_for_success(task_progress)
            entry.task_state = SUCCESS
            entry.save_now()

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Update InstructorTask object corresponding to this task with info about failure.

        Fetches and updates exception and traceback information on failure.

        If an exception is raised internal to the task, it is caught by celery and provided here.
        The information is recorded in the InstructorTask object as a JSON-serialized dict
        stored in the task_output column. It contains the following keys:

          'exception': type of exception object
          'message': error message from exception object
          'traceback': traceback information (truncated if necessary)

        Note that there is no way to record progress made within the task (e.g. attempted,
        succeeded, etc.) when such failures occur.
        """
        TASK_LOG.debug(u'Task %s: failure returned', task_id)
        entry_id = args[0]
        try:
            entry = InstructorTask.objects.get(pk=entry_id)
        except InstructorTask.DoesNotExist:
            # if the InstructorTask object does not exist, then there's no point
            # trying to update it.
            TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
        else:
            TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
            entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
            entry.task_state = FAILURE
            entry.save_now()


class UpdateProblemModuleStateError(Exception):
    """
    Error signaling a fatal condition while updating problem modules.

    Used when the current module cannot be processed and no more
    modules should be attempted.
    """
    pass

def _get_current_task():
    """
    Stub to make it easier to test without actually running Celery.

    This is a wrapper around celery.current_task, which provides access
    to the top of the stack of Celery's tasks. When running tests, however,
    it doesn't seem to work to mock current_task directly, so this wrapper
    is used to provide a hook to mock in tests, while providing the real
    `current_task` in production.
    """
    return current_task
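
Since the docstring names this wrapper as the mocking hook, a test would patch it rather than celery.current_task; a hedged sketch (the patch target reflects this file's path before the refactor):

    from mock import Mock, patch

    with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_get_task:
        mock_get_task.return_value = Mock(request=Mock(id='fake-task-id'))
        # code under test now sees the mocked task via _get_current_task()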

class TaskProgress(object):
    """
    Encapsulates the current task's progress by keeping track of
    'attempted', 'succeeded', 'skipped', 'failed', 'total',
    'action_name', and 'duration_ms' values.
    """
    def __init__(self, action_name, total, start_time):
        self.action_name = action_name
        self.total = total
        self.start_time = start_time
        self.attempted = 0
        self.succeeded = 0
        self.skipped = 0
        self.failed = 0

    def update_task_state(self, extra_meta=None):
        """
        Update the current celery task's state to the progress state
        specified by the current object. Returns the progress
        dictionary for use by `run_main_task` and
        `BaseInstructorTask.on_success`.

        Arguments:
            extra_meta (dict): Extra metadata to pass to `update_state`

        Returns:
            dict: The current task's progress dict
        """
        progress_dict = {
            'action_name': self.action_name,
            'attempted': self.attempted,
            'succeeded': self.succeeded,
            'skipped': self.skipped,
            'failed': self.failed,
            'total': self.total,
            'duration_ms': int((time() - self.start_time) * 1000),
        }
        if extra_meta is not None:
            progress_dict.update(extra_meta)
        _get_current_task().update_state(state=PROGRESS, meta=progress_dict)
        return progress_dict
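
A hedged usage sketch (only meaningful inside a running celery task, since update_task_state calls update_state on the current task):

    progress = TaskProgress('rescored', total=10, start_time=time())
    progress.attempted += 1
    progress.succeeded += 1
    progress_dict = progress.update_task_state(extra_meta={'step': 'Rescoring'})
    # progress_dict now holds attempted/succeeded/skipped/failed/total,
    # action_name, duration_ms, plus the extra 'step' key.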

def run_main_task(entry_id, task_fcn, action_name):
    """
    Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.

    Arguments passed to `task_fcn` are:

     `entry_id` : the primary key for the InstructorTask entry representing the task.
     `course_id` : the id for the course.
     `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
     `action_name` : past-tense verb to use for constructing status messages.

    If no exceptions are raised, the `task_fcn` should return a dict containing
    the task's result with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.
              Should be past-tense. Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    """
    # Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
    # There's no point in catching it here.
    with outer_atomic():
        entry = InstructorTask.objects.get(pk=entry_id)
        entry.task_state = PROGRESS
        entry.save_now()

    # Get inputs to use in this task from the entry
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)

    # Construct log message
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
    TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)

    # Check that the task_id submitted in the InstructorTask matches the current task
    # that is running.
    request_task_id = _get_current_task().request.id
    if task_id != request_task_id:
        fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
        message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
        TASK_LOG.error(message)
        raise ValueError(message)

    # Now do the work
    with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
        task_progress = task_fcn(entry_id, course_id, task_input, action_name)

    # Release any queries that the connection has been hanging onto
    reset_queries()

    # Log and exit, returning task_progress info as task result
    TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
    return task_progress
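
The contract for task_fcn, restated as a hedged stub (names hypothetical):

    def example_task_fcn(_entry_id, course_id, task_input, action_name):
        progress = TaskProgress(action_name, total=0, start_time=time())
        # ... perform the work for course_id using task_input,
        # incrementing progress.attempted/succeeded/failed as it goes ...
        return progress.update_task_state()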

def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, performs update on modules for all students on the specified
    problem.

    If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed four arguments: the module_descriptor for the module pointed to by the
    module_state_key, the particular StudentModule to update, the xmodule_instance_args, and the task_input
    being passed through. If the value returned by the update function evaluates to a boolean True,
    the update is successful; False indicates the update on the particular student module failed.
    A raised exception indicates a fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible updates to attempt
          'action_name': user-visible verb to use in status messages. Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.
    """
    start_time = time()
    usage_keys = []
    problem_url = task_input.get('problem_url')
    entrance_exam_url = task_input.get('entrance_exam_url')
    student_identifier = task_input.get('student')
    problems = {}

    # if problem_url is present make a usage key from it
    if problem_url:
        usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
        usage_keys.append(usage_key)

        # find the problem descriptor:
        problem_descriptor = modulestore().get_item(usage_key)
        problems[unicode(usage_key)] = problem_descriptor

    # if entrance_exam is present grab all problems in it
    if entrance_exam_url:
        problems = get_problems_in_section(entrance_exam_url)
        usage_keys = [UsageKey.from_string(location) for location in problems.keys()]

    # find the modules in question
    modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)

    # give the option of updating an individual student. If not specified,
    # then updates all students who have responded to a problem so far
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student,
        # and let it throw an exception if none is found.
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        elif student_identifier is not None:
            student = User.objects.get(username=student_identifier)

    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
    task_progress.update_task_state()

    for module_to_update in modules_to_update:
        task_progress.attempted += 1
        module_descriptor = problems[unicode(module_to_update.module_state_key)]
        # There is no try here: if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
            update_status = update_fcn(module_descriptor, module_to_update, task_input)
            if update_status == UPDATE_STATUS_SUCCEEDED:
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                task_progress.succeeded += 1
            elif update_status == UPDATE_STATUS_FAILED:
                task_progress.failed += 1
            elif update_status == UPDATE_STATUS_SKIPPED:
                task_progress.skipped += 1
            else:
                raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))

    return task_progress.update_task_state()
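
How update_fcn is wired in practice: the per-module functions below take xmodule_instance_args first, which the caller pre-binds with functools.partial before handing the result in as update_fcn. A sketch grounded in this file (assuming entry_id, course_id, task_input, action_name, and xmodule_instance_args are in scope inside a task body):

    from functools import partial

    update_fcn = partial(rescore_problem_module_state, xmodule_instance_args)
    task_progress = perform_module_state_update(
        update_fcn, None, entry_id, course_id, task_input, action_name
    )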

def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    else:
        return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)


def _get_xqueue_callback_url_prefix(xmodule_instance_args):
    """Gets prefix to use when constructing xqueue_callback_url."""
    if xmodule_instance_args is None:
        return ''
    else:
        return xmodule_instance_args.get('xqueue_callback_url_prefix', '')

def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
    """
    Make a tracking function that logs what happened.

    For insertion into ModuleSystem, and used by CapaModule, which will
    provide the event_type (as string) and event (as dict) as arguments.
    The request_info and task_info (and page) are provided here.
    """
    # get request-related tracking information from args passthrough, and supplement with task-specific
    # information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {'student': student.username, 'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args)}

    return lambda event_type, event: task_track(request_info, task_info, event_type, event, page=source_page)

def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None, course=None):
    """
    Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
    student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))

    # get request-related tracking information from args passthrough, and supplement with task-specific
    # information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}

    def make_track_function():
        '''
        Make a tracking function that logs what happened.

        For insertion into ModuleSystem, and used by CapaModule, which will
        provide the event_type (as string) and event (as dict) as arguments.
        The request_info and task_info (and page) are provided here.
        '''
        return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')

    xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
        if xmodule_instance_args is not None else ''

    return get_module_for_descriptor_internal(
        user=student,
        descriptor=module_descriptor,
        student_data=student_data,
        course_id=course_id,
        track_function=make_track_function(),
        xqueue_callback_url_prefix=xqueue_callback_url_prefix,
        grade_bucket_type=grade_bucket_type,
        # This module isn't being used for front-end rendering
        request_token=None,
        # pass in a loaded course for override enabling
        course=course
    )

@outer_atomic
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module, task_input):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns True if problem was successfully rescored for the given student, and False
    if problem encountered some kind of error in rescoring.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    usage_key = student_module.module_state_key

    with modulestore().bulk_operations(course_id):
        course = get_course_by_id(course_id)
        # TODO: Here is a call site where we could pass in a loaded course. I
        # think we certainly need it since grading is happening here, and field
        # overrides would be important in handling that correctly
        instance = _get_module_instance_for_task(
            course_id,
            student,
            module_descriptor,
            xmodule_instance_args,
            grade_bucket_type='rescore',
            course=course
        )

        if instance is None:
            # Either permissions just changed, or someone is trying to be clever
            # and load something they shouldn't have access to.
            msg = "No module {loc} for student {student}--access denied?".format(
                loc=usage_key,
                student=student
            )
            TASK_LOG.warning(msg)
            return UPDATE_STATUS_FAILED

        # TODO: (TNL-6594) Remove this switch once rescore_problem support
        # is removed, i.e. once CAPA uses ScorableXBlockMixin.
        for method in ['rescore', 'rescore_problem']:
            rescore_method = getattr(instance, method, None)
            if rescore_method is not None:
                break
        else:  # for-else: Neither method exists on the block.
            # This should not happen, since it should be already checked in the
            # caller, but check here to be sure.
            msg = "Specified problem does not support rescoring."
            raise UpdateProblemModuleStateError(msg)

        # TODO: Remove the first part of this if-else with TNL-6594
        # We check here to see if the problem has any submissions. If it does not, we don't want to rescore it
        if hasattr(instance, "done"):
            if not instance.done:
                return UPDATE_STATUS_SKIPPED
        elif not instance.has_submitted_answer():
            return UPDATE_STATUS_SKIPPED

        # Set the tracking info before this call, because it makes downstream
        # calls that create events. We retrieve and store the id here because
        # the request cache will be erased during downstream calls.
        event_transaction_id = create_new_event_transaction_id()
        set_event_transaction_type(GRADES_RESCORE_EVENT_TYPE)

        result = rescore_method(only_if_higher=task_input['only_if_higher'])
        instance.save()

        if result is None or result.get(u'success') in {u'correct', u'incorrect'}:
            TASK_LOG.debug(
                u"successfully processed rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s",
                dict(
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )

            if result is not None:  # Only for CAPA. This will get moved to the grade handler.
                new_weighted_earned, new_weighted_possible = weighted_score(
                    result['new_raw_earned'] if result else None,
                    result['new_raw_possible'] if result else None,
                    module_descriptor.weight,
                )

                # TODO: remove this context manager after completion of AN-6134
                context = contexts.course_context_from_course_id(course_id)
                with tracker.get_tracker().context(GRADES_RESCORE_EVENT_TYPE, context):
                    tracker.emit(
                        unicode(GRADES_RESCORE_EVENT_TYPE),
                        {
                            'course_id': unicode(course_id),
                            'user_id': unicode(student.id),
                            'problem_id': unicode(usage_key),
                            'new_weighted_earned': new_weighted_earned,
                            'new_weighted_possible': new_weighted_possible,
                            'only_if_higher': task_input['only_if_higher'],
                            'instructor_id': unicode(xmodule_instance_args['request_info']['user_id']),
                            'event_transaction_id': unicode(event_transaction_id),
                            'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
                        }
                    )
            return UPDATE_STATUS_SUCCEEDED
        else:
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result.get('success', result),
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED

@outer_atomic
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
    """
    Resets problem attempts to zero for specified `student_module`.

    Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
    that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
    """
    update_status = UPDATE_STATUS_SKIPPED
    problem_state = json.loads(student_module.state) if student_module.state else {}
    if 'attempts' in problem_state:
        old_number_of_attempts = problem_state["attempts"]
        if old_number_of_attempts > 0:
            problem_state["attempts"] = 0
            # convert back to json and save
            student_module.state = json.dumps(problem_state)
            student_module.save()
            # get request-related tracking information from args passthrough,
            # and supplement with task-specific information:
            track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
            event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
            track_function('problem_reset_attempts', event_info)
            update_status = UPDATE_STATUS_SUCCEEDED

    return update_status
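
An illustrative state transition for the reset above (values hypothetical):

    # before: student_module.state == '{"attempts": 3, "seed": 1}'
    # after:  student_module.state == '{"attempts": 0, "seed": 1}'
    # and a 'problem_reset_attempts' event is emitted with
    # {"old_attempts": 3, "new_attempts": 0}.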

@outer_atomic
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
    """
    Delete the StudentModule entry.

    Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't raise an exception due to database error.
    """
    student_module.delete()
    # get request-related tracking information from args passthrough,
    # and supplement with task-specific information:
    track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    track_function('problem_delete_state', {})
    return UPDATE_STATUS_SUCCEEDED

def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (first row may be a
            header):
            [
                [row1_column1, row1_column2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course
    """
    report_store = ReportStore.from_config(config_name)
    report_store.store_rows(
        course_id,
        u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
            course_prefix=course_filename_prefix_generator(course_id),
            csv_name=csv_name,
            timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
        ),
        rows
    )
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
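
A hedged usage sketch of the uploader above:

    rows = [
        ['id', 'username', 'grade'],  # header row
        [42, u'alice', 0.93],         # hypothetical data row
    ]
    upload_csv_to_report_store(rows, 'example_report', course_id, datetime.now(UTC))
    # Stored as "<course_prefix>_example_report_<YYYY-MM-DD-HHMM>.csv" in the
    # GRADES_DOWNLOAD report store; a REPORT_REQUESTED event is emitted.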

def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Upload Executive Summary HTML file using ReportStore.

    Arguments:
        data_dict: containing executive report data.
        report_name: Name of the resulting HTML file.
        course_id: ID of the course
    """
    report_store = ReportStore.from_config(config_name)

    # Use the data dict and html template to generate the output buffer
    output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))

    report_store.store(
        course_id,
        u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
            course_prefix=course_filename_prefix_generator(course_id),
            report_name=report_name,
            timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
        ),
        output_buffer,
    )
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})

def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    total_enrolled_students = enrolled_students.count()
    task_progress = TaskProgress(action_name, total_enrolled_students, start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students,
    )

    graded_assignments = _graded_assignments(course_id)
    grade_header = []
    for assignment_info in graded_assignments.itervalues():
        if assignment_info['use_subsection_headers']:
            grade_header.extend(assignment_info['subsection_headers'].itervalues())
        grade_header.append(assignment_info['average_header'])

    rows.append(
        ["Student ID", "Email", "Username", "Grade"] +
        grade_header +
        cohorts_header +
        group_configs_header +
        teams_header +
        ['Enrollment Track', 'Verification Status'] +
        certificate_info_header
    )

    for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )

        if not course_grade:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
            continue

        # We were able to successfully grade this student for this course.
        task_progress.succeeded += 1

        cohorts_group_name = []
        if course_is_cohorted:
            group = get_cohort(student, course_id, assign=False)
            cohorts_group_name.append(group.name if group else '')

        group_configs_group_names = []
        for partition in experiment_partitions:
            group = PartitionService(course_id).get_group(student, partition, assign=False)
            group_configs_group_names.append(group.name if group else '')

        team_name = []
        if teams_enabled:
            try:
                membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
                team_name.append(membership.team.name)
            except CourseTeamMembership.DoesNotExist:
                team_name.append('')

        enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
        verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
            student,
            course_id,
            enrollment_mode
        )
        certificate_info = certificate_info_for_user(
            student,
            course_id,
            course_grade.letter_grade,
            student.id in whitelisted_user_ids
        )

        TASK_LOG.info(
            u'Student certificate eligibility: %s '
            u'(user=%s, course_id=%s, grade_percent=%s letter_grade=%s gradecutoffs=%s, allow_certificate=%s, '
            u'is_whitelisted=%s)',
            certificate_info[0],
            student,
            course_id,
            course_grade.percent,
            course_grade.letter_grade,
            course.grade_cutoffs,
            student.profile.allow_certificate,
            student.id in whitelisted_user_ids
        )

        grade_results = []
        for assignment_type, assignment_info in graded_assignments.iteritems():
            for subsection_location in assignment_info['subsection_headers']:
                try:
                    subsection_grade = course_grade.graded_subsections_by_format[assignment_type][subsection_location]
                except KeyError:
                    grade_results.append([u'Not Available'])
                else:
                    if subsection_grade.graded_total.first_attempted is not None:
                        grade_results.append(
                            [subsection_grade.graded_total.earned / subsection_grade.graded_total.possible]
                        )
                    else:
                        grade_results.append([u'Not Attempted'])
            if assignment_info['use_subsection_headers']:
                assignment_average = course_grade.grader_result['grade_breakdown'].get(assignment_type, {}).get(
                    'percent'
                )
                grade_results.append([assignment_average])

        grade_results = list(chain.from_iterable(grade_results))

        rows.append(
            [student.id, student.email, student.username, course_grade.percent] +
            grade_results + cohorts_group_name + group_configs_group_names + team_name +
            [enrollment_mode] + [verification_status] + certificate_info
        )

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)

def _graded_assignments(course_key):
    """
    Returns an OrderedDict that maps an assignment type to a dict of subsection-headers and average-header.
    """
    grading_context = grading_context_for_course(course_key)
    graded_assignments_map = OrderedDict()
    for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
        graded_subsections_map = OrderedDict()

        for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
            subsection = subsection_info['subsection_block']
            header_name = u"{assignment_type} {subsection_index}: {subsection_name}".format(
                assignment_type=assignment_type_name,
                subsection_index=subsection_index,
                subsection_name=subsection.display_name,
            )
            graded_subsections_map[subsection.location] = header_name

        average_header = u"{assignment_type}".format(assignment_type=assignment_type_name)

        # Use separate subsection and average columns only if
        # there's more than one subsection.
        use_subsection_headers = len(subsection_infos) > 1
        if use_subsection_headers:
            average_header += u" (Avg)"

        graded_assignments_map[assignment_type_name] = {
            'subsection_headers': graded_subsections_map,
            'average_header': average_header,
            'use_subsection_headers': use_subsection_headers
        }
    return graded_assignments_map
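
The shape of the returned map, with illustrative values:

    # OrderedDict([
    #     ('Homework', {
    #         'subsection_headers': OrderedDict([
    #             (usage_key_1, u'Homework 1: Introduction'),
    #             (usage_key_2, u'Homework 2: Vectors'),
    #         ]),
    #         'average_header': u'Homework (Avg)',
    #         'use_subsection_headers': True,
    #     }),
    # ])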

def _graded_scorable_blocks_to_header(course_key):
    """
    Returns an OrderedDict that maps a scorable block's id to its
    headers in the final report.
    """
    scorable_blocks_map = OrderedDict()
    grading_context = grading_context_for_course(course_key)
    for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
        for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
            for scorable_block in subsection_info['scored_descendants']:
                header_name = (
                    u"{assignment_type} {subsection_index}: "
                    u"{subsection_name} - {scorable_block_name}"
                ).format(
                    scorable_block_name=scorable_block.display_name,
                    assignment_type=assignment_type_name,
                    subsection_index=subsection_index,
                    subsection_name=subsection_info['subsection_block'].display_name,
                )
                scorable_blocks_map[scorable_block.location] = [header_name + " (Earned)", header_name + " (Possible)"]
    return scorable_blocks_map

def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    all student answers to a given problem, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'Calculating students answers to problem'}
    task_progress.update_task_state(extra_meta=current_step)

    # Compute result table and format it
    problem_location = task_input.get('problem_location')
    student_data = list_problem_responses(course_id, problem_location)
    features = ['username', 'state']
    header, rows = format_dictlist(student_data, features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    problem_location = re.sub(r'[:/]', '_', problem_location)
    csv_name = 'student_state_from_{}'.format(problem_location)
    upload_csv_to_report_store(rows, csv_name, course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
|
||||
"""
|
||||
Generate a CSV containing all students' problem grades within a given
|
||||
`course_id`.
|
||||
"""
|
||||
start_time = time()
|
||||
start_date = datetime.now(UTC)
|
||||
status_interval = 100
|
||||
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
|
||||
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
|
||||
|
||||
# This struct encapsulates both the display names of each static item in the
|
||||
# header row as values as well as the django User field names of those items
|
||||
# as the keys. It is structured in this way to keep the values related.
|
||||
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
|
||||
|
||||
graded_scorable_blocks = _graded_scorable_blocks_to_header(course_id)
|
||||
|
||||
# Just generate the static fields for now.
|
||||
rows = [list(header_row.values()) + ['Grade'] + list(chain.from_iterable(graded_scorable_blocks.values()))]
|
||||
error_rows = [list(header_row.values()) + ['error_msg']]
|
||||
current_step = {'step': 'Calculating Grades'}
|
||||
|
||||
course = get_course_by_id(course_id)
|
||||
for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
|
||||
student_fields = [getattr(student, field_name) for field_name in header_row]
|
||||
task_progress.attempted += 1
|
||||
|
||||
if not course_grade:
|
||||
# There was an error grading this student.
|
||||
if not err_msg:
|
||||
err_msg = u'Unknown error'
|
||||
error_rows.append(student_fields + [err_msg])
|
||||
task_progress.failed += 1
|
||||
continue
|
||||
|
||||
earned_possible_values = []
|
||||
for block_location in graded_scorable_blocks:
|
||||
try:
|
||||
problem_score = course_grade.problem_scores[block_location]
|
||||
except KeyError:
|
||||
earned_possible_values.append([u'Not Available', u'Not Available'])
|
||||
else:
|
||||
if problem_score.first_attempted:
|
||||
earned_possible_values.append([problem_score.earned, problem_score.possible])
|
||||
else:
|
||||
earned_possible_values.append([u'Not Attempted', problem_score.possible])
|
||||
|
||||
rows.append(student_fields + [course_grade.percent] + list(chain.from_iterable(earned_possible_values)))
|
||||
|
||||
task_progress.succeeded += 1
|
||||
if task_progress.attempted % status_interval == 0:
|
||||
task_progress.update_task_state(extra_meta=current_step)
|
||||
|
||||
# Perform the upload if any students have been successfully graded
|
||||
if len(rows) > 1:
|
||||
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
|
||||
# If there are any error rows, write them out as well
|
||||
if len(error_rows) > 1:
|
||||
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
|
||||
|
||||
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
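
The per-problem layout built above interleaves two cells per scorable block, matching the "(Earned)"/"(Possible)" header pairs. A self-contained illustration of the `chain.from_iterable` flattening, using hypothetical score values rather than real `problem_scores` data:

from itertools import chain

earned_possible_values = [[2.0, 3.0], ['Not Attempted', 5.0], ['Not Available', 'Not Available']]
# One flat list, two cells per block, in header order
row_tail = list(chain.from_iterable(earned_possible_values))
assert row_tail == [2.0, 3.0, 'Not Attempted', 5.0, 'Not Available', 'Not Available']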


def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    current_step = {'step': 'Calculating Profile Info'}
    task_progress.update_task_state(extra_meta=current_step)

    # compute the student features table and format it
    query_features = task_input
    student_data = enrolled_students_features(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)


def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
    task_progress = TaskProgress(action_name, students_in_course.count(), start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    # Loop over all our students and build our CSV lists in memory
    rows = []
    header = None
    current_step = {'step': 'Gathering Profile Information'}
    enrollment_report_provider = PaidCourseEnrollmentReportProvider()
    total_students = students_in_course.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_students
    )

    for student in students_in_course:
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 100 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_students
            )

        user_data = enrollment_report_provider.get_user_profile(student.id)
        course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
        payment_data = enrollment_report_provider.get_payment_info(student, course_id)

        # display name map for the column headers
        enrollment_report_headers = {
            'User ID': _('User ID'),
            'Username': _('Username'),
            'Full Name': _('Full Name'),
            'First Name': _('First Name'),
            'Last Name': _('Last Name'),
            'Company Name': _('Company Name'),
            'Title': _('Title'),
            'Language': _('Language'),
            'Year of Birth': _('Year of Birth'),
            'Gender': _('Gender'),
            'Level of Education': _('Level of Education'),
            'Mailing Address': _('Mailing Address'),
            'Goals': _('Goals'),
            'City': _('City'),
            'Country': _('Country'),
            'Enrollment Date': _('Enrollment Date'),
            'Currently Enrolled': _('Currently Enrolled'),
            'Enrollment Source': _('Enrollment Source'),
            'Manual (Un)Enrollment Reason': _('Manual (Un)Enrollment Reason'),
            'Enrollment Role': _('Enrollment Role'),
            'List Price': _('List Price'),
            'Payment Amount': _('Payment Amount'),
            'Coupon Codes Used': _('Coupon Codes Used'),
            'Registration Code Used': _('Registration Code Used'),
            'Payment Status': _('Payment Status'),
            'Transaction Reference Number': _('Transaction Reference Number')
        }

        if not header:
            header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
            display_headers = []
            for header_element in header:
                # translate header into a localizable display string
                display_headers.append(enrollment_report_headers.get(header_element, header_element))
            rows.append(display_headers)

        rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
        task_progress.succeeded += 1

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
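
The `attempted % status_interval` check above throttles cache writes to one per hundred students. A stripped-down, runnable sketch of that throttling pattern, with a stand-in progress object (the real `TaskProgress` lives in the tasks-helper runner module):

class ProgressSketch(object):
    """Stand-in for TaskProgress: counts attempts and state updates."""
    def __init__(self):
        self.attempted = 0
        self.updates = 0

    def update_task_state(self, extra_meta=None):
        self.updates += 1

progress = ProgressSketch()
status_interval = 100
for _student in range(250):
    if progress.attempted % status_interval == 0:
        progress.update_task_state(extra_meta={'step': 'Gathering Profile Information'})
    progress.attempted += 1

# Updates fire at attempted == 0, 100, and 200 only.
assert progress.updates == 3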


def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    information about students who may enroll but have not done so
    yet, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'Calculating info about students who may enroll'}
    task_progress.update_task_state(extra_meta=current_step)

    # Compute result table and format it
    query_features = task_input.get('features')
    student_data = list_may_enroll(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)


def get_executive_report(course_id):
    """
    Returns dict containing information about the course executive summary.
    """
    single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
    bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
    paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
    gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total

    all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
    gross_pending_revenue = all_invoices_total - float(paid_invoices_total)

    gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)

    refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
        course_id, status='refunded'
    )
    refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
        course_id, status='refunded'
    )
    total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats

    self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
        course_id,
        status='refunded'
    )
    bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
    total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds

    top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
    total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)

    bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)

    unused_registration_codes = 0
    for registration_code in bulk_purchased_codes:
        if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
            unused_registration_codes += 1

    self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
    bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
    total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()

    total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats

    self_purchases_percentage = 0.0
    bulk_purchases_percentage = 0.0
    invoice_purchases_percentage = 0.0
    avg_price_paid = 0.0

    if total_seats != 0:
        self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
        bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
        invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
        avg_price_paid = gross_revenue / total_seats

    course = get_course_by_id(course_id, depth=0)
    currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]

    return {
        'display_name': course.display_name,
        'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
        'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
        'total_seats': total_seats,
        'currency': currency,
        'gross_revenue': float(gross_revenue),
        'gross_paid_revenue': float(gross_paid_revenue),
        'gross_pending_revenue': gross_pending_revenue,
        'total_seats_refunded': total_seats_refunded,
        'total_amount_refunded': float(total_amount_refunded),
        'average_paid_price': float(avg_price_paid),
        'discount_codes_data': top_discounted_codes,
        'total_seats_using_discount_codes': total_coupon_codes_purchases,
        'total_self_purchase_seats': self_purchased_seat_count,
        'total_bulk_purchase_seats': bulk_purchased_seat_count,
        'total_invoiced_seats': total_invoiced_seats,
        'unused_bulk_purchase_code_count': unused_registration_codes,
        'self_purchases_percentage': self_purchases_percentage,
        'bulk_purchases_percentage': bulk_purchases_percentage,
        'invoice_purchases_percentage': invoice_purchases_percentage,
    }
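
A quick worked example of the seat-percentage and average-price arithmetic above, with hypothetical counts; the `total_seats != 0` guard matters because all four figures otherwise stay at their 0.0 defaults:

self_purchased_seat_count = 30
bulk_purchased_seat_count = 50
total_invoiced_seats = 20
gross_revenue = 5000.0

total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
if total_seats != 0:
    self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
    avg_price_paid = gross_revenue / total_seats

assert total_seats == 100
assert self_purchases_percentage == 30.0
assert avg_price_paid == 50.0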


def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate an HTML report containing information
    that provides a snapshot of how the course is doing.
    """
    start_time = time()
    report_generation_date = datetime.now(UTC)
    status_interval = 100

    enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
    true_enrollment_count = 0
    for user in enrolled_users:
        if not user.is_staff and not CourseAccessRole.objects.filter(
                user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
        ).exists():
            true_enrollment_count += 1

    task_progress = TaskProgress(action_name, true_enrollment_count, start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )

    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    current_step = {'step': 'Gathering executive summary report information'}

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating executive summary report',
        task_info_string,
        action_name,
        current_step
    )

    if task_progress.attempted % status_interval == 0:
        task_progress.update_task_state(extra_meta=current_step)
    task_progress.attempted += 1

    # get the course executive summary report information.
    data_dict = get_executive_report(course_id)
    data_dict.update(
        {
            'total_enrollments': true_enrollment_count,
            'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
        }
    )

    # By this point, we've got the data we need to generate the HTML report.
    current_step = {'step': 'Uploading executive summary report HTML file'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
    task_progress.succeeded += 1
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)


def upload_course_survey_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing the survey results
    for the course, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)

    current_step = {'step': 'Gathering course survey report information'}
    task_progress.update_task_state(extra_meta=current_step)

    distinct_survey_fields_queryset = SurveyAnswer.objects.filter(course_key=course_id).values('field_name').distinct()
    survey_fields = []
    for unique_field_row in distinct_survey_fields_queryset:
        survey_fields.append(unique_field_row['field_name'])
    survey_fields.sort()

    user_survey_answers = OrderedDict()
    survey_answers_for_course = SurveyAnswer.objects.filter(course_key=course_id).select_related('user')

    for survey_field_record in survey_answers_for_course:
        user_id = survey_field_record.user.id
        if user_id not in user_survey_answers.keys():
            user_survey_answers[user_id] = {
                'username': survey_field_record.user.username,
                'email': survey_field_record.user.email
            }

        user_survey_answers[user_id][survey_field_record.field_name] = survey_field_record.field_value

    header = ["User ID", "User Name", "Email"]
    header.extend(survey_fields)
    csv_rows = []

    for user_id in user_survey_answers.keys():
        row = []
        row.append(user_id)
        row.append(user_survey_answers[user_id].get('username', ''))
        row.append(user_survey_answers[user_id].get('email', ''))
        for survey_field in survey_fields:
            row.append(user_survey_answers[user_id].get(survey_field, ''))
        csv_rows.append(row)

    task_progress.attempted = task_progress.succeeded = len(csv_rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    csv_rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(csv_rows, 'course_survey_results', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)


def upload_proctored_exam_results_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=invalid-name
    """
    For a given `course_id`, generate a CSV file containing
    information about proctored exam results, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'Calculating info about proctored exam results in a course'}
    task_progress.update_task_state(extra_meta=current_step)

    # Compute result table and format it
    query_features = _task_input.get('features')
    student_data = get_proctored_exam_results(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(rows, 'proctored_exam_results_report', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)


def generate_students_certificates(
        _xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate certificates only for the students selected
    by the 'student_set' key in `task_input`, if given; otherwise generate
    certificates for all enrolled students.
    """
    start_time = time()
    students_to_generate_certs_for = CourseEnrollment.objects.users_enrolled_in(course_id)

    student_set = task_input.get('student_set')
    if student_set == 'all_whitelisted':
        # Generate Certificates for all white listed students.
        students_to_generate_certs_for = students_to_generate_certs_for.filter(
            certificatewhitelist__course_id=course_id,
            certificatewhitelist__whitelist=True
        )

    elif student_set == 'whitelisted_not_generated':
        # Whitelisted students who did not get certificates already.
        students_to_generate_certs_for = students_to_generate_certs_for.filter(
            certificatewhitelist__course_id=course_id,
            certificatewhitelist__whitelist=True
        ).exclude(
            generatedcertificate__course_id=course_id,
            generatedcertificate__status__in=CertificateStatuses.PASSED_STATUSES
        )

    elif student_set == "specific_student":
        specific_student_id = task_input.get('specific_student_id')
        students_to_generate_certs_for = students_to_generate_certs_for.filter(id=specific_student_id)

    task_progress = TaskProgress(action_name, students_to_generate_certs_for.count(), start_time)

    current_step = {'step': 'Calculating students already have certificates'}
    task_progress.update_task_state(extra_meta=current_step)

    statuses_to_regenerate = task_input.get('statuses_to_regenerate', [])
    if student_set is not None and not statuses_to_regenerate:
        # We want to skip 'filtering students' only when students are given and statuses to regenerate are not
        students_require_certs = students_to_generate_certs_for
    else:
        students_require_certs = students_require_certificate(
            course_id, students_to_generate_certs_for, statuses_to_regenerate
        )

    if statuses_to_regenerate:
        # Mark existing generated certificates as 'unavailable' before regenerating.
        # We need to call this method after "students_require_certificate", otherwise
        # "students_require_certificate" would return no results.
        invalidate_generated_certificates(course_id, students_to_generate_certs_for, statuses_to_regenerate)

    task_progress.skipped = task_progress.total - len(students_require_certs)

    current_step = {'step': 'Generating Certificates'}
    task_progress.update_task_state(extra_meta=current_step)

    course = modulestore().get_course(course_id, depth=0)
    # Generate certificate for each student
    for student in students_require_certs:
        task_progress.attempted += 1
        status = generate_user_certificates(
            student,
            course_id,
            course=course
        )

        if CertificateStatuses.is_passing_status(status):
            task_progress.succeeded += 1
        else:
            task_progress.failed += 1

    return task_progress.update_task_state(extra_meta=current_step)


def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Within a given course, cohort students in bulk, then upload the results
    using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)

    # Iterate through rows to get total assignments for task progress
    with DefaultStorage().open(task_input['file_name']) as f:
        total_assignments = 0
        for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
            total_assignments += 1

    task_progress = TaskProgress(action_name, total_assignments, start_time)
    current_step = {'step': 'Cohorting Students'}
    task_progress.update_task_state(extra_meta=current_step)

    # cohorts_status is a mapping from cohort_name to metadata about
    # that cohort.  The metadata will include information about users
    # successfully added to the cohort, users not found, and a cached
    # reference to the corresponding cohort object to prevent
    # redundant cohort queries.
    cohorts_status = {}

    with DefaultStorage().open(task_input['file_name']) as f:
        for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
            # Try to use the 'email' field to identify the user.  If it's not present, use 'username'.
            username_or_email = row.get('email') or row.get('username')
            cohort_name = row.get('cohort') or ''
            task_progress.attempted += 1

            if not cohorts_status.get(cohort_name):
                cohorts_status[cohort_name] = {
                    'Cohort Name': cohort_name,
                    'Students Added': 0,
                    'Students Not Found': set()
                }
                try:
                    cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
                        course_id=course_id,
                        group_type=CourseUserGroup.COHORT,
                        name=cohort_name
                    )
                    cohorts_status[cohort_name]["Exists"] = True
                except CourseUserGroup.DoesNotExist:
                    cohorts_status[cohort_name]["Exists"] = False

            if not cohorts_status[cohort_name]['Exists']:
                task_progress.failed += 1
                continue

            try:
                add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
                cohorts_status[cohort_name]['Students Added'] += 1
                task_progress.succeeded += 1
            except User.DoesNotExist:
                cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
                task_progress.failed += 1
            except ValueError:
                # Raised when the user is already in the given cohort
                task_progress.skipped += 1

            task_progress.update_task_state(extra_meta=current_step)

    current_step['step'] = 'Uploading CSV'
    task_progress.update_task_state(extra_meta=current_step)

    # Filter the output of `add_users_to_cohorts` in order to upload the result.
    output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
    output_rows = [
        [
            ','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
            else status_dict[column_name]
            for column_name in output_header
        ]
        for _cohort_name, status_dict in cohorts_status.iteritems()
    ]
    output_rows.insert(0, output_header)
    upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)
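
For reference, the `cohorts_status` mapping built above ends up keyed by cohort name, with per-cohort counters. A small sketch of the resulting shape and the CSV row derivation, using invented cohort data (sorted here only to make the assertions deterministic):

cohorts_status = {
    'Alpha': {'Cohort Name': 'Alpha', 'Exists': True, 'Students Added': 2, 'Students Not Found': set()},
    'Beta': {'Cohort Name': 'Beta', 'Exists': False, 'Students Added': 0, 'Students Not Found': {'ghost@example.com'}},
}
output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
output_rows = [
    [
        # The not-found set is joined into one comma-separated cell
        ','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
        else status_dict[column_name]
        for column_name in output_header
    ]
    for _cohort_name, status_dict in sorted(cohorts_status.items())
]
assert output_rows[0] == ['Alpha', True, 2, '']
assert output_rows[1] == ['Beta', False, 0, 'ghost@example.com']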


def students_require_certificate(course_id, enrolled_students, statuses_to_regenerate=None):
    """
    Returns a list of students for whom certificates need to be generated.

    If 'statuses_to_regenerate' is given, return students that have generated
    certificates whose status lies in 'statuses_to_regenerate'.

    If 'statuses_to_regenerate' is not given, return all enrolled students,
    skipping the ones whose certificates have already been generated.

    :param course_id:
    :param enrolled_students:
    :param statuses_to_regenerate:
    """
    if statuses_to_regenerate:
        # Return students that have generated certificates whose status
        # lies in 'statuses_to_regenerate'
        students_require_certificates = enrolled_students.filter(
            generatedcertificate__course_id=course_id,
            generatedcertificate__status__in=statuses_to_regenerate
        )
        # Fetch results, otherwise subsequent operations on the table cause a wrong data fetch
        return list(students_require_certificates)
    else:
        # compute those students whose certificates are already generated
        students_already_have_certs = User.objects.filter(
            ~Q(generatedcertificate__status=CertificateStatuses.unavailable),
            generatedcertificate__course_id=course_id)

        # Return all the enrolled students, skipping the ones whose certificates have already been generated
        return list(set(enrolled_students) - set(students_already_have_certs))
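
The fallback branch above is plain set arithmetic: enrolled students minus those who already hold a non-'unavailable' certificate. An illustration with integers standing in for User objects:

enrolled_students = {1, 2, 3, 4, 5}
students_already_have_certs = {2, 4}

# Same set-difference expression as the return statement above
students_require_certs = list(set(enrolled_students) - set(students_already_have_certs))
assert sorted(students_require_certs) == [1, 3, 5]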


def invalidate_generated_certificates(course_id, enrolled_students, certificate_statuses):  # pylint: disable=invalid-name
    """
    Invalidate generated certificates for all enrolled students in the given course
    having status in 'certificate_statuses'.

    Generated certificates are invalidated by marking their status 'unavailable' and
    updating verify_uuid, download_uuid, download_url and grade with an empty string.

    :param course_id: Course key for the course whose generated certificates need to be removed
    :param enrolled_students: (queryset or list) students enrolled in the course
    :param certificate_statuses: certificate statuses for which to remove generated certificates
    """
    certificates = GeneratedCertificate.objects.filter(  # pylint: disable=no-member
        user__in=enrolled_students,
        course_id=course_id,
        status__in=certificate_statuses,
    )

    # Mark generated certificates as 'unavailable' and update download_url, download_uuid,
    # verify_uuid and grade with an empty string for each row
    certificates.update(
        status=CertificateStatuses.unavailable,
        verify_uuid='',
        download_uuid='',
        download_url='',
        grade='',
    )


def upload_ora2_data(
        _xmodule_instance_args, _entry_id, course_id, _task_input, action_name
):
    """
    Collect ora2 responses and upload them to S3 as a CSV
    """

    start_date = datetime.now(UTC)
    start_time = time()

    num_attempted = 1
    num_total = 1

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    task_progress = TaskProgress(action_name, num_total, start_time)
    task_progress.attempted = num_attempted

    curr_step = {'step': "Collecting responses"}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s for all submissions',
        task_info_string,
        action_name,
        curr_step,
    )

    task_progress.update_task_state(extra_meta=curr_step)

    try:
        header, datarows = OraAggregateData.collect_ora2_data(course_id)
        rows = [header] + [row for row in datarows]
    # Update progress to failed regardless of error type
    except Exception:  # pylint: disable=broad-except
        TASK_LOG.exception('Failed to get ORA data.')
        task_progress.failed = 1
        curr_step = {'step': "Error while collecting data"}

        task_progress.update_task_state(extra_meta=curr_step)

        return UPDATE_STATUS_FAILED

    task_progress.succeeded = 1
    curr_step = {'step': "Uploading CSV"}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s',
        task_info_string,
        action_name,
        curr_step,
    )
    task_progress.update_task_state(extra_meta=curr_step)

    upload_csv_to_report_store(rows, 'ORA_data', course_id, start_date)

    curr_step = {'step': 'Finalizing ORA data report'}
    task_progress.update_task_state(extra_meta=curr_step)
    TASK_LOG.info(u'%s, Task type: %s, Upload complete.', task_info_string, action_name)

    return UPDATE_STATUS_SUCCEEDED
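
Unlike the other report tasks here, `upload_ora2_data` returns module-level status constants rather than the final `update_task_state` dict. A compact, runnable sketch of that success/failure contract, with stand-in constants (the real ones are imported from the tasks-helper runner module):

UPDATE_STATUS_SUCCEEDED = 'succeeded'  # stand-ins; not the real constant values
UPDATE_STATUS_FAILED = 'failed'

def collect_and_report(collect):
    """Run `collect`; report failure on any exception, success otherwise."""
    try:
        header, datarows = collect()
        rows = [header] + [row for row in datarows]
    except Exception:  # broad on purpose, mirroring the task above
        return UPDATE_STATUS_FAILED, None
    return UPDATE_STATUS_SUCCEEDED, rows

def failing_collect():
    raise RuntimeError('boom')

status, rows = collect_and_report(lambda: (['id', 'answer'], [[1, 'yes']]))
assert status == UPDATE_STATUS_SUCCEEDED and rows == [['id', 'answer'], [1, 'yes']]

status, rows = collect_and_report(failing_collect)
assert status == UPDATE_STATUS_FAILED and rows is None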

147 lms/djangoapps/instructor_task/tasks_helper/certs.py Normal file
@@ -0,0 +1,147 @@
"""
Instructor tasks related to certificates.
"""
from django.contrib.auth.models import User
from django.db.models import Q
from time import time

from certificates.api import generate_user_certificates
from certificates.models import CertificateStatuses, GeneratedCertificate
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from .runner import TaskProgress


def generate_students_certificates(
        _xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate certificates only for the students selected
    by the 'student_set' key in `task_input`, if given; otherwise generate
    certificates for all enrolled students.
    """
    start_time = time()
    students_to_generate_certs_for = CourseEnrollment.objects.users_enrolled_in(course_id)

    student_set = task_input.get('student_set')
    if student_set == 'all_whitelisted':
        # Generate Certificates for all white listed students.
        students_to_generate_certs_for = students_to_generate_certs_for.filter(
            certificatewhitelist__course_id=course_id,
            certificatewhitelist__whitelist=True
        )

    elif student_set == 'whitelisted_not_generated':
        # Whitelisted students who did not get certificates already.
        students_to_generate_certs_for = students_to_generate_certs_for.filter(
            certificatewhitelist__course_id=course_id,
            certificatewhitelist__whitelist=True
        ).exclude(
            generatedcertificate__course_id=course_id,
            generatedcertificate__status__in=CertificateStatuses.PASSED_STATUSES
        )

    elif student_set == "specific_student":
        specific_student_id = task_input.get('specific_student_id')
        students_to_generate_certs_for = students_to_generate_certs_for.filter(id=specific_student_id)

    task_progress = TaskProgress(action_name, students_to_generate_certs_for.count(), start_time)

    current_step = {'step': 'Calculating students already have certificates'}
    task_progress.update_task_state(extra_meta=current_step)

    statuses_to_regenerate = task_input.get('statuses_to_regenerate', [])
    if student_set is not None and not statuses_to_regenerate:
        # We want to skip 'filtering students' only when students are given and statuses to regenerate are not
        students_require_certs = students_to_generate_certs_for
    else:
        students_require_certs = students_require_certificate(
            course_id, students_to_generate_certs_for, statuses_to_regenerate
        )

    if statuses_to_regenerate:
        # Mark existing generated certificates as 'unavailable' before regenerating.
        # We need to call this method after "students_require_certificate", otherwise
        # "students_require_certificate" would return no results.
        invalidate_generated_certificates(course_id, students_to_generate_certs_for, statuses_to_regenerate)

    task_progress.skipped = task_progress.total - len(students_require_certs)

    current_step = {'step': 'Generating Certificates'}
    task_progress.update_task_state(extra_meta=current_step)

    course = modulestore().get_course(course_id, depth=0)
    # Generate certificate for each student
    for student in students_require_certs:
        task_progress.attempted += 1
        status = generate_user_certificates(
            student,
            course_id,
            course=course
        )

        if CertificateStatuses.is_passing_status(status):
            task_progress.succeeded += 1
        else:
            task_progress.failed += 1

    return task_progress.update_task_state(extra_meta=current_step)


def students_require_certificate(course_id, enrolled_students, statuses_to_regenerate=None):
    """
    Returns a list of students for whom certificates need to be generated.

    If 'statuses_to_regenerate' is given, return students that have generated
    certificates whose status lies in 'statuses_to_regenerate'.

    If 'statuses_to_regenerate' is not given, return all enrolled students,
    skipping the ones whose certificates have already been generated.

    :param course_id:
    :param enrolled_students:
    :param statuses_to_regenerate:
    """
    if statuses_to_regenerate:
        # Return students that have generated certificates whose status
        # lies in 'statuses_to_regenerate'
        students_require_certificates = enrolled_students.filter(
            generatedcertificate__course_id=course_id,
            generatedcertificate__status__in=statuses_to_regenerate
        )
        # Fetch results, otherwise subsequent operations on the table cause a wrong data fetch
        return list(students_require_certificates)
    else:
        # compute those students whose certificates are already generated
        students_already_have_certs = User.objects.filter(
            ~Q(generatedcertificate__status=CertificateStatuses.unavailable),
            generatedcertificate__course_id=course_id)

        # Return all the enrolled students, skipping the ones whose certificates have already been generated
        return list(set(enrolled_students) - set(students_already_have_certs))


def invalidate_generated_certificates(course_id, enrolled_students, certificate_statuses):  # pylint: disable=invalid-name
    """
    Invalidate generated certificates for all enrolled students in the given course
    having status in 'certificate_statuses'.

    Generated certificates are invalidated by marking their status 'unavailable' and
    updating verify_uuid, download_uuid, download_url and grade with an empty string.

    :param course_id: Course key for the course whose generated certificates need to be removed
    :param enrolled_students: (queryset or list) students enrolled in the course
    :param certificate_statuses: certificate statuses for which to remove generated certificates
    """
    certificates = GeneratedCertificate.objects.filter(  # pylint: disable=no-member
        user__in=enrolled_students,
        course_id=course_id,
        status__in=certificate_statuses,
    )

    # Mark generated certificates as 'unavailable' and update download_url, download_uuid,
    # verify_uuid and grade with an empty string for each row
    certificates.update(
        status=CertificateStatuses.unavailable,
        verify_uuid='',
        download_uuid='',
        download_url='',
        grade='',
    )

388 lms/djangoapps/instructor_task/tasks_helper/enrollments.py Normal file
@@ -0,0 +1,388 @@
"""
Instructor tasks related to enrollments.
"""
from datetime import datetime
from django.conf import settings
from django.utils.translation import ugettext as _
import logging
from pytz import UTC
from StringIO import StringIO
from time import time

from edxmako.shortcuts import render_to_string
from courseware.courses import get_course_by_id
from lms.djangoapps.instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from lms.djangoapps.instructor_task.models import ReportStore
from instructor_analytics.basic import enrolled_students_features, list_may_enroll
from instructor_analytics.csvs import format_dictlist
from shoppingcart.models import (
    PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
    Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from student.models import CourseEnrollment, CourseAccessRole
from util.file import course_filename_prefix_generator

from .runner import TaskProgress
from .utils import tracker_emit, upload_csv_to_report_store


TASK_LOG = logging.getLogger('edx.celery.task')
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']


def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
    task_progress = TaskProgress(action_name, students_in_course.count(), start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    # Loop over all our students and build our CSV lists in memory
    rows = []
    header = None
    current_step = {'step': 'Gathering Profile Information'}
    enrollment_report_provider = PaidCourseEnrollmentReportProvider()
    total_students = students_in_course.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_students
    )

    for student in students_in_course:
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 100 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_students
            )

        user_data = enrollment_report_provider.get_user_profile(student.id)
        course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
        payment_data = enrollment_report_provider.get_payment_info(student, course_id)

        # display name map for the column headers
        enrollment_report_headers = {
            'User ID': _('User ID'),
            'Username': _('Username'),
            'Full Name': _('Full Name'),
            'First Name': _('First Name'),
            'Last Name': _('Last Name'),
            'Company Name': _('Company Name'),
            'Title': _('Title'),
            'Language': _('Language'),
            'Year of Birth': _('Year of Birth'),
            'Gender': _('Gender'),
            'Level of Education': _('Level of Education'),
            'Mailing Address': _('Mailing Address'),
            'Goals': _('Goals'),
            'City': _('City'),
            'Country': _('Country'),
            'Enrollment Date': _('Enrollment Date'),
            'Currently Enrolled': _('Currently Enrolled'),
            'Enrollment Source': _('Enrollment Source'),
            'Manual (Un)Enrollment Reason': _('Manual (Un)Enrollment Reason'),
            'Enrollment Role': _('Enrollment Role'),
            'List Price': _('List Price'),
            'Payment Amount': _('Payment Amount'),
            'Coupon Codes Used': _('Coupon Codes Used'),
            'Registration Code Used': _('Registration Code Used'),
            'Payment Status': _('Payment Status'),
            'Transaction Reference Number': _('Transaction Reference Number')
        }

        if not header:
            header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
            display_headers = []
            for header_element in header:
                # translate header into a localizable display string
                display_headers.append(enrollment_report_headers.get(header_element, header_element))
            rows.append(display_headers)

        rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
        task_progress.succeeded += 1

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)


def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    information about students who may enroll but have not done so
    yet, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'Calculating info about students who may enroll'}
    task_progress.update_task_state(extra_meta=current_step)

    # Compute result table and format it
    query_features = task_input.get('features')
    student_data = list_may_enroll(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)


def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    current_step = {'step': 'Calculating Profile Info'}
    task_progress.update_task_state(extra_meta=current_step)

    # compute the student features table and format it
    query_features = task_input
    student_data = enrolled_students_features(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)


def get_executive_report(course_id):
    """
    Returns dict containing information about the course executive summary.
    """
    single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
    bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
    paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
    gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total

    all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
    gross_pending_revenue = all_invoices_total - float(paid_invoices_total)

    gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)

    refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
        course_id, status='refunded'
    )
    refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
        course_id, status='refunded'
    )
    total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats

    self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
        course_id,
        status='refunded'
    )
    bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
    total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds

    top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
    total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)

    bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)

    unused_registration_codes = 0
    for registration_code in bulk_purchased_codes:
        if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
            unused_registration_codes += 1

    self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
    bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
    total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()

    total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats

    self_purchases_percentage = 0.0
    bulk_purchases_percentage = 0.0
    invoice_purchases_percentage = 0.0
    avg_price_paid = 0.0

    if total_seats != 0:
        self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
        bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
        invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
        avg_price_paid = gross_revenue / total_seats

    course = get_course_by_id(course_id, depth=0)
    currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]

    return {
        'display_name': course.display_name,
        'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
        'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
        'total_seats': total_seats,
        'currency': currency,
        'gross_revenue': float(gross_revenue),
        'gross_paid_revenue': float(gross_paid_revenue),
        'gross_pending_revenue': gross_pending_revenue,
        'total_seats_refunded': total_seats_refunded,
        'total_amount_refunded': float(total_amount_refunded),
        'average_paid_price': float(avg_price_paid),
        'discount_codes_data': top_discounted_codes,
        'total_seats_using_discount_codes': total_coupon_codes_purchases,
        'total_self_purchase_seats': self_purchased_seat_count,
        'total_bulk_purchase_seats': bulk_purchased_seat_count,
        'total_invoiced_seats': total_invoiced_seats,
        'unused_bulk_purchase_code_count': unused_registration_codes,
        'self_purchases_percentage': self_purchases_percentage,
        'bulk_purchases_percentage': bulk_purchases_percentage,
        'invoice_purchases_percentage': invoice_purchases_percentage,
    }


def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate an HTML report containing information
    that provides a snapshot of how the course is doing.
    """
    start_time = time()
    report_generation_date = datetime.now(UTC)
    status_interval = 100

    enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
    true_enrollment_count = 0
    for user in enrolled_users:
        if not user.is_staff and not CourseAccessRole.objects.filter(
                user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
        ).exists():
            true_enrollment_count += 1

    task_progress = TaskProgress(action_name, true_enrollment_count, start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )

    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    current_step = {'step': 'Gathering executive summary report information'}

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating executive summary report',
        task_info_string,
        action_name,
        current_step
    )

    if task_progress.attempted % status_interval == 0:
        task_progress.update_task_state(extra_meta=current_step)
    task_progress.attempted += 1

    # get the course executive summary report information.
    data_dict = get_executive_report(course_id)
    data_dict.update(
        {
            'total_enrollments': true_enrollment_count,
            'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
        }
    )

    # By this point, we've got the data we need to generate the HTML report.
    current_step = {'step': 'Uploading executive summary report HTML file'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    _upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
    task_progress.succeeded += 1
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)


def _upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Upload the executive summary HTML file using a ReportStore.

    Arguments:
        data_dict: dict containing the executive report data.
        report_name: name of the resulting HTML file.
        course_id: ID of the course.
        config_name: ReportStore configuration to use (defaults to 'FINANCIAL_REPORTS').
    """
    report_store = ReportStore.from_config(config_name)

    # Use the data dict and html template to generate the output buffer
    output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))

    report_store.store(
        course_id,
        u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
            course_prefix=course_filename_prefix_generator(course_id),
            report_name=report_name,
            timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
        ),
        output_buffer,
    )
    tracker_emit(report_name)
|
||||
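
For orientation, a hedged sketch of how a helper like `upload_exec_summary_report` is typically registered as a Celery task and driven through `run_main_task` (defined in runner.py below). The task name, `ugettext_noop` verb, and the `BaseInstructorTask` base class are assumptions for illustration, not part of this commit:

# Sketch only: wiring a tasks_helper function into a celery task.
# BaseInstructorTask is assumed to be importable from the tasks module.
from functools import partial

from celery import task
from django.utils.translation import ugettext_noop


@task(base=BaseInstructorTask)  # assumed base class for instructor tasks
def exec_summary_report_csv(entry_id, xmodule_instance_args):
    # Bind the per-request args; run_main_task supplies the remaining
    # (entry_id, course_id, task_input, action_name) arguments.
    action_name = ugettext_noop('generated')
    task_fn = partial(upload_exec_summary_report, xmodule_instance_args)
    return run_main_task(entry_id, task_fn, action_name)
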
383
lms/djangoapps/instructor_task/tasks_helper/grades.py
Normal file
@@ -0,0 +1,383 @@
"""
Functionality for generating grade reports.
"""
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import logging
from pytz import UTC
import re
from time import time

from instructor_analytics.basic import list_problem_responses
from instructor_analytics.csvs import format_dictlist
from certificates.models import CertificateWhitelist, certificate_info_for_user
from courseware.courses import get_course_by_id
from lms.djangoapps.grades.context import grading_context_for_course
from lms.djangoapps.grades.new.course_grade_factory import CourseGradeFactory
from lms.djangoapps.teams.models import CourseTeamMembership
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from openedx.core.djangoapps.course_groups.cohorts import get_cohort, is_course_cohorted
from student.models import CourseEnrollment
from xmodule.partitions.partitions_service import PartitionService
from xmodule.split_test_module import get_split_user_partitions

from .runner import TaskProgress
from .utils import upload_csv_to_report_store


TASK_LOG = logging.getLogger('edx.celery.task')


def generate_course_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    total_enrolled_students = enrolled_students.count()
    task_progress = TaskProgress(action_name, total_enrolled_students, start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students,
    )

    graded_assignments = _graded_assignments(course_id)
    grade_header = []
    for assignment_info in graded_assignments.itervalues():
        if assignment_info['use_subsection_headers']:
            grade_header.extend(assignment_info['subsection_headers'].itervalues())
        grade_header.append(assignment_info['average_header'])

    rows.append(
        ["Student ID", "Email", "Username", "Grade"] +
        grade_header +
        cohorts_header +
        group_configs_header +
        teams_header +
        ['Enrollment Track', 'Verification Status'] +
        certificate_info_header
    )

    for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )

        if not course_grade:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
            continue

        # We were able to successfully grade this student for this course.
        task_progress.succeeded += 1

        cohorts_group_name = []
        if course_is_cohorted:
            group = get_cohort(student, course_id, assign=False)
            cohorts_group_name.append(group.name if group else '')

        group_configs_group_names = []
        for partition in experiment_partitions:
            group = PartitionService(course_id).get_group(student, partition, assign=False)
            group_configs_group_names.append(group.name if group else '')

        team_name = []
        if teams_enabled:
            try:
                membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
                team_name.append(membership.team.name)
            except CourseTeamMembership.DoesNotExist:
                team_name.append('')

        enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
        verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
            student,
            course_id,
            enrollment_mode
        )
        certificate_info = certificate_info_for_user(
            student,
            course_id,
            course_grade.letter_grade,
            student.id in whitelisted_user_ids
        )

        TASK_LOG.info(
            u'Student certificate eligibility: %s '
            u'(user=%s, course_id=%s, grade_percent=%s letter_grade=%s gradecutoffs=%s, allow_certificate=%s, '
            u'is_whitelisted=%s)',
            certificate_info[0],
            student,
            course_id,
            course_grade.percent,
            course_grade.letter_grade,
            course.grade_cutoffs,
            student.profile.allow_certificate,
            student.id in whitelisted_user_ids
        )

        grade_results = []
        for assignment_type, assignment_info in graded_assignments.iteritems():
            for subsection_location in assignment_info['subsection_headers']:
                try:
                    subsection_grade = course_grade.graded_subsections_by_format[assignment_type][subsection_location]
                except KeyError:
                    grade_results.append([u'Not Available'])
                else:
                    if subsection_grade.graded_total.first_attempted is not None:
                        grade_results.append(
                            [subsection_grade.graded_total.earned / subsection_grade.graded_total.possible]
                        )
                    else:
                        grade_results.append([u'Not Attempted'])
            if assignment_info['use_subsection_headers']:
                assignment_average = course_grade.grader_result['grade_breakdown'].get(assignment_type, {}).get(
                    'percent'
                )
                grade_results.append([assignment_average])

        grade_results = list(chain.from_iterable(grade_results))

        rows.append(
            [student.id, student.email, student.username, course_grade.percent] +
            grade_results + cohorts_group_name + group_configs_group_names + team_name +
            [enrollment_mode] + [verification_status] + certificate_info
        )

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
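
To make the column layout above concrete, a hedged illustration of the header this builds for a cohorted course with two Homework subsections and one Exam subsection (all names invented). Note that a single-subsection assignment type contributes only its average column, with no "(Avg)" suffix, per `_graded_assignments` below:

# Illustrative only; subsection names are invented.
expected_header = (
    ["Student ID", "Email", "Username", "Grade"] +
    ["Homework 1: Intro", "Homework 2: Loops", "Homework (Avg)"] +
    ["Exam"] +                                   # single subsection: average column only
    ["Cohort Name"] +                            # present because course_is_cohorted
    ["Enrollment Track", "Verification Status"] +
    ["Certificate Eligible", "Certificate Delivered", "Certificate Type"]
)
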
def generate_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    Generate a CSV containing all students' problem grades within a given
    `course_id`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    # This struct encapsulates both the display names of each static item in the
    # header row as values as well as the django User field names of those items
    # as the keys. It is structured in this way to keep the values related.
    header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])

    graded_scorable_blocks = _graded_scorable_blocks_to_header(course_id)

    # Just generate the static fields for now.
    rows = [list(header_row.values()) + ['Grade'] + list(chain.from_iterable(graded_scorable_blocks.values()))]
    error_rows = [list(header_row.values()) + ['error_msg']]
    current_step = {'step': 'Calculating Grades'}

    course = get_course_by_id(course_id)
    for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
        student_fields = [getattr(student, field_name) for field_name in header_row]
        task_progress.attempted += 1

        if not course_grade:
            # There was an error grading this student.
            if not err_msg:
                err_msg = u'Unknown error'
            error_rows.append(student_fields + [err_msg])
            task_progress.failed += 1
            continue

        earned_possible_values = []
        for block_location in graded_scorable_blocks:
            try:
                problem_score = course_grade.problem_scores[block_location]
            except KeyError:
                earned_possible_values.append([u'Not Available', u'Not Available'])
            else:
                if problem_score.first_attempted:
                    earned_possible_values.append([problem_score.earned, problem_score.possible])
                else:
                    earned_possible_values.append([u'Not Attempted', problem_score.possible])

        rows.append(student_fields + [course_grade.percent] + list(chain.from_iterable(earned_possible_values)))

        task_progress.succeeded += 1
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload if any students have been successfully graded
    if len(rows) > 1:
        upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
    # If there are any error rows, write them out as well
    if len(error_rows) > 1:
        upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)

    return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
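
A hedged sketch (names and scores invented) of the row layout this produces: each graded block contributes an earned/possible pair, and an unattempted block keeps its possible score next to the "Not Attempted" marker, as in the loop above:

# Illustrative only.
header = ['Student ID', 'Email', 'Username', 'Grade',
          'Homework 1: Intro - Problem 1 (Earned)', 'Homework 1: Intro - Problem 1 (Possible)',
          'Exam 1: Final - Problem 2 (Earned)', 'Exam 1: Final - Problem 2 (Possible)']
row = [42, 'student@example.com', 'student', 0.83,
       2.0, 3.0,                       # attempted block: earned / possible
       u'Not Attempted', 5.0]          # unattempted block keeps its possible score
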
def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    all student answers to a given problem, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'Calculating students answers to problem'}
    task_progress.update_task_state(extra_meta=current_step)

    # Compute result table and format it
    problem_location = task_input.get('problem_location')
    student_data = list_problem_responses(course_id, problem_location)
    features = ['username', 'state']
    header, rows = format_dictlist(student_data, features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    problem_location = re.sub(r'[:/]', '_', problem_location)
    csv_name = 'student_state_from_{}'.format(problem_location)
    upload_csv_to_report_store(rows, csv_name, course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)
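
A small illustration (the block id is invented) of the task_input this helper expects and how the `re.sub` call sanitizes the problem location into a CSV name:

import re

task_input = {'problem_location': 'block-v1:edX+DemoX+2017+type@problem+block@abc123'}
sanitized = re.sub(r'[:/]', '_', task_input['problem_location'])
csv_name = 'student_state_from_{}'.format(sanitized)
# -> 'student_state_from_block-v1_edX+DemoX+2017+type@problem+block@abc123'
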
def _graded_assignments(course_key):
    """
    Returns an OrderedDict that maps an assignment type to a dict of subsection-headers and average-header.
    """
    grading_context = grading_context_for_course(course_key)
    graded_assignments_map = OrderedDict()
    for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
        graded_subsections_map = OrderedDict()

        for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
            subsection = subsection_info['subsection_block']
            header_name = u"{assignment_type} {subsection_index}: {subsection_name}".format(
                assignment_type=assignment_type_name,
                subsection_index=subsection_index,
                subsection_name=subsection.display_name,
            )
            graded_subsections_map[subsection.location] = header_name

        average_header = u"{assignment_type}".format(assignment_type=assignment_type_name)

        # Use separate subsection and average columns only if
        # there's more than one subsection.
        use_subsection_headers = len(subsection_infos) > 1
        if use_subsection_headers:
            average_header += u" (Avg)"

        graded_assignments_map[assignment_type_name] = {
            'subsection_headers': graded_subsections_map,
            'average_header': average_header,
            'use_subsection_headers': use_subsection_headers
        }
    return graded_assignments_map
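
A hedged sketch (assignment and subsection names invented) of the structure this returns for a course with two Homework subsections and a single-subsection Exam:

# Illustrative only.
#
# OrderedDict([
#     ('Homework', {
#         'subsection_headers': OrderedDict([
#             (hw1_location, u'Homework 1: Intro'),
#             (hw2_location, u'Homework 2: Loops'),
#         ]),
#         'average_header': u'Homework (Avg)',
#         'use_subsection_headers': True,
#     }),
#     ('Exam', {
#         'subsection_headers': OrderedDict([(exam_location, u'Exam 1: Final')]),
#         'average_header': u'Exam',
#         'use_subsection_headers': False,
#     }),
# ])
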
def _graded_scorable_blocks_to_header(course_key):
    """
    Returns an OrderedDict that maps a scorable block's id to its
    headers in the final report.
    """
    scorable_blocks_map = OrderedDict()
    grading_context = grading_context_for_course(course_key)
    for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
        for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
            for scorable_block in subsection_info['scored_descendants']:
                header_name = (
                    u"{assignment_type} {subsection_index}: "
                    u"{subsection_name} - {scorable_block_name}"
                ).format(
                    scorable_block_name=scorable_block.display_name,
                    assignment_type=assignment_type_name,
                    subsection_index=subsection_index,
                    subsection_name=subsection_info['subsection_block'].display_name,
                )
                scorable_blocks_map[scorable_block.location] = [header_name + " (Earned)", header_name + " (Possible)"]
    return scorable_blocks_map

269
lms/djangoapps/instructor_task/tasks_helper/misc.py
Normal file
@@ -0,0 +1,269 @@
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.

"""
from collections import OrderedDict
from datetime import datetime
import logging
from pytz import UTC
from time import time
import unicodecsv

from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage

from instructor_analytics.basic import get_proctored_exam_results
from instructor_analytics.csvs import format_dictlist
from openassessment.data import OraAggregateData
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort
from survey.models import SurveyAnswer
from util.file import course_filename_prefix_generator, UniversalNewlineIterator

from .runner import TaskProgress
from .utils import upload_csv_to_report_store, UPDATE_STATUS_SUCCEEDED, UPDATE_STATUS_FAILED


# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')


def upload_course_survey_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing the survey results for the course.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)

    current_step = {'step': 'Gathering course survey report information'}
    task_progress.update_task_state(extra_meta=current_step)

    distinct_survey_fields_queryset = SurveyAnswer.objects.filter(course_key=course_id).values('field_name').distinct()
    survey_fields = []
    for unique_field_row in distinct_survey_fields_queryset:
        survey_fields.append(unique_field_row['field_name'])
    survey_fields.sort()

    user_survey_answers = OrderedDict()
    survey_answers_for_course = SurveyAnswer.objects.filter(course_key=course_id).select_related('user')

    for survey_field_record in survey_answers_for_course:
        user_id = survey_field_record.user.id
        if user_id not in user_survey_answers.keys():
            user_survey_answers[user_id] = {
                'username': survey_field_record.user.username,
                'email': survey_field_record.user.email
            }

        user_survey_answers[user_id][survey_field_record.field_name] = survey_field_record.field_value

    header = ["User ID", "User Name", "Email"]
    header.extend(survey_fields)
    csv_rows = []

    for user_id in user_survey_answers.keys():
        row = []
        row.append(user_id)
        row.append(user_survey_answers[user_id].get('username', ''))
        row.append(user_survey_answers[user_id].get('email', ''))
        for survey_field in survey_fields:
            row.append(user_survey_answers[user_id].get(survey_field, ''))
        csv_rows.append(row)

    task_progress.attempted = task_progress.succeeded = len(csv_rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    csv_rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(csv_rows, 'course_survey_results', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)
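
A short illustration (field names and answers invented) of the rows this uploads: the header is the three static columns followed by the sorted survey field names.

csv_rows = [
    ["User ID", "User Name", "Email", "age", "city"],    # header row
    [42, "student", "student@example.com", "29", "Boston"],
]
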
def upload_proctored_exam_results_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=invalid-name
    """
    For a given `course_id`, generate a CSV file containing
    information about proctored exam results, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'Calculating info about proctored exam results in a course'}
    task_progress.update_task_state(extra_meta=current_step)

    # Compute result table and format it
    query_features = _task_input.get('features')
    student_data = get_proctored_exam_results(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)

    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted

    rows.insert(0, header)

    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload
    upload_csv_to_report_store(rows, 'proctored_exam_results_report', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)


def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Within a given course, cohort students in bulk, then upload the results
    using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)

    # Iterate through rows to get total assignments for task progress
    with DefaultStorage().open(task_input['file_name']) as f:
        total_assignments = 0
        for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
            total_assignments += 1

    task_progress = TaskProgress(action_name, total_assignments, start_time)
    current_step = {'step': 'Cohorting Students'}
    task_progress.update_task_state(extra_meta=current_step)

    # cohorts_status is a mapping from cohort_name to metadata about
    # that cohort. The metadata will include information about users
    # successfully added to the cohort, users not found, and a cached
    # reference to the corresponding cohort object to prevent
    # redundant cohort queries.
    cohorts_status = {}

    with DefaultStorage().open(task_input['file_name']) as f:
        for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
            # Try to use the 'email' field to identify the user. If it's not present, use 'username'.
            username_or_email = row.get('email') or row.get('username')
            cohort_name = row.get('cohort') or ''
            task_progress.attempted += 1

            if not cohorts_status.get(cohort_name):
                cohorts_status[cohort_name] = {
                    'Cohort Name': cohort_name,
                    'Students Added': 0,
                    'Students Not Found': set()
                }
                try:
                    cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
                        course_id=course_id,
                        group_type=CourseUserGroup.COHORT,
                        name=cohort_name
                    )
                    cohorts_status[cohort_name]["Exists"] = True
                except CourseUserGroup.DoesNotExist:
                    cohorts_status[cohort_name]["Exists"] = False

            if not cohorts_status[cohort_name]['Exists']:
                task_progress.failed += 1
                continue

            try:
                add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
                cohorts_status[cohort_name]['Students Added'] += 1
                task_progress.succeeded += 1
            except User.DoesNotExist:
                cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
                task_progress.failed += 1
            except ValueError:
                # Raised when the user is already in the given cohort
                task_progress.skipped += 1

            task_progress.update_task_state(extra_meta=current_step)

    current_step['step'] = 'Uploading CSV'
    task_progress.update_task_state(extra_meta=current_step)

    # Filter the output of `add_users_to_cohorts` in order to upload the result.
    output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
    output_rows = [
        [
            ','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
            else status_dict[column_name]
            for column_name in output_header
        ]
        for _cohort_name, status_dict in cohorts_status.iteritems()
    ]
    output_rows.insert(0, output_header)
    upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)

    return task_progress.update_task_state(extra_meta=current_step)
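
A hedged example (usernames and cohort names invented) of the uploaded CSV this helper reads via task_input['file_name']; each row identifies a user by an 'email' or 'username' column and names a 'cohort' column, matching the row.get() calls above:

# Illustrative input file contents:
#
#   email,username,cohort
#   student1@example.com,,CohortA
#   ,student2,CohortB
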
def upload_ora2_data(
        _xmodule_instance_args, _entry_id, course_id, _task_input, action_name
):
    """
    Collect ora2 responses and upload them to S3 as a CSV
    """

    start_date = datetime.now(UTC)
    start_time = time()

    num_attempted = 1
    num_total = 1

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    task_progress = TaskProgress(action_name, num_total, start_time)
    task_progress.attempted = num_attempted

    curr_step = {'step': "Collecting responses"}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s for all submissions',
        task_info_string,
        action_name,
        curr_step,
    )

    task_progress.update_task_state(extra_meta=curr_step)

    try:
        header, datarows = OraAggregateData.collect_ora2_data(course_id)
        rows = [header] + [row for row in datarows]
    # Update progress to failed regardless of error type
    except Exception:  # pylint: disable=broad-except
        TASK_LOG.exception('Failed to get ORA data.')
        task_progress.failed = 1
        curr_step = {'step': "Error while collecting data"}

        task_progress.update_task_state(extra_meta=curr_step)

        return UPDATE_STATUS_FAILED

    task_progress.succeeded = 1
    curr_step = {'step': "Uploading CSV"}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s',
        task_info_string,
        action_name,
        curr_step,
    )
    task_progress.update_task_state(extra_meta=curr_step)

    upload_csv_to_report_store(rows, 'ORA_data', course_id, start_date)

    curr_step = {'step': 'Finalizing ORA data report'}
    task_progress.update_task_state(extra_meta=curr_step)
    TASK_LOG.info(u'%s, Task type: %s, Upload complete.', task_info_string, action_name)

    return UPDATE_STATUS_SUCCEEDED

365
lms/djangoapps/instructor_task/tasks_helper/module_state.py
Normal file
@@ -0,0 +1,365 @@
"""
Instructor Tasks related to module state.
"""
from django.contrib.auth.models import User
import dogstats_wrapper as dog_stats_api
import json
import logging
from time import time

from eventtracking import tracker
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore.django import modulestore
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from lms.djangoapps.grades.scores import weighted_score
from track.contexts import course_context_from_course_id
from track.event_transaction_utils import set_event_transaction_type, create_new_event_transaction_id
from track.views import task_track
from util.db import outer_atomic
from xblock.runtime import KvsFieldData

from ..exceptions import UpdateProblemModuleStateError
from .runner import TaskProgress
from .utils import UPDATE_STATUS_SUCCEEDED, UPDATE_STATUS_FAILED, UPDATE_STATUS_SKIPPED, UNKNOWN_TASK_ID


TASK_LOG = logging.getLogger('edx.celery.task')

# define value to be used in grading events
GRADES_RESCORE_EVENT_TYPE = 'edx.grades.problem.rescored'


def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, performs update on modules for all students on the specified
    problem.

    If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed three arguments: the module_descriptor for the module pointed to by the
    module_state_key, the particular StudentModule to update, and the task_input being passed
    through. (Any xmodule_instance_args are expected to already be bound into `update_fcn`,
    e.g. via functools.partial, so they are not passed explicitly here.) The update function
    returns one of the UPDATE_STATUS_* values to report whether the update on the particular
    student module succeeded, failed, or was skipped.
    A raised exception indicates a fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

        'attempted': number of attempts made
        'succeeded': number of attempts that "succeeded"
        'skipped': number of attempts that "skipped"
        'failed': number of attempts that "failed"
        'total': number of possible updates to attempt
        'action_name': user-visible verb to use in status messages. Should be past-tense.
            Pass-through of input `action_name`.
        'duration_ms': how long the task has (or had) been running.

    Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.

    """
    start_time = time()
    usage_keys = []
    problem_url = task_input.get('problem_url')
    entrance_exam_url = task_input.get('entrance_exam_url')
    student_identifier = task_input.get('student')
    problems = {}

    # if problem_url is present make a usage key from it
    if problem_url:
        usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
        usage_keys.append(usage_key)

        # find the problem descriptor:
        problem_descriptor = modulestore().get_item(usage_key)
        problems[unicode(usage_key)] = problem_descriptor

    # if entrance_exam_url is present grab all problems in it
    if entrance_exam_url:
        problems = get_problems_in_section(entrance_exam_url)
        usage_keys = [UsageKey.from_string(location) for location in problems.keys()]

    # find the modules in question
    modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)

    # give the option of updating an individual student. If not specified,
    # then updates all students who have responded to a problem so far
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student,
        # and let it throw an exception if none is found.
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        else:
            student = User.objects.get(username=student_identifier)

    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
    task_progress.update_task_state()

    for module_to_update in modules_to_update:
        task_progress.attempted += 1
        module_descriptor = problems[unicode(module_to_update.module_state_key)]
        # There is no try here: if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
            update_status = update_fcn(module_descriptor, module_to_update, task_input)
            if update_status == UPDATE_STATUS_SUCCEEDED:
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                task_progress.succeeded += 1
            elif update_status == UPDATE_STATUS_FAILED:
                task_progress.failed += 1
            elif update_status == UPDATE_STATUS_SKIPPED:
                task_progress.skipped += 1
            else:
                raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))

    return task_progress.update_task_state()
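
A minimal sketch (not part of this commit; names invented) of the contract an update function bound for perform_module_state_update must satisfy, including the pre-binding of xmodule_instance_args mentioned in the docstring above:

from functools import partial


def noop_update(xmodule_instance_args, module_descriptor, student_module, task_input):
    """Inspect one StudentModule and report one of the UPDATE_STATUS_* values."""
    if not student_module.state:
        return UPDATE_STATUS_SKIPPED
    return UPDATE_STATUS_SUCCEEDED


# xmodule_instance_args is pre-bound, matching the three-argument call above:
update_fcn = partial(noop_update, {'task_id': 'demo'})
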
@outer_atomic
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module, task_input):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if the module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns UPDATE_STATUS_SUCCEEDED if the problem was successfully rescored for the
    given student, UPDATE_STATUS_FAILED if rescoring encountered some kind of error,
    and UPDATE_STATUS_SKIPPED if the problem had no submission to rescore.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    usage_key = student_module.module_state_key

    with modulestore().bulk_operations(course_id):
        course = get_course_by_id(course_id)
        # TODO: Here is a call site where we could pass in a loaded course. I
        # think we certainly need it since grading is happening here, and field
        # overrides would be important in handling that correctly
        instance = _get_module_instance_for_task(
            course_id,
            student,
            module_descriptor,
            xmodule_instance_args,
            grade_bucket_type='rescore',
            course=course
        )

        if instance is None:
            # Either permissions just changed, or someone is trying to be clever
            # and load something they shouldn't have access to.
            msg = "No module {loc} for student {student}--access denied?".format(
                loc=usage_key,
                student=student
            )
            TASK_LOG.warning(msg)
            return UPDATE_STATUS_FAILED

        # TODO: (TNL-6594) Remove this switch once rescore_problem support is
        # dropped, i.e. once CAPA uses ScorableXBlockMixin.
        for method in ['rescore', 'rescore_problem']:
            rescore_method = getattr(instance, method, None)
            if rescore_method is not None:
                break
        else:  # for-else: Neither method exists on the block.
            # This should not happen, since it should be already checked in the
            # caller, but check here to be sure.
            msg = "Specified problem does not support rescoring."
            raise UpdateProblemModuleStateError(msg)

        # TODO: Remove the first part of this if-else with TNL-6594
        # We check here to see if the problem has any submissions. If it does not, we don't want to rescore it
        if hasattr(instance, "done"):
            if not instance.done:
                return UPDATE_STATUS_SKIPPED
        elif not instance.has_submitted_answer():
            return UPDATE_STATUS_SKIPPED

        # Set the tracking info before this call, because it makes downstream
        # calls that create events. We retrieve and store the id here because
        # the request cache will be erased during downstream calls.
        event_transaction_id = create_new_event_transaction_id()
        set_event_transaction_type(GRADES_RESCORE_EVENT_TYPE)

        result = rescore_method(only_if_higher=task_input['only_if_higher'])
        instance.save()

        if result is None or result.get(u'success') in {u'correct', u'incorrect'}:
            TASK_LOG.debug(
                u"successfully processed rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s",
                dict(
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )

            if result is not None:  # Only for CAPA. This will get moved to the grade handler.
                new_weighted_earned, new_weighted_possible = weighted_score(
                    result['new_raw_earned'] if result else None,
                    result['new_raw_possible'] if result else None,
                    module_descriptor.weight,
                )

                # TODO: remove this context manager after completion of AN-6134
                context = course_context_from_course_id(course_id)
                with tracker.get_tracker().context(GRADES_RESCORE_EVENT_TYPE, context):
                    tracker.emit(
                        unicode(GRADES_RESCORE_EVENT_TYPE),
                        {
                            'course_id': unicode(course_id),
                            'user_id': unicode(student.id),
                            'problem_id': unicode(usage_key),
                            'new_weighted_earned': new_weighted_earned,
                            'new_weighted_possible': new_weighted_possible,
                            'only_if_higher': task_input['only_if_higher'],
                            'instructor_id': unicode(xmodule_instance_args['request_info']['user_id']),
                            'event_transaction_id': unicode(event_transaction_id),
                            'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
                        }
                    )
            return UPDATE_STATUS_SUCCEEDED
        else:
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result.get('success', result),
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
@outer_atomic
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
    """
    Resets problem attempts to zero for specified `student_module`.

    Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
    that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
    """
    update_status = UPDATE_STATUS_SKIPPED
    problem_state = json.loads(student_module.state) if student_module.state else {}
    if 'attempts' in problem_state:
        old_number_of_attempts = problem_state["attempts"]
        if old_number_of_attempts > 0:
            problem_state["attempts"] = 0
            # convert back to json and save
            student_module.state = json.dumps(problem_state)
            student_module.save()
            # get request-related tracking information from args passthrough,
            # and supplement with task-specific information:
            track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
            event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
            track_function('problem_reset_attempts', event_info)
            update_status = UPDATE_STATUS_SUCCEEDED

    return update_status
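
A small runnable illustration (state blob hand-written) of the JSON round-trip this performs on a StudentModule's state:

import json

before = json.dumps({"attempts": 3, "seed": 1})     # stored StudentModule.state
state = json.loads(before)
state["attempts"] = 0
after = json.dumps(state)                           # what gets saved back
assert json.loads(after) == {"attempts": 0, "seed": 1}
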
@outer_atomic
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
    """
    Delete the StudentModule entry.

    Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't raise an exception due to database error.
    """
    student_module.delete()
    # get request-related tracking information from args passthrough,
    # and supplement with task-specific information:
    track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    track_function('problem_delete_state', {})
    return UPDATE_STATUS_SUCCEEDED


def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None, course=None):
    """
    Fetches an XModule instance for a given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
    student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))

    # get request-related tracking information from args passthrough, and supplement with task-specific
    # information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}

    def make_track_function():
        '''
        Make a tracking function that logs what happened.

        For insertion into ModuleSystem, and used by CapaModule, which will
        provide the event_type (as string) and event (as dict) as arguments.
        The request_info and task_info (and page) are provided here.
        '''
        return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')

    xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
        if xmodule_instance_args is not None else ''

    return get_module_for_descriptor_internal(
        user=student,
        descriptor=module_descriptor,
        student_data=student_data,
        course_id=course_id,
        track_function=make_track_function(),
        xqueue_callback_url_prefix=xqueue_callback_url_prefix,
        grade_bucket_type=grade_bucket_type,
        # This module isn't being used for front-end rendering
        request_token=None,
        # pass in a loaded course for override enabling
        course=course
    )


def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
    """
    Make a tracking function that logs what happened.

    For insertion into ModuleSystem, and used by CapaModule, which will
    provide the event_type (as string) and event (as dict) as arguments.
    The request_info and task_info (and page) are provided here.
    """
    # get request-related tracking information from args passthrough, and supplement with task-specific
    # information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {'student': student.username, 'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args)}

    return lambda event_type, event: task_track(request_info, task_info, event_type, event, page=source_page)


def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    else:
        return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)

131
lms/djangoapps/instructor_task/tasks_helper/runner.py
Normal file
@@ -0,0 +1,131 @@
from django.db import reset_queries
from celery import Task, current_task
import dogstats_wrapper as dog_stats_api
import json
import logging
from util.db import outer_atomic
from time import time

from lms.djangoapps.instructor_task.models import InstructorTask, PROGRESS


TASK_LOG = logging.getLogger('edx.celery.task')


class TaskProgress(object):
    """
    Encapsulates the current task's progress by keeping track of
    'attempted', 'succeeded', 'skipped', 'failed', 'total',
    'action_name', and 'duration_ms' values.
    """
    def __init__(self, action_name, total, start_time):
        self.action_name = action_name
        self.total = total
        self.start_time = start_time
        self.attempted = 0
        self.succeeded = 0
        self.skipped = 0
        self.failed = 0

    def update_task_state(self, extra_meta=None):
        """
        Update the current celery task's state to the progress state
        specified by the current object. Returns the progress
        dictionary for use by `run_main_task` and
        `BaseInstructorTask.on_success`.

        Arguments:
            extra_meta (dict): Extra metadata to pass to `update_state`

        Returns:
            dict: The current task's progress dict
        """
        progress_dict = {
            'action_name': self.action_name,
            'attempted': self.attempted,
            'succeeded': self.succeeded,
            'skipped': self.skipped,
            'failed': self.failed,
            'total': self.total,
            'duration_ms': int((time() - self.start_time) * 1000),
        }
        if extra_meta is not None:
            progress_dict.update(extra_meta)
        _get_current_task().update_state(state=PROGRESS, meta=progress_dict)
        return progress_dict
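
A small usage sketch (values invented): helpers construct a TaskProgress, bump its counters as they work, and periodically push state to Celery. Note that update_task_state calls _get_current_task(), so this only works inside a running task, or with that hook mocked as the test diffs below do:

from time import time

progress = TaskProgress('graded', total=100, start_time=time())
progress.attempted += 1
progress.succeeded += 1
progress.update_task_state(extra_meta={'step': 'Calculating Grades'})
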
def run_main_task(entry_id, task_fcn, action_name):
    """
    Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.

    Arguments passed to `task_fcn` are:

        `entry_id` : the primary key for the InstructorTask entry representing the task.
        `course_id` : the id for the course.
        `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
        `action_name` : past-tense verb to use for constructing status messages.

    If no exceptions are raised, the `task_fcn` should return a dict containing
    the task's result with the following keys:

        'attempted': number of attempts made
        'succeeded': number of attempts that "succeeded"
        'skipped': number of attempts that "skipped"
        'failed': number of attempts that "failed"
        'total': number of possible subtasks to attempt
        'action_name': user-visible verb to use in status messages.
            Should be past-tense. Pass-through of input `action_name`.
        'duration_ms': how long the task has (or had) been running.

    """

    # Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
    # There's no point in catching it here.
    with outer_atomic():
        entry = InstructorTask.objects.get(pk=entry_id)
        entry.task_state = PROGRESS
        entry.save_now()

    # Get inputs to use in this task from the entry
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)

    # Construct log message
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
    TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)

    # Check that the task_id submitted in the InstructorTask matches the current task
    # that is running.
    request_task_id = _get_current_task().request.id
    if task_id != request_task_id:
        fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
        message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
        TASK_LOG.error(message)
        raise ValueError(message)

    # Now do the work
    with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
        task_progress = task_fcn(entry_id, course_id, task_input, action_name)

    # Release any queries that the connection has been hanging onto
    reset_queries()

    # Log and exit, returning task_progress info as task result
    TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
    return task_progress
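
As the test diffs at the end of this commit show, unit tests mock this hook rather than celery.current_task itself; a short sketch of that pattern:

from mock import patch, MagicMock

with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_task:
    mock_task.return_value = MagicMock()
    # run_main_task(...) and TaskProgress.update_task_state() can now
    # execute outside of a real celery worker.
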
def _get_current_task():
    """
    Stub to make it easier to test without actually running Celery.

    This is a wrapper around celery.current_task, which provides access
    to the top of the stack of Celery's tasks. When running tests, however,
    it doesn't seem to work to mock current_task directly, so this wrapper
    is used to provide a hook to mock in tests, while providing the real
    `current_task` in production.
    """
    return current_task

48
lms/djangoapps/instructor_task/tasks_helper/utils.py
Normal file
@@ -0,0 +1,48 @@
from eventtracking import tracker
from lms.djangoapps.instructor_task.models import ReportStore
from util.file import course_filename_prefix_generator


REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'

# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'

# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'


def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (first row may be a
            header):
            [
                [row1_column1, row1_column2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course
    """
    report_store = ReportStore.from_config(config_name)
    report_store.store_rows(
        course_id,
        u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
            course_prefix=course_filename_prefix_generator(course_id),
            csv_name=csv_name,
            timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
        ),
        rows
    )
    tracker_emit(csv_name)
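
A short usage sketch (rows invented; course_id is assumed to be a CourseKey already in scope): the first row acts as the header, and the stored file name is derived from the course prefix, csv_name, and timestamp as shown above:

from datetime import datetime
from pytz import UTC

rows = [
    ['Student ID', 'Email', 'Username', 'Grade'],       # header row
    [42, 'student@example.com', 'student', 0.83],
]
upload_csv_to_report_store(rows, 'grade_report', course_id, datetime.now(UTC))
# Stored as e.g. "<course_prefix>_grade_report_2017-03-10-1200.csv"
# in the GRADES_DOWNLOAD report store.
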
def tracker_emit(report_name):
    """
    Emits a 'report.requested' event for the given report.
    """
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
@@ -32,7 +32,7 @@ from lms.djangoapps.instructor_task.api import (
|
||||
submit_delete_problem_state_for_all_students
|
||||
)
|
||||
from lms.djangoapps.instructor_task.models import InstructorTask
|
||||
from lms.djangoapps.instructor_task.tasks_helper import upload_grades_csv
|
||||
from lms.djangoapps.instructor_task.tasks_helper.grades import generate_course_grade_report
|
||||
from lms.djangoapps.instructor_task.tests.test_base import (
|
||||
InstructorTaskModuleTestCase,
|
||||
TestReportMixin,
|
||||
@@ -572,10 +572,10 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
|
||||
def verify_csv_task_success(self, task_result):
|
||||
"""
|
||||
Verify that all students were successfully graded by
|
||||
`upload_grades_csv`.
|
||||
`generate_course_grade_report`.
|
||||
|
||||
Arguments:
|
||||
task_result (dict): Return value of `upload_grades_csv`.
|
||||
task_result (dict): Return value of `generate_course_grade_report`.
|
||||
"""
|
||||
self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, task_result)
|
||||
|
||||
@@ -635,8 +635,8 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
|
||||
self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])
|
||||
self.submit_student_answer(self.student_b.username, problem_b_url, [OPTION_1, OPTION_2])
|
||||
|
||||
with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
|
||||
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
|
||||
with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
|
||||
result = generate_course_grade_report(None, None, self.course.id, None, 'graded')
|
||||
self.verify_csv_task_success(result)
|
||||
self.verify_grades_in_csv(
|
||||
[
|
||||
@@ -668,8 +668,8 @@ class TestGradeReportConditionalContent(TestReportMixin, TestConditionalContent,
|
||||
|
||||
self.submit_student_answer(self.student_a.username, problem_a_url, [OPTION_1, OPTION_1])
|
||||
|
||||
with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
|
||||
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
|
||||
with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
|
||||
result = generate_course_grade_report(None, None, self.course.id, None, 'graded')
|
||||
self.verify_csv_task_success(result)
|
||||
self.verify_grades_in_csv(
|
||||
[
|
||||
|
||||
@@ -22,6 +22,7 @@ from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.exceptions import ItemNotFoundError

from lms.djangoapps.instructor_task.exceptions import UpdateProblemModuleStateError
from lms.djangoapps.instructor_task.models import InstructorTask
from lms.djangoapps.instructor_task.tests.test_base import InstructorTaskModuleTestCase
from lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory
@@ -32,10 +33,8 @@ from lms.djangoapps.instructor_task.tasks import (
    generate_certificates,
    export_ora2_data,
)
from lms.djangoapps.instructor_task.tasks_helper import (
    UpdateProblemModuleStateError,
    upload_ora2_data,
)
from lms.djangoapps.instructor_task.tasks_helper.misc import upload_ora2_data


PROBLEM_URL_NAME = "test_urlname"

@@ -97,7 +96,7 @@ class TestInstructorTasks(InstructorTaskModuleTestCase):
        self.current_task.update_state.side_effect = TestTaskFailure(expected_failure_message)
        task_args = [entry_id, self._get_xmodule_instance_args()]

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_get_task:
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_get_task:
            mock_get_task.return_value = self.current_task
            return task_class.apply(task_args, task_id=task_id).get()

@@ -275,7 +274,7 @@ class TestRescoreInstructorTask(TestInstructorTasks):
        mock_instance = MagicMock()
        del mock_instance.rescore_problem
        del mock_instance.rescore
        with patch('lms.djangoapps.instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
            mock_get_module.return_value = mock_instance
            with self.assertRaises(UpdateProblemModuleStateError):
                self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
@@ -295,7 +294,7 @@ class TestRescoreInstructorTask(TestInstructorTasks):
        num_students = 1
        self._create_students_with_state(num_students, input_state)
        task_entry = self._create_input_entry()
        with patch('lms.djangoapps.instructor_task.tasks_helper.get_module_for_descriptor_internal', return_value=None):
        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal', return_value=None):
            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

        self.assert_task_output(
@@ -331,7 +330,7 @@ class TestRescoreInstructorTask(TestInstructorTasks):
        num_students = 10
        self._create_students_with_state(num_students)
        task_entry = self._create_input_entry()
        with patch('lms.djangoapps.instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
            mock_get_module.return_value = mock_instance
            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

@@ -356,7 +355,7 @@ class TestRescoreInstructorTask(TestInstructorTasks):
        mock_instance = Mock()
        mock_instance.rescore_problem = Mock(return_value={'success': 'bogus'})
        del mock_instance.rescore
        with patch('lms.djangoapps.instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
            mock_get_module.return_value = mock_instance
            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

@@ -381,7 +380,7 @@ class TestRescoreInstructorTask(TestInstructorTasks):
        mock_instance = Mock()
        mock_instance.rescore_problem = Mock(return_value={'bogus': 'value'})
        del mock_instance.rescore
        with patch('lms.djangoapps.instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
            mock_get_module.return_value = mock_instance
            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

@@ -49,18 +49,27 @@ from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition

from ..models import ReportStore
from ..tasks_helper import (
    cohort_students_and_upload,
    upload_problem_responses_csv,
    upload_grades_csv,
    upload_problem_grade_report,
    upload_students_csv,
    upload_may_enroll_csv,
    upload_enrollment_report,
    upload_exec_summary_report,
    upload_course_survey_report,
from lms.djangoapps.instructor_task.tasks_helper.certs import (
    generate_students_certificates,
)
from lms.djangoapps.instructor_task.tasks_helper.enrollments import (
    upload_enrollment_report,
    upload_may_enroll_csv,
    upload_exec_summary_report,
    upload_students_csv,
)
from lms.djangoapps.instructor_task.tasks_helper.grades import (
    generate_course_grade_report,
    generate_problem_grade_report,
    upload_problem_responses_csv,
)
from lms.djangoapps.instructor_task.tasks_helper.misc import (
    cohort_students_and_upload,
    upload_course_survey_report,
    upload_proctored_exam_results_report,
    upload_ora2_data,
)
from ..tasks_helper.utils import (
    UPDATE_STATUS_FAILED,
    UPDATE_STATUS_SUCCEEDED,
)
@@ -79,8 +88,8 @@ class InstructorGradeReportTestCase(TestReportMixin, InstructorTaskCourseTestCas
        """
        Verify cell data in the grades CSV for a particular user.
        """
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            result = upload_grades_csv(None, None, course_id, None, 'graded')
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = generate_course_grade_report(None, None, course_id, None, 'graded')
        self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        report_csv_filename = report_store.links_for(course_id)[0][0]
@@ -110,13 +119,13 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):

        self.current_task = Mock()
        self.current_task.update_state = Mock()
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_current_task:
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_current_task:
            mock_current_task.return_value = self.current_task
            result = upload_grades_csv(None, None, self.course.id, None, 'graded')
            result = generate_course_grade_report(None, None, self.course.id, None, 'graded')
        num_students = len(emails)
        self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)

    @patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
    @patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task')
    @patch('lms.djangoapps.grades.new.course_grade_factory.CourseGradeFactory.iter')
    def test_grading_failure(self, mock_grades_iter, _mock_current_task):
        """
@@ -126,7 +135,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
        mock_grades_iter.return_value = [
            (self.create_student('username', 'student@example.com'), None, 'Cannot grade student')
        ]
        result = upload_grades_csv(None, None, self.course.id, None, 'graded')
        result = generate_course_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)

        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
@@ -293,7 +302,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
            u'Default Group',
        )

    @patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
    @patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task')
    @patch('lms.djangoapps.grades.new.course_grade_factory.CourseGradeFactory.iter')
    def test_unicode_in_csv_header(self, mock_grades_iter, _mock_current_task):
        """
@@ -310,7 +319,7 @@ class TestInstructorGradeReport(InstructorGradeReportTestCase):
                '',
            )
        ]
        result = upload_grades_csv(None, None, self.course.id, None, 'graded')
        result = generate_course_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)

@@ -362,8 +371,8 @@ class TestProblemResponsesReport(TestReportMixin, InstructorTaskCourseTestCase):

    def test_success(self):
        task_input = {'problem_location': ''}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            with patch('lms.djangoapps.instructor_task.tasks_helper.list_problem_responses') as patched_data_source:
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            with patch('lms.djangoapps.instructor_task.tasks_helper.grades.list_problem_responses') as patched_data_source:
                patched_data_source.return_value = [
                    {'username': 'user0', 'state': u'state0'},
                    {'username': 'user1', 'state': u'state1'},
@@ -410,7 +419,7 @@ class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCour
    def test_success(self):
        self.create_student('student', 'student@example.com')
        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')

        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
@@ -426,7 +435,7 @@ class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCour
        student_cart.purchase()

        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Credit Card - Individual')
@@ -445,7 +454,7 @@ class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCour
        )

        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')

        enrollment_source = u'manually enrolled by username: {username}'.format(
@@ -485,7 +494,7 @@ class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCour
        self.assertEquals(response.status_code, 200)

        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
@@ -519,7 +528,7 @@ class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCour
        self.assertEquals(response.status_code, 200)

        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
@@ -560,7 +569,7 @@ class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCour
        self.assertEquals(response.status_code, 200)

        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
@@ -594,13 +603,13 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
        self.student_2 = self.create_student(u'üser_2')
        self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Grade']

    @patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
    @patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task')
    def test_no_problems(self, _get_current_task):
        """
        Verify that we see no grade information for a course with no graded
        problems.
        """
        result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        result = generate_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv([
            dict(zip(
@@ -613,7 +622,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
            ))
        ])

    @patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
    @patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task')
    def test_single_problem(self, _get_current_task):
        vertical = ItemFactory.create(
            parent_location=self.problem_section.location,
@@ -624,7 +633,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
        self.define_option_problem(u'Problem1', parent=vertical)

        self.submit_student_answer(self.student_1.username, u'Problem1', ['Option 1'])
        result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        result = generate_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        problem_name = u'Homework 1: Subsection - Problem1'
        header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
@@ -649,7 +658,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
            ))
        ])

    @patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task')
    @patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task')
    @patch('lms.djangoapps.grades.new.course_grade_factory.CourseGradeFactory.iter')
    @ddt.data(u'Cannot grade student', '')
    def test_grading_failure(self, error_message, mock_grades_iter, _mock_current_task):
@@ -661,7 +670,7 @@ class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
        mock_grades_iter.return_value = [
            (student, None, error_message)
        ]
        result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        result = generate_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)

        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
@@ -710,8 +719,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
        self.submit_student_answer(self.student_b.username, self.problem_a_url, [self.OPTION_1, self.OPTION_2])
        self.submit_student_answer(self.student_b.username, self.problem_b_url, [self.OPTION_1, self.OPTION_2])

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = generate_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset(
            {'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
        )
@@ -802,8 +811,8 @@ class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent,
        for problem in problem_names:
            header_row += [problem + ' (Earned)', problem + ' (Possible)']

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            generate_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertEquals(self.get_csv_row_with_headers(), header_row)

@@ -858,8 +867,8 @@ class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, In
        self.assertEqual(resp.status_code, 404)
        self.submit_student_answer(self.beta_user.username, u'Problem1', ['Option 1', 'Option 2'])

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = generate_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset(
            {'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
        )
@@ -943,7 +952,7 @@ class TestExecutiveSummaryReport(TestReportMixin, InstructorTaskCourseTestCase):
        Test that successfully generates the executive summary report.
        """
        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_exec_summary_report(
                None, None, self.course.id,
                task_input, 'generating executive summary report'
@@ -996,7 +1005,7 @@ class TestExecutiveSummaryReport(TestReportMixin, InstructorTaskCourseTestCase):
        """
        self.students_purchases()
        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_exec_summary_report(
                None, None, self.course.id,
                task_input, 'generating executive summary report'
@@ -1062,7 +1071,7 @@ class TestCourseSurveyReport(TestReportMixin, InstructorTaskCourseTestCase):
        Test that successfully generates the course survey report.
        """
        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_course_survey_report(
                None, None, self.course.id,
                task_input, 'generating course survey report'
@@ -1077,7 +1086,7 @@ class TestCourseSurveyReport(TestReportMixin, InstructorTaskCourseTestCase):
        """

        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_course_survey_report(
                None, None, self.course.id,
                task_input, 'generating course survey report'
@@ -1129,7 +1138,7 @@ class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
    def test_success(self):
        self.create_student('student', 'student@example.com')
        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        links = report_store.links_for(self.course.id)
@@ -1155,7 +1164,7 @@ class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
                'goals'
            ]
        }
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_current_task:
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_current_task:
            mock_current_task.return_value = self.current_task
            result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
        # This assertion simply confirms that the generation completed with no errors
@@ -1187,7 +1196,7 @@ class TestTeamStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
                'goals', 'team'
            ]
        }
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_current_task:
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_current_task:
            mock_current_task.return_value = current_task
            result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
        self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
@@ -1244,7 +1253,7 @@ class TestListMayEnroll(TestReportMixin, InstructorTaskCourseTestCase):
    def test_success(self):
        self._create_enrollment('user@example.com')
        task_input = {'features': []}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        links = report_store.links_for(self.course.id)
@@ -1262,7 +1271,7 @@ class TestListMayEnroll(TestReportMixin, InstructorTaskCourseTestCase):
            self._create_enrollment(email)

        task_input = {'features': ['email']}
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
        # This assertion simply confirms that the generation completed with no errors
        num_enrollments = len(enrollments)
@@ -1279,7 +1288,7 @@ class MockDefaultStorage(object):
        return open(file_name)


@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
@patch('lms.djangoapps.instructor_task.tasks_helper.misc.DefaultStorage', new=MockDefaultStorage)
class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
    """
    Tests that bulk student cohorting works.
@@ -1301,7 +1310,7 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
        with tempfile.NamedTemporaryFile() as temp_file:
            temp_file.write(csv_data.encode('utf-8'))
            temp_file.flush()
            with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
                return cohort_students_and_upload(None, None, self.course.id, {'file_name': temp_file.name}, 'cohorted')

    def test_username(self):
@@ -1509,7 +1518,7 @@ class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
        )


@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
@patch('lms.djangoapps.instructor_task.tasks_helper.misc.DefaultStorage', new=MockDefaultStorage)
class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
    """
    Test that grade report has correct grade values.
@@ -1569,8 +1578,8 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
    def test_grade_report(self):
        self.submit_student_answer(self.student.username, u'Problem1', ['Option 1'])

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            result = upload_grades_csv(None, None, self.course.id, None, 'graded')
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            result = generate_course_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset(
            {'action_name': 'graded', 'attempted': 1, 'succeeded': 1, 'failed': 0},
            result,
@@ -1594,7 +1603,7 @@ class TestGradeReport(TestReportMixin, InstructorTaskModuleTestCase):


@ddt.ddt
@patch('lms.djangoapps.instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
@patch('lms.djangoapps.instructor_task.tasks_helper.misc.DefaultStorage', new=MockDefaultStorage)
class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTaskModuleTestCase):
    """
    Test that grade report has correct user enrolment, verification, and certificate information.
@@ -1644,8 +1653,8 @@ class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTas
        """
        Verify grade report data.
        """
        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
            upload_grades_csv(None, None, self.course.id, None, 'graded')
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
            generate_course_grade_report(None, None, self.course.id, None, 'graded')
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        report_csv_filename = report_store.links_for(self.course.id)[0][0]
        report_path = report_store.path_to(self.course.id, report_csv_filename)
@@ -2266,7 +2275,7 @@ class TestCertificateGeneration(InstructorTaskModuleTestCase):
        current_task = Mock()
        current_task.update_state = Mock()

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_current_task:
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_current_task:
            mock_current_task.return_value = current_task
            with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue:
                mock_queue.return_value = (0, "Successfully queued")
@@ -2314,11 +2323,11 @@ class TestInstructorOra2Report(SharedModuleStoreTestCase):

    def test_report_fails_if_error(self):
        with patch(
            'lms.djangoapps.instructor_task.tasks_helper.OraAggregateData.collect_ora2_data'
            'lms.djangoapps.instructor_task.tasks_helper.misc.OraAggregateData.collect_ora2_data'
        ) as mock_collect_data:
            mock_collect_data.side_effect = KeyError

            with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_current_task:
            with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_current_task:
                mock_current_task.return_value = self.current_task

                response = upload_ora2_data(None, None, self.course.id, None, 'generated')
@@ -2329,11 +2338,11 @@ class TestInstructorOra2Report(SharedModuleStoreTestCase):
        test_header = ['field1', 'field2']
        test_rows = [['row1_field1', 'row1_field2'], ['row2_field1', 'row2_field2']]

        with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task') as mock_current_task:
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_current_task:
            mock_current_task.return_value = self.current_task

            with patch(
                'lms.djangoapps.instructor_task.tasks_helper.OraAggregateData.collect_ora2_data'
                'lms.djangoapps.instructor_task.tasks_helper.misc.OraAggregateData.collect_ora2_data'
            ) as mock_collect_data:
                mock_collect_data.return_value = (test_header, test_rows)