@@ -2,11 +2,12 @@
 Management command to generate a list of grades for
 all students that are enrolled in a course.
 """
-from courseware import grades, courses
-from certificates.models import GeneratedCertificate
 from django.test.client import RequestFactory
 from django.core.management.base import BaseCommand, CommandError
 import os
+from lms.djangoapps.courseware import courses
+from lms.djangoapps.certificates.models import GeneratedCertificate
+from lms.djangoapps.grades import course_grades
 from opaque_keys import InvalidKeyError
 from opaque_keys.edx.keys import CourseKey
 from opaque_keys.edx.locations import SlashSeparatedCourseKey
@@ -117,7 +118,7 @@ class Command(BaseCommand):
                     count, total, hours, minutes)
             start = datetime.datetime.now()
             request.user = student
-            grade = grades.grade(student, request, course)
+            grade = course_grades.summary(student, request, course)
             if not header:
                 header = [section['label'] for section in grade[u'section_breakdown']]
                 rows.append(["email", "username", "certificate-grade", "grade"] + header)

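Every hunk in this change follows the call-site pattern above: `courseware.grades.grade` gives way to `course_grades.summary`, with the gradeset dict left intact. A minimal sketch of a migrated caller, assuming `summary` preserves `grade`'s return keys (`grade`, `percent`, `section_breakdown`, `grade_breakdown`); `grade_row_for` is a hypothetical helper, not part of the patch:

```python
# Hypothetical caller, for illustration only: assumes course_grades.summary
# returns the same gradeset dict that courseware.grades.grade returned.
from django.test.client import RequestFactory

from lms.djangoapps.grades import course_grades


def grade_row_for(student, course):
    """Build one report row for a student, mirroring the management command."""
    request = RequestFactory().get('/')  # grading code expects a request
    request.user = student
    gradeset = course_grades.summary(student, request, course)
    labels = [section['label'] for section in gradeset[u'section_breakdown']]
    return [student.email, student.username, gradeset['percent']] + labels
```
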
@@ -14,7 +14,6 @@ from cStringIO import StringIO
 from django.conf import settings
 from django.core.urlresolvers import reverse
 from django.http import (
     Http404,
     HttpResponse,
     HttpResponseForbidden,
 )
@@ -31,8 +30,8 @@ from courseware.access import has_access
 from courseware.courses import get_course_by_id

 from courseware.field_overrides import disable_overrides
-from courseware.grades import iterate_grades_for
 from edxmako.shortcuts import render_to_response
+from grades.course_grades import iterate_grades_for
 from opaque_keys.edx.keys import CourseKey
 from ccx_keys.locator import CCXLocator
 from student.roles import CourseCcxCoachRole

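The CCX views keep using `iterate_grades_for`; only its home moves. Per the generator's contract (documented in the deleted `courseware/grades.py` later in this diff), it yields `(student, gradeset, err_msg)` and never raises per-student. A hedged sketch of a consumer; `passing_usernames` is illustrative only:

```python
# Illustration only: iterate_grades_for yields (student, gradeset, err_msg)
# for each enrolled student; the import now comes from the grades app.
from grades.course_grades import iterate_grades_for


def passing_usernames(course, enrolled_students):
    """Yield usernames of students whose gradeset carries a letter grade."""
    for student, gradeset, err_msg in iterate_grades_for(course, enrolled_students):
        if err_msg:
            continue  # student could not be graded; err_msg holds the reason
        if gradeset.get('grade') is not None:
            yield student.username
```
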
@@ -4,7 +4,8 @@ Management command which fixes ungraded certificates for students


 from certificates.models import GeneratedCertificate
-from courseware import grades, courses
+from courseware import courses
+from lms.djangoapps.grades import course_grades
 from django.test.client import RequestFactory
 from django.core.management.base import BaseCommand
 from optparse import make_option
@@ -51,7 +52,7 @@ class Command(BaseCommand):

         for cert in ungraded:
             # grade the student
-            grade = grades.grade(cert.user, course)
+            grade = course_grades.summary(cert.user, course)
             print "grading {0} - {1}".format(cert.user, grade['percent'])
             cert.grade = grade['percent']
             if not options['noop']:

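The certificate-fixing command shows the two-argument form of the new API. A compact sketch of the regrade step it performs, with `fix_cert_grade` as a hypothetical extraction of the loop body:

```python
# Sketch of the regrade loop after the rename; assumes cert.user and
# course are already loaded, as in the management command above.
from lms.djangoapps.grades import course_grades


def fix_cert_grade(cert, course, noop=True):
    """Recompute a certificate's grade; persist unless running as a no-op."""
    gradeset = course_grades.summary(cert.user, course)
    cert.grade = gradeset['percent']
    if not noop:
        cert.save()
    return cert.grade
```
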
@@ -11,7 +11,7 @@ from django.conf import settings
 from django.core.urlresolvers import reverse
 from requests.auth import HTTPBasicAuth

-from courseware import grades
+from lms.djangoapps.grades import course_grades
 from xmodule.modulestore.django import modulestore
 from capa.xqueue_interface import XQueueInterface
 from capa.xqueue_interface import make_xheader, make_hashkey
@@ -257,7 +257,7 @@ class XQueueCertInterface(object):
         self.request.session = {}

         is_whitelisted = self.whitelist.filter(user=student, course_id=course_id, whitelist=True).exists()
-        grade = grades.grade(student, course)
+        grade = course_grades.summary(student, course)
         enrollment_mode, __ = CourseEnrollment.enrollment_mode_for_user(student, course_id)
         mode_is_verified = enrollment_mode in GeneratedCertificate.VERIFIED_CERTS_MODES
         user_is_verified = SoftwareSecurePhotoVerification.user_is_verified(student)

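In `XQueueCertInterface`, certificate eligibility still keys off the gradeset: a non-None letter grade means the student passed (see `grade_for_percentage` in the deleted module later in the diff, which returns None below the lowest cutoff). An illustrative condensation of that check; the function name is invented for the example:

```python
# Illustrative eligibility check mirroring XQueueCertInterface.add_cert:
# a student is considered passing when the gradeset carries a letter grade.
from lms.djangoapps.grades import course_grades


def is_eligible_for_certificate(student, course, whitelist):
    """True if the student passed the course or is explicitly whitelisted."""
    is_whitelisted = whitelist.filter(
        user=student, course_id=course.id, whitelist=True
    ).exists()
    gradeset = course_grades.summary(student, course)
    return is_whitelisted or gradeset['grade'] is not None
```
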
@@ -14,6 +14,7 @@ from config_models.models import cache
 from course_modes.models import CourseMode
 from course_modes.tests.factories import CourseModeFactory
 from courseware.tests.factories import GlobalStaffFactory
+from lms.djangoapps.grades.tests.utils import mock_passing_grade
 from microsite_configuration import microsite
 from student.models import CourseEnrollment
 from student.tests.factories import UserFactory
@@ -47,16 +48,6 @@ class WebCertificateTestMixin(object):
     """
     Mixin with helpers for testing Web Certificates.
     """
-    @contextmanager
-    def _mock_passing_grade(self):
-        """
-        Mock the grading function to always return a passing grade.
-        """
-        symbol = 'courseware.grades.grade'
-        with patch(symbol) as mock_grade:
-            mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
-            yield
-
     @contextmanager
     def _mock_queue(self, is_successful=True):
         """
@@ -193,7 +184,7 @@ class CertificateDownloadableStatusTests(WebCertificateTestMixin, ModuleStoreTes
     def test_with_downloadable_web_cert(self):
         CourseEnrollment.enroll(self.student, self.course.id, mode='honor')
         self._setup_course_certificate()
-        with self._mock_passing_grade():
+        with mock_passing_grade():
             certs_api.generate_user_certificates(self.student, self.course.id)

         cert_status = certificate_status_for_student(self.student, self.course.id)
@@ -437,7 +428,7 @@ class GenerateUserCertificatesTest(EventTestMixin, WebCertificateTestMixin, Modu
         self.request_factory = RequestFactory()

     def test_new_cert_requests_into_xqueue_returns_generating(self):
-        with self._mock_passing_grade():
+        with mock_passing_grade():
             with self._mock_queue():
                 certs_api.generate_user_certificates(self.student, self.course.id)

@@ -455,7 +446,7 @@ class GenerateUserCertificatesTest(EventTestMixin, WebCertificateTestMixin, Modu
         )

     def test_xqueue_submit_task_error(self):
-        with self._mock_passing_grade():
+        with mock_passing_grade():
             with self._mock_queue(is_successful=False):
                 certs_api.generate_user_certificates(self.student, self.course.id)

@@ -476,7 +467,7 @@ class GenerateUserCertificatesTest(EventTestMixin, WebCertificateTestMixin, Modu
             mode='verified'
         )

-        with self._mock_passing_grade():
+        with mock_passing_grade():
             with self._mock_queue(is_successful=False):
                 status = certs_api.generate_user_certificates(self.student, self.course.id)
                 self.assertEqual(status, None)
@@ -487,7 +478,7 @@ class GenerateUserCertificatesTest(EventTestMixin, WebCertificateTestMixin, Modu
         Test no message sent to Xqueue if HTML certificate view is enabled
         """
         self._setup_course_certificate()
-        with self._mock_passing_grade():
+        with mock_passing_grade():
             certs_api.generate_user_certificates(self.student, self.course.id)

         # Verify that the certificate has status 'downloadable'

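The per-class `_mock_passing_grade` helpers removed here are replaced by one shared utility. Reconstructed from the helpers it supersedes, the utility in `lms/djangoapps/grades/tests/utils.py` presumably looks roughly like this (the patch target and the `grade_pass` keyword are inferred from how the tests call it; the real module may differ):

```python
# Presumable shape of the shared test helper; reconstructed from the
# per-class helpers this diff deletes, not copied from the real module.
from contextlib import contextmanager

from mock import patch


@contextmanager
def mock_passing_grade(grade_pass='Pass', percent=0.75):
    """Mock the grading function to always return a passing grade."""
    with patch('lms.djangoapps.grades.course_grades.summary') as mock_grade:
        mock_grade.return_value = {'grade': grade_pass, 'percent': percent}
        yield
```
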
@@ -1,6 +1,5 @@
 """Tests for the resubmit_error_certificates management command. """
 import ddt
-from contextlib import contextmanager
 from django.core.management.base import CommandError
 from nose.plugins.attrib import attr
 from django.test.utils import override_settings
@@ -12,6 +11,7 @@ from opaque_keys.edx.locator import CourseLocator
 from badges.events.course_complete import get_completion_badge
 from badges.models import BadgeAssertion
 from badges.tests.factories import BadgeAssertionFactory, CourseCompleteImageConfigurationFactory
+from lms.djangoapps.grades.tests.utils import mock_passing_grade
 from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
 from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls, ItemFactory
 from student.tests.factories import UserFactory, CourseEnrollmentFactory
@@ -247,7 +247,7 @@ class UngenerateCertificatesTest(CertificateManagementTest):
         mock_send_to_queue.return_value = (0, "Successfully queued")
         key = self.course.location.course_key
         self._create_cert(key, self.user, CertificateStatuses.unavailable)
-        with self._mock_passing_grade():
+        with mock_passing_grade():
             self._run_command(
                 course=unicode(key), noop=False, insecure=True, force=False
             )
@@ -257,11 +257,3 @@ class UngenerateCertificatesTest(CertificateManagementTest):
             course_id=key
         )
         self.assertEqual(certificate.status, CertificateStatuses.generating)
-
-    @contextmanager
-    def _mock_passing_grade(self):
-        """Mock the grading function to always return a passing grade. """
-        symbol = 'courseware.grades.grade'
-        with patch(symbol) as mock_grade:
-            mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
-            yield

@@ -13,6 +13,7 @@ import freezegun
 import pytz

 from course_modes.models import CourseMode
+from lms.djangoapps.grades.tests.utils import mock_passing_grade
 from opaque_keys.edx.locator import CourseLocator
 from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
 from student.tests.factories import UserFactory, CourseEnrollmentFactory
@@ -58,7 +59,8 @@ class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
         SoftwareSecurePhotoVerificationFactory.create(user=self.user_2, status='approved')

     def test_add_cert_callback_url(self):
-        with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
+
+        with mock_passing_grade():
             with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                 mock_send.return_value = (0, None)
                 self.xqueue.add_cert(self.user, self.course.id)
@@ -73,7 +75,7 @@ class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
         """
         Tests there is no certificate create message in the queue if generate_pdf is False
         """
-        with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
+        with mock_passing_grade():
             with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                 self.xqueue.add_cert(self.user, self.course.id, generate_pdf=False)

@@ -121,7 +123,7 @@ class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
         CertificateWhitelistFactory(course_id=self.course.id, user=self.user_2)

         # Generate certs
-        with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
+        with mock_passing_grade():
             with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                 mock_send.return_value = (0, None)
                 self.xqueue.add_cert(self.user_2, self.course.id)
@@ -145,7 +147,7 @@ class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
             is_active=True,
             mode=mode,
         )
-        with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
+        with mock_passing_grade():
             with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                 mock_send.return_value = (0, None)
                 self.xqueue.add_cert(self.user_2, self.course.id)
@@ -270,7 +272,7 @@ class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
         )

         # Run grading/cert generation again
-        with patch('courseware.grades.grade', Mock(return_value={'grade': grade, 'percent': 0.75})):
+        with mock_passing_grade(grade_pass=grade):
             with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                 mock_send.return_value = (0, None)
                 self.xqueue.add_cert(self.user_2, self.course.id)

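The `grade_pass=grade` call in the last hunk shows the shared utility is parametrizable. Hypothetical usage in a test body:

```python
# Hypothetical test helper: the default mocks a 'Pass'/0.75 gradeset, and
# grade_pass overrides the letter grade the mocked summary reports.
from lms.djangoapps.grades.tests.utils import mock_passing_grade


def regrade_as(xqueue, user, course_id, letter='A'):
    """Re-run cert generation while grading reports the given letter grade."""
    with mock_passing_grade(grade_pass=letter):
        xqueue.add_cert(user, course_id)
```
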
@@ -21,6 +21,7 @@ from lms.djangoapps.badges.tests.factories import (
     CourseCompleteImageConfigurationFactory,
     BadgeClassFactory,
 )
+from lms.djangoapps.grades.tests.utils import mock_passing_grade
 from openedx.core.lib.tests.assertions.events import assert_event_matches
 from student.tests.factories import UserFactory, CourseEnrollmentFactory
 from student.roles import CourseStaffRole
@@ -836,8 +837,7 @@ class CertificatesViewsTests(CommonCertificatesTestCase):
         request_certificate_url = reverse('certificates.views.request_certificate')
         with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue:
             mock_queue.return_value = (0, "Successfully queued")
-            with patch('courseware.grades.grade') as mock_grade:
-                mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
+            with mock_passing_grade():
                 response = self.client.post(request_certificate_url, {'course_id': unicode(self.course.id)})
                 self.assertEqual(response.status_code, 200)
                 response_json = json.loads(response.content)

@@ -1,762 +0,0 @@
-# Compute grades using real division, with no integer truncation
-from __future__ import division
-
-import json
-import logging
-import random
-from collections import defaultdict
-
-import dogstats_wrapper as dog_stats_api
-from course_blocks.api import get_course_blocks
-from courseware import courses
-from django.conf import settings
-from django.core.cache import cache
-from django.test.client import RequestFactory
-from opaque_keys import InvalidKeyError
-from opaque_keys.edx.keys import CourseKey
-from opaque_keys.edx.locator import BlockUsageLocator
-from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
-from openedx.core.lib.cache_utils import memoized
-from openedx.core.lib.gating import api as gating_api
-from courseware.model_data import FieldDataCache, ScoresClient
-from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
-from student.models import anonymous_id_for_user
-from util.db import outer_atomic
-from util.module_utils import yield_dynamic_descriptor_descendants
-from xblock.core import XBlock
-from xmodule import graders, block_metadata_utils
-from xmodule.graders import Score
-from xmodule.modulestore.django import modulestore
-from xmodule.modulestore.exceptions import ItemNotFoundError
-from .models import StudentModule
-from .module_render import get_module_for_descriptor
-from .transformers.grades import GradesTransformer
-
-
-log = logging.getLogger("edx.courseware")
-
-
-class ProgressSummary(object):
-    """
-    Wrapper class for the computation of a user's scores across a course.
-
-    Attributes
-        chapters: a summary of all sections with problems in the course. It is
-        organized as an array of chapters, each containing an array of sections,
-        each containing an array of scores. This contains information for graded
-        and ungraded problems, and is good for displaying a course summary with
-        due dates, etc.
-
-        weighted_scores: a dictionary mapping module locations to weighted Score
-        objects.
-
-        locations_to_children: a function mapping locations to their
-        direct descendants.
-    """
-    def __init__(self, chapters, weighted_scores, locations_to_children):
-        self.chapters = chapters
-        self.weighted_scores = weighted_scores
-        self.locations_to_children = locations_to_children
-
-    def score_for_module(self, location):
-        """
-        Calculate the aggregate weighted score for any location in the course.
-        This method returns a tuple containing (earned_score, possible_score).
-
-        If the location is of 'problem' type, this method will return the
-        possible and earned scores for that problem. If the location refers to a
-        composite module (a vertical or section ) the scores will be the sums of
-        all scored problems that are children of the chosen location.
-        """
-        if location in self.weighted_scores:
-            score = self.weighted_scores[location]
-            return score.earned, score.possible
-        children = self.locations_to_children[location]
-        earned = 0.0
-        possible = 0.0
-        for child in children:
-            child_earned, child_possible = self.score_for_module(child)
-            earned += child_earned
-            possible += child_possible
-        return earned, possible
-
-
-@memoized
-def block_types_with_scores():
-    """
-    Returns the block types that could have a score.
-
-    Something might be a scored item if it is capable of storing a score
-    (has_score=True). We also have to include anything that can have children,
-    since those children might have scores. We can avoid things like Videos,
-    which have state but cannot ever impact someone's grade.
-    """
-    return frozenset(
-        cat for (cat, xblock_class) in XBlock.load_classes() if (
-            getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)
-        )
-    )
-
-
-def possibly_scored(usage_key):
-    """
-    Returns whether the given block could impact grading (i.e. scored, or has children).
-    """
-    return usage_key.block_type in block_types_with_scores()
-
-
-def grading_context_for_course(course):
-    """
-    Same as grading_context, but takes in a course object.
-    """
-    course_structure = get_course_in_cache(course.id)
-    return grading_context(course_structure)
-
-
-def grading_context(course_structure):
-    """
-    This returns a dictionary with keys necessary for quickly grading
-    a student. They are used by grades.grade()
-
-    The grading context has two keys:
-    graded_sections - This contains the sections that are graded, as
-        well as all possible children modules that can affect the
-        grading. This allows some sections to be skipped if the student
-        hasn't seen any part of it.
-
-        The format is a dictionary keyed by section-type. The values are
-        arrays of dictionaries containing
-            "section_block" : The section block
-            "scored_descendant_keys" : An array of usage keys for blocks
-                could possibly be in the section, for any student
-
-    all_graded_blocks - This contains a list of all blocks that can
-        affect grading a student. This is used to efficiently fetch
-        all the xmodule state for a FieldDataCache without walking
-        the descriptor tree again.
-
-    """
-    all_graded_blocks = []
-    all_graded_sections = defaultdict(list)
-
-    for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
-        for section_key in course_structure.get_children(chapter_key):
-            section = course_structure[section_key]
-            scored_descendants_of_section = [section]
-            if section.graded:
-                for descendant_key in course_structure.post_order_traversal(
-                        filter_func=possibly_scored,
-                        start_node=section_key,
-                ):
-                    scored_descendants_of_section.append(
-                        course_structure[descendant_key],
-                    )
-
-                # include only those blocks that have scores, not if they are just a parent
-                section_info = {
-                    'section_block': section,
-                    'scored_descendants': [
-                        child for child in scored_descendants_of_section
-                        if getattr(child, 'has_score', None)
-                    ]
-                }
-                section_format = getattr(section, 'format', '')
-                all_graded_sections[section_format].append(section_info)
-                all_graded_blocks.extend(scored_descendants_of_section)
-
-    return {
-        'all_graded_sections': all_graded_sections,
-        'all_graded_blocks': all_graded_blocks,
-    }
-
-
-def answer_distributions(course_key):
-    """
-    Given a course_key, return answer distributions in the form of a dictionary
-    mapping:
-
-        (problem url_name, problem display_name, problem_id) -> {dict: answer -> count}
-
-    Answer distributions are found by iterating through all StudentModule
-    entries for a given course with type="problem" and a grade that is not null.
-    This means that we only count LoncapaProblems that people have submitted.
-    Other types of items like ORA or sequences will not be collected. Empty
-    Loncapa problem state that gets created from running the progress page is
-    also not counted.
-
-    This method accesses the StudentModule table directly instead of using the
-    CapaModule abstraction. The main reason for this is so that we can generate
-    the report without any side-effects -- we don't have to worry about answer
-    distribution potentially causing re-evaluation of the student answer. This
-    also allows us to use the read-replica database, which reduces risk of bad
-    locking behavior. And quite frankly, it makes this a lot less confusing.
-
-    Also, we're pulling all available records from the database for this course
-    rather than crawling through a student's course-tree -- the latter could
-    potentially cause us trouble with A/B testing. The distribution report may
-    not be aware of problems that are not visible to the user being used to
-    generate the report.
-
-    This method will try to use a read-replica database if one is available.
-    """
-    # dict: { module.module_state_key : (url_name, display_name) }
-    state_keys_to_problem_info = {}  # For caching, used by url_and_display_name
-
-    def url_and_display_name(usage_key):
-        """
-        For a given usage_key, return the problem's url and display_name.
-        Handle modulestore access and caching. This method ignores permissions.
-
-        Raises:
-            InvalidKeyError: if the usage_key does not parse
-            ItemNotFoundError: if there is no content that corresponds
-                to this usage_key.
-        """
-        problem_store = modulestore()
-        if usage_key not in state_keys_to_problem_info:
-            problem = problem_store.get_item(usage_key)
-            problem_info = (problem.url_name, problem.display_name_with_default_escaped)
-            state_keys_to_problem_info[usage_key] = problem_info
-
-        return state_keys_to_problem_info[usage_key]
-
-    # Iterate through all problems submitted for this course in no particular
-    # order, and build up our answer_counts dict that we will eventually return
-    answer_counts = defaultdict(lambda: defaultdict(int))
-    for module in StudentModule.all_submitted_problems_read_only(course_key):
-        try:
-            state_dict = json.loads(module.state) if module.state else {}
-            raw_answers = state_dict.get("student_answers", {})
-        except ValueError:
-            log.error(
-                u"Answer Distribution: Could not parse module state for StudentModule id=%s, course=%s",
-                module.id,
-                course_key,
-            )
-            continue
-
-        try:
-            url, display_name = url_and_display_name(module.module_state_key.map_into_course(course_key))
-            # Each problem part has an ID that is derived from the
-            # module.module_state_key (with some suffix appended)
-            for problem_part_id, raw_answer in raw_answers.items():
-                # Convert whatever raw answers we have (numbers, unicode, None, etc.)
-                # to be unicode values. Note that if we get a string, it's always
-                # unicode and not str -- state comes from the json decoder, and that
-                # always returns unicode for strings.
-                answer = unicode(raw_answer)
-                answer_counts[(url, display_name, problem_part_id)][answer] += 1
-
-        except (ItemNotFoundError, InvalidKeyError):
-            msg = (
-                "Answer Distribution: Item {} referenced in StudentModule {} " +
-                "for user {} in course {} not found; " +
-                "This can happen if a student answered a question that " +
-                "was later deleted from the course. This answer will be " +
-                "omitted from the answer distribution CSV."
-            ).format(
-                module.module_state_key, module.id, module.student_id, course_key
-            )
-            log.warning(msg)
-            continue
-
-    return answer_counts
-
-
-def grade(student, course, keep_raw_scores=False, course_structure=None):
-    """
-    Returns the grade of the student.
-
-    Also sends a signal to update the minimum grade requirement status.
-    """
-    grade_summary = _grade(student, course, keep_raw_scores, course_structure)
-    responses = GRADES_UPDATED.send_robust(
-        sender=None,
-        username=student.username,
-        grade_summary=grade_summary,
-        course_key=course.id,
-        deadline=course.end
-    )
-
-    for receiver, response in responses:
-        log.info('Signal fired when student grade is calculated. Receiver: %s. Response: %s', receiver, response)
-
-    return grade_summary
-
-
-def _grade(student, course, keep_raw_scores, course_structure=None):
-    """
-    Unwrapped version of "grade"
-
-    This grades a student as quickly as possible. It returns the
-    output from the course grader, augmented with the final letter
-    grade. The keys in the output are:
-
-    - course: a CourseDescriptor
-    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
-      for every graded module
-
-    More information on the format is in the docstring for CourseGrader.
-    """
-    if course_structure is None:
-        course_structure = get_course_blocks(student, course.location)
-    grading_context_result = grading_context(course_structure)
-    scorable_locations = [block.location for block in grading_context_result['all_graded_blocks']]
-
-    with outer_atomic():
-        scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
-
-    # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
-    # scores that were registered with the submissions API, which for the moment
-    # means only openassessment (edx-ora2)
-    # We need to import this here to avoid a circular dependency of the form:
-    # XBlock --> submissions --> Django Rest Framework error strings -->
-    # Django translation --> ... --> courseware --> submissions
-    from submissions import api as sub_api  # installed from the edx-submissions repository
-
-    with outer_atomic():
-        submissions_scores = sub_api.get_scores(
-            course.id.to_deprecated_string(),
-            anonymous_id_for_user(student, course.id)
-        )
-
-    totaled_scores, raw_scores = _calculate_totaled_scores(
-        student, grading_context_result, submissions_scores, scores_client, keep_raw_scores
-    )
-
-    with outer_atomic():
-        # Grading policy might be overriden by a CCX, need to reset it
-        course.set_grading_policy(course.grading_policy)
-        grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)
-
-        # We round the grade here, to make sure that the grade is a whole percentage and
-        # doesn't get displayed differently than it gets grades
-        grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100
-
-        letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
-        grade_summary['grade'] = letter_grade
-        grade_summary['totaled_scores'] = totaled_scores  # make this available, eg for instructor download & debugging
-        if keep_raw_scores:
-            # way to get all RAW scores out to instructor
-            # so grader can be double-checked
-            grade_summary['raw_scores'] = raw_scores
-
-    return grade_summary
-
-
-def _calculate_totaled_scores(
-        student,
-        grading_context_result,
-        submissions_scores,
-        scores_client,
-        keep_raw_scores,
-):
-    """
-    Returns the totaled scores, which can be passed to the grader.
-    """
-    raw_scores = []
-    totaled_scores = {}
-    for section_format, sections in grading_context_result['all_graded_sections'].iteritems():
-        format_scores = []
-        for section_info in sections:
-            section = section_info['section_block']
-            section_name = block_metadata_utils.display_name_with_default(section)
-
-            with outer_atomic():
-                # Check to
-                # see if any of our locations are in the scores from the submissions
-                # API. If scores exist, we have to calculate grades for this section.
-                should_grade_section = any(
-                    unicode(descendant.location) in submissions_scores
-                    for descendant in section_info['scored_descendants']
-                )
-
-                if not should_grade_section:
-                    should_grade_section = any(
-                        descendant.location in scores_client
-                        for descendant in section_info['scored_descendants']
-                    )
-
-                # If we haven't seen a single problem in the section, we don't have
-                # to grade it at all! We can assume 0%
-                if should_grade_section:
-                    scores = []
-
-                    for descendant in section_info['scored_descendants']:
-
-                        (correct, total) = get_score(
-                            student,
-                            descendant,
-                            scores_client,
-                            submissions_scores,
-                        )
-                        if correct is None and total is None:
-                            continue
-
-                        if settings.GENERATE_PROFILE_SCORES:  # for debugging!
-                            if total > 1:
-                                correct = random.randrange(max(total - 2, 1), total + 1)
-                            else:
-                                correct = total
-
-                        graded = descendant.graded
-                        if not total > 0:
-                            # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
-                            graded = False
-
-                        scores.append(
-                            Score(
-                                correct,
-                                total,
-                                graded,
-                                block_metadata_utils.display_name_with_default_escaped(descendant),
-                                descendant.location
-                            )
-                        )
-
-                    __, graded_total = graders.aggregate_scores(scores, section_name)
-                    if keep_raw_scores:
-                        raw_scores += scores
-                else:
-                    graded_total = Score(0.0, 1.0, True, section_name, None)
-
-                # Add the graded total to totaled_scores
-                if graded_total.possible > 0:
-                    format_scores.append(graded_total)
-                else:
-                    log.info(
-                        "Unable to grade a section with a total possible score of zero. " +
-                        str(section.location)
-                    )
-
-        totaled_scores[section_format] = format_scores
-
-    return totaled_scores, raw_scores
-
-
-def grade_for_percentage(grade_cutoffs, percentage):
-    """
-    Returns a letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None.
-
-    Arguments
-    - grade_cutoffs is a dictionary mapping a grade to the lowest
-        possible percentage to earn that grade.
-    - percentage is the final percent across all problems in a course
-    """
-
-    letter_grade = None
-
-    # Possible grades, sorted in descending order of score
-    descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
-    for possible_grade in descending_grades:
-        if percentage >= grade_cutoffs[possible_grade]:
-            letter_grade = possible_grade
-            break
-
-    return letter_grade
-
-
-def progress_summary(student, course, course_structure=None):
-    """
-    Returns progress summary for all chapters in the course.
-    """
-
-    progress = _progress_summary(student, course, course_structure)
-    if progress:
-        return progress.chapters
-    else:
-        return None
-
-
-def get_weighted_scores(student, course):
-    """
-    Uses the _progress_summary method to return a ProgressSummary object
-    containing details of a students weighted scores for the course.
-    """
-    return _progress_summary(student, course)
-
-
-def _progress_summary(student, course, course_structure=None):
-    """
-    Unwrapped version of "progress_summary".
-
-    This pulls a summary of all problems in the course.
-
-    Returns
-    - courseware_summary is a summary of all sections with problems in the course.
-      It is organized as an array of chapters, each containing an array of sections,
-      each containing an array of scores. This contains information for graded and
-      ungraded problems, and is good for displaying a course summary with due dates,
-      etc.
-    - None if the student does not have access to load the course module.
-
-    Arguments:
-        student: A User object for the student to grade
-        course: A Descriptor containing the course to grade
-
-    """
-    if course_structure is None:
-        course_structure = get_course_blocks(student, course.location)
-    if not len(course_structure):
-        return None
-    scorable_locations = [block_key for block_key in course_structure if possibly_scored(block_key)]
-
-    with outer_atomic():
-        scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
-
-    # We need to import this here to avoid a circular dependency of the form:
-    # XBlock --> submissions --> Django Rest Framework error strings -->
-    # Django translation --> ... --> courseware --> submissions
-    from submissions import api as sub_api  # installed from the edx-submissions repository
-    with outer_atomic():
-        submissions_scores = sub_api.get_scores(
-            unicode(course.id), anonymous_id_for_user(student, course.id)
-        )
-
-    # Check for gated content
-    gated_content = gating_api.get_gated_content(course, student)
-
-    chapters = []
-    locations_to_weighted_scores = {}
-
-    for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
-        chapter = course_structure[chapter_key]
-        sections = []
-        for section_key in course_structure.get_children(chapter_key):
-            if unicode(section_key) in gated_content:
-                continue
-
-            section = course_structure[section_key]
-
-            graded = getattr(section, 'graded', False)
-            scores = []
-
-            for descendant_key in course_structure.post_order_traversal(
-                    filter_func=possibly_scored,
-                    start_node=section_key,
-            ):
-                descendant = course_structure[descendant_key]
-
-                (correct, total) = get_score(
-                    student,
-                    descendant,
-                    scores_client,
-                    submissions_scores,
-                )
-                if correct is None and total is None:
-                    continue
-
-                weighted_location_score = Score(
-                    correct,
-                    total,
-                    graded,
-                    block_metadata_utils.display_name_with_default_escaped(descendant),
-                    descendant.location
-                )
-
-                scores.append(weighted_location_score)
-                locations_to_weighted_scores[descendant.location] = weighted_location_score
-
-            escaped_section_name = block_metadata_utils.display_name_with_default_escaped(section)
-            section_total, _ = graders.aggregate_scores(scores, escaped_section_name)
-
-            sections.append({
-                'display_name': escaped_section_name,
-                'url_name': block_metadata_utils.url_name_for_block(section),
-                'scores': scores,
-                'section_total': section_total,
-                'format': getattr(section, 'format', ''),
-                'due': getattr(section, 'due', None),
-                'graded': graded,
-            })
-
-        chapters.append({
-            'course': course.display_name_with_default_escaped,
-            'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
-            'url_name': block_metadata_utils.url_name_for_block(chapter),
-            'sections': sections
-        })
-
-    return ProgressSummary(chapters, locations_to_weighted_scores, course_structure.get_children)
-
-
-def weighted_score(raw_correct, raw_total, weight):
-    """Return a tuple that represents the weighted (correct, total) score."""
-    # If there is no weighting, or weighting can't be applied, return input.
-    if weight is None or raw_total == 0:
-        return (raw_correct, raw_total)
-    return (float(raw_correct) * weight / raw_total, float(weight))
-
-
-def get_score(user, block, scores_client, submissions_scores_cache):
-    """
-    Return the score for a user on a problem, as a tuple (correct, total).
-    e.g. (5,7) if you got 5 out of 7 points.
-
-    If this problem doesn't have a score, or we couldn't load it, returns (None,
-    None).
-
-    user: a Student object
-    block: a BlockStructure's BlockData object
-    scores_client: an initialized ScoresClient
-    submissions_scores_cache: A dict of location names to (earned, possible) point tuples.
-        If an entry is found in this cache, it takes precedence.
-    """
-    submissions_scores_cache = submissions_scores_cache or {}
-
-    if not user.is_authenticated():
-        return (None, None)
-
-    location_url = unicode(block.location)
-    if location_url in submissions_scores_cache:
-        return submissions_scores_cache[location_url]
-
-    if not getattr(block, 'has_score', False):
-        # These are not problems, and do not have a score
-        return (None, None)
-
-    # Check the score that comes from the ScoresClient (out of CSM).
-    # If an entry exists and has a total associated with it, we trust that
-    # value. This is important for cases where a student might have seen an
-    # older version of the problem -- they're still graded on what was possible
-    # when they tried the problem, not what it's worth now.
-    score = scores_client.get(block.location)
-    if score and score.total is not None:
-        # We have a valid score, just use it.
-        correct = score.correct if score.correct is not None else 0.0
-        total = score.total
-    else:
-        # This means we don't have a valid score entry and we don't have a
-        # cached_max_score on hand. We know they've earned 0.0 points on this.
-        correct = 0.0
-        total = block.transformer_data[GradesTransformer].max_score
-
-        # Problem may be an error module (if something in the problem builder failed)
-        # In which case total might be None
-        if total is None:
-            return (None, None)
-
-    return weighted_score(correct, total, block.weight)
-
-
-def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
-    """Given a course_id and an iterable of students (User), yield a tuple of:
-
-    (student, gradeset, err_msg) for every student enrolled in the course.
-
-    If an error occurred, gradeset will be an empty dict and err_msg will be an
-    exception message. If there was no error, err_msg is an empty string.
-
-    The gradeset is a dictionary with the following fields:
-
-    - grade : A final letter grade.
-    - percent : The final percent for the class (rounded up).
-    - section_breakdown : A breakdown of each section that makes
-        up the grade. (For display)
-    - grade_breakdown : A breakdown of the major components that
-        make up the final grade. (For display)
-    - raw_scores: contains scores for every graded module
-    """
-    if isinstance(course_or_id, (basestring, CourseKey)):
-        course = courses.get_course_by_id(course_or_id)
-    else:
-        course = course_or_id
-
-    for student in students:
-        with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
-            try:
-                gradeset = grade(student, course, keep_raw_scores)
-                yield student, gradeset, ""
-            except Exception as exc:  # pylint: disable=broad-except
-                # Keep marching on even if this student couldn't be graded for
-                # some reason, but log it for future reference.
-                log.exception(
-                    'Cannot grade student %s (%s) in course %s because of exception: %s',
-                    student.username,
-                    student.id,
-                    course.id,
-                    exc.message
-                )
-                yield student, {}, exc.message
-
-
-def _get_mock_request(student):
-    """
-    Make a fake request because grading code expects to be able to look at
-    the request. We have to attach the correct user to the request before
-    grading that student.
-    """
-    request = RequestFactory().get('/')
-    request.user = student
-    return request
-
-
-def _calculate_score_for_modules(user_id, course, modules):
-    """
-    Calculates the cumulative score (percent) of the given modules
-    """
-
-    # removing branch and version from exam modules locator
-    # otherwise student module would not return scores since module usage keys would not match
-    modules = [m for m in modules]
-    locations = [
-        BlockUsageLocator(
-            course_key=course.id,
-            block_type=module.location.block_type,
-            block_id=module.location.block_id
-        )
-        if isinstance(module.location, BlockUsageLocator) and module.location.version
-        else module.location
-        for module in modules
-    ]
-
-    scores_client = ScoresClient(course.id, user_id)
-    scores_client.fetch_scores(locations)
-
-    # Iterate over all of the exam modules to get score percentage of user for each of them
-    module_percentages = []
-    ignore_categories = ['course', 'chapter', 'sequential', 'vertical', 'randomize', 'library_content']
-    for index, module in enumerate(modules):
-        if module.category not in ignore_categories and (module.graded or module.has_score):
-            module_score = scores_client.get(locations[index])
-            if module_score:
-                correct = module_score.correct or 0
-                total = module_score.total or 1
-                module_percentages.append(correct / total)
-
-    return sum(module_percentages) / float(len(module_percentages)) if module_percentages else 0
-
-
-def get_module_score(user, course, module):
-    """
-    Collects all children of the given module and calculates the cumulative
-    score for this set of modules for the given user.
-
-    Arguments:
-        user (User): The user
-        course (CourseModule): The course
-        module (XBlock): The module
-
-    Returns:
-        float: The cumulative score
-    """
-    def inner_get_module(descriptor):
-        """
-        Delegate to get_module_for_descriptor
-        """
-        field_data_cache = FieldDataCache([descriptor], course.id, user)
-        return get_module_for_descriptor(
-            user,
-            _get_mock_request(user),
-            descriptor,
-            field_data_cache,
-            course.id,
-            course=course
-        )
-
-    modules = yield_dynamic_descriptor_descendants(
-        module,
-        user.id,
-        inner_get_module
-    )
-    return _calculate_score_for_modules(user.id, course, modules)
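Before it is deleted, note how `weighted_score` in the module above rescales an `(earned, possible)` pair onto a problem's `weight`; this behavior presumably carries over to the new grades app unchanged. A worked example:

```python
# Worked example of the scoring helper deleted above: weighted_score
# rescales an earned/possible pair onto an explicit weight.
def weighted_score(raw_correct, raw_total, weight):
    if weight is None or raw_total == 0:
        return (raw_correct, raw_total)
    return (float(raw_correct) * weight / raw_total, float(weight))

assert weighted_score(5, 7, None) == (5, 7)    # no weight: pass through
assert weighted_score(5, 10, 4) == (2.0, 4.0)  # 5/10 rescaled onto 4 points
```
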
@@ -19,11 +19,8 @@ from django.contrib.auth.models import User
 from django.conf import settings
 from django.db import models
 from django.db.models.signals import post_save
-from django.dispatch import receiver, Signal

 from model_utils.models import TimeStampedModel
-from student.models import user_by_anonymous_id
-from submissions.models import score_set, score_reset
 import coursewarehistoryextended

 from xmodule_django.models import CourseKeyField, LocationKeyField, BlockTypeKeyField
@@ -359,101 +356,3 @@ class StudentFieldOverride(TimeStampedModel):

     field = models.CharField(max_length=255)
     value = models.TextField(default='null')
-
-
-# Signal that indicates that a user's score for a problem has been updated.
-# This signal is generated when a scoring event occurs either within the core
-# platform or in the Submissions module. Note that this signal will be triggered
-# regardless of the new and previous values of the score (i.e. it may be the
-# case that this signal is generated when a user re-attempts a problem but
-# receives the same score).
-SCORE_CHANGED = Signal(
-    providing_args=[
-        'points_possible',  # Maximum score available for the exercise
-        'points_earned',  # Score obtained by the user
-        'user_id',  # Integer User ID
-        'course_id',  # Unicode string representing the course
-        'usage_id'  # Unicode string indicating the courseware instance
-    ]
-)
-
-
-@receiver(score_set)
-def submissions_score_set_handler(sender, **kwargs):  # pylint: disable=unused-argument
-    """
-    Consume the score_set signal defined in the Submissions API, and convert it
-    to a SCORE_CHANGED signal defined in this module. Converts the unicode keys
-    for user, course and item into the standard representation for the
-    SCORE_CHANGED signal.
-
-    This method expects that the kwargs dictionary will contain the following
-    entries (See the definition of score_set):
-      - 'points_possible': integer,
-      - 'points_earned': integer,
-      - 'anonymous_user_id': unicode,
-      - 'course_id': unicode,
-      - 'item_id': unicode
-    """
-    points_possible = kwargs.get('points_possible', None)
-    points_earned = kwargs.get('points_earned', None)
-    course_id = kwargs.get('course_id', None)
-    usage_id = kwargs.get('item_id', None)
-    user = None
-    if 'anonymous_user_id' in kwargs:
-        user = user_by_anonymous_id(kwargs.get('anonymous_user_id'))
-
-    # If any of the kwargs were missing, at least one of the following values
-    # will be None.
-    if all((user, points_possible, points_earned, course_id, usage_id)):
-        SCORE_CHANGED.send(
-            sender=None,
-            points_possible=points_possible,
-            points_earned=points_earned,
-            user_id=user.id,
-            course_id=course_id,
-            usage_id=usage_id
-        )
-    else:
-        log.exception(
-            u"Failed to process score_set signal from Submissions API. "
-            "points_possible: %s, points_earned: %s, user: %s, course_id: %s, "
-            "usage_id: %s", points_possible, points_earned, user, course_id, usage_id
-        )
-
-
-@receiver(score_reset)
-def submissions_score_reset_handler(sender, **kwargs):  # pylint: disable=unused-argument
-    """
-    Consume the score_reset signal defined in the Submissions API, and convert
-    it to a SCORE_CHANGED signal indicating that the score has been set to 0/0.
-    Converts the unicode keys for user, course and item into the standard
-    representation for the SCORE_CHANGED signal.
-
-    This method expects that the kwargs dictionary will contain the following
-    entries (See the definition of score_reset):
-      - 'anonymous_user_id': unicode,
-      - 'course_id': unicode,
-      - 'item_id': unicode
-    """
-    course_id = kwargs.get('course_id', None)
-    usage_id = kwargs.get('item_id', None)
-    user = None
-    if 'anonymous_user_id' in kwargs:
-        user = user_by_anonymous_id(kwargs.get('anonymous_user_id'))
-
-    # If any of the kwargs were missing, at least one of the following values
-    # will be None.
-    if all((user, course_id, usage_id)):
-        SCORE_CHANGED.send(
-            sender=None,
-            points_possible=0,
-            points_earned=0,
-            user_id=user.id,
-            course_id=course_id,
-            usage_id=usage_id
-        )
-    else:
-        log.exception(
-            u"Failed to process score_reset signal from Submissions API. "
-            "user: %s, course_id: %s, usage_id: %s", user, course_id, usage_id
-        )

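`SCORE_CHANGED` and its Submissions-API adapters move out of `courseware.models`; the next hunk repoints the one import. A sketch of a receiver against the relocated signal, assuming its `providing_args` are unchanged (the receiver itself is invented for illustration):

```python
# Sketch of a consumer after the move: the signal now lives in the grades
# app rather than courseware.models, but its providing_args are unchanged.
from django.dispatch import receiver

from lms.djangoapps.grades.signals import SCORE_CHANGED


@receiver(SCORE_CHANGED)
def log_score_change(sender, points_earned, points_possible,
                     user_id, course_id, usage_id, **kwargs):
    """Example receiver; fires on every scoring event, even equal re-scores."""
    print('{}/{} for user {} on {} in {}'.format(
        points_earned, points_possible, user_id, usage_id, course_id))
```
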
@@ -46,7 +46,7 @@ from courseware.masquerade import (
     setup_masquerade,
 )
 from courseware.model_data import DjangoKeyValueStore, FieldDataCache, set_score
-from courseware.models import SCORE_CHANGED
+from lms.djangoapps.grades.signals import SCORE_CHANGED
 from edxmako.shortcuts import render_to_string
 from lms.djangoapps.lms_xblock.field_data import LmsFieldData
 from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig

@@ -18,11 +18,10 @@ from capa.tests.response_xml_factory import (
     OptionResponseXMLFactory, CustomResponseXMLFactory, SchematicResponseXMLFactory,
     CodeResponseXMLFactory,
 )
-from courseware import grades
+from lms.djangoapps.grades import course_grades, progress
 from courseware.models import StudentModule, BaseStudentModuleHistory
 from courseware.tests.helpers import LoginEnrollmentTestCase
 from lms.djangoapps.lms_xblock.runtime import quote_slashes
-from student.tests.factories import UserFactory
 from student.models import anonymous_id_for_user
 from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
 from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@@ -246,7 +245,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl

     def get_grade_summary(self):
         """
-        calls grades.grade for current user and course.
+        calls course_grades.summary for current user and course.

        the keywords for the returned object are
        - grade : A final letter grade.
@@ -256,7 +255,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
        - grade_breakdown : A breakdown of the major components that
          make up the final grade. (For display)
         """
-        return grades.grade(self.student_user, self.course)
+        return course_grades.summary(self.student_user, self.course)

     def get_progress_summary(self):
         """
@@ -269,7 +268,7 @@ class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase, Probl
        ungraded problems, and is good for displaying a course summary with due dates,
        etc.
         """
-        return grades.progress_summary(self.student_user, self.course)
+        return progress.summary(self.student_user, self.course).chapters

     def check_grade_percent(self, percent):
         """
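`get_progress_summary` now goes through `progress.summary(...).chapters`, which suggests the new API returns a `ProgressSummary`-like object rather than the raw chapter list. A hedged sketch of consuming it; `chapter_scores` is illustrative:

```python
# Illustration only: assumes progress.summary() returns an object whose
# .chapters attribute matches the old grades.progress_summary() value.
from lms.djangoapps.grades import progress


def chapter_scores(student, course):
    """Flatten the per-chapter summaries into (chapter, section, scores) rows."""
    chapters = progress.summary(student, course).chapters
    for chapter in chapters:
        for section in chapter['sections']:
            yield chapter['display_name'], section['display_name'], section['scores']
```
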
@@ -989,162 +988,6 @@ class TestPythonGradedResponse(TestSubmittingProblems):
         self._check_ireset(name)


-@attr('shard_1')
-class TestAnswerDistributions(TestSubmittingProblems):
-    """Check that we can pull answer distributions for problems."""
-
-    def setUp(self):
-        """Set up a simple course with four problems."""
-        super(TestAnswerDistributions, self).setUp()
-
-        self.homework = self.add_graded_section_to_course('homework')
-        self.p1_html_id = self.add_dropdown_to_section(self.homework.location, 'p1', 1).location.html_id()
-        self.p2_html_id = self.add_dropdown_to_section(self.homework.location, 'p2', 1).location.html_id()
-        self.p3_html_id = self.add_dropdown_to_section(self.homework.location, 'p3', 1).location.html_id()
-        self.refresh_course()
-
-    def test_empty(self):
-        # Just make sure we can process this without errors.
-        empty_distribution = grades.answer_distributions(self.course.id)
-        self.assertFalse(empty_distribution)  # should be empty
-
-    def test_one_student(self):
-        # Basic test to make sure we have simple behavior right for a student
-
-        # Throw in a non-ASCII answer
-        self.submit_question_answer('p1', {'2_1': u'ⓤⓝⓘⓒⓞⓓⓔ'})
-        self.submit_question_answer('p2', {'2_1': 'Correct'})
-
-        distributions = grades.answer_distributions(self.course.id)
-        self.assertEqual(
-            distributions,
-            {
-                ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
-                    u'ⓤⓝⓘⓒⓞⓓⓔ': 1
-                },
-                ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
-                    'Correct': 1
-                }
-            }
-        )
-
-    def test_multiple_students(self):
-        # Our test class is based around making requests for a particular user,
-        # so we're going to cheat by creating another user and copying and
-        # modifying StudentModule entries to make them from other users. It's
-        # a little hacky, but it seemed the simpler way to do this.
-        self.submit_question_answer('p1', {'2_1': u'Correct'})
-        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
-        self.submit_question_answer('p3', {'2_1': u'Correct'})
-
-        # Make the above submissions owned by user2
-        user2 = UserFactory.create()
-        problems = StudentModule.objects.filter(
-            course_id=self.course.id,
-            student=self.student_user
-        )
-        for problem in problems:
-            problem.student_id = user2.id
-            problem.save()
-
-        # Now make more submissions by our original user
-        self.submit_question_answer('p1', {'2_1': u'Correct'})
-        self.submit_question_answer('p2', {'2_1': u'Correct'})
-
-        self.assertEqual(
-            grades.answer_distributions(self.course.id),
-            {
-                ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
-                    'Correct': 2
-                },
-                ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
-                    'Correct': 1,
-                    'Incorrect': 1
-                },
-                ('p3', 'p3', '{}_2_1'.format(self.p3_html_id)): {
-                    'Correct': 1
-                }
-            }
-        )
-
-    def test_other_data_types(self):
-        # We'll submit one problem, and then muck with the student_answers
-        # dict inside its state to try different data types (str, int, float,
-        # none)
-        self.submit_question_answer('p1', {'2_1': u'Correct'})
-
-        # Now fetch the state entry for that problem.
-        student_module = StudentModule.objects.get(
-            course_id=self.course.id,
-            student=self.student_user
-        )
-        for val in ('Correct', True, False, 0, 0.0, 1, 1.0, None):
-            state = json.loads(student_module.state)
-            state["student_answers"]['{}_2_1'.format(self.p1_html_id)] = val
-            student_module.state = json.dumps(state)
-            student_module.save()
-
-            self.assertEqual(
-                grades.answer_distributions(self.course.id),
-                {
-                    ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
-                        str(val): 1
-                    },
-                }
-            )
-
-    def test_missing_content(self):
-        # If there's a StudentModule entry for content that no longer exists,
-        # we just quietly ignore it (because we can't display a meaningful url
-        # or name for it).
-        self.submit_question_answer('p1', {'2_1': 'Incorrect'})
-
-        # Now fetch the state entry for that problem and alter it so it points
-        # to a non-existent problem.
-        student_module = StudentModule.objects.get(
-            course_id=self.course.id,
-            student=self.student_user
-        )
-        student_module.module_state_key = student_module.module_state_key.replace(
-            name=student_module.module_state_key.name + "_fake"
-        )
-        student_module.save()
-
-        # It should be empty (ignored)
-        empty_distribution = grades.answer_distributions(self.course.id)
-        self.assertFalse(empty_distribution)  # should be empty
-
-    def test_broken_state(self):
-        # Missing or broken state for a problem should be skipped without
-        # causing the whole answer_distribution call to explode.
-
-        # Submit p1
-        self.submit_question_answer('p1', {'2_1': u'Correct'})
-
-        # Now fetch the StudentModule entry for p1 so we can corrupt its state
-        prb1 = StudentModule.objects.get(
-            course_id=self.course.id,
-            student=self.student_user
-        )
-
-        # Submit p2
-        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
-
-        for new_p1_state in ('{"student_answers": {}}', "invalid json!", None):
-            prb1.state = new_p1_state
-            prb1.save()
-
-            # p1 won't show up, but p2 should still work
-            self.assertEqual(
-                grades.answer_distributions(self.course.id),
-                {
-                    ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
-                        'Incorrect': 1
-                    },
-                }
-            )
-
-
 @attr('shard_1')
 class TestConditionalContent(TestSubmittingProblems):
     """

@@ -1254,8 +1254,10 @@ class ProgressPageTests(ModuleStoreTestCase):
        self.assertNotContains(resp, 'Request Certificate')

    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [],
                                                         'grade_breakdown': []}))
    @patch(
        'lms.djangoapps.grades.course_grades.summary',
        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
    )
    def test_view_certificate_link(self):
        """
        If certificate web view is enabled then the certificate web view button should appear for a user whose certificate is
@@ -1315,8 +1317,10 @@ class ProgressPageTests(ModuleStoreTestCase):
        self.assertContains(resp, "creating your certificate")

    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [],
                                                         'grade_breakdown': []}))
    @patch(
        'lms.djangoapps.grades.course_grades.summary',
        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
    )
    def test_view_certificate_link_hidden(self):
        """
        If certificate web view is disabled then the certificate web view button should not appear for a user whose certificate
@@ -1355,7 +1359,7 @@ class ProgressPageTests(ModuleStoreTestCase):
        )
        self.assertEqual(resp.status_code, 200)

    @patch('courseware.grades.grade', Mock(return_value={
    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={
        'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []
    }))
    @ddt.data(
@@ -1393,8 +1397,10 @@ class ProgressPageTests(ModuleStoreTestCase):
            'Request Certificate' not in resp.content)

    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [],
                                                         'grade_breakdown': []}))
    @patch(
        'lms.djangoapps.grades.course_grades.summary',
        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
    )
    def test_page_with_invalidated_certificate_with_html_view(self):
        """
        Verify that for html certs, if the certificate is marked as invalidated then
@@ -1427,8 +1433,10 @@ class ProgressPageTests(ModuleStoreTestCase):
        self.assertContains(resp, u"View Certificate")
        self.assert_invalidate_certificate(generated_certificate)

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [],
                                                         'grade_breakdown': []}))
    @patch(
        'lms.djangoapps.grades.course_grades.summary',
        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
    )
    def test_page_with_invalidated_certificate_with_pdf(self):
        """
        Verify that for pdf certs, if the certificate is marked as invalidated then
@@ -1444,8 +1452,10 @@ class ProgressPageTests(ModuleStoreTestCase):
        self.assertContains(resp, u'Download Your Certificate')
        self.assert_invalidate_certificate(generated_certificate)

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [],
                                                         'grade_breakdown': []}))
    @patch(
        'lms.djangoapps.grades.course_grades.summary',
        Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [], 'grade_breakdown': []})
    )
    def test_message_for_audit_mode(self):
        """ Verify that a message appears on the progress page if the learner is enrolled
        in audit mode.
@@ -1632,19 +1642,19 @@ class IsCoursePassedTests(ModuleStoreTestCase):
        # If the user has no grade, then False is returned
        self.assertFalse(views.is_course_passed(self.course, None, self.student, self.request))

    @patch('courseware.grades.grade', Mock(return_value={'percent': 0.9}))
    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'percent': 0.9}))
    def test_user_pass_if_percent_appears_above_passing_point(self):
        # Mocking the grade summary
        # If the user has above-passing marks, then True is returned
        self.assertTrue(views.is_course_passed(self.course, None, self.student, self.request))

    @patch('courseware.grades.grade', Mock(return_value={'percent': 0.2}))
    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'percent': 0.2}))
    def test_user_fail_if_percent_appears_below_passing_point(self):
        # Mocking the grade summary
        # If the user has below-passing marks, then False is returned
        self.assertFalse(views.is_course_passed(self.course, None, self.student, self.request))

    @patch('courseware.grades.grade', Mock(return_value={'percent': SUCCESS_CUTOFF}))
    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'percent': SUCCESS_CUTOFF}))
    def test_user_with_passing_marks_and_achieved_marks_equal(self):
        # Mocking the grade summary
        # If the user's achieved passing marks are equal to the required passing
@@ -1678,7 +1688,7 @@ class GenerateUserCertTests(ModuleStoreTestCase):
        self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
        self.assertIn("Your certificate will be available when you pass the course.", resp.content)

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    @override_settings(CERT_QUEUE='certificates', LMS_SEGMENT_KEY="foobar")
    def test_user_with_passing_grade(self):
        # If the user has an above-passing grade, then the JSON response will contain a cert-generating message and
@@ -1710,7 +1720,7 @@ class GenerateUserCertTests(ModuleStoreTestCase):
        )
        mock_tracker.reset_mock()

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    def test_user_with_passing_existing_generating_cert(self):
        # If the user has a passing grade but also has an existing generating cert,
        # then the JSON response will contain a cert-generating message with a bad request code
@@ -1724,7 +1734,7 @@ class GenerateUserCertTests(ModuleStoreTestCase):
        self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
        self.assertIn("Certificate is being created.", resp.content)

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    @patch('lms.djangoapps.grades.course_grades.summary', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    @override_settings(CERT_QUEUE='certificates', LMS_SEGMENT_KEY="foobar")
    def test_user_with_passing_existing_downloadable_cert(self):
        # If the user already has a downloadable certificate

@@ -46,7 +46,7 @@ from openedx.core.djangoapps.models.course_details import CourseDetails
from commerce.utils import EcommerceService
from enrollment.api import add_enrollment
from course_modes.models import CourseMode
from courseware import grades
from lms.djangoapps.grades import course_grades, progress as grades_progress
from courseware.access import has_access, has_ccx_coach_role, _adjust_start_date_for_beta_testers
from courseware.access_response import StartDateError
from courseware.access_utils import in_preview_mode
@@ -723,12 +723,12 @@ def _progress(request, course_key, student_id):
    # Fetch course blocks once for performance reasons
    course_structure = get_course_blocks(student, course.location)

    courseware_summary = grades.progress_summary(student, course, course_structure)
    courseware_summary = grades_progress.summary(student, course, course_structure).chapters
    if courseware_summary is None:
        # This means the student didn't have access to the course (which the instructor requested)
        raise Http404

    grade_summary = grades.grade(student, course, course_structure=course_structure)
    grade_summary = course_grades.summary(student, course, course_structure=course_structure)
    studio_url = get_studio_url(course, 'settings/grading')

    # checking certificate generation configuration
@@ -1123,7 +1123,7 @@ def is_course_passed(course, grade_summary=None, student=None, request=None):
    success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None

    if grade_summary is None:
        grade_summary = grades.grade(student, course)
        grade_summary = course_grades.summary(student, course)

    return success_cutoff and grade_summary['percent'] >= success_cutoff

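For context, the check above reduces to comparing the summary percent against the smallest nonzero grade cutoff. A standalone sketch of that rule with made-up cutoff values (the real ones come from the course's grading policy):

    grade_cutoffs = {'A': 0.9, 'B': 0.7, 'C': 0.5}  # hypothetical policy
    nonzero_cutoffs = [cutoff for cutoff in grade_cutoffs.values() if cutoff > 0]
    success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None

    grade_summary = {'percent': 0.55}  # e.g. from course_grades.summary()
    print success_cutoff and grade_summary['percent'] >= success_cutoff  # True: 0.55 >= 0.5
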
@@ -9,6 +9,7 @@ from django.contrib.auth.models import User
from xmodule.modulestore.django import modulestore
from milestones import api as milestones_api
from openedx.core.lib.gating import api as gating_api
from lms.djangoapps.grades.module_grades import get_module_score


log = logging.getLogger(__name__)
@@ -64,7 +65,6 @@ def evaluate_prerequisite(course, prereq_content_key, user_id):

    gated_content = gated_content_milestones.get(prereq_milestone['id'])
    if gated_content:
        from courseware.grades import get_module_score
        user = User.objects.get(id=user_id)
        score = get_module_score(user, course, sequential) * 100
        for milestone in gated_content:

@@ -4,7 +4,7 @@ Signal handlers for the gating djangoapp
from django.dispatch import receiver
from opaque_keys.edx.keys import CourseKey, UsageKey
from xmodule.modulestore.django import modulestore
from courseware.models import SCORE_CHANGED
from lms.djangoapps.grades.signals import SCORE_CHANGED
from gating import api as gating_api


@@ -136,7 +136,7 @@ class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
        gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, min_score)
        self.prereq_milestone = gating_api.get_gating_milestone(self.course.id, self.seq1.location, 'fulfills')

    @patch('courseware.grades.get_module_score')
    @patch('gating.api.get_module_score')
    @data((.5, True), (1, True), (0, False))
    @unpack
    def test_min_score_achieved(self, module_score, result, mock_module_score):
@@ -149,7 +149,7 @@ class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
        self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)

    @patch('gating.api.log.warning')
    @patch('courseware.grades.get_module_score')
    @patch('gating.api.get_module_score')
    @data((.5, False), (1, True))
    @unpack
    def test_invalid_min_score(self, module_score, result, mock_module_score, mock_log):
@@ -162,21 +162,21 @@ class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
        self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)
        self.assertTrue(mock_log.called)

    @patch('courseware.grades.get_module_score')
    @patch('gating.api.get_module_score')
    def test_orphaned_xblock(self, mock_module_score):
        """ Test test_orphaned_xblock """

        evaluate_prerequisite(self.course, self.prob2.location, self.user.id)
        self.assertFalse(mock_module_score.called)

    @patch('courseware.grades.get_module_score')
    @patch('gating.api.get_module_score')
    def test_no_prerequisites(self, mock_module_score):
        """ Test test_no_prerequisites """

        evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
        self.assertFalse(mock_module_score.called)

    @patch('courseware.grades.get_module_score')
    @patch('gating.api.get_module_score')
    def test_no_gated_content(self, mock_module_score):
        """ Test test_no_gated_content """

lms/djangoapps/grades/context.py (new file, 72 lines)
@@ -0,0 +1,72 @@
"""
|
||||
Grading Context
|
||||
"""
|
||||
from collections import defaultdict
|
||||
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
|
||||
|
||||
from .scores import possibly_scored
|
||||
|
||||
|
||||
def grading_context_for_course(course):
|
||||
"""
|
||||
Same as grading_context, but takes in a course object.
|
||||
"""
|
||||
course_structure = get_course_in_cache(course.id)
|
||||
return grading_context(course_structure)
|
||||
|
||||
|
||||
def grading_context(course_structure):
|
||||
"""
|
||||
This returns a dictionary with keys necessary for quickly grading
|
||||
a student. They are used by grades.grade()
|
||||
|
||||
The grading context has two keys:
|
||||
graded_sections - This contains the sections that are graded, as
|
||||
well as all possible children modules that can affect the
|
||||
grading. This allows some sections to be skipped if the student
|
||||
hasn't seen any part of it.
|
||||
|
||||
The format is a dictionary keyed by section-type. The values are
|
||||
arrays of dictionaries containing
|
||||
"section_block" : The section block
|
||||
"scored_descendant_keys" : An array of usage keys for blocks
|
||||
could possibly be in the section, for any student
|
||||
|
||||
all_graded_blocks - This contains a list of all blocks that can
|
||||
affect grading a student. This is used to efficiently fetch
|
||||
all the xmodule state for a FieldDataCache without walking
|
||||
the descriptor tree again.
|
||||
|
||||
"""
|
||||
all_graded_blocks = []
|
||||
all_graded_sections = defaultdict(list)
|
||||
|
||||
for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
|
||||
for section_key in course_structure.get_children(chapter_key):
|
||||
section = course_structure[section_key]
|
||||
scored_descendants_of_section = [section]
|
||||
if section.graded:
|
||||
for descendant_key in course_structure.post_order_traversal(
|
||||
filter_func=possibly_scored,
|
||||
start_node=section_key,
|
||||
):
|
||||
scored_descendants_of_section.append(
|
||||
course_structure[descendant_key],
|
||||
)
|
||||
|
||||
# include only those blocks that have scores, not if they are just a parent
|
||||
section_info = {
|
||||
'section_block': section,
|
||||
'scored_descendants': [
|
||||
child for child in scored_descendants_of_section
|
||||
if getattr(child, 'has_score', None)
|
||||
]
|
||||
}
|
||||
section_format = getattr(section, 'format', '')
|
||||
all_graded_sections[section_format].append(section_info)
|
||||
all_graded_blocks.extend(scored_descendants_of_section)
|
||||
|
||||
return {
|
||||
'all_graded_sections': all_graded_sections,
|
||||
'all_graded_blocks': all_graded_blocks,
|
||||
}
|
||||
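
A rough illustration of the structure grading_context() builds; the section and block names below are invented:

    # Hypothetical grading context for a course with one graded "Homework"
    # section holding two scored problems.
    context = grading_context(course_structure)
    # context == {
    #     'all_graded_sections': {
    #         'Homework': [{
    #             'section_block': <sequential block>,
    #             'scored_descendants': [<problem 1>, <problem 2>],
    #         }],
    #     },
    #     'all_graded_blocks': [<sequential block>, <problem 1>, <problem 2>],
    # }
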
lms/djangoapps/grades/course_grades.py (new file, 256 lines)
@@ -0,0 +1,256 @@
"""
|
||||
Functionality for course-level grades.
|
||||
"""
|
||||
from logging import getLogger
|
||||
from django.conf import settings
|
||||
import dogstats_wrapper as dog_stats_api
|
||||
import random
|
||||
|
||||
from opaque_keys.edx.keys import CourseKey
|
||||
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
|
||||
|
||||
from course_blocks.api import get_course_blocks
|
||||
from courseware.courses import get_course_by_id
|
||||
from courseware.model_data import ScoresClient
|
||||
from student.models import anonymous_id_for_user
|
||||
from util.db import outer_atomic
|
||||
from xmodule import graders, block_metadata_utils
|
||||
from xmodule.graders import Score
|
||||
|
||||
from .context import grading_context
|
||||
from .scores import get_score
|
||||
|
||||
|
||||
log = getLogger(__name__)
|
||||
|
||||
|
||||
def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
|
||||
"""Given a course_id and an iterable of students (User), yield a tuple of:
|
||||
|
||||
(student, gradeset, err_msg) for every student enrolled in the course.
|
||||
|
||||
If an error occurred, gradeset will be an empty dict and err_msg will be an
|
||||
exception message. If there was no error, err_msg is an empty string.
|
||||
|
||||
The gradeset is a dictionary with the following fields:
|
||||
|
||||
- grade : A final letter grade.
|
||||
- percent : The final percent for the class (rounded up).
|
||||
- section_breakdown : A breakdown of each section that makes
|
||||
up the grade. (For display)
|
||||
- grade_breakdown : A breakdown of the major components that
|
||||
make up the final grade. (For display)
|
||||
- raw_scores: contains scores for every graded module
|
||||
"""
|
||||
if isinstance(course_or_id, (basestring, CourseKey)):
|
||||
course = get_course_by_id(course_or_id)
|
||||
else:
|
||||
course = course_or_id
|
||||
|
||||
for student in students:
|
||||
with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
|
||||
try:
|
||||
gradeset = summary(student, course, keep_raw_scores)
|
||||
yield student, gradeset, ""
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
# Keep marching on even if this student couldn't be graded for
|
||||
# some reason, but log it for future reference.
|
||||
log.exception(
|
||||
'Cannot grade student %s (%s) in course %s because of exception: %s',
|
||||
student.username,
|
||||
student.id,
|
||||
course.id,
|
||||
exc.message
|
||||
)
|
||||
yield student, {}, exc.message
|
||||
|
||||
|
||||
def summary(student, course, keep_raw_scores=False, course_structure=None):
|
||||
"""
|
||||
Returns the grade summary of the student for the given course.
|
||||
|
||||
Also sends a signal to update the minimum grade requirement status.
|
||||
"""
|
||||
grade_summary = _summary(student, course, keep_raw_scores, course_structure)
|
||||
responses = GRADES_UPDATED.send_robust(
|
||||
sender=None,
|
||||
username=student.username,
|
||||
grade_summary=grade_summary,
|
||||
course_key=course.id,
|
||||
deadline=course.end
|
||||
)
|
||||
|
||||
for receiver, response in responses:
|
||||
log.info('Signal fired when student grade is calculated. Receiver: %s. Response: %s', receiver, response)
|
||||
|
||||
return grade_summary
|
||||
|
||||
|
||||
def _summary(student, course, keep_raw_scores, course_structure=None):
|
||||
"""
|
||||
This grades a student as quickly as possible. It returns the
|
||||
output from the course grader, augmented with the final letter
|
||||
grade. The keys in the output are:
|
||||
|
||||
- course: a CourseDescriptor
|
||||
- keep_raw_scores : if True, then value for key 'raw_scores' contains scores
|
||||
for every graded module
|
||||
|
||||
More information on the format is in the docstring for CourseGrader.
|
||||
"""
|
||||
if course_structure is None:
|
||||
course_structure = get_course_blocks(student, course.location)
|
||||
grading_context_result = grading_context(course_structure)
|
||||
scorable_locations = [block.location for block in grading_context_result['all_graded_blocks']]
|
||||
|
||||
with outer_atomic():
|
||||
scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
|
||||
|
||||
# Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
|
||||
# scores that were registered with the submissions API, which for the moment
|
||||
# means only openassessment (edx-ora2)
|
||||
# We need to import this here to avoid a circular dependency of the form:
|
||||
# XBlock --> submissions --> Django Rest Framework error strings -->
|
||||
# Django translation --> ... --> courseware --> submissions
|
||||
from submissions import api as sub_api # installed from the edx-submissions repository
|
||||
|
||||
with outer_atomic():
|
||||
submissions_scores = sub_api.get_scores(
|
||||
course.id.to_deprecated_string(),
|
||||
anonymous_id_for_user(student, course.id)
|
||||
)
|
||||
|
||||
totaled_scores, raw_scores = _calculate_totaled_scores(
|
||||
student, grading_context_result, submissions_scores, scores_client, keep_raw_scores
|
||||
)
|
||||
|
||||
with outer_atomic():
|
||||
# Grading policy might be overriden by a CCX, need to reset it
|
||||
course.set_grading_policy(course.grading_policy)
|
||||
grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)
|
||||
|
||||
# We round the grade here, to make sure that the grade is a whole percentage and
|
||||
# doesn't get displayed differently than it gets grades
|
||||
grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100
|
||||
|
||||
letter_grade = _letter_grade(course.grade_cutoffs, grade_summary['percent'])
|
||||
grade_summary['grade'] = letter_grade
|
||||
grade_summary['totaled_scores'] = totaled_scores # make this available, eg for instructor download & debugging
|
||||
if keep_raw_scores:
|
||||
# way to get all RAW scores out to instructor
|
||||
# so grader can be double-checked
|
||||
grade_summary['raw_scores'] = raw_scores
|
||||
|
||||
return grade_summary
|
||||
|
||||
|
||||
def _calculate_totaled_scores(
|
||||
student,
|
||||
grading_context_result,
|
||||
submissions_scores,
|
||||
scores_client,
|
||||
keep_raw_scores,
|
||||
):
|
||||
"""
|
||||
Returns a tuple of totaled scores and raw scores, which can be passed to the grader.
|
||||
"""
|
||||
raw_scores = []
|
||||
totaled_scores = {}
|
||||
for section_format, sections in grading_context_result['all_graded_sections'].iteritems():
|
||||
format_scores = []
|
||||
for section_info in sections:
|
||||
section = section_info['section_block']
|
||||
section_name = block_metadata_utils.display_name_with_default(section)
|
||||
|
||||
with outer_atomic():
|
||||
# Check to
|
||||
# see if any of our locations are in the scores from the submissions
|
||||
# API. If scores exist, we have to calculate grades for this section.
|
||||
should_grade_section = any(
|
||||
unicode(descendant.location) in submissions_scores
|
||||
for descendant in section_info['scored_descendants']
|
||||
)
|
||||
|
||||
if not should_grade_section:
|
||||
should_grade_section = any(
|
||||
descendant.location in scores_client
|
||||
for descendant in section_info['scored_descendants']
|
||||
)
|
||||
|
||||
# If we haven't seen a single problem in the section, we don't have
|
||||
# to grade it at all! We can assume 0%
|
||||
if should_grade_section:
|
||||
scores = []
|
||||
|
||||
for descendant in section_info['scored_descendants']:
|
||||
|
||||
(correct, total) = get_score(
|
||||
student,
|
||||
descendant,
|
||||
scores_client,
|
||||
submissions_scores,
|
||||
)
|
||||
if correct is None and total is None:
|
||||
continue
|
||||
|
||||
if settings.GENERATE_PROFILE_SCORES: # for debugging!
|
||||
if total > 1:
|
||||
correct = random.randrange(max(total - 2, 1), total + 1)
|
||||
else:
|
||||
correct = total
|
||||
|
||||
graded = descendant.graded
|
||||
if not total > 0:
|
||||
# We simply cannot grade a problem that is 12/0, because we might need it as a percentage
|
||||
graded = False
|
||||
|
||||
scores.append(
|
||||
Score(
|
||||
correct,
|
||||
total,
|
||||
graded,
|
||||
block_metadata_utils.display_name_with_default_escaped(descendant),
|
||||
descendant.location
|
||||
)
|
||||
)
|
||||
|
||||
__, graded_total = graders.aggregate_scores(scores, section_name)
|
||||
if keep_raw_scores:
|
||||
raw_scores += scores
|
||||
else:
|
||||
graded_total = Score(0.0, 1.0, True, section_name, None)
|
||||
|
||||
# Add the graded total to totaled_scores
|
||||
if graded_total.possible > 0:
|
||||
format_scores.append(graded_total)
|
||||
else:
|
||||
log.info(
|
||||
"Unable to grade a section with a total possible score of zero. " +
|
||||
str(section.location)
|
||||
)
|
||||
|
||||
totaled_scores[section_format] = format_scores
|
||||
|
||||
return totaled_scores, raw_scores
|
||||
|
||||
|
||||
def _letter_grade(grade_cutoffs, percentage):
|
||||
"""
|
||||
Returns a letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None.
|
||||
|
||||
Arguments
|
||||
- grade_cutoffs is a dictionary mapping a grade to the lowest
|
||||
possible percentage to earn that grade.
|
||||
- percentage is the final percent across all problems in a course
|
||||
"""
|
||||
|
||||
letter_grade = None
|
||||
|
||||
# Possible grades, sorted in descending order of score
|
||||
descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
|
||||
for possible_grade in descending_grades:
|
||||
if percentage >= grade_cutoffs[possible_grade]:
|
||||
letter_grade = possible_grade
|
||||
break
|
||||
|
||||
return letter_grade
|
||||
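
A quick worked example of the cutoff walk in _letter_grade (calling the private helper directly, with invented cutoffs):

    grade_cutoffs = {'A': 0.9, 'B': 0.8, 'C': 0.6}  # hypothetical policy
    # Cutoffs are scanned from highest to lowest; the first one the
    # percentage meets wins.
    print _letter_grade(grade_cutoffs, 0.83)  # 'B'   (0.83 >= 0.8, < 0.9)
    print _letter_grade(grade_cutoffs, 0.59)  # None  (below every cutoff)
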
lms/djangoapps/grades/module_grades.py (new file, 93 lines)
@@ -0,0 +1,93 @@
"""
|
||||
Functionality for module-level grades.
|
||||
"""
|
||||
# TODO The code in this file needs to be updated to use BlockTransformers. (TNL-4448)
|
||||
# TODO The code here needs to be validated - may not be calculating correctly.
|
||||
|
||||
from django.test.client import RequestFactory
|
||||
from courseware.model_data import FieldDataCache, ScoresClient
|
||||
from courseware.module_render import get_module_for_descriptor
|
||||
from opaque_keys.edx.locator import BlockUsageLocator
|
||||
from util.module_utils import yield_dynamic_descriptor_descendants
|
||||
|
||||
|
||||
def _get_mock_request(student):
|
||||
"""
|
||||
Make a fake request because grading code expects to be able to look at
|
||||
the request. We have to attach the correct user to the request before
|
||||
grading that student.
|
||||
"""
|
||||
request = RequestFactory().get('/')
|
||||
request.user = student
|
||||
return request
|
||||
|
||||
|
||||
def _calculate_score_for_modules(user_id, course, modules):
|
||||
"""
|
||||
Calculates the cumulative score (percent) of the given modules
|
||||
"""
|
||||
|
||||
# removing branch and version from exam modules locator
|
||||
# otherwise student module would not return scores since module usage keys would not match
|
||||
modules = [m for m in modules]
|
||||
locations = [
|
||||
BlockUsageLocator(
|
||||
course_key=course.id,
|
||||
block_type=module.location.block_type,
|
||||
block_id=module.location.block_id
|
||||
)
|
||||
if isinstance(module.location, BlockUsageLocator) and module.location.version
|
||||
else module.location
|
||||
for module in modules
|
||||
]
|
||||
|
||||
scores_client = ScoresClient(course.id, user_id)
|
||||
scores_client.fetch_scores(locations)
|
||||
|
||||
# Iterate over all of the exam modules to get score percentage of user for each of them
|
||||
module_percentages = []
|
||||
ignore_categories = ['course', 'chapter', 'sequential', 'vertical', 'randomize', 'library_content']
|
||||
for index, module in enumerate(modules):
|
||||
if module.category not in ignore_categories and (module.graded or module.has_score):
|
||||
module_score = scores_client.get(locations[index])
|
||||
if module_score:
|
||||
correct = module_score.correct or 0
|
||||
total = module_score.total or 1
|
||||
module_percentages.append(correct / total)
|
||||
|
||||
return sum(module_percentages) / float(len(module_percentages)) if module_percentages else 0
|
||||
|
||||
|
||||
def get_module_score(user, course, module):
|
||||
"""
|
||||
Collects all children of the given module and calculates the cumulative
|
||||
score for this set of modules for the given user.
|
||||
|
||||
Arguments:
|
||||
user (User): The user
|
||||
course (CourseModule): The course
|
||||
module (XBlock): The module
|
||||
|
||||
Returns:
|
||||
float: The cumulative score
|
||||
"""
|
||||
def inner_get_module(descriptor):
|
||||
"""
|
||||
Delegate to get_module_for_descriptor
|
||||
"""
|
||||
field_data_cache = FieldDataCache([descriptor], course.id, user)
|
||||
return get_module_for_descriptor(
|
||||
user,
|
||||
_get_mock_request(user),
|
||||
descriptor,
|
||||
field_data_cache,
|
||||
course.id,
|
||||
course=course
|
||||
)
|
||||
|
||||
modules = yield_dynamic_descriptor_descendants(
|
||||
module,
|
||||
user.id,
|
||||
inner_get_module
|
||||
)
|
||||
return _calculate_score_for_modules(user.id, course, modules)
|
||||
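
A sketch of how a caller uses get_module_score; the gating API change earlier in this diff is the real consumer, and the user/course/sequential objects here are assumed to come from the modulestore or the usual test factories:

    from lms.djangoapps.grades.module_grades import get_module_score

    # Fraction (0.0 .. 1.0) the learner earned across the subsection's
    # scored children, compared against a gating threshold.
    score = get_module_score(user, course, sequential)
    if score * 100 >= min_score:  # min_score is a percentage, as in gating.api
        print "prerequisite fulfilled"
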
lms/djangoapps/grades/progress.py (new file, 159 lines)
@@ -0,0 +1,159 @@
"""
|
||||
Progress Summary of a learner's course grades.
|
||||
"""
|
||||
from course_blocks.api import get_course_blocks
|
||||
from courseware.model_data import ScoresClient
|
||||
from openedx.core.lib.gating import api as gating_api
|
||||
from student.models import anonymous_id_for_user
|
||||
from util.db import outer_atomic
|
||||
from xmodule import graders, block_metadata_utils
|
||||
from xmodule.graders import Score
|
||||
|
||||
from .scores import get_score, possibly_scored
|
||||
|
||||
|
||||
class ProgressSummary(object):
|
||||
"""
|
||||
Wrapper class for the computation of a user's scores across a course.
|
||||
|
||||
Attributes
|
||||
chapters: a summary of all sections with problems in the course. It is
|
||||
organized as an array of chapters, each containing an array of sections,
|
||||
each containing an array of scores. This contains information for graded
|
||||
and ungraded problems, and is good for displaying a course summary with
|
||||
due dates, etc.
|
||||
|
||||
weighted_scores: a dictionary mapping module locations to weighted Score
|
||||
objects.
|
||||
|
||||
locations_to_children: a function mapping locations to their
|
||||
direct descendants.
|
||||
"""
|
||||
def __init__(self, chapters=None, weighted_scores=None, locations_to_children=None):
|
||||
self.chapters = chapters
|
||||
self.weighted_scores = weighted_scores
|
||||
self.locations_to_children = locations_to_children
|
||||
|
||||
def score_for_module(self, location):
|
||||
"""
|
||||
Calculate the aggregate weighted score for any location in the course.
|
||||
This method returns a tuple containing (earned_score, possible_score).
|
||||
|
||||
If the location is of 'problem' type, this method will return the
|
||||
possible and earned scores for that problem. If the location refers to a
|
||||
composite module (a vertical or section ) the scores will be the sums of
|
||||
all scored problems that are children of the chosen location.
|
||||
"""
|
||||
if location in self.weighted_scores:
|
||||
score = self.weighted_scores[location]
|
||||
return score.earned, score.possible
|
||||
children = self.locations_to_children[location]
|
||||
earned = 0.0
|
||||
possible = 0.0
|
||||
for child in children:
|
||||
child_earned, child_possible = self.score_for_module(child)
|
||||
earned += child_earned
|
||||
possible += child_possible
|
||||
return earned, possible
|
||||
|
||||
|
||||
def summary(student, course, course_structure=None):
|
||||
"""
|
||||
This pulls a summary of all problems in the course.
|
||||
|
||||
Returns
|
||||
- courseware_summary is a summary of all sections with problems in the course.
|
||||
It is organized as an array of chapters, each containing an array of sections,
|
||||
each containing an array of scores. This contains information for graded and
|
||||
ungraded problems, and is good for displaying a course summary with due dates,
|
||||
etc.
|
||||
- None if the student does not have access to load the course module.
|
||||
|
||||
Arguments:
|
||||
student: A User object for the student to grade
|
||||
course: A Descriptor containing the course to grade
|
||||
|
||||
"""
|
||||
if course_structure is None:
|
||||
course_structure = get_course_blocks(student, course.location)
|
||||
if not len(course_structure):
|
||||
return ProgressSummary()
|
||||
scorable_locations = [block_key for block_key in course_structure if possibly_scored(block_key)]
|
||||
|
||||
with outer_atomic():
|
||||
scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)
|
||||
|
||||
# We need to import this here to avoid a circular dependency of the form:
|
||||
# XBlock --> submissions --> Django Rest Framework error strings -->
|
||||
# Django translation --> ... --> courseware --> submissions
|
||||
from submissions import api as sub_api # installed from the edx-submissions repository
|
||||
with outer_atomic():
|
||||
submissions_scores = sub_api.get_scores(
|
||||
unicode(course.id), anonymous_id_for_user(student, course.id)
|
||||
)
|
||||
|
||||
# Check for gated content
|
||||
gated_content = gating_api.get_gated_content(course, student)
|
||||
|
||||
chapters = []
|
||||
locations_to_weighted_scores = {}
|
||||
|
||||
for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
|
||||
chapter = course_structure[chapter_key]
|
||||
sections = []
|
||||
for section_key in course_structure.get_children(chapter_key):
|
||||
if unicode(section_key) in gated_content:
|
||||
continue
|
||||
|
||||
section = course_structure[section_key]
|
||||
|
||||
graded = getattr(section, 'graded', False)
|
||||
scores = []
|
||||
|
||||
for descendant_key in course_structure.post_order_traversal(
|
||||
filter_func=possibly_scored,
|
||||
start_node=section_key,
|
||||
):
|
||||
descendant = course_structure[descendant_key]
|
||||
|
||||
(correct, total) = get_score(
|
||||
student,
|
||||
descendant,
|
||||
scores_client,
|
||||
submissions_scores,
|
||||
)
|
||||
if correct is None and total is None:
|
||||
continue
|
||||
|
||||
weighted_location_score = Score(
|
||||
correct,
|
||||
total,
|
||||
graded,
|
||||
block_metadata_utils.display_name_with_default_escaped(descendant),
|
||||
descendant.location
|
||||
)
|
||||
|
||||
scores.append(weighted_location_score)
|
||||
locations_to_weighted_scores[descendant.location] = weighted_location_score
|
||||
|
||||
escaped_section_name = block_metadata_utils.display_name_with_default_escaped(section)
|
||||
section_total, _ = graders.aggregate_scores(scores, escaped_section_name)
|
||||
|
||||
sections.append({
|
||||
'display_name': escaped_section_name,
|
||||
'url_name': block_metadata_utils.url_name_for_block(section),
|
||||
'scores': scores,
|
||||
'section_total': section_total,
|
||||
'format': getattr(section, 'format', ''),
|
||||
'due': getattr(section, 'due', None),
|
||||
'graded': graded,
|
||||
})
|
||||
|
||||
chapters.append({
|
||||
'course': course.display_name_with_default_escaped,
|
||||
'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
|
||||
'url_name': block_metadata_utils.url_name_for_block(chapter),
|
||||
'sections': sections
|
||||
})
|
||||
|
||||
return ProgressSummary(chapters, locations_to_weighted_scores, course_structure.get_children)
|
||||
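
A toy walk-through of the recursion in score_for_module; every key and point value below is invented:

    # A vertical containing two scored problems worth 2/3 and 1/1; summing
    # the children gives the vertical's aggregate score.
    children_map = {'vertical1': ['p1', 'p2']}
    ps = ProgressSummary(
        chapters=[],
        weighted_scores={
            'p1': Score(2, 3, True, 'p1', 'p1'),
            'p2': Score(1, 1, True, 'p2', 'p2'),
        },
        locations_to_children=children_map.get,  # callable, like course_structure.get_children
    )
    print ps.score_for_module('vertical1')  # (3.0, 4.0)
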
lms/djangoapps/grades/scores.py (new file, 89 lines)
@@ -0,0 +1,89 @@
"""
|
||||
Functionality for problem scores.
|
||||
"""
|
||||
from openedx.core.lib.cache_utils import memoized
|
||||
from xblock.core import XBlock
|
||||
from .transformer import GradesTransformer
|
||||
|
||||
|
||||
@memoized
|
||||
def block_types_with_scores():
|
||||
"""
|
||||
Returns the block types that could have a score.
|
||||
|
||||
Something might be a scored item if it is capable of storing a score
|
||||
(has_score=True). We also have to include anything that can have children,
|
||||
since those children might have scores. We can avoid things like Videos,
|
||||
which have state but cannot ever impact someone's grade.
|
||||
"""
|
||||
return frozenset(
|
||||
cat for (cat, xblock_class) in XBlock.load_classes() if (
|
||||
getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def possibly_scored(usage_key):
|
||||
"""
|
||||
Returns whether the given block could impact grading (i.e. scored, or has children).
|
||||
"""
|
||||
return usage_key.block_type in block_types_with_scores()
|
||||
|
||||
|
||||
def weighted_score(raw_correct, raw_total, weight):
|
||||
"""Return a tuple that represents the weighted (correct, total) score."""
|
||||
# If there is no weighting, or weighting can't be applied, return input.
|
||||
if weight is None or raw_total == 0:
|
||||
return (raw_correct, raw_total)
|
||||
return (float(raw_correct) * weight / raw_total, float(weight))
|
||||
|
||||
|
||||
def get_score(user, block, scores_client, submissions_scores_cache):
|
||||
"""
|
||||
Return the score for a user on a problem, as a tuple (correct, total).
|
||||
e.g. (5,7) if you got 5 out of 7 points.
|
||||
|
||||
If this problem doesn't have a score, or we couldn't load it, returns (None,
|
||||
None).
|
||||
|
||||
user: a Student object
|
||||
block: a BlockStructure's BlockData object
|
||||
scores_client: an initialized ScoresClient
|
||||
submissions_scores_cache: A dict of location names to (earned, possible) point tuples.
|
||||
If an entry is found in this cache, it takes precedence.
|
||||
"""
|
||||
submissions_scores_cache = submissions_scores_cache or {}
|
||||
|
||||
if not user.is_authenticated():
|
||||
return (None, None)
|
||||
|
||||
location_url = unicode(block.location)
|
||||
if location_url in submissions_scores_cache:
|
||||
return submissions_scores_cache[location_url]
|
||||
|
||||
if not getattr(block, 'has_score', False):
|
||||
# These are not problems, and do not have a score
|
||||
return (None, None)
|
||||
|
||||
# Check the score that comes from the ScoresClient (out of CSM).
|
||||
# If an entry exists and has a total associated with it, we trust that
|
||||
# value. This is important for cases where a student might have seen an
|
||||
# older version of the problem -- they're still graded on what was possible
|
||||
# when they tried the problem, not what it's worth now.
|
||||
score = scores_client.get(block.location)
|
||||
if score and score.total is not None:
|
||||
# We have a valid score, just use it.
|
||||
correct = score.correct if score.correct is not None else 0.0
|
||||
total = score.total
|
||||
else:
|
||||
# This means we don't have a valid score entry and we don't have a
|
||||
# cached_max_score on hand. We know they've earned 0.0 points on this.
|
||||
correct = 0.0
|
||||
total = block.transformer_data[GradesTransformer].max_score
|
||||
|
||||
# Problem may be an error module (if something in the problem builder failed)
|
||||
# In which case total might be None
|
||||
if total is None:
|
||||
return (None, None)
|
||||
|
||||
return weighted_score(correct, total, block.weight)
|
||||
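
weighted_score is pure arithmetic, so a few worked values cover it:

    print weighted_score(5, 7, None)  # (5, 7)        -- no weight, raw score unchanged
    print weighted_score(5, 7, 14.0)  # (10.0, 14.0)  -- 5/7 of a 14-point weight
    print weighted_score(3, 0, 14.0)  # (3, 0)        -- a 0-point problem can't be weighted
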
lms/djangoapps/grades/signals.py (new file, 108 lines)
@@ -0,0 +1,108 @@
"""
|
||||
Grades related signals.
|
||||
"""
|
||||
from django.dispatch import receiver, Signal
|
||||
from logging import getLogger
|
||||
from student.models import user_by_anonymous_id
|
||||
from submissions.models import score_set, score_reset
|
||||
|
||||
|
||||
log = getLogger(__name__)
|
||||
|
||||
|
||||
# Signal that indicates that a user's score for a problem has been updated.
|
||||
# This signal is generated when a scoring event occurs either within the core
|
||||
# platform or in the Submissions module. Note that this signal will be triggered
|
||||
# regardless of the new and previous values of the score (i.e. it may be the
|
||||
# case that this signal is generated when a user re-attempts a problem but
|
||||
# receives the same score).
|
||||
SCORE_CHANGED = Signal(
|
||||
providing_args=[
|
||||
'points_possible', # Maximum score available for the exercise
|
||||
'points_earned', # Score obtained by the user
|
||||
'user_id', # Integer User ID
|
||||
'course_id', # Unicode string representing the course
|
||||
'usage_id' # Unicode string indicating the courseware instance
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@receiver(score_set)
|
||||
def submissions_score_set_handler(sender, **kwargs): # pylint: disable=unused-argument
|
||||
"""
|
||||
Consume the score_set signal defined in the Submissions API, and convert it
|
||||
to a SCORE_CHANGED signal defined in this module. Converts the unicode keys
|
||||
for user, course and item into the standard representation for the
|
||||
SCORE_CHANGED signal.
|
||||
|
||||
This method expects that the kwargs dictionary will contain the following
|
||||
entries (See the definition of score_set):
|
||||
- 'points_possible': integer,
|
||||
- 'points_earned': integer,
|
||||
- 'anonymous_user_id': unicode,
|
||||
- 'course_id': unicode,
|
||||
- 'item_id': unicode
|
||||
"""
|
||||
points_possible = kwargs.get('points_possible', None)
|
||||
points_earned = kwargs.get('points_earned', None)
|
||||
course_id = kwargs.get('course_id', None)
|
||||
usage_id = kwargs.get('item_id', None)
|
||||
user = None
|
||||
if 'anonymous_user_id' in kwargs:
|
||||
user = user_by_anonymous_id(kwargs.get('anonymous_user_id'))
|
||||
|
||||
# If any of the kwargs were missing, at least one of the following values
|
||||
# will be None.
|
||||
if all((user, points_possible, points_earned, course_id, usage_id)):
|
||||
SCORE_CHANGED.send(
|
||||
sender=None,
|
||||
points_possible=points_possible,
|
||||
points_earned=points_earned,
|
||||
user_id=user.id,
|
||||
course_id=course_id,
|
||||
usage_id=usage_id
|
||||
)
|
||||
else:
|
||||
log.exception(
|
||||
u"Failed to process score_set signal from Submissions API. "
|
||||
"points_possible: %s, points_earned: %s, user: %s, course_id: %s, "
|
||||
"usage_id: %s", points_possible, points_earned, user, course_id, usage_id
|
||||
)
|
||||
|
||||
|
||||
@receiver(score_reset)
|
||||
def submissions_score_reset_handler(sender, **kwargs): # pylint: disable=unused-argument
|
||||
"""
|
||||
Consume the score_reset signal defined in the Submissions API, and convert
|
||||
it to a SCORE_CHANGED signal indicating that the score has been set to 0/0.
|
||||
Converts the unicode keys for user, course and item into the standard
|
||||
representation for the SCORE_CHANGED signal.
|
||||
|
||||
This method expects that the kwargs dictionary will contain the following
|
||||
entries (See the definition of score_reset):
|
||||
- 'anonymous_user_id': unicode,
|
||||
- 'course_id': unicode,
|
||||
- 'item_id': unicode
|
||||
"""
|
||||
course_id = kwargs.get('course_id', None)
|
||||
usage_id = kwargs.get('item_id', None)
|
||||
user = None
|
||||
if 'anonymous_user_id' in kwargs:
|
||||
user = user_by_anonymous_id(kwargs.get('anonymous_user_id'))
|
||||
|
||||
# If any of the kwargs were missing, at least one of the following values
|
||||
# will be None.
|
||||
if all((user, course_id, usage_id)):
|
||||
SCORE_CHANGED.send(
|
||||
sender=None,
|
||||
points_possible=0,
|
||||
points_earned=0,
|
||||
user_id=user.id,
|
||||
course_id=course_id,
|
||||
usage_id=usage_id
|
||||
)
|
||||
else:
|
||||
log.exception(
|
||||
u"Failed to process score_reset signal from Submissions API. "
|
||||
"user: %s, course_id: %s, usage_id: %s", user, course_id, usage_id
|
||||
)
|
||||
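
A sketch of a downstream receiver for SCORE_CHANGED (the handler below is hypothetical; the gating and lti_provider hook-ups elsewhere in this diff are the real consumers):

    from django.dispatch import receiver
    from lms.djangoapps.grades.signals import SCORE_CHANGED

    @receiver(SCORE_CHANGED)
    def log_score_change(sender, **kwargs):  # pylint: disable=unused-argument
        # kwargs carry the providing_args documented above.
        print "user %s scored %s/%s on %s" % (
            kwargs['user_id'], kwargs['points_earned'],
            kwargs['points_possible'], kwargs['usage_id']
        )
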
lms/djangoapps/grades/tests/__init__.py (new, empty file)
@@ -3,19 +3,12 @@ Test grade calculation.
"""
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory

from mock import patch, MagicMock
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator

from courseware.grades import (
    grade,
    iterate_grades_for,
    ProgressSummary,
    get_module_score
)
from courseware.module_render import get_module
from courseware.model_data import FieldDataCache, set_score
from courseware.tests.helpers import (
@@ -28,6 +21,11 @@ from student.models import CourseEnrollment
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase

from .. import course_grades
from ..course_grades import summary as grades_summary
from ..module_grades import get_module_score
from ..progress import ProgressSummary


def _grade_with_errors(student, course, keep_raw_scores=False):
    """This fake grade method will throw exceptions for student3 and
@@ -40,7 +38,7 @@ def _grade_with_errors(student, course, keep_raw_scores=False):
    if student.username in ['student3', 'student4']:
        raise Exception("I don't like {}".format(student.username))

    return grade(student, course, keep_raw_scores=keep_raw_scores)
    return grades_summary(student, course, keep_raw_scores=keep_raw_scores)


@attr('shard_1')
@@ -76,7 +74,7 @@ class TestGradeIteration(SharedModuleStoreTestCase):
    def test_empty_student_list(self):
        """If we don't pass in any students, it should return a zero-length
        iterator, but it shouldn't error."""
        gradeset_results = list(iterate_grades_for(self.course.id, []))
        gradeset_results = list(course_grades.iterate_grades_for(self.course.id, []))
        self.assertEqual(gradeset_results, [])

    def test_nonexistent_course(self):
@@ -84,7 +82,7 @@ class TestGradeIteration(SharedModuleStoreTestCase):
        should be raised. This is a horrible crossing of abstraction boundaries
        and should be fixed, but for now we're just testing the behavior. :-("""
        with self.assertRaises(Http404):
            gradeset_results = iterate_grades_for(SlashSeparatedCourseKey("I", "dont", "exist"), [])
            gradeset_results = course_grades.iterate_grades_for(SlashSeparatedCourseKey("I", "dont", "exist"), [])
            gradeset_results.next()

    def test_all_empty_grades(self):
@@ -95,7 +93,7 @@ class TestGradeIteration(SharedModuleStoreTestCase):
        self.assertIsNone(gradeset['grade'])
        self.assertEqual(gradeset['percent'], 0.0)

    @patch('courseware.grades.grade', _grade_with_errors)
    @patch('lms.djangoapps.grades.course_grades.summary', _grade_with_errors)
    def test_grading_exception(self):
        """Test that we correctly capture exception messages that bubble up from
        grading. Note that we only see errors at this level if the grading
@@ -136,7 +134,7 @@ class TestGradeIteration(SharedModuleStoreTestCase):
        students_to_gradesets = {}
        students_to_errors = {}

        for student, gradeset, err_msg in iterate_grades_for(course_id, students):
        for student, gradeset, err_msg in course_grades.iterate_grades_for(course_id, students):
            students_to_gradesets[student] = gradeset
            if err_msg:
                students_to_errors[student] = err_msg
@@ -144,30 +142,6 @@ class TestGradeIteration(SharedModuleStoreTestCase):
        return students_to_gradesets, students_to_errors


class TestFieldDataCacheScorableLocations(SharedModuleStoreTestCase):
    """
    Make sure we can filter the locations we pull back student state for via
    the FieldDataCache.
    """
    @classmethod
    def setUpClass(cls):
        super(TestFieldDataCacheScorableLocations, cls).setUpClass()
        cls.course = CourseFactory.create()
        chapter = ItemFactory.create(category='chapter', parent=cls.course)
        sequential = ItemFactory.create(category='sequential', parent=chapter)
        vertical = ItemFactory.create(category='vertical', parent=sequential)
        ItemFactory.create(category='video', parent=vertical)
        ItemFactory.create(category='html', parent=vertical)
        ItemFactory.create(category='discussion', parent=vertical)
        ItemFactory.create(category='problem', parent=vertical)

    def setUp(self):
        super(TestFieldDataCacheScorableLocations, self).setUp()
        self.student = UserFactory.create()

        CourseEnrollment.enroll(self.student, self.course.id)


class TestProgressSummary(TestCase):
    """
    Test the method that calculates the score for a given block based on the
@@ -5,7 +5,8 @@ Tests for the score change signals defined in the courseware models module.
from django.test import TestCase
from mock import patch, MagicMock

from courseware.models import submissions_score_set_handler, submissions_score_reset_handler
from ..signals import submissions_score_set_handler, submissions_score_reset_handler


SUBMISSION_SET_KWARGS = {
    'points_possible': 10,
@@ -15,6 +16,7 @@ SUBMISSION_SET_KWARGS = {
    'item_id': 'i4x://org/course/usage/123456'
}


SUBMISSION_RESET_KWARGS = {
    'anonymous_user_id': 'anonymous_id',
    'course_id': 'CourseID',
@@ -35,10 +37,10 @@ class SubmissionSignalRelayTest(TestCase):
        Configure mocks for all the dependencies of the render method
        """
        super(SubmissionSignalRelayTest, self).setUp()
        self.signal_mock = self.setup_patch('courseware.models.SCORE_CHANGED.send', None)
        self.signal_mock = self.setup_patch('lms.djangoapps.grades.signals.SCORE_CHANGED.send', None)
        self.user_mock = MagicMock()
        self.user_mock.id = 42
        self.get_user_mock = self.setup_patch('courseware.models.user_by_anonymous_id', self.user_mock)
        self.get_user_mock = self.setup_patch('lms.djangoapps.grades.signals.user_by_anonymous_id', self.user_mock)

    def setup_patch(self, function_name, return_value):
        """
@@ -50,15 +52,6 @@ class SubmissionSignalRelayTest(TestCase):
        self.addCleanup(new_patch.stop)
        return mock

    def setup_patch_with_mock(self, function_name, mock):
        """
        Patch a function with a given mock
        """
        new_patch = patch(function_name, new=mock)
        new_patch.start()
        self.addCleanup(new_patch.stop)
        return mock

    def test_score_set_signal_handler(self):
        """
        Ensure that, on receipt of a score_set signal from the Submissions API,
@@ -103,7 +96,7 @@ class SubmissionSignalRelayTest(TestCase):
        that has an invalid user ID, the courseware model does not generate a
        signal.
        """
        self.get_user_mock = self.setup_patch('courseware.models.user_by_anonymous_id', None)
        self.get_user_mock = self.setup_patch('lms.djangoapps.grades.signals.user_by_anonymous_id', None)
        submissions_score_set_handler(None, **SUBMISSION_SET_KWARGS)
        self.signal_mock.assert_not_called()
@@ -152,6 +145,6 @@ class SubmissionSignalRelayTest(TestCase):
        that has an invalid user ID, the courseware model does not generate a
        signal.
        """
        self.get_user_mock = self.setup_patch('courseware.models.user_by_anonymous_id', None)
        self.get_user_mock = self.setup_patch('lms.djangoapps.grades.signals.user_by_anonymous_id', None)
        submissions_score_reset_handler(None, **SUBMISSION_RESET_KWARGS)
        self.signal_mock.assert_not_called()
@@ -13,7 +13,7 @@ from xmodule.modulestore.tests.factories import check_mongo_calls
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.course_blocks.transformers.tests.helpers import CourseStructureTestCase
from openedx.core.djangoapps.content.block_structure.api import get_cache
from ..transformers.grades import GradesTransformer
from ..transformer import GradesTransformer


class GradesTransformerTestCase(CourseStructureTestCase):
lms/djangoapps/grades/tests/utils.py (new file, 15 lines)
@@ -0,0 +1,15 @@
"""
|
||||
Utilities for grades related tests
|
||||
"""
|
||||
from contextlib import contextmanager
|
||||
from mock import patch
|
||||
|
||||
|
||||
@contextmanager
|
||||
def mock_passing_grade(grade_pass='Pass', percent=0.75):
|
||||
"""
|
||||
Mock the grading function to always return a passing grade.
|
||||
"""
|
||||
with patch('lms.djangoapps.grades.course_grades.summary') as mock_grade:
|
||||
mock_grade.return_value = {'grade': grade_pass, 'percent': percent}
|
||||
yield
|
||||
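
Typical usage of this helper, as in the mobile API test further down this diff:

    from lms.djangoapps.grades.tests.utils import mock_passing_grade

    # Any code under the context manager sees a passing grade summary
    # without running real grading.
    with mock_passing_grade(grade_pass='A', percent=0.95):
        generate_user_certificates(self.user, self.course.id)
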
@@ -3,10 +3,10 @@ Grades Transformer
"""
from django.test.client import RequestFactory

from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer
from openedx.core.djangoapps.util.user_utils import SystemUser
from .. import module_render
from courseware.model_data import FieldDataCache


class GradesTransformer(BlockStructureTransformer):
@@ -98,5 +98,5 @@ class GradesTransformer(BlockStructureTransformer):
        for block_locator in block_structure.post_order_traversal():
            block = block_structure.get_xblock(block_locator)
            if getattr(block, 'has_score', False):
                module = module_render.get_module_for_descriptor(user, request, block, cache, course_key)
                module = get_module_for_descriptor(user, request, block, cache, course_key)
                yield module
@@ -10,9 +10,10 @@ import json
import time

from json import JSONEncoder
from courseware import grades, models
from courseware import models
from courseware.courses import get_course_by_id
from django.contrib.auth.models import User
from lms.djangoapps.grades import course_grades
from opaque_keys import OpaqueKey
from opaque_keys.edx.keys import UsageKey
from xmodule.graders import Score
@@ -50,7 +51,7 @@ def offline_grade_calculation(course_key):
    request.user = student
    request.session = {}

    gradeset = grades.grade(student, course, keep_raw_scores=True)
    gradeset = course_grades.summary(student, course, keep_raw_scores=True)
    # Convert Score namedtuples to dicts:
    totaled_scores = gradeset['totaled_scores']
    for section in totaled_scores:
@@ -89,7 +90,7 @@ def student_grades(student, request, course, keep_raw_scores=False, use_offline=
    as use_offline. If use_offline is True then this will look for an offline computed gradeset in the DB.
    '''
    if not use_offline:
        return grades.grade(student, course, keep_raw_scores=keep_raw_scores)
        return course_grades.summary(student, course, keep_raw_scores=keep_raw_scores)

    try:
        ocg = models.OfflineComputedGrade.objects.get(user=student, course_id=course.id)

@@ -57,7 +57,7 @@ class TestOfflineGradeCalc(ModuleStoreTestCase):
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course.id)

        patcher = patch('courseware.grades.grade', new=mock_grade)
        patcher = patch('lms.djangoapps.grades.course_grades.summary', new=mock_grade)
        patcher.start()
        self.addCleanup(patcher.stop)
@@ -102,6 +102,6 @@ class TestOfflineGradeCalc(ModuleStoreTestCase):
    def test_student_grades(self):
        """ Test that the data returned by student_grades() and grades.grade() match """
        offline_grade_calculation(self.course.id)
        with patch('courseware.grades.grade', side_effect=AssertionError('Should not re-grade')):
        with patch('lms.djangoapps.grades.course_grades.summary', side_effect=AssertionError('Should not re-grade')):
            result = student_grades(self.user, None, self.course, use_offline=True)
        self.assertEqual(result, mock_grade(self.user, self.course))

@@ -24,7 +24,7 @@ from courseware.models import StudentModule
from certificates.models import GeneratedCertificate
from django.db.models import Count
from certificates.models import CertificateStatuses
from courseware.grades import grading_context_for_course
from grades.context import grading_context_for_course


STUDENT_FEATURES = ('id', 'username', 'first_name', 'last_name', 'is_staff', 'email')

@@ -46,7 +46,7 @@ from certificates.models import (
|
||||
)
|
||||
from certificates.api import generate_user_certificates
|
||||
from courseware.courses import get_course_by_id, get_problems_in_section
|
||||
from courseware.grades import iterate_grades_for
|
||||
from grades.course_grades import iterate_grades_for
|
||||
from courseware.models import StudentModule
|
||||
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
|
||||
from courseware.module_render import get_module_for_descriptor_internal
|
||||
|
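
Only the import path changes here. A hedged sketch of the consumer loop, assuming iterate_grades_for keeps its historical shape of yielding one (student, gradeset, error-message) triple per enrollee; that shape is not shown in this diff:

from grades.course_grades import iterate_grades_for

for student, gradeset, err_msg in iterate_grades_for(course_id, students):
    if err_msg:
        # Grading failed for this student; skip the row.
        continue
    print student.username, gradeset['percent']
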
@@ -7,8 +7,8 @@ from django.contrib.auth.models import User
from django.dispatch import receiver
import logging

from courseware.grades import get_weighted_scores
from courseware.models import SCORE_CHANGED
from lms.djangoapps.grades import progress
from lms.djangoapps.grades.signals import SCORE_CHANGED
from lms import CELERY_APP
from lti_provider.models import GradedAssignment
import lti_provider.outcomes as outcomes
@@ -23,7 +23,7 @@ log = logging.getLogger("edx.lti_provider")
def score_changed_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Consume signals that indicate score changes. See the definition of
courseware.models.SCORE_CHANGED for a description of the signal.
SCORE_CHANGED for a description of the signal.
"""
points_possible = kwargs.get('points_possible', None)
points_earned = kwargs.get('points_earned', None)
@@ -109,7 +109,7 @@ def send_composite_outcome(user_id, course_id, assignment_id, version):
mapped_usage_key = assignment.usage_key.map_into_course(course_key)
user = User.objects.get(id=user_id)
course = modulestore().get_course(course_key, depth=0)
progress_summary = get_weighted_scores(user, course)
progress_summary = progress.summary(user, course)
earned, possible = progress_summary.score_for_module(mapped_usage_key)
if possible == 0:
weighted_score = 0
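
The possible == 0 guard prevents a divide-by-zero when an assignment carries no points; the else branch falls outside this hunk, but presumably normalizes earned/possible into a 0..1 ratio. A sketch assuming that shape:

def weighted_score_for(progress_summary, usage_key):
    # Fractional score for one graded block, guarded against empty maxima.
    earned, possible = progress_summary.score_for_module(usage_key)
    if possible == 0:
        return 0
    return float(earned) / float(possible)
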
@@ -101,7 +101,7 @@ class SendCompositeOutcomeTest(BaseOutcomeTest):
)
self.weighted_scores = MagicMock()
self.weighted_scores_mock = self.setup_patch(
'lti_provider.tasks.get_weighted_scores', self.weighted_scores
'lti_provider.tasks.progress.summary', self.weighted_scores
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)

@@ -25,6 +25,7 @@ from courseware.access_response import (
VisibilityError,
)
from course_modes.models import CourseMode
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from openedx.core.lib.courses import course_image_url
from student.models import CourseEnrollment
from util.milestones_helpers import set_prerequisite_courses
@@ -247,8 +248,7 @@ class TestUserEnrollmentApi(UrlResetMixin, MobileAPITestCase, MobileAuthUserTest
self.course.cert_html_view_enabled = True
self.store.update_item(self.course, self.user.id)

with patch('courseware.grades.grade') as mock_grade:
mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
with mock_passing_grade():
generate_user_certificates(self.user, self.course.id)

response = self.api_response()
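
The two-line hand-rolled patch is replaced by the shared mock_passing_grade helper, which wraps the same canned result in one context manager. A usage sketch; any keyword arguments for customizing the grade are an assumption, since the diff shows only the bare call:

from lms.djangoapps.grades.tests.utils import mock_passing_grade

with mock_passing_grade():
    # Anything graded inside this block reports a passing result.
    generate_user_certificates(self.user, self.course.id)
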
@@ -1890,6 +1890,7 @@ INSTALLED_APPS = (
'openedx.core.djangoapps.course_groups',
'bulk_email',
'branding',
'grades',

# Student support tools
'support',

@@ -5,7 +5,7 @@ This module contains all signals.
from django.dispatch import Signal


# Signal that fires when a user is graded (in lms/courseware/grades.py)
# Signal that fires when a user is graded (in lms/grades/course_grades.py)
GRADES_UPDATED = Signal(providing_args=["username", "grade_summary", "course_key", "deadline"])

# Signal that fires when a user is awarded a certificate in a course (in the certificates django app)
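
GRADES_UPDATED is a plain django.dispatch.Signal, so consumers attach receivers keyed on the providing_args above. A sketch of a receiver; the handler name and the import path for the signal are assumptions:

from django.dispatch import receiver

from lms.djangoapps.grades.signals import GRADES_UPDATED  # path assumed

@receiver(GRADES_UPDATED)
def on_grades_updated(sender, username=None, grade_summary=None,
                      course_key=None, deadline=None, **kwargs):
    # grade_summary carries the dict produced by course_grades.summary().
    pass
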
setup.py
@@ -52,7 +52,7 @@ setup(
"hidden_content = lms.djangoapps.course_blocks.transformers.hidden_content:HiddenContentTransformer",
"course_blocks_api = lms.djangoapps.course_api.blocks.transformers.blocks_api:BlocksAPITransformer",
"proctored_exam = lms.djangoapps.course_api.blocks.transformers.proctored_exam:ProctoredExamTransformer",
"grades = lms.djangoapps.courseware.transformers.grades:GradesTransformer",
"grades = lms.djangoapps.grades.transformer:GradesTransformer",
],
}
)
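
These strings are standard setuptools entry points, so the relocated GradesTransformer is discovered at runtime rather than imported by a hard-coded path. A sketch of the lookup; the entry-point group name is an assumption, since it sits above the visible hunk:

from pkg_resources import iter_entry_points

# Group name assumed for illustration; only the entries appear in the hunk.
for entry_point in iter_entry_points('openedx.block_structure_transformer'):
    transformer_class = entry_point.load()
    print entry_point.name, transformer_class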