diff --git a/cms/djangoapps/contentstore/tests/test_transcripts_utils.py b/cms/djangoapps/contentstore/tests/test_transcripts_utils.py index 1f2cdef395..3cdf01e543 100644 --- a/cms/djangoapps/contentstore/tests/test_transcripts_utils.py +++ b/cms/djangoapps/contentstore/tests/test_transcripts_utils.py @@ -9,7 +9,6 @@ from mock import patch, Mock from django.test.utils import override_settings from django.conf import settings from django.utils import translation -from django.utils.crypto import get_random_string from nose.plugins.skip import SkipTest @@ -231,17 +230,6 @@ class TestDownloadYoutubeSubs(SharedModuleStoreTestCase): self.assertEqual(html5_ids[2], 'baz.1.4') self.assertEqual(html5_ids[3], 'foo') - def test_html5_id_length(self): - """ - Test that html5_id is parsed with length less than 255, as html5 ids are - used as name for transcript objects and ultimately as filename while creating - file for transcript at the time of exporting a course. - Filename can't be longer than 255 characters. - 150 chars is agreed length. 
- """ - html5_ids = transcripts_utils.get_html5_ids([get_random_string(255)]) - self.assertEqual(len(html5_ids[0]), 150) - @patch('xmodule.video_module.transcripts_utils.requests.get') def test_fail_downloading_subs(self, mock_get): diff --git a/cms/static/js/spec/video/transcripts/utils_spec.js b/cms/static/js/spec/video/transcripts/utils_spec.js index b5f448989d..f9ca4e6ae9 100644 --- a/cms/static/js/spec/video/transcripts/utils_spec.js +++ b/cms/static/js/spec/video/transcripts/utils_spec.js @@ -215,42 +215,6 @@ function($, _, Utils, _str) { }); }); }); - - describe('Too long arguments ', function() { - var longFileName = (function() { - var text = ''; - var possibleChars = 'abcdefghijklmnopqrstuvwxyz'; - /* eslint vars-on-top: 0 */ - for (var i = 0; i < 255; i++) { - text += possibleChars.charAt(Math.floor(Math.random() * possibleChars.length)); - } - return text; - }()), - html5LongUrls = (function(videoName) { - var links = [ - 'http://somelink.com/%s?param=1¶m=2#hash', - 'http://somelink.com/%s#hash', - 'http://somelink.com/%s?param=1¶m=2', - 'http://somelink.com/%s', - 'ftp://somelink.com/%s', - 'https://somelink.com/%s', - 'https://somelink.com/sub/sub/%s', - 'http://cdn.somecdn.net/v/%s', - 'somelink.com/%s', - '%s' - ]; - return $.map(links, function(link) { - return _str.sprintf(link, videoName); - }); - }(longFileName)); - - $.each(html5LongUrls, function(index, link) { - it(link, function() { - var result = Utils.parseHTML5Link(link); - expect(result.video.length).toBe(150); - }); - }); - }); }); it('Method: getYoutubeLink', function() { diff --git a/cms/static/js/views/video/transcripts/utils.js b/cms/static/js/views/video/transcripts/utils.js index 4c2d74fc83..4945d87801 100644 --- a/cms/static/js/views/video/transcripts/utils.js +++ b/cms/static/js/views/video/transcripts/utils.js @@ -110,7 +110,6 @@ define(['jquery', 'underscore', 'jquery.ajaxQueue'], function($) { */ var _videoLinkParser = (function() { var cache = {}; - var maxVideoNameLength = 
150; return function(url) { if (typeof url !== 'string') { @@ -130,10 +129,7 @@ define(['jquery', 'underscore', 'jquery.ajaxQueue'], function($) { match = link.pathname.match(/\/{1}([^\/]+)\.([^\/]+)$/); if (match) { cache[url] = { - /* avoid too long video name, as it will be used as filename for video's transcript - and a filename can not be more that 255 chars, limiting here to 150. - */ - video: match[1].slice(0, maxVideoNameLength), + video: match[1], type: match[2] }; } else { @@ -143,7 +139,7 @@ define(['jquery', 'underscore', 'jquery.ajaxQueue'], function($) { match = link.pathname.match(/\/{1}([^\/\.]+)$/); if (match) { cache[url] = { - video: match[1].slice(0, maxVideoNameLength), + video: match[1], type: 'other' }; } diff --git a/cms/templates/container.html b/cms/templates/container.html index 356e271897..ebabe6764e 100644 --- a/cms/templates/container.html +++ b/cms/templates/container.html @@ -144,7 +144,7 @@ from openedx.core.djangolib.markup import HTML, Text
${_("Location ID")}

${unit.location.name} - Tip: ${_("Use this ID when you create links to this unit from other course content. You enter the ID in the URL field.")} + Tip: ${_('To create a link to this unit from an HTML component in this course, enter "/jump_to_id/" as the URL value.')}

diff --git a/common/djangoapps/util/tests/test_submit_feedback.py b/common/djangoapps/util/tests/test_submit_feedback.py index 077765e1d7..35e80c83d5 100644 --- a/common/djangoapps/util/tests/test_submit_feedback.py +++ b/common/djangoapps/util/tests/test_submit_feedback.py @@ -11,8 +11,19 @@ from util import views from zendesk import ZendeskError import json import mock +from ddt import ddt, data, unpack from student.tests.test_configuration_overrides import fake_get_value +from student.tests.factories import CourseEnrollmentFactory + +TEST_SUPPORT_EMAIL = "support@example.com" +TEST_ZENDESK_CUSTOM_FIELD_CONFIG = {"course_id": 1234, "enrollment_mode": 5678} +TEST_REQUEST_HEADERS = { + "HTTP_REFERER": "test_referer", + "HTTP_USER_AGENT": "test_user_agent", + "REMOTE_ADDR": "1.2.3.4", + "SERVER_NAME": "test_server", +} def fake_support_backend_values(name, default=None): # pylint: disable=unused-argument @@ -21,13 +32,20 @@ def fake_support_backend_values(name, default=None): # pylint: disable=unused-a """ config_dict = { "CONTACT_FORM_SUBMISSION_BACKEND": "email", - "email_from_address": "support_from@example.com", + "email_from_address": TEST_SUPPORT_EMAIL, } return config_dict[name] +@ddt @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_FEEDBACK_SUBMISSION": True}) -@override_settings(ZENDESK_URL="dummy", ZENDESK_USER="dummy", ZENDESK_API_KEY="dummy") +@override_settings( + DEFAULT_FROM_EMAIL=TEST_SUPPORT_EMAIL, + ZENDESK_URL="dummy", + ZENDESK_USER="dummy", + ZENDESK_API_KEY="dummy", + ZENDESK_CUSTOM_FIELDS={} +) @mock.patch("util.views.dog_stats_api") @mock.patch("util.views._ZendeskApi", autospec=True) class SubmitFeedbackTest(TestCase): @@ -44,14 +62,12 @@ class SubmitFeedbackTest(TestCase): username="test", profile__name="Test User" ) - # This contains issue_type and course_id to ensure that tags are submitted correctly self._anon_fields = { "email": "test@edx.org", "name": "Test User", "subject": "a subject", "details": "some details", - 
"issue_type": "test_issue", - "course_id": "test_course" + "issue_type": "test_issue" } # This does not contain issue_type nor course_id to ensure that they are optional self._auth_fields = {"subject": "a subject", "details": "some details"} @@ -66,10 +82,10 @@ class SubmitFeedbackTest(TestCase): req = self._request_factory.post( "/submit_feedback", data=fields, - HTTP_REFERER="test_referer", - HTTP_USER_AGENT="test_user_agent", - REMOTE_ADDR="1.2.3.4", - SERVER_NAME="test_server", + HTTP_REFERER=TEST_REQUEST_HEADERS["HTTP_REFERER"], + HTTP_USER_AGENT=TEST_REQUEST_HEADERS["HTTP_USER_AGENT"], + REMOTE_ADDR=TEST_REQUEST_HEADERS["REMOTE_ADDR"], + SERVER_NAME=TEST_REQUEST_HEADERS["SERVER_NAME"], ) req.user = user return views.submit_feedback(req) @@ -130,13 +146,58 @@ class SubmitFeedbackTest(TestCase): resp = self._build_and_run_request(user, fields) self.assertEqual(resp.status_code, 200) - def _assert_datadog_called(self, datadog_mock, with_tags): - expected_datadog_calls = [ - mock.call.increment( - views.DATADOG_FEEDBACK_METRIC, - tags=(["course_id:test_course", "issue_type:test_issue"] if with_tags else []) - ) + def _build_zendesk_ticket(self, recipient, name, email, subject, details, tags, custom_fields=None): + """ + Build a Zendesk ticket that can be used in assertions to verify that the correct + data was submitted to create a Zendesk ticket. + """ + ticket = { + "ticket": { + "recipient": recipient, + "requester": {"name": name, "email": email}, + "subject": subject, + "comment": {"body": details}, + "tags": tags + } + } + + if custom_fields is not None: + ticket["ticket"]["custom_fields"] = custom_fields + + return ticket + + def _build_zendesk_ticket_update(self, request_headers, username=None): + """ + Build a Zendesk ticket update that can be used in assertions to verify that the correct + data was submitted to update a Zendesk ticket. 
+ """ + body = [] + if username: + body.append("username: {}".format(username)) + + # FIXME the tests rely on the body string being built in this specific order, which doesn't seem + # reliable given that the view builds the string by iterating over a dictionary. + header_text_mapping = [ + ("Client IP", "REMOTE_ADDR"), + ("Host", "SERVER_NAME"), + ("Page", "HTTP_REFERER"), + ("Browser", "HTTP_USER_AGENT") ] + + for text, header in header_text_mapping: + body.append("{}: {}".format(text, request_headers[header])) + + body = "Additional information:\n\n" + "\n".join(body) + return {"ticket": {"comment": {"public": False, "body": body}}} + + def _assert_zendesk_called(self, zendesk_mock, ticket_id, ticket, ticket_update): + """Assert that Zendesk was called with the correct ticket and ticket_update.""" + expected_zendesk_calls = [mock.call.create_ticket(ticket), mock.call.update_ticket(ticket_id, ticket_update)] + self.assertEqual(zendesk_mock.mock_calls, expected_zendesk_calls) + + def _assert_datadog_called(self, datadog_mock, tags): + """Assert that datadog was called with the correct tags.""" + expected_datadog_calls = [mock.call.increment(views.DATADOG_FEEDBACK_METRIC, tags=tags)] self.assertEqual(datadog_mock.mock_calls, expected_datadog_calls) def test_bad_request_anon_user_no_name(self, zendesk_mock_class, datadog_mock): @@ -174,39 +235,26 @@ class SubmitFeedbackTest(TestCase): the given information should have been submitted via the Zendesk API. 
""" zendesk_mock_instance = zendesk_mock_class.return_value - zendesk_mock_instance.create_ticket.return_value = 42 - self._test_success(self._anon_user, self._anon_fields) - expected_zendesk_calls = [ - mock.call.create_ticket( - { - "ticket": { - "recipient": "registration@example.com", - "requester": {"name": "Test User", "email": "test@edx.org"}, - "subject": "a subject", - "comment": {"body": "some details"}, - "tags": ["test_course", "test_issue", "LMS"] - } - } - ), - mock.call.update_ticket( - 42, - { - "ticket": { - "comment": { - "public": False, - "body": - "Additional information:\n\n" - "Client IP: 1.2.3.4\n" - "Host: test_server\n" - "Page: test_referer\n" - "Browser: test_user_agent" - } - } - } - ) - ] - self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls) - self._assert_datadog_called(datadog_mock, with_tags=True) + user = self._anon_user + fields = self._anon_fields + + ticket_id = 42 + zendesk_mock_instance.create_ticket.return_value = ticket_id + + ticket = self._build_zendesk_ticket( + recipient=TEST_SUPPORT_EMAIL, + name=fields["name"], + email=fields["email"], + subject=fields["subject"], + details=fields["details"], + tags=[fields["issue_type"], "LMS"] + ) + + ticket_update = self._build_zendesk_ticket_update(TEST_REQUEST_HEADERS) + + self._test_success(user, fields) + self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update) + self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(fields["issue_type"])]) @mock.patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value) def test_valid_request_anon_user_configuration_override(self, zendesk_mock_class, datadog_mock): @@ -218,39 +266,75 @@ class SubmitFeedbackTest(TestCase): tag that will come from site configuration override. 
""" zendesk_mock_instance = zendesk_mock_class.return_value - zendesk_mock_instance.create_ticket.return_value = 42 - self._test_success(self._anon_user, self._anon_fields) - expected_zendesk_calls = [ - mock.call.create_ticket( - { - "ticket": { - "recipient": "no-reply@fakeuniversity.com", - "requester": {"name": "Test User", "email": "test@edx.org"}, - "subject": "a subject", - "comment": {"body": "some details"}, - "tags": ["test_course", "test_issue", "LMS", "whitelabel_fakeorg"] - } - } - ), - mock.call.update_ticket( - 42, - { - "ticket": { - "comment": { - "public": False, - "body": - "Additional information:\n\n" - "Client IP: 1.2.3.4\n" - "Host: test_server\n" - "Page: test_referer\n" - "Browser: test_user_agent" - } - } - } - ) - ] - self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls) - self._assert_datadog_called(datadog_mock, with_tags=True) + user = self._anon_user + fields = self._anon_fields + + ticket_id = 42 + zendesk_mock_instance.create_ticket.return_value = ticket_id + + ticket = self._build_zendesk_ticket( + recipient=fake_get_value("email_from_address"), + name=fields["name"], + email=fields["email"], + subject=fields["subject"], + details=fields["details"], + tags=[fields["issue_type"], "LMS", "whitelabel_{}".format(fake_get_value("course_org_filter"))] + ) + + ticket_update = self._build_zendesk_ticket_update(TEST_REQUEST_HEADERS) + + self._test_success(user, fields) + self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update) + self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(fields["issue_type"])]) + + @data("course-v1:testOrg+testCourseNumber+testCourseRun", "", None) + @override_settings(ZENDESK_CUSTOM_FIELDS=TEST_ZENDESK_CUSTOM_FIELD_CONFIG) + def test_valid_request_anon_user_with_custom_fields(self, course_id, zendesk_mock_class, datadog_mock): + """ + Test a valid request from an anonymous user when configured to use Zendesk Custom Fields. 
+ + The response should have a 200 (success) status code, and a ticket with + the given information should have been submitted via the Zendesk API. When course_id is + present, it should be sent to Zendesk via a custom field. When course_id is blank or missing, + the request should still be processed successfully. + """ + zendesk_mock_instance = zendesk_mock_class.return_value + user = self._anon_user + + fields = self._anon_fields.copy() + if course_id is not None: + fields["course_id"] = course_id + + ticket_id = 42 + zendesk_mock_instance.create_ticket.return_value = ticket_id + + zendesk_tags = [fields["issue_type"], "LMS"] + datadog_tags = ["issue_type:{}".format(fields["issue_type"])] + zendesk_custom_fields = None + if course_id: + # FIXME the tests rely on the tags being in this specific order, which doesn't seem + # reliable given that the view builds the list by iterating over a dictionary. + zendesk_tags.insert(0, course_id) + datadog_tags.insert(0, "course_id:{}".format(course_id)) + zendesk_custom_fields = [ + {"id": TEST_ZENDESK_CUSTOM_FIELD_CONFIG["course_id"], "value": course_id} + ] + + ticket = self._build_zendesk_ticket( + recipient=TEST_SUPPORT_EMAIL, + name=fields["name"], + email=fields["email"], + subject=fields["subject"], + details=fields["details"], + tags=zendesk_tags, + custom_fields=zendesk_custom_fields + ) + + ticket_update = self._build_zendesk_ticket_update(TEST_REQUEST_HEADERS) + + self._test_success(user, fields) + self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update) + self._assert_datadog_called(datadog_mock, datadog_tags) def test_bad_request_auth_user_no_subject(self, zendesk_mock_class, datadog_mock): """Test a request from an authenticated user not specifying `subject`.""" @@ -270,40 +354,92 @@ class SubmitFeedbackTest(TestCase): the given information should have been submitted via the Zendesk API. 
""" zendesk_mock_instance = zendesk_mock_class.return_value - zendesk_mock_instance.create_ticket.return_value = 42 - self._test_success(self._auth_user, self._auth_fields) - expected_zendesk_calls = [ - mock.call.create_ticket( - { - "ticket": { - "recipient": "registration@example.com", - "requester": {"name": "Test User", "email": "test@edx.org"}, - "subject": "a subject", - "comment": {"body": "some details"}, - "tags": ["LMS"] - } - } - ), - mock.call.update_ticket( - 42, - { - "ticket": { - "comment": { - "public": False, - "body": - "Additional information:\n\n" - "username: test\n" - "Client IP: 1.2.3.4\n" - "Host: test_server\n" - "Page: test_referer\n" - "Browser: test_user_agent" - } - } - } - ) - ] - self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls) - self._assert_datadog_called(datadog_mock, with_tags=False) + user = self._auth_user + fields = self._auth_fields + + ticket_id = 42 + zendesk_mock_instance.create_ticket.return_value = ticket_id + + ticket = self._build_zendesk_ticket( + recipient=TEST_SUPPORT_EMAIL, + name=user.profile.name, + email=user.email, + subject=fields["subject"], + details=fields["details"], + tags=["LMS"] + ) + + ticket_update = self._build_zendesk_ticket_update(TEST_REQUEST_HEADERS, user.username) + + self._test_success(user, fields) + self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update) + self._assert_datadog_called(datadog_mock, []) + + @data( + ("course-v1:testOrg+testCourseNumber+testCourseRun", True), + ("course-v1:testOrg+testCourseNumber+testCourseRun", False), + ("", None), + (None, None) + ) + @unpack + @override_settings(ZENDESK_CUSTOM_FIELDS=TEST_ZENDESK_CUSTOM_FIELD_CONFIG) + def test_valid_request_auth_user_with_custom_fields(self, course_id, enrolled, zendesk_mock_class, datadog_mock): + """ + Test a valid request from an authenticated user when configured to use Zendesk Custom Fields. 
+ + The response should have a 200 (success) status code, and a ticket with + the given information should have been submitted via the Zendesk API. When course_id is + present, it should be sent to Zendesk via a custom field, along with the enrollment mode + if the user has an active enrollment for that course. When course_id is blank or missing, + the request should still be processed successfully. + """ + zendesk_mock_instance = zendesk_mock_class.return_value + user = self._auth_user + + fields = self._auth_fields.copy() + if course_id is not None: + fields["course_id"] = course_id + + ticket_id = 42 + zendesk_mock_instance.create_ticket.return_value = ticket_id + + zendesk_tags = ["LMS"] + datadog_tags = [] + zendesk_custom_fields = None + if course_id: + # FIXME the tests rely on the tags being in this specific order, which doesn't seem + # reliable given that the view builds the list by iterating over a dictionary. + zendesk_tags.insert(0, course_id) + datadog_tags.insert(0, "course_id:{}".format(course_id)) + zendesk_custom_fields = [ + {"id": TEST_ZENDESK_CUSTOM_FIELD_CONFIG["course_id"], "value": course_id} + ] + if enrolled is not None: + enrollment = CourseEnrollmentFactory.create( + user=user, + course_id=course_id, + is_active=enrolled + ) + if enrollment.is_active: + zendesk_custom_fields.append( + {"id": TEST_ZENDESK_CUSTOM_FIELD_CONFIG["enrollment_mode"], "value": enrollment.mode} + ) + + ticket = self._build_zendesk_ticket( + recipient=TEST_SUPPORT_EMAIL, + name=user.profile.name, + email=user.email, + subject=fields["subject"], + details=fields["details"], + tags=zendesk_tags, + custom_fields=zendesk_custom_fields + ) + + ticket_update = self._build_zendesk_ticket_update(TEST_REQUEST_HEADERS, user.username) + + self._test_success(user, fields) + self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update) + self._assert_datadog_called(datadog_mock, datadog_tags) def test_get_request(self, zendesk_mock_class, datadog_mock): 
"""Test that a GET results in a 405 even with all required fields""" @@ -329,7 +465,7 @@ class SubmitFeedbackTest(TestCase): resp = self._build_and_run_request(self._anon_user, self._anon_fields) self.assertEqual(resp.status_code, 500) self.assertFalse(resp.content) - self._assert_datadog_called(datadog_mock, with_tags=True) + self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(self._anon_fields["issue_type"])]) def test_zendesk_error_on_update(self, zendesk_mock_class, datadog_mock): """ @@ -344,7 +480,7 @@ class SubmitFeedbackTest(TestCase): zendesk_mock_instance.update_ticket.side_effect = err resp = self._build_and_run_request(self._anon_user, self._anon_fields) self.assertEqual(resp.status_code, 200) - self._assert_datadog_called(datadog_mock, with_tags=True) + self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(self._anon_fields["issue_type"])]) @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_FEEDBACK_SUBMISSION": False}) def test_not_enabled(self, zendesk_mock_class, datadog_mock): diff --git a/common/djangoapps/util/views.py b/common/djangoapps/util/views.py index ea886e7777..2c7045b540 100644 --- a/common/djangoapps/util/views.py +++ b/common/djangoapps/util/views.py @@ -25,6 +25,7 @@ from edxmako.shortcuts import render_to_response, render_to_string from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers import track.views from student.roles import GlobalStaff +from student.models import CourseEnrollment log = logging.getLogger(__name__) @@ -222,6 +223,40 @@ class _ZendeskApi(object): return None +def _get_zendesk_custom_field_context(request): + """ + Construct a dictionary of data that can be stored in Zendesk custom fields. 
+ """ + context = {} + + course_id = request.POST.get("course_id") + if not course_id: + return context + + context["course_id"] = course_id + if not request.user.is_authenticated(): + return context + + enrollment = CourseEnrollment.get_enrollment(request.user, CourseKey.from_string(course_id)) + if enrollment and enrollment.is_active: + context["enrollment_mode"] = enrollment.mode + + return context + + +def _format_zendesk_custom_fields(context): + """ + Format the data in `context` for compatibility with the Zendesk API. + Ignore any keys that have not been configured in `ZENDESK_CUSTOM_FIELDS`. + """ + custom_fields = [] + for key, val, in settings.ZENDESK_CUSTOM_FIELDS.items(): + if key in context: + custom_fields.append({"id": val, "value": context[key]}) + + return custom_fields + + def _record_feedback_in_zendesk( realname, email, @@ -231,7 +266,8 @@ def _record_feedback_in_zendesk( additional_info, group_name=None, require_update=False, - support_email=None + support_email=None, + custom_fields=None ): """ Create a new user-requested Zendesk ticket. @@ -246,6 +282,8 @@ def _record_feedback_in_zendesk( If `require_update` is provided, returns False when the update does not succeed. This allows using the private comment to add necessary information which the user will not see in followup emails from support. + + If `custom_fields` is provided, submits data to those fields in Zendesk. 
""" zendesk_api = _ZendeskApi() @@ -271,6 +309,10 @@ def _record_feedback_in_zendesk( "tags": zendesk_tags } } + + if custom_fields: + new_ticket["ticket"]["custom_fields"] = custom_fields + group = None if group_name is not None: group = zendesk_api.get_group(group_name) @@ -322,7 +364,7 @@ def get_feedback_form_context(request): context["subject"] = request.POST["subject"] context["details"] = request.POST["details"] context["tags"] = dict( - [(tag, request.POST[tag]) for tag in ["issue_type", "course_id"] if tag in request.POST] + [(tag, request.POST[tag]) for tag in ["issue_type", "course_id"] if request.POST.get(tag)] ) context["additional_info"] = {} @@ -412,6 +454,11 @@ def submit_feedback(request): if not settings.ZENDESK_URL or not settings.ZENDESK_USER or not settings.ZENDESK_API_KEY: raise Exception("Zendesk enabled but not configured") + custom_fields = None + if settings.ZENDESK_CUSTOM_FIELDS: + custom_field_context = _get_zendesk_custom_field_context(request) + custom_fields = _format_zendesk_custom_fields(custom_field_context) + success = _record_feedback_in_zendesk( context["realname"], context["email"], @@ -419,7 +466,8 @@ def submit_feedback(request): context["details"], context["tags"], context["additional_info"], - support_email=context["support_email"] + support_email=context["support_email"], + custom_fields=custom_fields ) _record_feedback_in_datadog(context["tags"]) diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index b18e26fd0e..467629d7a1 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -328,9 +328,16 @@ class InputTypeBase(object): } # Generate the list of ids to be used with the aria-describedby field. 
+ descriptions = list() + + # If there is trailing text, add the id as the first element to the list before adding the status id + if 'trailing_text' in self.loaded_attributes and self.loaded_attributes['trailing_text']: + trailing_text_id = 'trailing_text_' + self.input_id + descriptions.append(trailing_text_id) + # Every list should contain the status id status_id = 'status_' + self.input_id - descriptions = list([status_id]) + descriptions.append(status_id) descriptions.extend(self.response_data.get('descriptions', {}).keys()) description_ids = ' '.join(descriptions) context.update( diff --git a/common/lib/capa/capa/templates/formulaequationinput.html b/common/lib/capa/capa/templates/formulaequationinput.html index fe66dfa89a..798623f06e 100644 --- a/common/lib/capa/capa/templates/formulaequationinput.html +++ b/common/lib/capa/capa/templates/formulaequationinput.html @@ -16,7 +16,7 @@ size="${size}" % endif /> - ${trailing_text} + ${trailing_text} <%include file="status_span.html" args="status=status, status_id=id"/> diff --git a/common/lib/capa/capa/templates/textline.html b/common/lib/capa/capa/templates/textline.html index 73452bacd8..632fb0f7da 100644 --- a/common/lib/capa/capa/templates/textline.html +++ b/common/lib/capa/capa/templates/textline.html @@ -34,7 +34,7 @@ style="display:none;" % endif /> -${trailing_text} +${trailing_text} <%include file="status_span.html" args="status=status, status_id=id"/> diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 851de494cb..2e8ae0f32c 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -37,6 +37,8 @@ lookup_tag = inputtypes.registry.get_class_for_tag DESCRIBEDBY = HTML('aria-describedby="status_{status_id} desc-1 desc-2"') +# Use TRAILING_TEXT_DESCRIBEDBY when trailing_text is not null +TRAILING_TEXT_DESCRIBEDBY = HTML('aria-describedby="trailing_text_{trailing_text_id} status_{status_id} 
desc-1 desc-2"') DESCRIPTIONS = OrderedDict([('desc-1', 'description text 1'), ('desc-2', 'description text 2')]) RESPONSE_DATA = { 'label': 'question text 101', @@ -361,7 +363,7 @@ class TextLineTest(unittest.TestCase): 'trailing_text': expected_text, 'preprocessor': None, 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + 'describedby_html': TRAILING_TEXT_DESCRIBEDBY.format(trailing_text_id=prob_id, status_id=prob_id) } self.assertEqual(context, expected) @@ -1295,7 +1297,7 @@ class FormulaEquationTest(unittest.TestCase): 'inline': False, 'trailing_text': expected_text, 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + 'describedby_html': TRAILING_TEXT_DESCRIBEDBY.format(trailing_text_id=prob_id, status_id=prob_id) } self.assertEqual(context, expected) diff --git a/common/lib/xmodule/xmodule/modulestore/tests/factories.py b/common/lib/xmodule/xmodule/modulestore/tests/factories.py index c7f6e91e68..da8af6cecf 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/factories.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/factories.py @@ -13,7 +13,7 @@ from contextlib import contextmanager from uuid import uuid4 from factory import Factory, Sequence, lazy_attribute_sequence, lazy_attribute -from factory.containers import CyclicDefinitionError +from factory.errors import CyclicDefinitionError from mock import patch from nose.tools import assert_less_equal, assert_greater_equal import dogstats_wrapper as dog_stats_api diff --git a/common/lib/xmodule/xmodule/video_module/transcripts_utils.py b/common/lib/xmodule/xmodule/video_module/transcripts_utils.py index 22d6786e3b..f464a9d1db 100644 --- a/common/lib/xmodule/xmodule/video_module/transcripts_utils.py +++ b/common/lib/xmodule/xmodule/video_module/transcripts_utils.py @@ -296,11 +296,9 @@ def copy_or_rename_transcript(new_name, old_name, item, delete_old=False, user=N def get_html5_ids(html5_sources): """ Helper method to 
parse out an HTML5 source into the ideas - NOTE: This assumes that '/' are not in the filename. - Slices each id by 150, restricting too long strings as video names. + NOTE: This assumes that '/' are not in the filename """ - html5_ids = [x.split('/')[-1].rsplit('.', 1)[0][:150] for x in html5_sources] - + html5_ids = [x.split('/')[-1].rsplit('.', 1)[0] for x in html5_sources] return html5_ids diff --git a/common/static/common/js/discussion/views/discussion_thread_list_view.js b/common/static/common/js/discussion/views/discussion_thread_list_view.js index ef9cf793a2..ac151c9493 100644 --- a/common/static/common/js/discussion/views/discussion_thread_list_view.js +++ b/common/static/common/js/discussion/views/discussion_thread_list_view.js @@ -93,6 +93,7 @@ this.courseSettings = options.courseSettings; this.hideRefineBar = options.hideRefineBar; this.supportsActiveThread = options.supportsActiveThread; + this.hideReadState = options.hideReadState || false; this.displayedCollection = new Discussion(this.collection.models, { pages: this.collection.pages }); @@ -342,7 +343,8 @@ neverRead: neverRead, threadUrl: thread.urlFor('retrieve'), threadPreview: threadPreview, - showThreadPreview: this.showThreadPreview + showThreadPreview: this.showThreadPreview, + hideReadState: this.hideReadState }, thread.toJSON() ); diff --git a/common/static/common/js/spec/discussion/view/discussion_thread_list_view_spec.js b/common/static/common/js/spec/discussion/view/discussion_thread_list_view_spec.js index 50f5048ab8..713ffa801a 100644 --- a/common/static/common/js/spec/discussion/view/discussion_thread_list_view_spec.js +++ b/common/static/common/js/spec/discussion/view/discussion_thread_list_view_spec.js @@ -169,6 +169,7 @@ }); return this.view.render(); }); + setupAjax = function(callback) { return $.ajax.and.callFake(function(params) { if (callback) { @@ -185,19 +186,27 @@ }; }); }; + renderSingleThreadWithProps = function(props) { return makeView(new Discussion([new 
Thread(DiscussionViewSpecHelper.makeThreadWithProps(props))])).render(); }; - makeView = function(discussion) { - return new DiscussionThreadListView({ - el: $('#fixture-element'), - collection: discussion, - showThreadPreview: true, - courseSettings: new DiscussionCourseSettings({ - is_cohorted: true - }) - }); + + makeView = function(discussion, props) { + return new DiscussionThreadListView( + _.extend( + { + el: $('#fixture-element'), + collection: discussion, + showThreadPreview: true, + courseSettings: new DiscussionCourseSettings({ + is_cohorted: true + }) + }, + props + ) + ); }; + expectFilter = function(filterVal) { return $.ajax.and.callFake(function(params) { _.each(['unread', 'unanswered', 'flagged'], function(paramName) { @@ -681,5 +690,45 @@ expect(view.$el.find('.thread-preview-body').length).toEqual(0); }); }); + + describe('read/unread state', function() { + it('adds never-read class to unread threads', function() { + var unreads = this.threads.filter(function(thread) { + return !thread.read && thread.unread_comments_count === thread.comments_count; + }).length; + + this.view = makeView(new Discussion(this.threads)); + this.view.render(); + expect(this.view.$('.never-read').length).toEqual(unreads); + }); + + it('shows a "x new" message for threads that are read, but have unread comments', function() { + var unreadThread = this.threads.filter(function(thread) { + return thread.read && thread.unread_comments_count !== thread.comments_count; + })[0], + newCommentsOnUnreadThread = unreadThread.unread_comments_count; + + this.view = makeView(new Discussion(this.threads)); + this.view.render(); + expect( + this.view.$('.forum-nav-thread-unread-comments-count') + .first() + .text() + .trim() + ).toEqual(newCommentsOnUnreadThread + ' new'); + }); + + it('should display every thread as read if hideReadState: true is passed to the constructor', function() { + this.view = makeView(new Discussion(this.threads), {hideReadState: true}); + this.view.render(); + 
expect(this.view.$('.never-read').length).toEqual(0); + }); + + it('does not show the "x new" indicator for any thread if hideReadState: true is passed', function() { + this.view = makeView(new Discussion(this.threads), {hideReadState: true}); + this.view.render(); + expect(this.view.$('.forum-nav-thread-unread-comments-count').length).toEqual(0); + }); + }); }); }).call(this); diff --git a/common/static/common/templates/discussion/thread-list-item.underscore b/common/static/common/templates/discussion/thread-list-item.underscore index d57aec8630..6aa6e4070b 100644 --- a/common/static/common/templates/discussion/thread-list-item.underscore +++ b/common/static/common/templates/discussion/thread-list-item.underscore @@ -1,4 +1,4 @@ -
  • +
  • <% @@ -75,7 +75,7 @@ %> - <% if (!neverRead && unread_comments_count > 0) { %> + <% if (!hideReadState && !neverRead && unread_comments_count > 0) { %> <%- StringUtils.interpolate( diff --git a/common/test/acceptance/tests/helpers.py b/common/test/acceptance/tests/helpers.py index a768395d97..f8d8f2f9be 100644 --- a/common/test/acceptance/tests/helpers.py +++ b/common/test/acceptance/tests/helpers.py @@ -69,25 +69,12 @@ def is_youtube_available(): bool: """ - - youtube_api_urls = { - 'main': 'https://www.youtube.com/', - 'player': 'https://www.youtube.com/iframe_api', - # For transcripts, you need to check an actual video, so we will - # just specify our default video and see if that one is available. - 'transcript': 'http://video.google.com/timedtext?lang=en&v=3_yD_cEKoCk', - } - - for url in youtube_api_urls.itervalues(): - try: - response = requests.get(url, allow_redirects=False) - except requests.exceptions.ConnectionError: - return False - - if response.status_code >= 300: - return False - - return True + # Skip all the youtube tests for now because they are failing intermittently + # due to changes on their side. See: TE-1927 + # TODO: Design and implement a better solution that is reliable and repeatable, + # reflects how the application works in production, and limits the third-party + # network traffic (e.g. repeatedly retrieving the js from youtube from the browser). 
+ return False def is_focused_on_element(browser, selector): diff --git a/lms/djangoapps/discussion/static/discussion/js/views/discussion_user_profile_view.js b/lms/djangoapps/discussion/static/discussion/js/views/discussion_user_profile_view.js index 7c3e199ecd..b8148b5f55 100644 --- a/lms/djangoapps/discussion/static/discussion/js/views/discussion_user_profile_view.js +++ b/lms/djangoapps/discussion/static/discussion/js/views/discussion_user_profile_view.js @@ -39,7 +39,10 @@ collection: this.discussion, el: this.$('.inline-threads'), courseSettings: this.courseSettings, - hideRefineBar: true // TODO: re-enable the search/filter bar when it works correctly + hideRefineBar: true, // TODO: re-enable the search/filter bar when it works correctly + // @TODO: On the profile page, thread read state for the viewing user is not accessible via API. + // Fix this when the Discussions API can support this query. Until then, hide read state. + hideReadState: true }).render(); this.discussionThreadListView.on('thread:selected', _.bind(this.navigateToThread, this)); diff --git a/lms/djangoapps/email_marketing/migrations/0004_emailmarketingconfiguration_welcome_email_send_delay.py b/lms/djangoapps/email_marketing/migrations/0004_emailmarketingconfiguration_welcome_email_send_delay.py new file mode 100644 index 0000000000..b632e26a55 --- /dev/null +++ b/lms/djangoapps/email_marketing/migrations/0004_emailmarketingconfiguration_welcome_email_send_delay.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('email_marketing', '0003_auto_20160715_1145'), + ] + + operations = [ + migrations.AddField( + model_name='emailmarketingconfiguration', + name='welcome_email_send_delay', + field=models.IntegerField(default=600, help_text='Number of seconds to delay the sending of User Welcome email after user has been activated'), + ), + ] diff --git 
a/lms/djangoapps/email_marketing/models.py b/lms/djangoapps/email_marketing/models.py
index 04d7285eeb..d16146e4c5 100644
--- a/lms/djangoapps/email_marketing/models.py
+++ b/lms/djangoapps/email_marketing/models.py
@@ -127,6 +127,16 @@ class EmailMarketingConfiguration(ConfigurationModel):
         )
     )
 
+    # The number of seconds to delay sending of welcome emails. This is needed to accommodate those
+    # learners who created a user account during course enrollment so we can send a different message
+    # in our welcome email.
+    welcome_email_send_delay = models.fields.IntegerField(
+        default=600,
+        help_text=_(
+            "Number of seconds to delay the sending of User Welcome email after user has been activated"
+        )
+    )
+
     def __unicode__(self):
         return u"Email marketing configuration: New user list %s, Activation template: %s" % \
             (self.sailthru_new_user_list, self.sailthru_activation_template)
diff --git a/lms/djangoapps/email_marketing/tasks.py b/lms/djangoapps/email_marketing/tasks.py
index bd7a229663..ab29d59875 100644
--- a/lms/djangoapps/email_marketing/tasks.py
+++ b/lms/djangoapps/email_marketing/tasks.py
@@ -3,6 +3,7 @@ This file contains celery tasks for email marketing signal handler.
""" import logging import time +from datetime import datetime, timedelta from celery import task from django.core.cache import cache @@ -56,10 +57,16 @@ def update_user(self, sailthru_vars, email, site=None, new_user=False, activatio # if activating user, send welcome email if activation and email_config.sailthru_activation_template: + scheduled_datetime = datetime.utcnow() + timedelta(seconds=email_config.welcome_email_send_delay) try: - sailthru_response = sailthru_client.api_post("send", - {"email": email, - "template": email_config.sailthru_activation_template}) + sailthru_response = sailthru_client.api_post( + "send", + { + "email": email, + "template": email_config.sailthru_activation_template, + "schedule_time": scheduled_datetime.strftime('%Y-%m-%dT%H:%M:%SZ') + } + ) except SailthruClientError as exc: log.error("Exception attempting to send welcome email to user %s in Sailthru - %s", email, unicode(exc)) raise self.retry(exc=exc, diff --git a/lms/djangoapps/email_marketing/tests/test_signals.py b/lms/djangoapps/email_marketing/tests/test_signals.py index 7982e4a45b..8a8dce4af2 100644 --- a/lms/djangoapps/email_marketing/tests/test_signals.py +++ b/lms/djangoapps/email_marketing/tests/test_signals.py @@ -1,6 +1,7 @@ """Tests of email marketing signal handlers.""" import ddt import logging +import datetime from django.test import TestCase from django.contrib.auth.models import AnonymousUser @@ -45,6 +46,7 @@ def update_email_marketing_config(enabled=True, key='badkey', secret='badsecret' sailthru_get_tags_from_sailthru=False, sailthru_enroll_cost=enroll_cost, sailthru_max_retries=0, + welcome_email_send_delay=600 ) @@ -168,12 +170,14 @@ class EmailMarketingTests(TestCase): """ mock_sailthru_post.return_value = SailthruResponse(JsonResponse({'ok': True})) mock_sailthru_get.return_value = SailthruResponse(JsonResponse({'lists': [{'name': 'new list'}], 'ok': True})) + expected_schedule = datetime.datetime.utcnow() + datetime.timedelta(seconds=600) 
update_user.delay({}, self.user.email, new_user=True, activation=True) # look for call args for 2nd call self.assertEquals(mock_sailthru_post.call_args[0][0], "send") userparms = mock_sailthru_post.call_args[0][1] self.assertEquals(userparms['email'], TEST_EMAIL) self.assertEquals(userparms['template'], "Activation") + self.assertEquals(userparms['schedule_time'], expected_schedule.strftime('%Y-%m-%dT%H:%M:%SZ')) @patch('email_marketing.tasks.log.error') @patch('email_marketing.tasks.SailthruClient.api_post') diff --git a/lms/djangoapps/grades/management/commands/reset_grades.py b/lms/djangoapps/grades/management/commands/reset_grades.py new file mode 100644 index 0000000000..43e9776b6c --- /dev/null +++ b/lms/djangoapps/grades/management/commands/reset_grades.py @@ -0,0 +1,150 @@ +""" +Reset persistent grades for learners. +""" +from datetime import datetime +import logging +from textwrap import dedent + +from django.core.management.base import BaseCommand, CommandError +from django.db.models import Count + +from opaque_keys import InvalidKeyError +from opaque_keys.edx.keys import CourseKey + +from lms.djangoapps.grades.models import PersistentSubsectionGrade, PersistentCourseGrade + + +log = logging.getLogger(__name__) + + +DATE_FORMAT = "%Y-%m-%d %H:%M" + + +class Command(BaseCommand): + """ + Reset persistent grades for learners. + """ + help = dedent(__doc__).strip() + + def add_arguments(self, parser): + """ + Add arguments to the command parser. + """ + parser.add_argument( + '--dry_run', + action='store_true', + default=False, + dest='dry_run', + help="Output what we're going to do, but don't actually do it. To actually delete, use --delete instead." + ) + parser.add_argument( + '--delete', + action='store_true', + default=False, + dest='delete', + help="Actually perform the deletions of the course. For a Dry Run, use --dry_run instead." 
+ ) + parser.add_argument( + '--courses', + dest='courses', + nargs='+', + help='Reset persistent grades for the list of courses provided.', + ) + parser.add_argument( + '--all_courses', + action='store_true', + dest='all_courses', + default=False, + help='Reset persistent grades for all courses.', + ) + parser.add_argument( + '--modified_start', + dest='modified_start', + help='Starting range for modified date (inclusive): e.g. "2016-08-23 16:43"', + ) + parser.add_argument( + '--modified_end', + dest='modified_end', + help='Ending range for modified date (inclusive): e.g. "2016-12-23 16:43"', + ) + + def handle(self, *args, **options): + course_keys = None + modified_start = None + modified_end = None + + run_mode = self._get_mutually_exclusive_option(options, 'delete', 'dry_run') + courses_mode = self._get_mutually_exclusive_option(options, 'courses', 'all_courses') + + if options.get('modified_start'): + modified_start = datetime.strptime(options['modified_start'], DATE_FORMAT) + + if options.get('modified_end'): + if not modified_start: + raise CommandError('Optional value for modified_end provided without a value for modified_start.') + modified_end = datetime.strptime(options['modified_end'], DATE_FORMAT) + + if courses_mode == 'courses': + try: + course_keys = [CourseKey.from_string(course_key_string) for course_key_string in options['courses']] + except InvalidKeyError as error: + raise CommandError('Invalid key specified: {}'.format(error.message)) + + log.info("reset_grade: Started in %s mode!", run_mode) + + operation = self._query_grades if run_mode == 'dry_run' else self._delete_grades + + operation(PersistentSubsectionGrade, course_keys, modified_start, modified_end) + operation(PersistentCourseGrade, course_keys, modified_start, modified_end) + + log.info("reset_grade: Finished in %s mode!", run_mode) + + def _delete_grades(self, grade_model_class, *args, **kwargs): + """ + Deletes the requested grades in the given model, filtered by the provided 
args and kwargs. + """ + grades_query_set = grade_model_class.query_grades(*args, **kwargs) + num_rows_to_delete = grades_query_set.count() + + log.info("reset_grade: Deleting %s: %d row(s).", grade_model_class.__name__, num_rows_to_delete) + + grade_model_class.delete_grades(*args, **kwargs) + + log.info("reset_grade: Deleted %s: %d row(s).", grade_model_class.__name__, num_rows_to_delete) + + def _query_grades(self, grade_model_class, *args, **kwargs): + """ + Queries the requested grades in the given model, filtered by the provided args and kwargs. + """ + total_for_all_courses = 0 + + grades_query_set = grade_model_class.query_grades(*args, **kwargs) + grades_stats = grades_query_set.values('course_id').order_by().annotate(total=Count('course_id')) + + for stat in grades_stats: + total_for_all_courses += stat['total'] + log.info( + "reset_grade: Would delete %s for COURSE %s: %d row(s).", + grade_model_class.__name__, + stat['course_id'], + stat['total'], + ) + + log.info( + "reset_grade: Would delete %s in TOTAL: %d row(s).", + grade_model_class.__name__, + total_for_all_courses, + ) + + def _get_mutually_exclusive_option(self, options, option_1, option_2): + """ + Validates that exactly one of the 2 given options is specified. + Returns the name of the found option. 
+        """
+        if not options.get(option_1) and not options.get(option_2):
+            raise CommandError('Either --{} or --{} must be specified.'.format(option_1, option_2))
+
+        if options.get(option_1) and options.get(option_2):
+            raise CommandError('Both --{} and --{} cannot be specified.'.format(option_1, option_2))
+
+        return option_1 if options.get(option_1) else option_2
diff --git a/lms/djangoapps/grades/management/commands/tests/test_reset_grades.py b/lms/djangoapps/grades/management/commands/tests/test_reset_grades.py
new file mode 100644
index 0000000000..647b9c8e31
--- /dev/null
+++ b/lms/djangoapps/grades/management/commands/tests/test_reset_grades.py
@@ -0,0 +1,296 @@
+"""
+Tests for reset_grades management command.
+"""
+from datetime import datetime, timedelta
+import ddt
+from django.core.management.base import CommandError
+from django.test import TestCase
+from freezegun import freeze_time
+from mock import patch, MagicMock
+
+from lms.djangoapps.grades.management.commands import reset_grades
+from lms.djangoapps.grades.models import PersistentSubsectionGrade, PersistentCourseGrade
+from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
+
+
+@ddt.ddt
+class TestResetGrades(TestCase):
+    """
+    Tests for the reset_grades management command.
+ """ + num_users = 3 + num_courses = 5 + num_subsections = 7 + + def setUp(self): + super(TestResetGrades, self).setUp() + self.command = reset_grades.Command() + + self.user_ids = [user_id for user_id in range(self.num_users)] + + self.course_keys = [] + for course_index in range(self.num_courses): + self.course_keys.append( + CourseLocator( + org='some_org', + course='some_course', + run=unicode(course_index), + ) + ) + + self.subsection_keys_by_course = {} + for course_key in self.course_keys: + subsection_keys_in_course = [] + for subsection_index in range(self.num_subsections): + subsection_keys_in_course.append( + BlockUsageLocator( + course_key=course_key, + block_type='sequential', + block_id=unicode(subsection_index), + ) + ) + self.subsection_keys_by_course[course_key] = subsection_keys_in_course + + def _update_or_create_grades(self, courses_keys=None): + """ + Creates grades for all courses and subsections. + """ + if courses_keys is None: + courses_keys = self.course_keys + + course_grade_params = { + "course_version": "JoeMcEwing", + "course_edited_timestamp": datetime( + year=2016, + month=8, + day=1, + hour=18, + minute=53, + second=24, + microsecond=354741, + ), + "percent_grade": 77.7, + "letter_grade": "Great job", + "passed": True, + } + subsection_grade_params = { + "course_version": "deadbeef", + "subtree_edited_timestamp": "2016-08-01 18:53:24.354741", + "earned_all": 6.0, + "possible_all": 12.0, + "earned_graded": 6.0, + "possible_graded": 8.0, + "visible_blocks": MagicMock(), + "attempted": True, + } + + for course_key in courses_keys: + for user_id in self.user_ids: + course_grade_params['user_id'] = user_id + course_grade_params['course_id'] = course_key + PersistentCourseGrade.update_or_create_course_grade(**course_grade_params) + for subsection_key in self.subsection_keys_by_course[course_key]: + subsection_grade_params['user_id'] = user_id + subsection_grade_params['usage_key'] = subsection_key + 
PersistentSubsectionGrade.update_or_create_grade(**subsection_grade_params) + + def _assert_grades_exist_for_courses(self, course_keys): + """ + Assert grades for given courses exist. + """ + for course_key in course_keys: + self.assertIsNotNone(PersistentCourseGrade.read_course_grade(self.user_ids[0], course_key)) + for subsection_key in self.subsection_keys_by_course[course_key]: + self.assertIsNotNone(PersistentSubsectionGrade.read_grade(self.user_ids[0], subsection_key)) + + def _assert_grades_absent_for_courses(self, course_keys): + """ + Assert grades for given courses do not exist. + """ + for course_key in course_keys: + with self.assertRaises(PersistentCourseGrade.DoesNotExist): + PersistentCourseGrade.read_course_grade(self.user_ids[0], course_key) + + for subsection_key in self.subsection_keys_by_course[course_key]: + with self.assertRaises(PersistentSubsectionGrade.DoesNotExist): + PersistentSubsectionGrade.read_grade(self.user_ids[0], subsection_key) + + def _assert_stat_logged(self, mock_log, num_rows, grade_model_class, message_substring, log_offset): + self.assertIn('reset_grade: ' + message_substring, mock_log.info.call_args_list[log_offset][0][0]) + self.assertEqual(grade_model_class.__name__, mock_log.info.call_args_list[log_offset][0][1]) + self.assertEqual(num_rows, mock_log.info.call_args_list[log_offset][0][2]) + + def _assert_course_delete_stat_logged(self, mock_log, num_rows): + self._assert_stat_logged(mock_log, num_rows, PersistentCourseGrade, 'Deleted', log_offset=4) + + def _assert_subsection_delete_stat_logged(self, mock_log, num_rows): + self._assert_stat_logged(mock_log, num_rows, PersistentSubsectionGrade, 'Deleted', log_offset=2) + + def _assert_course_query_stat_logged(self, mock_log, num_rows, num_courses=None): + if num_courses is None: + num_courses = self.num_courses + log_offset = num_courses + 1 + num_courses + 1 + self._assert_stat_logged(mock_log, num_rows, PersistentCourseGrade, 'Would delete', log_offset) + + def 
_assert_subsection_query_stat_logged(self, mock_log, num_rows, num_courses=None): + if num_courses is None: + num_courses = self.num_courses + log_offset = num_courses + 1 + self._assert_stat_logged(mock_log, num_rows, PersistentSubsectionGrade, 'Would delete', log_offset) + + def _date_from_now(self, days=None): + return datetime.now() + timedelta(days=days) + + def _date_str_from_now(self, days=None): + future_date = self._date_from_now(days=days) + return future_date.strftime(reset_grades.DATE_FORMAT) + + @patch('lms.djangoapps.grades.management.commands.reset_grades.log') + def test_reset_all_courses(self, mock_log): + self._update_or_create_grades() + self._assert_grades_exist_for_courses(self.course_keys) + + with self.assertNumQueries(4): + self.command.handle(delete=True, all_courses=True) + + self._assert_grades_absent_for_courses(self.course_keys) + self._assert_subsection_delete_stat_logged( + mock_log, + num_rows=self.num_users * self.num_courses * self.num_subsections, + ) + self._assert_course_delete_stat_logged( + mock_log, + num_rows=self.num_users * self.num_courses, + ) + + @patch('lms.djangoapps.grades.management.commands.reset_grades.log') + @ddt.data(1, 2, 3) + def test_reset_some_courses(self, num_courses_to_reset, mock_log): + self._update_or_create_grades() + self._assert_grades_exist_for_courses(self.course_keys) + + with self.assertNumQueries(4): + self.command.handle( + delete=True, + courses=[unicode(course_key) for course_key in self.course_keys[:num_courses_to_reset]] + ) + + self._assert_grades_absent_for_courses(self.course_keys[:num_courses_to_reset]) + self._assert_grades_exist_for_courses(self.course_keys[num_courses_to_reset:]) + self._assert_subsection_delete_stat_logged( + mock_log, + num_rows=self.num_users * num_courses_to_reset * self.num_subsections, + ) + self._assert_course_delete_stat_logged( + mock_log, + num_rows=self.num_users * num_courses_to_reset, + ) + + def test_reset_by_modified_start_date(self): + 
self._update_or_create_grades() + self._assert_grades_exist_for_courses(self.course_keys) + + num_courses_with_updated_grades = 2 + with freeze_time(self._date_from_now(days=4)): + self._update_or_create_grades(self.course_keys[:num_courses_with_updated_grades]) + + with self.assertNumQueries(4): + self.command.handle(delete=True, modified_start=self._date_str_from_now(days=2), all_courses=True) + + self._assert_grades_absent_for_courses(self.course_keys[:num_courses_with_updated_grades]) + self._assert_grades_exist_for_courses(self.course_keys[num_courses_with_updated_grades:]) + + def test_reset_by_modified_start_end_date(self): + self._update_or_create_grades() + self._assert_grades_exist_for_courses(self.course_keys) + + with freeze_time(self._date_from_now(days=3)): + self._update_or_create_grades(self.course_keys[:2]) + with freeze_time(self._date_from_now(days=5)): + self._update_or_create_grades(self.course_keys[2:4]) + + with self.assertNumQueries(4): + self.command.handle( + delete=True, + modified_start=self._date_str_from_now(days=2), + modified_end=self._date_str_from_now(days=4), + all_courses=True, + ) + + # Only grades for courses modified within the 2->4 days + # should be deleted. 
+ self._assert_grades_absent_for_courses(self.course_keys[:2]) + self._assert_grades_exist_for_courses(self.course_keys[2:]) + + @patch('lms.djangoapps.grades.management.commands.reset_grades.log') + def test_dry_run_all_courses(self, mock_log): + self._update_or_create_grades() + self._assert_grades_exist_for_courses(self.course_keys) + + with self.assertNumQueries(2): + self.command.handle(dry_run=True, all_courses=True) + + self._assert_grades_exist_for_courses(self.course_keys) + self._assert_subsection_query_stat_logged( + mock_log, + num_rows=self.num_users * self.num_courses * self.num_subsections, + ) + self._assert_course_query_stat_logged( + mock_log, + num_rows=self.num_users * self.num_courses, + ) + + @patch('lms.djangoapps.grades.management.commands.reset_grades.log') + @ddt.data(1, 2, 3) + def test_dry_run_some_courses(self, num_courses_to_query, mock_log): + self._update_or_create_grades() + self._assert_grades_exist_for_courses(self.course_keys) + + with self.assertNumQueries(2): + self.command.handle( + dry_run=True, + courses=[unicode(course_key) for course_key in self.course_keys[:num_courses_to_query]] + ) + + self._assert_grades_exist_for_courses(self.course_keys) + self._assert_subsection_query_stat_logged( + mock_log, + num_rows=self.num_users * num_courses_to_query * self.num_subsections, + num_courses=num_courses_to_query, + ) + self._assert_course_query_stat_logged( + mock_log, + num_rows=self.num_users * num_courses_to_query, + num_courses=num_courses_to_query, + ) + + @patch('lms.djangoapps.grades.management.commands.reset_grades.log') + def test_reset_no_existing_grades(self, mock_log): + self._assert_grades_absent_for_courses(self.course_keys) + + with self.assertNumQueries(4): + self.command.handle(delete=True, all_courses=True) + + self._assert_grades_absent_for_courses(self.course_keys) + self._assert_subsection_delete_stat_logged(mock_log, num_rows=0) + self._assert_course_delete_stat_logged(mock_log, num_rows=0) + + def 
test_invalid_key(self): + with self.assertRaisesRegexp(CommandError, 'Invalid key specified.*invalid/key'): + self.command.handle(dry_run=True, courses=['invalid/key']) + + def test_no_run_mode(self): + with self.assertRaisesMessage(CommandError, 'Either --delete or --dry_run must be specified.'): + self.command.handle(all_courses=True) + + def test_both_run_modes(self): + with self.assertRaisesMessage(CommandError, 'Both --delete and --dry_run cannot be specified.'): + self.command.handle(all_courses=True, dry_run=True, delete=True) + + def test_no_course_mode(self): + with self.assertRaisesMessage(CommandError, 'Either --courses or --all_courses must be specified.'): + self.command.handle(dry_run=True) + + def test_both_course_modes(self): + with self.assertRaisesMessage(CommandError, 'Both --courses and --all_courses cannot be specified.'): + self.command.handle(dry_run=True, all_courses=True, courses=['some/course/key']) diff --git a/lms/djangoapps/grades/models.py b/lms/djangoapps/grades/models.py index fcf8c6da23..e95cda16e0 100644 --- a/lms/djangoapps/grades/models.py +++ b/lms/djangoapps/grades/models.py @@ -38,6 +38,38 @@ BLOCK_RECORD_LIST_VERSION = 1 BlockRecord = namedtuple('BlockRecord', ['locator', 'weight', 'raw_possible', 'graded']) +class DeleteGradesMixin(object): + """ + A Mixin class that provides functionality to delete grades. + """ + + @classmethod + def query_grades(cls, course_ids=None, modified_start=None, modified_end=None): + """ + Queries all the grades in the table, filtered by the provided arguments. + """ + kwargs = {} + + if course_ids: + kwargs['course_id__in'] = [course_id for course_id in course_ids] + + if modified_start: + if modified_end: + kwargs['modified__range'] = (modified_start, modified_end) + else: + kwargs['modified__gt'] = modified_start + + return cls.objects.filter(**kwargs) + + @classmethod + def delete_grades(cls, *args, **kwargs): + """ + Deletes all the grades in the table, filtered by the provided arguments. 
+ """ + query = cls.query_grades(*args, **kwargs) + query.delete() + + class BlockRecordList(tuple): """ An immutable ordered list of BlockRecord objects. @@ -208,7 +240,7 @@ class VisibleBlocks(models.Model): cls.bulk_create(non_existent_brls) -class PersistentSubsectionGrade(TimeStampedModel): +class PersistentSubsectionGrade(DeleteGradesMixin, TimeStampedModel): """ A django model tracking persistent grades at the subsection level. """ @@ -458,7 +490,7 @@ class PersistentSubsectionGrade(TimeStampedModel): ) -class PersistentCourseGrade(TimeStampedModel): +class PersistentCourseGrade(DeleteGradesMixin, TimeStampedModel): """ A django model tracking persistent course grades. """ diff --git a/lms/djangoapps/grades/new/course_grade.py b/lms/djangoapps/grades/new/course_grade.py index 01af70e70a..10a4240456 100644 --- a/lms/djangoapps/grades/new/course_grade.py +++ b/lms/djangoapps/grades/new/course_grade.py @@ -12,6 +12,7 @@ from lazy import lazy from lms.djangoapps.course_blocks.api import get_course_blocks from lms.djangoapps.grades.config.models import PersistentGradesEnabledFlag +from openedx.core.djangoapps.content.block_structure.api import get_block_structure_manager from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED from xmodule import block_metadata_utils @@ -323,17 +324,23 @@ class CourseGradeFactory(object): """ Factory class to create Course Grade objects """ - def create(self, student, course, read_only=True): + def create(self, student, course, collected_block_structure=None, read_only=True): """ Returns the CourseGrade object for the given student and course. If read_only is True, doesn't save any updates to the grades. Raises a PermissionDenied if the user does not have course access. 
""" - course_structure = get_course_blocks(student, course.location) + course_structure = get_course_blocks( + student, + course.location, + collected_block_structure=collected_block_structure, + ) + # if user does not have access to this course, throw an exception if not self._user_has_access_to_course(course_structure): raise PermissionDenied("User does not have access to this course") + return ( self._get_saved_grade(student, course, course_structure) or self._compute_and_update_grade(student, course, course_structure, read_only) @@ -351,11 +358,17 @@ class CourseGradeFactory(object): If an error occurred, course_grade will be None and err_msg will be an exception message. If there was no error, err_msg is an empty string. """ + # Pre-fetch the collected course_structure so: + # 1. Correctness: the same version of the course is used to + # compute the grade for all students. + # 2. Optimization: the collected course_structure is not + # retrieved from the data store multiple times. + + collected_block_structure = get_block_structure_manager(course.id).get_collected() for student in students: with dog_stats_api.timer('lms.grades.CourseGradeFactory.iter', tags=[u'action:{}'.format(course.id)]): - try: - course_grade = CourseGradeFactory().create(student, course) + course_grade = CourseGradeFactory().create(student, course, collected_block_structure) yield self.GradeResult(student, course_grade, "") except Exception as exc: # pylint: disable=broad-except diff --git a/lms/djangoapps/grades/tests/test_grades.py b/lms/djangoapps/grades/tests/test_grades.py index b1edde1da5..66505656ce 100644 --- a/lms/djangoapps/grades/tests/test_grades.py +++ b/lms/djangoapps/grades/tests/test_grades.py @@ -12,6 +12,7 @@ from courseware.model_data import set_score from courseware.tests.helpers import LoginEnrollmentTestCase from lms.djangoapps.course_blocks.api import get_course_blocks +from openedx.core.lib.block_structure.factory import BlockStructureFactory from 
openedx.core.djangolib.testing.utils import get_mock_request from student.tests.factories import UserFactory from student.models import CourseEnrollment @@ -68,7 +69,14 @@ class TestGradeIteration(SharedModuleStoreTestCase): """ No students have grade entries. """ - all_course_grades, all_errors = self._course_grades_and_errors_for(self.course, self.students) + with patch.object( + BlockStructureFactory, + 'create_from_cache', + wraps=BlockStructureFactory.create_from_cache + ) as mock_create_from_cache: + all_course_grades, all_errors = self._course_grades_and_errors_for(self.course, self.students) + self.assertEquals(mock_create_from_cache.call_count, 1) + self.assertEqual(len(all_errors), 0) for course_grade in all_course_grades.values(): self.assertIsNone(course_grade.letter_grade) diff --git a/lms/envs/aws.py b/lms/envs/aws.py index 340c2079fa..3e85617b0f 100644 --- a/lms/envs/aws.py +++ b/lms/envs/aws.py @@ -333,6 +333,8 @@ COMMENTS_SERVICE_URL = ENV_TOKENS.get("COMMENTS_SERVICE_URL", '') COMMENTS_SERVICE_KEY = ENV_TOKENS.get("COMMENTS_SERVICE_KEY", '') CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull') ZENDESK_URL = ENV_TOKENS.get('ZENDESK_URL', ZENDESK_URL) +ZENDESK_CUSTOM_FIELDS = ENV_TOKENS.get('ZENDESK_CUSTOM_FIELDS', ZENDESK_CUSTOM_FIELDS) + FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL") MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS) diff --git a/lms/envs/bok_choy.py b/lms/envs/bok_choy.py index 6c5651bbef..51b2d680ce 100644 --- a/lms/envs/bok_choy.py +++ b/lms/envs/bok_choy.py @@ -76,6 +76,17 @@ PIPELINE_JS_COMPRESSOR = None CELERY_ALWAYS_EAGER = True CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend' +BLOCK_STRUCTURES_SETTINGS = dict( + # We have CELERY_ALWAYS_EAGER set to True, so there's no asynchronous + # code running and the celery routing is unimportant. + # It does not make sense to retry. 
+    BLOCK_STRUCTURES_TASK_MAX_RETRIES=0,
+    # course publish task delay is irrelevant because the task is run synchronously
+    BLOCK_STRUCTURES_COURSE_PUBLISH_TASK_DELAY=0,
+    # retry delay is irrelevant because we never retry
+    BLOCK_STRUCTURES_TASK_DEFAULT_RETRY_DELAY=0,
+)
+
 ###################### Grade Downloads ######################
 GRADES_DOWNLOAD = {
     'STORAGE_TYPE': 'localfs',
diff --git a/lms/envs/common.py b/lms/envs/common.py
index 6c58a774cf..c826fd1b9f 100644
--- a/lms/envs/common.py
+++ b/lms/envs/common.py
@@ -1003,6 +1003,7 @@ FEEDBACK_SUBMISSION_EMAIL = None
 ZENDESK_URL = None
 ZENDESK_USER = None
 ZENDESK_API_KEY = None
+ZENDESK_CUSTOM_FIELDS = {}
 
 ##### EMBARGO #####
 EMBARGO_SITE_REDIRECT_URL = None
diff --git a/lms/static/sass/_build-lms-v1.scss b/lms/static/sass/_build-lms-v1.scss
index bb376fec9f..460e2dbc2d 100644
--- a/lms/static/sass/_build-lms-v1.scss
+++ b/lms/static/sass/_build-lms-v1.scss
@@ -35,6 +35,7 @@
 @import 'shared/modal';
 @import 'shared/activation_messages';
 @import 'shared/unsubscribe';
+@import 'shared/help-tab';
 
 // shared - platform
 @import 'multicourse/home';
diff --git a/lms/static/sass/shared-v2/_help-tab.scss b/lms/static/sass/shared-v2/_help-tab.scss
index 080c5f4e6c..99cf9b3836 100644
--- a/lms/static/sass/shared-v2/_help-tab.scss
+++ b/lms/static/sass/shared-v2/_help-tab.scss
@@ -78,3 +78,9 @@
     padding: 0 $baseline $baseline;
   }
 }
+
+.feedback-form-select {
+  background: $white;
+  margin-bottom: $baseline;
+  width: 100%;
+}
diff --git a/lms/static/sass/shared/_help-tab.scss b/lms/static/sass/shared/_help-tab.scss
new file mode 100644
index 0000000000..7af1407ac0
--- /dev/null
+++ b/lms/static/sass/shared/_help-tab.scss
@@ -0,0 +1,5 @@
+.feedback-form-select {
+  background: $white;
+  margin-bottom: $baseline;
+  width: 100%;
+}
diff --git a/lms/templates/help_modal.html b/lms/templates/help_modal.html
index abe1779d99..5198e893c1 100644
--- a/lms/templates/help_modal.html
+++ b/lms/templates/help_modal.html
@@ -99,15 +99,21 @@
from xmodule.tabs import CourseTabList % endif + +
    + % if course: + + % endif +
    + + ${_('Describe what you were doing when you encountered the issue. Include any details that will help us to troubleshoot, including error messages that you saw.')} + - % if course: - - % endif
    @@ -138,7 +144,12 @@ from xmodule.tabs import CourseTabList