Merge pull request #19420 from edx/diana/remove-datadog

Remove all references to datadog from our code.
This commit is contained in:
Diana Huang
2019-01-09 09:07:22 -05:00
committed by GitHub
44 changed files with 114 additions and 614 deletions

View File

@@ -36,7 +36,6 @@ from six import iteritems, text_type
from user_tasks.models import UserTaskArtifact, UserTaskStatus
from user_tasks.tasks import UserTask
import dogstats_wrapper as dog_stats_api
from contentstore.courseware_index import CoursewareSearchIndexer, LibrarySearchIndexer, SearchIndexingError
from contentstore.storage import course_import_export_storage
from contentstore.utils import initialize_permissions, reverse_usage_url
@@ -865,17 +864,13 @@ def import_olx(self, user_id, course_key_string, archive_path, archive_name, lan
self.status.set_state(u'Updating')
self.status.increment_completed_steps()
with dog_stats_api.timer(
u'courselike_import.time',
tags=[u"courselike:{}".format(courselike_key)]
):
courselike_items = import_func(
modulestore(), user.id,
settings.GITHUB_REPO_ROOT, [dirpath],
load_error_modules=False,
static_content_store=contentstore(),
target_id=courselike_key
)
courselike_items = import_func(
modulestore(), user.id,
settings.GITHUB_REPO_ROOT, [dirpath],
load_error_modules=False,
static_content_store=contentstore(),
target_id=courselike_key
)
new_location = courselike_items[0].location
LOGGER.debug(u'new course at %s', new_location)

View File

@@ -13,7 +13,6 @@ from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import UsageKey
from xblock.core import XBlock
import dogstats_wrapper as dog_stats_api
from contentstore.utils import reverse_course_url, reverse_library_url, reverse_usage_url
from edxmako.shortcuts import render_to_string
from models.settings.course_grading import CourseGradingModel
@@ -268,15 +267,6 @@ def create_xblock(parent_locator, user, category, display_name, boilerplate=None
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if category == 'static_tab':
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:create_xblock_static_tab",
u"course:{}".format(unicode(dest_usage_key.course_key)),
)
)
display_name = display_name or _("Empty") # Prevent name being None
course = store.get_course(dest_usage_key.course_key)
course.tabs.append(

View File

@@ -23,7 +23,6 @@ from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import Scope
import dogstats_wrapper as dog_stats_api
from cms.lib.xblock.authoring_mixin import VISIBILITY_VIEW
from contentstore.utils import (
ancestor_has_staff_lock,
@@ -934,15 +933,6 @@ def _delete_item(usage_key, user):
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if usage_key.block_type == 'static_tab':
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:_delete_item_static_tab",
u"course:{}".format(unicode(usage_key.course_key)),
)
)
course = store.get_course(usage_key.course_key)
existing_tabs = course.tabs or []
course.tabs = [tab for tab in existing_tabs if tab.get('url_slug') != usage_key.block_id]

View File

@@ -1030,9 +1030,6 @@ INSTALLED_APPS = [
'track',
'eventtracking.django.apps.EventTrackingConfig',
# Monitoring
'openedx.core.djangoapps.datadog.apps.DatadogConfig',
# For asset pipelining
'edxmako.apps.EdxMakoConfig',
'pipeline',

View File

@@ -22,7 +22,7 @@ HTTPS = 'off'
import logging
# Disable noisy loggers
for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']:
for pkg_name in ['track.contexts', 'track.middleware']:
logging.getLogger(pkg_name).setLevel(logging.CRITICAL)

View File

@@ -22,7 +22,6 @@ import inspect
from importlib import import_module
from django.conf import settings
from dogapi import dog_stats_api
from track.backends import BaseBackend
@@ -81,17 +80,14 @@ def _instantiate_backend_from_name(name, options):
return backend
@dog_stats_api.timed('track.send')
def send(event):
"""
Send an event object to all the initialized backends.
"""
dog_stats_api.increment('track.send.count')
for name, backend in backends.iteritems():
with dog_stats_api.timer('track.send.backend.{0}'.format(name)):
backend.send(event)
backend.send(event)
_initialize_backends_from_django_settings()

View File

@@ -53,7 +53,6 @@ def fake_support_backend_values(name, default=None): # pylint: disable=unused-a
ZENDESK_API_KEY="dummy",
ZENDESK_CUSTOM_FIELDS={},
)
@mock.patch("util.views.dog_stats_api")
@mock.patch("util.views._ZendeskApi", autospec=True)
class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
"""
@@ -105,7 +104,7 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
req.user = user
return views.submit_feedback(req)
def _assert_bad_request(self, response, field, zendesk_mock_class, datadog_mock):
def _assert_bad_request(self, response, field, zendesk_mock_class):
"""
Assert that the given `response` contains correct failure data.
@@ -119,9 +118,8 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self.assertIn("error", resp_json)
# There should be absolutely no interaction with Zendesk
self.assertFalse(zendesk_mock_class.return_value.mock_calls)
self.assertFalse(datadog_mock.mock_calls)
def _test_bad_request_omit_field(self, user, fields, omit_field, zendesk_mock_class, datadog_mock):
def _test_bad_request_omit_field(self, user, fields, omit_field, zendesk_mock_class):
"""
Invoke the view with a request missing a field and assert correctness.
@@ -133,9 +131,9 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
"""
filtered_fields = {k: v for (k, v) in fields.items() if k != omit_field}
resp = self._build_and_run_request(user, filtered_fields)
self._assert_bad_request(resp, omit_field, zendesk_mock_class, datadog_mock)
self._assert_bad_request(resp, omit_field, zendesk_mock_class)
def _test_bad_request_empty_field(self, user, fields, empty_field, zendesk_mock_class, datadog_mock):
def _test_bad_request_empty_field(self, user, fields, empty_field, zendesk_mock_class):
"""
Invoke the view with an empty field and assert correctness.
@@ -148,7 +146,7 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
altered_fields = fields.copy()
altered_fields[empty_field] = ""
resp = self._build_and_run_request(user, altered_fields)
self._assert_bad_request(resp, empty_field, zendesk_mock_class, datadog_mock)
self._assert_bad_request(resp, empty_field, zendesk_mock_class)
def _test_success(self, user, fields):
"""
@@ -210,39 +208,34 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
expected_zendesk_calls = [mock.call.create_ticket(ticket), mock.call.update_ticket(ticket_id, ticket_update)]
self.assertEqual(zendesk_mock.mock_calls, expected_zendesk_calls)
def _assert_datadog_called(self, datadog_mock, tags):
    """Check that the datadog mock recorded exactly one increment carrying *tags*."""
    expected_calls = [
        mock.call.increment(views.DATADOG_FEEDBACK_METRIC, tags=tags),
    ]
    self.assertEqual(datadog_mock.mock_calls, expected_calls)
def test_bad_request_anon_user_no_name(self, zendesk_mock_class, datadog_mock):
def test_bad_request_anon_user_no_name(self, zendesk_mock_class):
"""Test a request from an anonymous user not specifying `name`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class, datadog_mock)
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class)
def test_bad_request_anon_user_no_email(self, zendesk_mock_class, datadog_mock):
def test_bad_request_anon_user_no_email(self, zendesk_mock_class):
"""Test a request from an anonymous user not specifying `email`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class, datadog_mock)
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class)
def test_bad_request_anon_user_invalid_email(self, zendesk_mock_class, datadog_mock):
def test_bad_request_anon_user_invalid_email(self, zendesk_mock_class):
"""Test a request from an anonymous user specifying an invalid `email`."""
fields = self._anon_fields.copy()
fields["email"] = "This is not a valid email address!"
resp = self._build_and_run_request(self._anon_user, fields)
self._assert_bad_request(resp, "email", zendesk_mock_class, datadog_mock)
self._assert_bad_request(resp, "email", zendesk_mock_class)
def test_bad_request_anon_user_no_subject(self, zendesk_mock_class, datadog_mock):
def test_bad_request_anon_user_no_subject(self, zendesk_mock_class):
"""Test a request from an anonymous user not specifying `subject`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class)
def test_bad_request_anon_user_no_details(self, zendesk_mock_class, datadog_mock):
def test_bad_request_anon_user_no_details(self, zendesk_mock_class):
"""Test a request from an anonymous user not specifying `details`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class)
def test_valid_request_anon_user(self, zendesk_mock_class, datadog_mock):
def test_valid_request_anon_user(self, zendesk_mock_class):
"""
Test a valid request from an anonymous user.
@@ -269,10 +262,9 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self._test_success(user, fields)
self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update)
self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(fields["issue_type"])])
@mock.patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
def test_valid_request_anon_user_configuration_override(self, zendesk_mock_class, datadog_mock):
def test_valid_request_anon_user_configuration_override(self, zendesk_mock_class):
"""
Test a valid request from an anonymous user to a mocked out site with configuration override
@@ -300,11 +292,10 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self._test_success(user, fields)
self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update)
self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(fields["issue_type"])])
@data("course-v1:testOrg+testCourseNumber+testCourseRun", "", None)
@override_settings(ZENDESK_CUSTOM_FIELDS=TEST_ZENDESK_CUSTOM_FIELD_CONFIG)
def test_valid_request_anon_user_with_custom_fields(self, course_id, zendesk_mock_class, datadog_mock):
def test_valid_request_anon_user_with_custom_fields(self, course_id, zendesk_mock_class):
"""
Test a valid request from an anonymous user when configured to use Zendesk Custom Fields.
@@ -324,13 +315,11 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
zendesk_mock_instance.create_ticket.return_value = ticket_id
zendesk_tags = [fields["issue_type"], "LMS"]
datadog_tags = ["issue_type:{}".format(fields["issue_type"])]
zendesk_custom_fields = None
if course_id:
# FIXME the tests rely on the tags being in this specific order, which doesn't seem
# reliable given that the view builds the list by iterating over a dictionary.
zendesk_tags.insert(0, course_id)
datadog_tags.insert(0, "course_id:{}".format(course_id))
zendesk_custom_fields = [
{"id": TEST_ZENDESK_CUSTOM_FIELD_CONFIG["course_id"], "value": course_id}
]
@@ -349,19 +338,18 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self._test_success(user, fields)
self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update)
self._assert_datadog_called(datadog_mock, datadog_tags)
def test_bad_request_auth_user_no_subject(self, zendesk_mock_class, datadog_mock):
def test_bad_request_auth_user_no_subject(self, zendesk_mock_class):
"""Test a request from an authenticated user not specifying `subject`."""
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class)
def test_bad_request_auth_user_no_details(self, zendesk_mock_class, datadog_mock):
def test_bad_request_auth_user_no_details(self, zendesk_mock_class):
"""Test a request from an authenticated user not specifying `details`."""
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class)
def test_valid_request_auth_user(self, zendesk_mock_class, datadog_mock):
def test_valid_request_auth_user(self, zendesk_mock_class):
"""
Test a valid request from an authenticated user.
@@ -388,7 +376,6 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self._test_success(user, fields)
self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update)
self._assert_datadog_called(datadog_mock, [])
@data(
("course-v1:testOrg+testCourseNumber+testCourseRun", True),
@@ -398,7 +385,7 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
)
@unpack
@override_settings(ZENDESK_CUSTOM_FIELDS=TEST_ZENDESK_CUSTOM_FIELD_CONFIG)
def test_valid_request_auth_user_with_custom_fields(self, course_id, enrolled, zendesk_mock_class, datadog_mock):
def test_valid_request_auth_user_with_custom_fields(self, course_id, enrolled, zendesk_mock_class):
"""
Test a valid request from an authenticated user when configured to use Zendesk Custom Fields.
@@ -419,13 +406,11 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
zendesk_mock_instance.create_ticket.return_value = ticket_id
zendesk_tags = ["LMS"]
datadog_tags = []
zendesk_custom_fields = None
if course_id:
# FIXME the tests rely on the tags being in this specific order, which doesn't seem
# reliable given that the view builds the list by iterating over a dictionary.
zendesk_tags.insert(0, course_id)
datadog_tags.insert(0, "course_id:{}".format(course_id))
zendesk_custom_fields = [
{"id": TEST_ZENDESK_CUSTOM_FIELD_CONFIG["course_id"], "value": course_id}
]
@@ -454,7 +439,6 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self._test_success(user, fields)
self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update)
self._assert_datadog_called(datadog_mock, datadog_tags)
@httpretty.activate
@data(
@@ -464,7 +448,7 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
@unpack
@override_settings(ZENDESK_CUSTOM_FIELDS=TEST_ZENDESK_CUSTOM_FIELD_CONFIG)
@mock.patch.dict("django.conf.settings.FEATURES", dict(ENABLE_ENTERPRISE_INTEGRATION=True))
def test_valid_request_auth_user_with_enterprise_info(self, course_id, enrolled, zendesk_mock_class, datadog_mock):
def test_valid_request_auth_user_with_enterprise_info(self, course_id, enrolled, zendesk_mock_class):
"""
Test a valid request from an authenticated user with enterprise tags.
"""
@@ -480,12 +464,10 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
zendesk_mock_instance.create_ticket.return_value = ticket_id
zendesk_tags = ["enterprise_learner", "LMS"]
datadog_tags = ['learner_type:enterprise_learner']
zendesk_custom_fields = []
if course_id:
zendesk_tags.insert(0, course_id)
datadog_tags.insert(0, "course_id:{}".format(course_id))
zendesk_custom_fields.append({"id": TEST_ZENDESK_CUSTOM_FIELD_CONFIG["course_id"], "value": course_id})
if enrolled is not None:
enrollment = CourseEnrollmentFactory.create(
@@ -518,12 +500,11 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
ticket_update = self._build_zendesk_ticket_update(TEST_REQUEST_HEADERS, user.username)
self._test_success(user, fields)
self._assert_zendesk_called(zendesk_mock_instance, ticket_id, ticket, ticket_update)
self._assert_datadog_called(datadog_mock, datadog_tags)
@httpretty.activate
@override_settings(ZENDESK_CUSTOM_FIELDS=TEST_ZENDESK_CUSTOM_FIELD_CONFIG)
@mock.patch.dict("django.conf.settings.FEATURES", dict(ENABLE_ENTERPRISE_INTEGRATION=True))
def test_request_with_anonymous_user_without_enterprise_info(self, zendesk_mock_class, datadog_mock):
def test_request_with_anonymous_user_without_enterprise_info(self, zendesk_mock_class):
"""
Test tags related to enterprise should not be there in case an unauthenticated user.
"""
@@ -533,22 +514,13 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.return_value = ticket_id
datadog_valid_tags = ["issue_type:{}".format(self._anon_fields["issue_type"])]
datadog_invalid_tags = ['learner_type:enterprise_learner']
resp = self._build_and_run_request(user, self._anon_fields)
self.assertEqual(resp.status_code, 200)
expected_datadog_calls = [mock.call.increment(views.DATADOG_FEEDBACK_METRIC, tags=datadog_valid_tags)]
self.assertEqual(datadog_mock.mock_calls, expected_datadog_calls)
not_expected_datadog_calls = [mock.call.increment(views.DATADOG_FEEDBACK_METRIC, tags=datadog_invalid_tags)]
self.assertNotEqual(datadog_mock.mock_calls, not_expected_datadog_calls)
@httpretty.activate
@override_settings(ZENDESK_CUSTOM_FIELDS=TEST_ZENDESK_CUSTOM_FIELD_CONFIG)
@mock.patch.dict("django.conf.settings.FEATURES", dict(ENABLE_ENTERPRISE_INTEGRATION=True))
def test_tags_in_request_with_auth_user_with_enterprise_info(self, zendesk_mock_class, datadog_mock):
def test_tags_in_request_with_auth_user_with_enterprise_info(self, zendesk_mock_class):
"""
Test tags related to enterprise should be there in case the request is generated by an authenticated user.
"""
@@ -558,13 +530,11 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.return_value = ticket_id
datadog_valid_tags = ['learner_type:enterprise_learner']
resp = self._build_and_run_request(user, self._auth_fields)
self.assertEqual(resp.status_code, 200)
self._assert_datadog_called(datadog_mock, datadog_valid_tags)
def test_get_request(self, zendesk_mock_class, datadog_mock):
def test_get_request(self, zendesk_mock_class):
"""Test that a GET results in a 405 even with all required fields"""
req = self._request_factory.get("/submit_feedback", data=self._anon_fields)
req.user = self._anon_user
@@ -574,9 +544,8 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self.assertEqual(resp["Allow"], "POST")
# There should be absolutely no interaction with Zendesk
self.assertFalse(zendesk_mock_class.mock_calls)
self.assertFalse(datadog_mock.mock_calls)
def test_zendesk_error_on_create(self, zendesk_mock_class, datadog_mock):
def test_zendesk_error_on_create(self, zendesk_mock_class):
"""
Test Zendesk returning an error on ticket creation.
@@ -588,9 +557,8 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(resp.status_code, 500)
self.assertFalse(resp.content)
self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(self._anon_fields["issue_type"])])
def test_zendesk_error_on_update(self, zendesk_mock_class, datadog_mock):
def test_zendesk_error_on_update(self, zendesk_mock_class):
"""
Test for Zendesk returning an error on ticket update.
@@ -603,10 +571,9 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
zendesk_mock_instance.update_ticket.side_effect = err
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(resp.status_code, 200)
self._assert_datadog_called(datadog_mock, ["issue_type:{}".format(self._anon_fields["issue_type"])])
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_FEEDBACK_SUBMISSION": False})
def test_not_enabled(self, zendesk_mock_class, datadog_mock):
def test_not_enabled(self, zendesk_mock_class):
"""
Test for Zendesk submission not enabled in `settings`.
@@ -615,7 +582,7 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
with self.assertRaises(Http404):
self._build_and_run_request(self._anon_user, self._anon_fields)
def test_zendesk_not_configured(self, zendesk_mock_class, datadog_mock):
def test_zendesk_not_configured(self, zendesk_mock_class):
"""
Test for Zendesk not fully configured in `settings`.
@@ -632,7 +599,7 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
test_case("django.conf.settings.ZENDESK_API_KEY")
@mock.patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_support_backend_values)
def test_valid_request_over_email(self, zendesk_mock_class, datadog_mock): # pylint: disable=unused-argument
def test_valid_request_over_email(self, zendesk_mock_class): # pylint: disable=unused-argument
with mock.patch("util.views.send_mail") as patched_send_email:
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(patched_send_email.call_count, 1)
@@ -640,7 +607,7 @@ class SubmitFeedbackTest(EnterpriseServiceMockMixin, TestCase):
self.assertEqual(resp.status_code, 200)
@mock.patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_support_backend_values)
def test_exception_request_over_email(self, zendesk_mock_class, datadog_mock): # pylint: disable=unused-argument
def test_exception_request_over_email(self, zendesk_mock_class): # pylint: disable=unused-argument
with mock.patch("util.views.send_mail", side_effect=SMTPException) as patched_send_email:
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(patched_send_email.call_count, 1)

View File

@@ -18,7 +18,6 @@ from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
import calc
import dogstats_wrapper as dog_stats_api
import track.views
from edxmako.shortcuts import render_to_response, render_to_string
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
@@ -368,11 +367,6 @@ def _record_feedback_in_zendesk(
return True
def _record_feedback_in_datadog(tags):
    """Count one feedback submission in datadog, labelled with the given tags."""
    formatted_tags = []
    for tag_key, tag_value in tags.items():
        # datadog expects "key:value" formatted tag strings.
        formatted_tags.append(u"{k}:{v}".format(k=tag_key, v=tag_value))
    dog_stats_api.increment(DATADOG_FEEDBACK_METRIC, tags=formatted_tags)
def get_feedback_form_context(request):
"""
Extract the submitted form fields to be used as a context for
@@ -495,8 +489,6 @@ def submit_feedback(request):
custom_fields=custom_fields
)
_record_feedback_in_datadog(context["tags"])
return HttpResponse(status=(200 if success else 500))

View File

@@ -38,7 +38,6 @@ from six import text_type
import capa.safe_exec as safe_exec
import capa.xqueue_interface as xqueue_interface
import dogstats_wrapper as dog_stats_api
# specific library imports
from calc import UndefinedVariable, UnmatchedParenthesis, evaluator
from cmath import isnan
@@ -182,14 +181,14 @@ class LoncapaResponse(object):
msg = "%s: cannot have input field %s" % (
unicode(self), abox.tag)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
xml, 'sourceline', '[unavailable]')
raise LoncapaProblemError(msg)
if self.max_inputfields and len(inputfields) > self.max_inputfields:
msg = "%s: cannot have more than %s input fields" % (
unicode(self), self.max_inputfields)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
xml, 'sourceline', '[unavailable]')
raise LoncapaProblemError(msg)
for prop in self.required_attributes:
@@ -197,7 +196,7 @@ class LoncapaResponse(object):
msg = "Error in problem specification: %s missing required attribute %s" % (
unicode(self), prop)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
xml, 'sourceline', '[unavailable]')
raise LoncapaProblemError(msg)
# ordered list of answer_id values for this response
@@ -583,7 +582,7 @@ class LoncapaResponse(object):
# First try wrapping the text in a <div> and parsing
# it as an XHTML tree
try:
response_msg_div = etree.XML('<div>%s</div>' % str(response_msg))
response_msg_div = etree.XML(HTML('<div>{}</div>').format(HTML(str(response_msg))))
# If we can't do that, create the <div> and set the message
# as the text of the <div>
@@ -2068,7 +2067,7 @@ class StringResponse(LoncapaResponse):
_ = self.capa_system.i18n.ugettext
# Translators: Separator used in StringResponse to display multiple answers.
# Example: "Answer: Answer_1 or Answer_2 or Answer_3".
separator = u' <b>{}</b> '.format(_('or'))
separator = HTML(' <b>{}</b> ').format(_('or'))
return {self.answer_id: separator.join(self.correct_answer)}
#-----------------------------------------------------------------------------
@@ -2207,7 +2206,7 @@ class CustomResponse(LoncapaResponse):
# default to no error message on empty answer (to be consistent with other
# responsetypes) but allow author to still have the old behavior by setting
# empty_answer_err attribute
msg = (u'<span class="inline-error">{0}</span>'.format(_(u'No answer entered!'))
msg = (HTML(u'<span class="inline-error">{0}</span>').format(_(u'No answer entered!'))
if self.xml.get('empty_answer_err') else '')
return CorrectMap(idset[0], 'incorrect', msg=msg)
@@ -2462,7 +2461,7 @@ class CustomResponse(LoncapaResponse):
# When we parse *msg* using etree, there needs to be a root
# element, so we wrap the *msg* text in <html> tags
msg = '<html>' + msg + '</html>'
msg = HTML('<html>{msg}</html>').format(msg=HTML(msg))
# Replace < characters
msg = msg.replace('&#60;', '&lt;')
@@ -2752,13 +2751,6 @@ class CodeResponse(LoncapaResponse):
_ = self.capa_system.i18n.ugettext
dog_stats_api.increment(xqueue_interface.XQUEUE_METRIC_NAME, tags=[
'action:update_score',
'correct:{}'.format(correct)
])
dog_stats_api.histogram(xqueue_interface.XQUEUE_METRIC_NAME + '.update_score.points_earned', points)
if not valid_score_msg:
# Translators: 'grader' refers to the edX automatic code grader.
error_msg = _('Invalid grader reply. Please contact the course staff.')
@@ -2791,7 +2783,7 @@ class CodeResponse(LoncapaResponse):
return oldcmap
def get_answers(self):
anshtml = '<span class="code-answer"><pre><code>%s</code></pre></span>' % self.answer
anshtml = HTML('<span class="code-answer"><pre><code>{}</code></pre></span>').format(self.answer)
return {self.answer_id: anshtml}
def get_initial_display(self):
@@ -2908,7 +2900,7 @@ class ExternalResponse(LoncapaResponse):
msg = '%s: Missing answer script code for externalresponse' % unicode(
self)
msg += "\nSee XML source line %s" % getattr(
self.xml, 'sourceline', '<unavailable>')
self.xml, 'sourceline', '[unavailable]')
raise LoncapaProblemError(msg)
self.tests = xml.get('tests')
@@ -2984,7 +2976,8 @@ class ExternalResponse(LoncapaResponse):
self.answer_ids), ['incorrect'] * len(idset))))
cmap.set_property(
self.answer_ids[0], 'msg',
'<span class="inline-error">%s</span>' % str(err).replace('<', '&lt;'))
Text('<span class="inline-error">{}</span>').format(str(err))
)
return cmap
awd = rxml.find('awarddetail').text
@@ -3012,8 +3005,7 @@ class ExternalResponse(LoncapaResponse):
except Exception as err: # pylint: disable=broad-except
log.error('Error %s', err)
if self.capa_system.DEBUG:
msg = '<span class="inline-error">%s</span>' % str(
err).replace('<', '&lt;')
msg = HTML('<span class="inline-error">{}</span>').format(err)
exans = [''] * len(self.answer_ids)
exans[0] = msg

View File

@@ -4,7 +4,6 @@ from codejail.safe_exec import safe_exec as codejail_safe_exec
from codejail.safe_exec import not_safe_exec as codejail_not_safe_exec
from codejail.safe_exec import json_safe, SafeExecException
from . import lazymod
from dogapi import dog_stats_api
from six import text_type
import hashlib
@@ -74,7 +73,6 @@ def update_hash(hasher, obj):
hasher.update(repr(obj))
@dog_stats_api.timed('capa.safe_exec.time')
def safe_exec(
code,
globals_dict,

View File

@@ -7,7 +7,6 @@ import logging
import requests
import dogstats_wrapper as dog_stats_api
log = logging.getLogger(__name__)
dateformat = '%Y%m%d%H%M%S'
@@ -93,10 +92,6 @@ class XQueueInterface(object):
# log the send to xqueue
header_info = json.loads(header)
queue_name = header_info.get('queue_name', u'')
dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
u'action:send_to_queue',
u'queue:{}'.format(queue_name)
])
# Attempt to send to queue
(error, msg) = self._send_to_queue(header, body, files_to_upload)

View File

@@ -1 +0,0 @@
from .wrapper import increment, histogram, timer

View File

@@ -1,47 +0,0 @@
"""
Wrapper for dog_stats_api, ensuring tags are valid.
See: http://help.datadoghq.com/customer/portal/questions/908720-api-guidelines
"""
from dogapi import dog_stats_api
def _clean_tags(tags):
"""
Helper method that does the actual cleaning of tags for sending to statsd.
1. Handles any type of tag - a plain string, UTF-8 binary, or a unicode
string, and converts it to UTF-8 encoded bytestring needed by statsd.
2. Escape pipe character - used by statsd as a field separator.
3. Trim to 200 characters (DataDog API limitation)
"""
def clean(tagstr):
if isinstance(tagstr, str):
return tagstr.replace('|', '_')[:200]
return unicode(tagstr).replace('|', '_')[:200].encode("utf-8")
return [clean(t) for t in tags]
def increment(metric_name, *args, **kwargs):
    """
    Proxy to dog_stats_api.increment, sanitizing the ``tags`` keyword first.
    """
    forwarded = dict(kwargs)
    if "tags" in forwarded:
        forwarded["tags"] = _clean_tags(forwarded["tags"])
    dog_stats_api.increment(metric_name, *args, **forwarded)
def histogram(metric_name, *args, **kwargs):
    """
    Proxy to dog_stats_api.histogram, sanitizing the ``tags`` keyword first.
    """
    forwarded = dict(kwargs)
    if "tags" in forwarded:
        forwarded["tags"] = _clean_tags(forwarded["tags"])
    dog_stats_api.histogram(metric_name, *args, **forwarded)
def timer(metric_name, *args, **kwargs):
    """
    Proxy to dog_stats_api.timer, sanitizing the ``tags`` keyword first.

    Returns whatever dog_stats_api.timer returns (its timer context/decorator).
    """
    forwarded = dict(kwargs)
    if "tags" in forwarded:
        forwarded["tags"] = _clean_tags(forwarded["tags"])
    return dog_stats_api.timer(metric_name, *args, **forwarded)

View File

@@ -1,10 +0,0 @@
from setuptools import setup
# Packaging metadata for the dogstats_wrapper helper package.
setup(
name="dogstats_wrapper",
version="0.1",
packages=["dogstats_wrapper"],
install_requires=[
# Thin wrapper around the dogapi client, so dogapi is the sole runtime dependency.
"dogapi",
],
)

View File

@@ -1,5 +1,4 @@
"""Implements basics of Capa, including class CapaModule."""
import cgi
import copy
import datetime
import hashlib
@@ -12,11 +11,6 @@ import sys
import traceback
from django.conf import settings
# We don't want to force a dependency on datadog, so make the import conditional
try:
import dogstats_wrapper as dog_stats_api
except ImportError:
dog_stats_api = None
from pytz import utc
from django.utils.encoding import smart_text
from six import text_type
@@ -273,20 +267,18 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
# TODO (vshnayder): This logic should be general, not here--and may
# want to preserve the data instead of replacing it.
# e.g. in the CMS
msg = u'<p>{msg}</p>'.format(msg=cgi.escape(msg))
msg += u'<p><pre>{tb}</pre></p>'.format(
msg = HTML(u'<p>{msg}</p>').format(msg=msg)
msg += HTML(u'<p><pre>{tb}</pre></p>').format(
# just the traceback, no message - it is already present above
tb=cgi.escape(
u''.join(
['Traceback (most recent call last):\n'] +
traceback.format_tb(sys.exc_info()[2])
)
tb=u''.join(
['Traceback (most recent call last):\n'] +
traceback.format_tb(sys.exc_info()[2])
)
)
# create a dummy problem with error message instead of failing
problem_text = (
u'<problem><text><span class="inline-error">'
u'Problem {url} has an error:</span>{msg}</text></problem>'.format(
HTML(u'<problem><text><span class="inline-error">'
u'Problem {url} has an error:</span>{msg}</text></problem>').format(
url=text_type(self.location),
msg=msg,
)
@@ -548,13 +540,14 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
# TODO (vshnayder): another switch on DEBUG.
if self.runtime.DEBUG:
msg = (
msg = HTML(
u'[courseware.capa.capa_module] <font size="+1" color="red">'
u'Failed to generate HTML for problem {url}</font>'.format(
url=cgi.escape(text_type(self.location)))
u'Failed to generate HTML for problem {url}</font>'
).format(
url=text_type(self.location)
)
msg += u'<p>Error:</p><p><pre>{msg}</pre></p>'.format(msg=cgi.escape(text_type(err)))
msg += u'<p><pre>{tb}</pre></p>'.format(tb=traceback.format_exc())
msg += HTML(u'<p>Error:</p><p><pre>{msg}</pre></p>').format(msg=text_type(err))
msg += HTML(u'<p><pre>{tb}</pre></p>').format(tb=traceback.format_exc())
html = msg
else:
@@ -581,19 +574,19 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
self.set_score(self.score_from_lcp())
# Prepend a scary warning to the student
_ = self.runtime.service(self, "i18n").ugettext
warning_msg = _("Warning: The problem has been reset to its initial state!")
warning = '<div class="capa_reset"> <h2> ' + warning_msg + '</h2>'
warning_msg = Text(_("Warning: The problem has been reset to its initial state!"))
warning = HTML('<div class="capa_reset"> <h2>{}</h2>').format(warning_msg)
# Translators: Following this message, there will be a bulleted list of items.
warning_msg = _("The problem's state was corrupted by an invalid submission. The submission consisted of:")
warning += warning_msg + '<ul>'
warning += HTML('{}<ul>').format(warning_msg)
for student_answer in student_answers.values():
if student_answer != '':
warning += '<li>' + cgi.escape(student_answer) + '</li>'
warning += HTML('<li>{}</li>').format(student_answer)
warning_msg = _('If this error persists, please contact the course staff.')
warning += '</ul>' + warning_msg + '</div>'
warning += HTML('</ul>{}</div>').format(warning_msg)
html = warning
try:
@@ -745,9 +738,9 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
html = self.runtime.render_template('problem.html', context)
if encapsulate:
html = u'<div id="problem_{id}" class="problem" data-url="{ajax_url}">'.format(
id=self.location.html_id(), ajax_url=self.runtime.ajax_url
) + html + "</div>"
html = HTML(u'<div id="problem_{id}" class="problem" data-url="{ajax_url}">{html}</div>').format(
id=self.location.html_id(), ajax_url=self.runtime.ajax_url, html=HTML(html)
)
# Now do all the substitutions which the LMS module_render normally does, but
# we need to do here explicitly since we can get called for our HTML via AJAX
@@ -834,7 +827,7 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
'correcthint', 'regexphint', 'additional_answer', 'stringequalhint', 'compoundhint',
'stringequalhint']
for tag in tags:
html = re.sub(r'<%s.*?>.*?</%s>' % (tag, tag), '', html, flags=re.DOTALL)
html = re.sub(r'<%s.*?>.*?</%s>' % (tag, tag), '', html, flags=re.DOTALL) # xss-lint: disable=python-interpolate-html
# Some of these tags span multiple lines
# Note: could probably speed this up by calling sub() once with a big regex
# vs. simply calling sub() many times as we have here.
@@ -1179,16 +1172,12 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
if self.closed():
event_info['failure'] = 'closed'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:closed'])
raise NotFoundError(_("Problem is closed."))
# Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == RANDOMIZATION.ALWAYS:
event_info['failure'] = 'unreset'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:unreset'])
raise NotFoundError(_("Problem must be reset before it can be submitted again."))
# Problem queued. Students must wait a specified waittime before they are allowed to submit
@@ -1285,18 +1274,6 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
self.track_function_unmask('problem_check', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:success'])
if published_grade['max_grade'] != 0:
dog_stats_api.histogram(
metric_name('correct_pct'),
float(published_grade['grade']) / published_grade['max_grade'],
)
dog_stats_api.histogram(
metric_name('attempts'),
self.attempts,
)
# render problem into HTML
html = self.get_problem_html(encapsulate=False, submit_notification=True)

View File

@@ -7,7 +7,6 @@ import sys
from lxml import etree
from pkg_resources import resource_string
import dogstats_wrapper as dog_stats_api
from capa import responsetypes
from xmodule.exceptions import NotFoundError, ProcessingError
from xmodule.raw_module import RawDescriptor
@@ -194,10 +193,6 @@ class CapaDescriptor(CapaFields, RawDescriptor):
# edited in the cms
@classmethod
def backcompat_paths(cls, path):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:capa_descriptor_backcompat_paths"]
)
return [
'problems/' + path[8:],
path[8:],

View File

@@ -15,7 +15,6 @@ from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import Boolean, List, Scope, String
import dogstats_wrapper as dog_stats_api
from xmodule.contentstore.content import StaticContent
from xmodule.editing_module import EditingDescriptor
from xmodule.edxnotes_utils import edxnotes
@@ -155,12 +154,6 @@ class HtmlDescriptor(HtmlBlock, XmlDescriptor, EditingDescriptor): # pylint: di
"""
Get paths for html and xml files.
"""
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:html_descriptor_backcompat_paths"]
)
if filepath.endswith('.html.xml'):
filepath = filepath[:-9] + '.html' # backcompat--look for html instead of xml
if filepath.endswith('.html.html'):
@@ -251,11 +244,6 @@ class HtmlDescriptor(HtmlBlock, XmlDescriptor, EditingDescriptor): # pylint: di
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:html_descriptor_load_definition"]
)
candidates = cls.backcompat_paths(filepath)
# log.debug("candidates = {0}".format(candidates))
for candidate in candidates:

View File

@@ -20,7 +20,6 @@ try:
except ImportError:
DJANGO_AVAILABLE = False
import dogstats_wrapper as dog_stats_api
import logging
from contracts import check, new_contract
@@ -137,27 +136,6 @@ class QueryTimer(object):
end = time()
tags = tagger.tags
tags.append('course:{}'.format(course_context))
for name, size in tagger.measures:
dog_stats_api.histogram(
'{}.{}'.format(metric_name, name),
size,
timestamp=end,
tags=[tag for tag in tags if not tag.startswith('{}:'.format(metric_name))],
sample_rate=tagger.sample_rate,
)
dog_stats_api.histogram(
'{}.duration'.format(metric_name),
end - start,
timestamp=end,
tags=tags,
sample_rate=tagger.sample_rate,
)
dog_stats_api.increment(
metric_name,
timestamp=end,
tags=tags,
sample_rate=tagger.sample_rate,
)
TIMER = QueryTimer(__name__, 0.01)

View File

@@ -15,7 +15,6 @@ from uuid import uuid4
from factory import Factory, Sequence, lazy_attribute_sequence, lazy_attribute
from factory.errors import CyclicDefinitionError
from mock import patch
import dogstats_wrapper as dog_stats_api
from opaque_keys.edx.locator import BlockUsageLocator
from opaque_keys.edx.keys import UsageKey
@@ -392,14 +391,6 @@ class ItemFactory(XModuleFactory):
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if category == 'static_tab':
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:itemfactory_create_static_tab",
u"block:{}".format(location.block_type),
)
)
course = store.get_course(location.course_key)
course.tabs.append(
CourseTab.load('static_tab', name='Static Tab', url_slug=location.block_id)

View File

@@ -33,8 +33,6 @@ from xblock.field_data import DictFieldData
from xblock.runtime import DictKeyValueStore
from xblock.fields import ScopeIds
import dogstats_wrapper as dog_stats_api
from .exceptions import ItemNotFoundError
from .inheritance import compute_inherited_metadata, inheriting_field_data, InheritanceKeyValueStore
@@ -54,11 +52,6 @@ def clean_out_mako_templating(xml_string):
orig_xml = xml_string
xml_string = xml_string.replace('%include', 'include')
xml_string = re.sub(r"(?m)^\s*%.*$", '', xml_string)
if orig_xml != xml_string:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xml_clean_out_mako_templating"]
)
return xml_string
@@ -125,14 +118,6 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem):
def fallback_name(orig_name=None):
"""Return the fallback name for this module. This is a function instead of a variable
because we want it to be lazy."""
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:import_system_fallback_name",
u"name:{}".format(orig_name),
)
)
if looks_like_fallback(orig_name):
# We're about to re-hash, in case something changed, so get rid of the tag_ and hash
orig_name = orig_name[len(tag) + 1:-12]
@@ -423,7 +408,7 @@ class XMLModuleStore(ModuleStoreReadBase):
'''
String representation - for debugging
'''
return '<%s data_dir=%r, %d courselikes, %d modules>' % (
return '<%s data_dir=%r, %d courselikes, %d modules>' % ( # xss-lint: disable=python-interpolate-html
self.__class__.__name__, self.data_dir, len(self.courses), len(self.modules)
)
@@ -506,32 +491,12 @@ class XMLModuleStore(ModuleStoreReadBase):
# VS[compat]: remove once courses use the policy dirs.
if policy == {}:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:xml_load_course_policy_dir",
u"course:{}".format(course),
)
)
old_policy_path = self.data_dir / course_dir / 'policies' / '{0}.json'.format(url_name)
policy = self.load_policy(old_policy_path, tracker)
else:
policy = {}
# VS[compat] : 'name' is deprecated, but support it for now...
if course_data.get('name'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:xml_load_course_course_data_name",
u"course:{}".format(course_data.get('course')),
u"org:{}".format(course_data.get('org')),
u"name:{}".format(course_data.get('name')),
)
)
url_name = BlockUsageLocator.clean(course_data.get('name'))
tracker("'name' is deprecated for module xml. Please use "
"display_name and url_name.")
@@ -719,14 +684,6 @@ class XMLModuleStore(ModuleStoreReadBase):
# Hack because we need to pull in the 'display_name' for static tabs (because we need to edit them)
# from the course policy
if category == "static_tab":
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:xml_load_extra_content_static_tab",
u"course_dir:{}".format(course_dir),
)
)
tab = CourseTabList.get_tab_by_slug(tab_list=course_descriptor.tabs, url_slug=slug)
if tab:
module.display_name = tab.name

View File

@@ -4,7 +4,6 @@ Template module
from lxml import etree
from mako.template import Template
import dogstats_wrapper as dog_stats_api
from xmodule.raw_module import RawDescriptor
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT, XModule
@@ -48,11 +47,6 @@ class CustomTagDescriptor(RawDescriptor):
template_name = xmltree.attrib['impl']
else:
# VS[compat] backwards compatibility with old nested customtag structure
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:customtag_descriptor_render_template"]
)
child_impl = xmltree.find('impl')
if child_impl is not None:
template_name = child_impl.text

View File

@@ -37,7 +37,6 @@ from xmodule.util.xmodule_django import add_webpack_to_fragment
from opaque_keys.edx.keys import UsageKey
from opaque_keys.edx.asides import AsideUsageKeyV2, AsideDefinitionKeyV2
from xmodule.exceptions import UndefinedContext
import dogstats_wrapper as dog_stats_api
from openedx.core.djangolib.markup import HTML
@@ -516,14 +515,6 @@ class XModuleMixin(XModuleFields, XBlock):
child = super(XModuleMixin, self).get_child(usage_id)
except ItemNotFoundError:
log.warning(u'Unable to load item %s, skipping', usage_id)
dog_stats_api.increment(
"xmodule.item_not_found_error",
tags=[
u"course_id:{}".format(usage_id.course_key),
u"block_type:{}".format(usage_id.block_type),
u"parent_block_type:{}".format(self.location.block_type),
]
)
return None
if child is None:
@@ -1090,12 +1081,6 @@ class XModuleDescriptor(HTMLSnippet, ResourceTemplates, XModuleMixin):
@classmethod
def _translate(cls, key):
'VS[compat]'
if key in cls.metadata_translations:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmodule_descriptor_translate"]
)
return cls.metadata_translations.get(key, key)
# ================================= XML PARSING ============================
@@ -1351,13 +1336,6 @@ class MetricsMixin(object):
u'block_type:{}'.format(block.scope_ids.block_type),
u'block_family:{}'.format(block.entry_point),
]
dog_stats_api.increment(XMODULE_METRIC_NAME, tags=tags, sample_rate=XMODULE_METRIC_SAMPLE_RATE)
dog_stats_api.histogram(
XMODULE_DURATION_METRIC_NAME,
duration,
tags=tags,
sample_rate=XMODULE_METRIC_SAMPLE_RATE,
)
log.debug(
"%.3fs - render %s.%s (%s)",
duration,
@@ -1388,13 +1366,6 @@ class MetricsMixin(object):
u'block_type:{}'.format(block.scope_ids.block_type),
u'block_family:{}'.format(block.entry_point),
]
dog_stats_api.increment(XMODULE_METRIC_NAME, tags=tags, sample_rate=XMODULE_METRIC_SAMPLE_RATE)
dog_stats_api.histogram(
XMODULE_DURATION_METRIC_NAME,
duration,
tags=tags,
sample_rate=XMODULE_METRIC_SAMPLE_RATE
)
log.debug(
"%.3fs - handle %s.%s (%s)",
duration,

View File

@@ -10,7 +10,6 @@ from xblock.core import XML_NAMESPACES
from xblock.fields import Dict, Scope, ScopeIds
from xblock.runtime import KvsFieldData
import dogstats_wrapper as dog_stats_api
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT, XModuleDescriptor
@@ -233,11 +232,6 @@ class XmlParserMixin(object):
filepath = ''
aside_children = []
else:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_filename"]
)
filepath = cls._format_filepath(xml_object.tag, filename)
# VS[compat]
@@ -246,11 +240,6 @@ class XmlParserMixin(object):
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_backcompat"]
)
candidates = cls.backcompat_paths(filepath)
for candidate in candidates:
if system.resources_fs.exists(candidate):
@@ -289,14 +278,6 @@ class XmlParserMixin(object):
attr = cls._translate(attr)
if attr in cls.metadata_to_strip:
if attr in ('course', 'org', 'url_name', 'filename'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:xmlparser_util_mixin_load_metadata",
"metadata:{}".format(attr),
)
)
# don't load these
continue
@@ -356,10 +337,6 @@ class XmlParserMixin(object):
else:
filepath = None
definition_xml = node
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_parse_xml"]
)
# Note: removes metadata.
definition, children = cls.load_definition(definition_xml, runtime, def_id, id_generator)

View File

@@ -38,7 +38,6 @@ from django.utils.translation import ugettext as _
from markupsafe import escape
from six import text_type
import dogstats_wrapper as dog_stats_api
from bulk_email.models import CourseEmail, Optout
from courseware.courses import get_course
from lms.djangoapps.instructor_task.models import InstructorTask
@@ -295,14 +294,13 @@ def send_course_email(entry_id, email_id, to_list, global_email_context, subtask
new_subtask_status = None
try:
course_title = global_email_context['course_title']
with dog_stats_api.timer('course_email.single_task.time.overall', tags=[_statsd_tag(course_title)]):
new_subtask_status, send_exception = _send_course_email(
entry_id,
email_id,
to_list,
global_email_context,
subtask_status,
)
new_subtask_status, send_exception = _send_course_email(
entry_id,
email_id,
to_list,
global_email_context,
subtask_status,
)
except Exception:
# Unexpected exception. Try to write out the failure to the entry before failing.
log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
@@ -386,7 +384,7 @@ def _get_source_address(course_id, course_title, course_language, truncate=True)
from_addr_format = u'{name} {email}'.format(
# Translators: Bulk email from address e.g. ("Physics 101" Course Staff)
name=_('"{course_title}" Course Staff'),
email=u'<{course_name}-{from_email}>',
email=u'<{course_name}-{from_email}>', # xss-lint: disable=python-wrap-html
)
def format_address(course_title_no_quotes):
@@ -566,8 +564,7 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
current_recipient['profile__name'],
email
)
with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):
connection.send_messages([email_msg])
connection.send_messages([email_msg])
except SMTPDataError as exc:
# According to SMTP spec, we'll retry error codes in the 4xx range. 5xx range indicates hard failure.
@@ -598,7 +595,6 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
email,
exc.smtp_error
)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
except SINGLE_EMAIL_FAILURE_ERRORS as exc:
@@ -615,7 +611,6 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
email,
exc
)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
else:
@@ -630,7 +625,6 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
total_recipients,
email
)
dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])
if settings.BULK_EMAIL_LOG_SENT_EMAILS:
log.info('Email with id %s sent to %s', email_id, email)
else:
@@ -667,7 +661,6 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
)
except INFINITE_RETRY_ERRORS as exc:
dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_nomax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_nomax=1, state=RETRY)
@@ -679,7 +672,6 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# Errors caught are those that indicate a temporary condition that might succeed on retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
@@ -688,7 +680,6 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
)
except BULK_EMAIL_FAILURE_ERRORS as exc:
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
num_pending = len(to_list)
log.exception(('Task %s: email with id %d caused send_course_email task to fail '
'with "fatal" exception. %d emails unsent.'),
@@ -703,7 +694,6 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
# without popping the current recipient off of the existing list.
# These are unexpected errors. Since they might be due to a temporary condition that might
# succeed on retry, we give them a retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
log.exception(('Task %s: email with id %d caused send_course_email task to fail '
'with unexpected exception. Generating retry.'),
task_id, email_id)
@@ -838,11 +828,3 @@ def _submit_for_retry(entry_id, email_id, to_list, global_email_context,
num_failed = len(to_list)
subtask_status.increment(failed=num_failed, state=FAILURE)
return subtask_status, retry_exc
def _statsd_tag(course_title):
"""
Prefix the tag we will use for DataDog.
The tag also gets modified by our dogstats_wrapper code.
"""
return u"course_email:{0}".format(course_title)

View File

@@ -11,7 +11,6 @@ from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from opaque_keys.edx.keys import CourseKey
import dogstats_wrapper as dog_stats_api
from capa.xqueue_interface import XQUEUE_METRIC_NAME
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import (
@@ -128,11 +127,6 @@ def update_certificate(request):
content_type='application/json'
)
dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
u'action:update_certificate',
u'course_id:{}'.format(cert.course_id)
])
cert.save()
return HttpResponse(json.dumps({'return_code': 0}),
content_type='application/json')

View File

@@ -17,7 +17,6 @@ from edx_django_utils import monitoring as monitoring_utils
from edx_user_state_client.interface import XBlockUserState, XBlockUserStateClient
from xblock.fields import Scope
import dogstats_wrapper as dog_stats_api
from courseware.models import BaseStudentModuleHistory, StudentModule
try:
@@ -105,27 +104,6 @@ class DjangoXBlockUserStateClient(XBlockUserStateClient):
usage_key = student_module.module_state_key.map_into_course(student_module.course_id)
yield (student_module, usage_key)
def _ddog_increment(self, evt_time, evt_name):
"""
DataDog increment method.
"""
dog_stats_api.increment(
'DjangoXBlockUserStateClient.{}'.format(evt_name),
timestamp=evt_time,
sample_rate=self.API_DATADOG_SAMPLE_RATE,
)
def _ddog_histogram(self, evt_time, evt_name, value):
"""
DataDog histogram method.
"""
dog_stats_api.histogram(
'DjangoXBlockUserStateClient.{}'.format(evt_name),
value,
timestamp=evt_time,
sample_rate=self.API_DATADOG_SAMPLE_RATE,
)
def _nr_metric_name(self, function_name, stat_name, block_type=None):
"""
Return a metric name (string) representing the provided descriptors.
@@ -195,22 +173,16 @@ class DjangoXBlockUserStateClient(XBlockUserStateClient):
self._nr_stat_increment('get_many', 'calls')
# keep track of blocks requested
self._ddog_histogram(evt_time, 'get_many.blks_requested', len(block_keys))
self._nr_stat_accumulate('get_many', 'blocks_requested', len(block_keys))
modules = self._get_student_modules(username, block_keys)
for module, usage_key in modules:
if module.state is None:
self._ddog_increment(evt_time, 'get_many.empty_state')
continue
state = json.loads(module.state)
state_length = len(module.state)
# record this metric before the check for empty state, so that we
# have some visibility into empty blocks.
self._ddog_histogram(evt_time, 'get_many.block_size', state_length)
# If the state is the empty dict, then it has been deleted, and so
# conformant UserStateClients should treat it as if it doesn't exist.
if state == {}:
@@ -233,9 +205,6 @@ class DjangoXBlockUserStateClient(XBlockUserStateClient):
# The rest of this method exists only to report metrics.
finish_time = time()
duration = (finish_time - evt_time) * 1000 # milliseconds
self._ddog_histogram(evt_time, 'get_many.blks_out', total_block_count)
self._ddog_histogram(evt_time, 'get_many.response_time', duration)
self._nr_stat_accumulate('get_many', 'duration', duration)
def set_many(self, username, block_keys_to_state, scope=Scope.user_state):
@@ -325,28 +294,19 @@ class DjangoXBlockUserStateClient(XBlockUserStateClient):
# Record whether a state row has been created or updated.
if created:
self._ddog_increment(evt_time, 'set_many.state_created')
self._nr_block_stat_increment('set_many', usage_key.block_type, 'blocks_created')
else:
self._ddog_increment(evt_time, 'set_many.state_updated')
self._nr_block_stat_increment('set_many', usage_key.block_type, 'blocks_updated')
# Event to record number of fields sent in to set/set_many.
self._ddog_histogram(evt_time, 'set_many.fields_in', len(state))
# Event to record number of new fields set in set/set_many.
num_new_fields_set = num_fields_after - num_fields_before
self._ddog_histogram(evt_time, 'set_many.fields_set', num_new_fields_set)
# Event to record number of existing fields updated in set/set_many.
num_fields_updated = max(0, len(state) - num_new_fields_set)
self._ddog_histogram(evt_time, 'set_many.fields_updated', num_fields_updated)
# Events for the entire set_many call.
finish_time = time()
duration = (finish_time - evt_time) * 1000 # milliseconds
self._ddog_histogram(evt_time, 'set_many.blks_updated', len(block_keys_to_state))
self._ddog_histogram(evt_time, 'set_many.response_time', duration)
self._nr_stat_accumulate('set_many', 'duration', duration)
def delete_many(self, username, block_keys, scope=Scope.user_state, fields=None):
@@ -363,13 +323,6 @@ class DjangoXBlockUserStateClient(XBlockUserStateClient):
raise ValueError("Only Scope.user_state is supported")
evt_time = time()
if fields is None:
self._ddog_increment(evt_time, 'delete_many.empty_state')
else:
self._ddog_histogram(evt_time, 'delete_many.field_count', len(fields))
self._ddog_histogram(evt_time, 'delete_many.block_count', len(block_keys))
student_modules = self._get_student_modules(username, block_keys)
for student_module, _ in student_modules:
if fields is None:
@@ -387,7 +340,6 @@ class DjangoXBlockUserStateClient(XBlockUserStateClient):
# Event for the entire delete_many call.
finish_time = time()
self._ddog_histogram(evt_time, 'delete_many.response_time', (finish_time - evt_time) * 1000)
def get_history(self, username, block_key, scope=Scope.user_state):
"""

View File

@@ -4,7 +4,6 @@ Course Grade Factory Class
from collections import namedtuple
from logging import getLogger
import dogstats_wrapper as dog_stats_api
from six import text_type
from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED, COURSE_GRADE_NOW_PASSED
@@ -103,8 +102,7 @@ class CourseGradeFactory(object):
)
stats_tags = [u'action:{}'.format(course_data.course_key)]
for user in users:
with dog_stats_api.timer('lms.grades.CourseGradeFactory.iter', tags=stats_tags):
yield self._iter_grade_result(user, course_data, force_update)
yield self._iter_grade_result(user, course_data, force_update)
def _iter_grade_result(self, user, course_data, force_update):
try:

View File

@@ -12,7 +12,6 @@ from celery.states import READY_STATES, RETRY, SUCCESS
from django.core.cache import cache
from django.db import DatabaseError, transaction
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from .exceptions import DuplicateTaskException
@@ -59,11 +58,6 @@ def track_memory_usage(metric, course_id):
total_memory_info = process.get_memory_info()
total_usage = getattr(total_memory_info, memory_type)
memory_used = total_usage - baseline_usage
dog_stats_api.increment(
metric + "." + memory_type,
memory_used,
tags=["course_id:{}".format(course_id)],
)
def _generate_items_for_subtask(
@@ -410,7 +404,6 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask knows about this particular subtask.
@@ -420,7 +413,6 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.unknown', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask doesn't think that this subtask has already been
@@ -431,7 +423,6 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.completed', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask doesn't think that this subtask is already being
@@ -445,7 +436,6 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.retried', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Now we are ready to start working on this. Try to lock it.
@@ -455,7 +445,6 @@ def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
msg = format_str.format(current_task_id, entry)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.locked', tags=[entry.course_id])
raise DuplicateTaskException(msg)
@@ -479,12 +468,10 @@ def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_c
if retry_count < MAX_DATABASE_LOCK_RETRIES:
TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s: retry %d",
current_task_id, entry_id, new_subtask_status, retry_count)
dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update')
update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
else:
TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s",
retry_count, current_task_id, entry_id, new_subtask_status)
dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries')
raise
finally:
# Only release the lock on the subtask when we're done trying to update it.
@@ -581,5 +568,4 @@ def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
entry.task_output, current_task_id, entry_id)
except Exception:
TASK_LOG.exception("Unexpected error while updating InstructorTask.")
dog_stats_api.increment('instructor_task.subtask.update_exception')
raise

View File

@@ -8,7 +8,6 @@ from time import time
from django.utils.translation import ugettext_noop
from opaque_keys.edx.keys import UsageKey
import dogstats_wrapper as dog_stats_api
from capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
@@ -92,18 +91,17 @@ def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, ta
module_descriptor = problems[unicode(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update, task_input)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
update_status = update_fcn(module_descriptor, module_to_update, task_input)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
return task_progress.update_task_state()

View File

@@ -5,7 +5,6 @@ from time import time
from celery import current_task
from django.db import reset_queries
import dogstats_wrapper as dog_stats_api
from lms.djangoapps.instructor_task.models import PROGRESS, InstructorTask
from util.db import outer_atomic
@@ -109,8 +108,7 @@ def run_main_task(entry_id, task_fcn, action_name):
raise ValueError(message)
# Now do the work
with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
task_progress = task_fcn(entry_id, course_id, task_input, action_name)
task_progress = task_fcn(entry_id, course_id, task_input, action_name)
# Release any queries that the connection has been hanging onto
reset_queries()

View File

@@ -125,7 +125,6 @@ import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edxmako.shortcuts', logging.ERROR),
('dd.dogapi', logging.ERROR),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:

View File

@@ -2114,9 +2114,6 @@ INSTALLED_APPS = [
# Splash screen
'splash',
# Monitoring
'openedx.core.djangoapps.datadog.apps.DatadogConfig',
# User API
'rest_framework',
'openedx.core.djangoapps.user_api',

View File

@@ -32,7 +32,6 @@ import logging
LOG_OVERRIDES = [
('track.contexts', logging.CRITICAL),
('track.middleware', logging.CRITICAL),
('dd.dogapi', logging.CRITICAL),
('django_comment_client.utils', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:

View File

@@ -7,7 +7,6 @@ from uuid import uuid4
import requests
from django.utils.translation import get_language
import dogstats_wrapper as dog_stats_api
from .settings import SERVICE_HOST as COMMENTS_SERVICE
log = logging.getLogger(__name__)
@@ -30,25 +29,6 @@ def extract(dic, keys):
return strip_none({k: dic.get(k) for k in keys})
@contextmanager
def request_timer(request_id, method, url, tags=None):
start = time()
with dog_stats_api.timer('comment_client.request.time', tags=tags):
yield
end = time()
duration = end - start
log.info(
u"comment_client_request_log: request_id={request_id}, method={method}, "
u"url={url}, duration={duration}".format(
request_id=request_id,
method=method,
url=url,
duration=duration
)
)
def perform_request(method, url, data_or_params=None, raw=False,
metric_action=None, metric_tags=None, paged_results=False):
# To avoid dependency conflict
@@ -81,15 +61,14 @@ def perform_request(method, url, data_or_params=None, raw=False,
data = None
params = data_or_params.copy()
params.update(request_id_dict)
with request_timer(request_id, method, url, metric_tags):
response = requests.request(
method,
url,
data=data,
params=params,
headers=headers,
timeout=config.connection_timeout
)
response = requests.request(
method,
url,
data=data,
params=params,
headers=headers,
timeout=config.connection_timeout
)
metric_tags.append(u'status_code:{}'.format(response.status_code))
if response.status_code > 200:
@@ -97,8 +76,6 @@ def perform_request(method, url, data_or_params=None, raw=False,
else:
metric_tags.append(u'result:success')
dog_stats_api.increment('comment_client.request.count', tags=metric_tags)
if 200 < response.status_code < 500:
raise CommentClientRequestError(response.text, response.status_code)
# Heroku returns a 503 when an application is in maintenance mode
@@ -119,22 +96,6 @@ def perform_request(method, url, data_or_params=None, raw=False,
content=response.text[:100]
)
)
if paged_results:
dog_stats_api.histogram(
'comment_client.request.paged.result_count',
value=len(data.get('collection', [])),
tags=metric_tags
)
dog_stats_api.histogram(
'comment_client.request.paged.page',
value=data.get('page', 1),
tags=metric_tags
)
dog_stats_api.histogram(
'comment_client.request.paged.num_pages',
value=data.get('num_pages', 1),
tags=metric_tags
)
return data

View File

@@ -1,32 +0,0 @@
"""
Configuration for datadog Django app
"""
from django.apps import AppConfig
from django.conf import settings
from dogapi import dog_http_api, dog_stats_api
class DatadogConfig(AppConfig):
"""
Configuration class for datadog Django app
"""
name = 'openedx.core.djangoapps.datadog'
verbose_name = "Datadog"
def ready(self):
"""
Initialize connection to datadog during django startup.
Configure using DATADOG dictionary in the django project settings.
"""
# By default use the statsd agent
options = {'statsd': True}
if hasattr(settings, 'DATADOG'):
options.update(settings.DATADOG)
# Not all arguments are documented.
# Look at the source code for details.
dog_stats_api.start(**options)
dog_http_api.api_key = options.get('api_key')

View File

@@ -1,13 +1,11 @@
"""
Views for verifying the health (heartbeat) of the app.
"""
from dogapi import dog_stats_api
from util.json_request import JsonResponse
from .runchecks import runchecks
@dog_stats_api.timed('edxapp.heartbeat')
def heartbeat(request):
"""
Simple view that a loadbalancer can check to verify that the app is up. Returns a json doc

View File

@@ -5,11 +5,9 @@ Django Celery tasks for service status app
import time
from djcelery import celery
from dogapi import dog_stats_api
@celery.task
@dog_stats_api.timed('status.service.celery.pong')
def delayed_ping(value, delay):
"""A simple tasks that replies to a message after a especified amount
of seconds.

View File

@@ -8,7 +8,6 @@ import time
from celery.exceptions import TimeoutError
from django.http import HttpResponse
from djcelery import celery
from dogapi import dog_stats_api
from openedx.core.djangoapps.service_status.tasks import delayed_ping
@@ -20,7 +19,6 @@ def index(_):
return HttpResponse()
@dog_stats_api.timed('status.service.celery.status')
def celery_status(_):
"""
A view that returns Celery stats
@@ -30,7 +28,6 @@ def celery_status(_):
content_type="application/json")
@dog_stats_api.timed('status.service.celery.ping')
def celery_ping(_):
"""
A Simple view that checks if Celery can process a simple task

View File

@@ -6,7 +6,6 @@ import datetime
import json
import logging
import dogstats_wrapper as dog_stats_api
from django.conf import settings
from django.contrib.auth import login as django_login
from django.contrib.auth.models import User
@@ -193,8 +192,6 @@ def create_account_with_params(request, params):
except Exception: # pylint: disable=broad-except
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
_track_user_registration(user, profile, params, third_party_provider)
# Announce registration

View File

@@ -18,7 +18,6 @@ git+https://github.com/jazzband/django-pipeline.git@d068a019169c9de5ee20ece041a6
-e git+https://github.com/edx/django-wiki.git@v0.0.20#egg=django-wiki
git+https://github.com/edx/django-rest-framework-oauth.git@0a43e8525f1e3048efe4bc70c03de308a277197c#egg=djangorestframework-oauth==1.1.1
git+https://github.com/edx/django-rest-framework.git@1ceda7c086fddffd1c440cc86856441bbf0bd9cb#egg=djangorestframework==3.6.3
-e common/lib/dogstats
-e git+https://github.com/edx/DoneXBlock.git@01a14f3bd80ae47dd08cdbbe2f88f3eb88d00fba#egg=done-xblock
-e git+https://github.com/jazkarta/edx-jsme.git@690dbf75441fa91c7c4899df0b83d77f7deb5458#egg=edx-jsme
git+https://github.com/mitodl/edx-sga.git@3828ba9e413080a81b907a3381e5ffa05e063f81#egg=edx-sga==0.8.3

View File

@@ -20,7 +20,6 @@ git+https://github.com/jazzband/django-pipeline.git@d068a019169c9de5ee20ece041a6
-e git+https://github.com/edx/django-wiki.git@v0.0.20#egg=django-wiki
git+https://github.com/edx/django-rest-framework-oauth.git@0a43e8525f1e3048efe4bc70c03de308a277197c#egg=djangorestframework-oauth==1.1.1
git+https://github.com/edx/django-rest-framework.git@1ceda7c086fddffd1c440cc86856441bbf0bd9cb#egg=djangorestframework==3.6.3
-e common/lib/dogstats
-e git+https://github.com/edx/DoneXBlock.git@01a14f3bd80ae47dd08cdbbe2f88f3eb88d00fba#egg=done-xblock
-e git+https://github.com/jazkarta/edx-jsme.git@690dbf75441fa91c7c4899df0b83d77f7deb5458#egg=edx-jsme
git+https://github.com/mitodl/edx-sga.git@3828ba9e413080a81b907a3381e5ffa05e063f81#egg=edx-sga==0.8.3

View File

@@ -3,7 +3,6 @@
-e common/lib/calc
-e common/lib/capa
-e common/lib/chem
-e common/lib/dogstats
-e common/lib/safe_lxml
-e common/lib/sandbox-packages
-e common/lib/symmath

View File

@@ -18,7 +18,6 @@ git+https://github.com/jazzband/django-pipeline.git@d068a019169c9de5ee20ece041a6
-e git+https://github.com/edx/django-wiki.git@v0.0.20#egg=django-wiki
git+https://github.com/edx/django-rest-framework-oauth.git@0a43e8525f1e3048efe4bc70c03de308a277197c#egg=djangorestframework-oauth==1.1.1
git+https://github.com/edx/django-rest-framework.git@1ceda7c086fddffd1c440cc86856441bbf0bd9cb#egg=djangorestframework==3.6.3
-e common/lib/dogstats
-e git+https://github.com/edx/DoneXBlock.git@01a14f3bd80ae47dd08cdbbe2f88f3eb88d00fba#egg=done-xblock
-e git+https://github.com/jazkarta/edx-jsme.git@690dbf75441fa91c7c4899df0b83d77f7deb5458#egg=edx-jsme
git+https://github.com/mitodl/edx-sga.git@3828ba9e413080a81b907a3381e5ffa05e063f81#egg=edx-sga==0.8.3