diff --git a/.ruby-version b/.ruby-version
new file mode 100644
index 0000000000..e46f918393
--- /dev/null
+++ b/.ruby-version
@@ -0,0 +1 @@
+1.8.7-p371
diff --git a/Gemfile b/Gemfile
index 0fe7df217d..b95f3b5d1a 100644
--- a/Gemfile
+++ b/Gemfile
@@ -1,7 +1,7 @@
source :rubygems
-ruby "1.9.3"
-gem 'rake'
+ruby "1.8.7"
+gem 'rake', '~> 10.0.3'
gem 'sass', '3.1.15'
gem 'bourbon', '~> 1.3.6'
-gem 'colorize'
-gem 'launchy'
+gem 'colorize', '~> 0.5.8'
+gem 'launchy', '~> 2.1.2'
diff --git a/cms/envs/common.py b/cms/envs/common.py
index 98a5fbf26d..f2d47dfdc6 100644
--- a/cms/envs/common.py
+++ b/cms/envs/common.py
@@ -34,7 +34,7 @@ MITX_FEATURES = {
'GITHUB_PUSH': False,
'ENABLE_DISCUSSION_SERVICE': False,
'AUTH_USE_MIT_CERTIFICATES' : False,
- 'STUB_VIDEO_FOR_TESTING': False, # do not display video when running automated acceptance tests
+ 'STUB_VIDEO_FOR_TESTING': False, # do not display video when running automated acceptance tests
}
ENABLE_JASMINE = False
@@ -281,7 +281,7 @@ INSTALLED_APPS = (
'contentstore',
'auth',
'student', # misleading name due to sharing with lms
-
+ 'course_groups', # not used in cms (yet), but tests run
# For asset pipelining
'pipeline',
'staticfiles',
diff --git a/common/djangoapps/course_groups/__init__.py b/common/djangoapps/course_groups/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/common/djangoapps/course_groups/cohorts.py b/common/djangoapps/course_groups/cohorts.py
new file mode 100644
index 0000000000..f84e18b214
--- /dev/null
+++ b/common/djangoapps/course_groups/cohorts.py
@@ -0,0 +1,214 @@
+"""
+This file contains the logic for cohort groups, as exposed internally to the
+forums, and to the cohort admin views.
+"""
+
+from django.contrib.auth.models import User
+from django.http import Http404
+import logging
+
+from courseware import courses
+from student.models import get_user_by_username_or_email
+from .models import CourseUserGroup
+
+log = logging.getLogger(__name__)
+
+def is_course_cohorted(course_id):
+ """
+ Given a course id, return a boolean for whether or not the course is
+ cohorted.
+
+ Raises:
+ Http404 if the course doesn't exist.
+ """
+ return courses.get_course_by_id(course_id).is_cohorted
+
+
+def get_cohort_id(user, course_id):
+ """
+ Given a course id and a user, return the id of the cohort that user is
+ assigned to in that course. If they don't have a cohort, return None.
+ """
+ cohort = get_cohort(user, course_id)
+ return None if cohort is None else cohort.id
+
+
+def is_commentable_cohorted(course_id, commentable_id):
+ """
+ Args:
+ course_id: string
+ commentable_id: string
+
+ Returns:
+ Bool: is this commentable cohorted?
+
+ Raises:
+ Http404 if the course doesn't exist.
+ """
+ course = courses.get_course_by_id(course_id)
+
+ if not course.is_cohorted:
+ # this is the easy case :)
+ ans = False
+ elif commentable_id in course.top_level_discussion_topic_ids:
+ # top level discussions have to be manually configured as cohorted
+ # (default is not)
+ ans = commentable_id in course.cohorted_discussions
+ else:
+ # inline discussions are cohorted by default
+ ans = True
+
+ log.debug("is_commentable_cohorted({0}, {1}) = {2}".format(course_id,
+ commentable_id,
+ ans))
+ return ans
+
+
+def get_cohort(user, course_id):
+ """
+ Given a django User and a course_id, return the user's cohort in that
+    course.
+
+ Arguments:
+ user: a Django User object.
+ course_id: string in the format 'org/course/run'
+
+ Returns:
+ A CourseUserGroup object if the course is cohorted and the User has a
+ cohort, else None.
+
+ Raises:
+ ValueError if the course_id doesn't exist.
+ """
+ # First check whether the course is cohorted (users shouldn't be in a cohort
+ # in non-cohorted courses, but settings can change after course starts)
+ try:
+ course = courses.get_course_by_id(course_id)
+ except Http404:
+ raise ValueError("Invalid course_id")
+
+ if not course.is_cohorted:
+ return None
+
+ try:
+ return CourseUserGroup.objects.get(course_id=course_id,
+ group_type=CourseUserGroup.COHORT,
+ users__id=user.id)
+ except CourseUserGroup.DoesNotExist:
+ # TODO: add auto-cohorting logic here once we know what that will be.
+ return None
+
+
+def get_course_cohorts(course_id):
+ """
+ Get a list of all the cohorts in the given course.
+
+ Arguments:
+ course_id: string in the format 'org/course/run'
+
+ Returns:
+ A list of CourseUserGroup objects. Empty if there are no cohorts. Does
+ not check whether the course is cohorted.
+ """
+ return list(CourseUserGroup.objects.filter(course_id=course_id,
+ group_type=CourseUserGroup.COHORT))
+
+### Helpers for cohort management views
+
+def get_cohort_by_name(course_id, name):
+ """
+ Return the CourseUserGroup object for the given cohort. Raises DoesNotExist
+    if it isn't present.
+ """
+ return CourseUserGroup.objects.get(course_id=course_id,
+ group_type=CourseUserGroup.COHORT,
+ name=name)
+
+def get_cohort_by_id(course_id, cohort_id):
+ """
+ Return the CourseUserGroup object for the given cohort. Raises DoesNotExist
+    if it isn't present. Uses the course_id for extra validation...
+ """
+ return CourseUserGroup.objects.get(course_id=course_id,
+ group_type=CourseUserGroup.COHORT,
+ id=cohort_id)
+
+def add_cohort(course_id, name):
+ """
+ Add a cohort to a course. Raises ValueError if a cohort of the same name already
+ exists.
+ """
+ log.debug("Adding cohort %s to %s", name, course_id)
+ if CourseUserGroup.objects.filter(course_id=course_id,
+ group_type=CourseUserGroup.COHORT,
+ name=name).exists():
+ raise ValueError("Can't create two cohorts with the same name")
+
+ return CourseUserGroup.objects.create(course_id=course_id,
+ group_type=CourseUserGroup.COHORT,
+ name=name)
+
+class CohortConflict(Exception):
+ """
+ Raised when user to be added is already in another cohort in same course.
+ """
+ pass
+
+def add_user_to_cohort(cohort, username_or_email):
+ """
+ Look up the given user, and if successful, add them to the specified cohort.
+
+ Arguments:
+ cohort: CourseUserGroup
+ username_or_email: string. Treated as email if has '@'
+
+ Returns:
+ User object.
+
+ Raises:
+ User.DoesNotExist if can't find user.
+
+ ValueError if user already present in this cohort.
+
+ CohortConflict if user already in another cohort.
+ """
+ user = get_user_by_username_or_email(username_or_email)
+
+ # If user in any cohorts in this course already, complain
+ course_cohorts = CourseUserGroup.objects.filter(
+ course_id=cohort.course_id,
+ users__id=user.id,
+ group_type=CourseUserGroup.COHORT)
+ if course_cohorts.exists():
+ if course_cohorts[0] == cohort:
+ raise ValueError("User {0} already present in cohort {1}".format(
+ user.username,
+ cohort.name))
+ else:
+ raise CohortConflict("User {0} is in another cohort {1} in course"
+ .format(user.username,
+ course_cohorts[0].name))
+
+ cohort.users.add(user)
+ return user
+
+
+def get_course_cohort_names(course_id):
+ """
+ Return a list of the cohort names in a course.
+ """
+ return [c.name for c in get_course_cohorts(course_id)]
+
+
+def delete_empty_cohort(course_id, name):
+ """
+ Remove an empty cohort. Raise ValueError if cohort is not empty.
+ """
+ cohort = get_cohort_by_name(course_id, name)
+ if cohort.users.exists():
+ raise ValueError(
+ "Can't delete non-empty cohort {0} in course {1}".format(
+ name, course_id))
+
+ cohort.delete()
+
diff --git a/common/djangoapps/course_groups/models.py b/common/djangoapps/course_groups/models.py
new file mode 100644
index 0000000000..957d230d92
--- /dev/null
+++ b/common/djangoapps/course_groups/models.py
@@ -0,0 +1,34 @@
+import logging
+
+from django.contrib.auth.models import User
+from django.db import models
+
+log = logging.getLogger(__name__)
+
+class CourseUserGroup(models.Model):
+ """
+ This model represents groups of users in a course. Groups may have different types,
+ which may be treated specially. For example, a user can be in at most one cohort per
+ course, and cohorts are used to split up the forums by group.
+ """
+ class Meta:
+ unique_together = (('name', 'course_id'), )
+
+ name = models.CharField(max_length=255,
+ help_text=("What is the name of this group? "
+ "Must be unique within a course."))
+ users = models.ManyToManyField(User, db_index=True, related_name='course_groups',
+ help_text="Who is in this group?")
+
+ # Note: groups associated with particular runs of a course. E.g. Fall 2012 and Spring
+ # 2013 versions of 6.00x will have separate groups.
+ course_id = models.CharField(max_length=255, db_index=True,
+ help_text="Which course is this group associated with?")
+
+ # For now, only have group type 'cohort', but adding a type field to support
+ # things like 'question_discussion', 'friends', 'off-line-class', etc
+ COHORT = 'cohort'
+ GROUP_TYPE_CHOICES = ((COHORT, 'Cohort'),)
+ group_type = models.CharField(max_length=20, choices=GROUP_TYPE_CHOICES)
+
+
diff --git a/common/djangoapps/course_groups/tests/tests.py b/common/djangoapps/course_groups/tests/tests.py
new file mode 100644
index 0000000000..21fad8bbeb
--- /dev/null
+++ b/common/djangoapps/course_groups/tests/tests.py
@@ -0,0 +1,170 @@
+import django.test
+from django.contrib.auth.models import User
+from django.conf import settings
+
+from override_settings import override_settings
+
+from course_groups.models import CourseUserGroup
+from course_groups.cohorts import (get_cohort, get_course_cohorts,
+ is_commentable_cohorted)
+
+from xmodule.modulestore.django import modulestore, _MODULESTORES
+
+class TestCohorts(django.test.TestCase):
+
+
+ @staticmethod
+ def topic_name_to_id(course, name):
+ """
+ Given a discussion topic name, return an id for that name (includes
+ course and url_name).
+ """
+ return "{course}_{run}_{name}".format(course=course.location.course,
+ run=course.url_name,
+ name=name)
+
+
+ @staticmethod
+ def config_course_cohorts(course, discussions,
+ cohorted, cohorted_discussions=None):
+ """
+ Given a course with no discussion set up, add the discussions and set
+ the cohort config appropriately.
+
+ Arguments:
+ course: CourseDescriptor
+ discussions: list of topic names strings. Picks ids and sort_keys
+ automatically.
+ cohorted: bool.
+ cohorted_discussions: optional list of topic names. If specified,
+ converts them to use the same ids as topic names.
+
+ Returns:
+ Nothing -- modifies course in place.
+ """
+ def to_id(name):
+ return TestCohorts.topic_name_to_id(course, name)
+
+ topics = dict((name, {"sort_key": "A",
+ "id": to_id(name)})
+ for name in discussions)
+
+ course.metadata["discussion_topics"] = topics
+
+ d = {"cohorted": cohorted}
+ if cohorted_discussions is not None:
+ d["cohorted_discussions"] = [to_id(name)
+ for name in cohorted_discussions]
+ course.metadata["cohort_config"] = d
+
+
+ def setUp(self):
+ """
+ Make sure that course is reloaded every time--clear out the modulestore.
+ """
+ # don't like this, but don't know a better way to undo all changes made
+ # to course. We don't have a course.clone() method.
+ _MODULESTORES.clear()
+
+
+ def test_get_cohort(self):
+ # Need to fix this, but after we're testing on staging. (Looks like
+ # problem is that when get_cohort internally tries to look up the
+ # course.id, it fails, even though we loaded it through the modulestore.
+
+ # Proper fix: give all tests a standard modulestore that uses the test
+ # dir.
+ course = modulestore().get_course("edX/toy/2012_Fall")
+ self.assertEqual(course.id, "edX/toy/2012_Fall")
+ self.assertFalse(course.is_cohorted)
+
+ user = User.objects.create(username="test", email="a@b.com")
+ other_user = User.objects.create(username="test2", email="a2@b.com")
+
+ self.assertIsNone(get_cohort(user, course.id), "No cohort created yet")
+
+ cohort = CourseUserGroup.objects.create(name="TestCohort",
+ course_id=course.id,
+ group_type=CourseUserGroup.COHORT)
+
+ cohort.users.add(user)
+
+ self.assertIsNone(get_cohort(user, course.id),
+ "Course isn't cohorted, so shouldn't have a cohort")
+
+ # Make the course cohorted...
+ self.config_course_cohorts(course, [], cohorted=True)
+
+ self.assertEquals(get_cohort(user, course.id).id, cohort.id,
+ "Should find the right cohort")
+
+ self.assertEquals(get_cohort(other_user, course.id), None,
+ "other_user shouldn't have a cohort")
+
+
+ def test_get_course_cohorts(self):
+ course1_id = 'a/b/c'
+ course2_id = 'e/f/g'
+
+ # add some cohorts to course 1
+ cohort = CourseUserGroup.objects.create(name="TestCohort",
+ course_id=course1_id,
+ group_type=CourseUserGroup.COHORT)
+
+ cohort = CourseUserGroup.objects.create(name="TestCohort2",
+ course_id=course1_id,
+ group_type=CourseUserGroup.COHORT)
+
+
+ # second course should have no cohorts
+ self.assertEqual(get_course_cohorts(course2_id), [])
+
+ cohorts = sorted([c.name for c in get_course_cohorts(course1_id)])
+ self.assertEqual(cohorts, ['TestCohort', 'TestCohort2'])
+
+
+ def test_is_commentable_cohorted(self):
+ course = modulestore().get_course("edX/toy/2012_Fall")
+ self.assertFalse(course.is_cohorted)
+
+ def to_id(name):
+ return self.topic_name_to_id(course, name)
+
+ # no topics
+ self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
+ "Course doesn't even have a 'General' topic")
+
+ # not cohorted
+ self.config_course_cohorts(course, ["General", "Feedback"],
+ cohorted=False)
+
+ self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
+ "Course isn't cohorted")
+
+ # cohorted, but top level topics aren't
+ self.config_course_cohorts(course, ["General", "Feedback"],
+ cohorted=True)
+
+ self.assertTrue(course.is_cohorted)
+ self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
+ "Course is cohorted, but 'General' isn't.")
+
+ self.assertTrue(
+ is_commentable_cohorted(course.id, to_id("random")),
+ "Non-top-level discussion is always cohorted in cohorted courses.")
+
+ # cohorted, including "Feedback" top-level topics aren't
+ self.config_course_cohorts(course, ["General", "Feedback"],
+ cohorted=True,
+ cohorted_discussions=["Feedback"])
+
+ self.assertTrue(course.is_cohorted)
+ self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
+ "Course is cohorted, but 'General' isn't.")
+
+ self.assertTrue(
+ is_commentable_cohorted(course.id, to_id("Feedback")),
+ "Feedback was listed as cohorted. Should be.")
+
+
+
diff --git a/common/djangoapps/course_groups/views.py b/common/djangoapps/course_groups/views.py
new file mode 100644
index 0000000000..d591c44356
--- /dev/null
+++ b/common/djangoapps/course_groups/views.py
@@ -0,0 +1,219 @@
+from django_future.csrf import ensure_csrf_cookie
+from django.contrib.auth.decorators import login_required
+from django.views.decorators.http import require_POST
+from django.contrib.auth.models import User
+from django.core.context_processors import csrf
+from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
+from django.core.urlresolvers import reverse
+from django.http import HttpResponse, HttpResponseForbidden, Http404
+from django.shortcuts import redirect
+import json
+import logging
+import re
+
+from courseware.courses import get_course_with_access
+from mitxmako.shortcuts import render_to_response, render_to_string
+
+from .models import CourseUserGroup
+from . import cohorts
+
+import track.views
+
+
+log = logging.getLogger(__name__)
+
+def json_http_response(data):
+ """
+ Return an HttpResponse with the data json-serialized and the right content
+ type header.
+ """
+ return HttpResponse(json.dumps(data), content_type="application/json")
+
+def split_by_comma_and_whitespace(s):
+ """
+    Split a string both by commas and whitespace. Returns a list.
+ """
+ return re.split(r'[\s,]+', s)
+
+
+@ensure_csrf_cookie
+def list_cohorts(request, course_id):
+ """
+ Return json dump of dict:
+
+ {'success': True,
+ 'cohorts': [{'name': name, 'id': id}, ...]}
+ """
+ get_course_with_access(request.user, course_id, 'staff')
+
+ all_cohorts = [{'name': c.name, 'id': c.id}
+ for c in cohorts.get_course_cohorts(course_id)]
+
+ return json_http_response({'success': True,
+ 'cohorts': all_cohorts})
+
+
+@ensure_csrf_cookie
+@require_POST
+def add_cohort(request, course_id):
+ """
+ Return json of dict:
+ {'success': True,
+ 'cohort': {'id': id,
+ 'name': name}}
+
+ or
+
+ {'success': False,
+ 'msg': error_msg} if there's an error
+ """
+ get_course_with_access(request.user, course_id, 'staff')
+
+ name = request.POST.get("name")
+ if not name:
+ return json_http_response({'success': False,
+ 'msg': "No name specified"})
+
+ try:
+ cohort = cohorts.add_cohort(course_id, name)
+ except ValueError as err:
+ return json_http_response({'success': False,
+ 'msg': str(err)})
+
+    return json_http_response({'success': True,
+ 'cohort': {
+ 'id': cohort.id,
+ 'name': cohort.name
+ }})
+
+
+@ensure_csrf_cookie
+def users_in_cohort(request, course_id, cohort_id):
+ """
+ Return users in the cohort. Show up to 100 per page, and page
+ using the 'page' GET attribute in the call. Format:
+
+ Returns:
+ Json dump of dictionary in the following format:
+ {'success': True,
+ 'page': page,
+ 'num_pages': paginator.num_pages,
+ 'users': [{'username': ..., 'email': ..., 'name': ...}]
+ }
+ """
+ get_course_with_access(request.user, course_id, 'staff')
+
+ # this will error if called with a non-int cohort_id. That's ok--it
+    # shouldn't happen for valid clients.
+ cohort = cohorts.get_cohort_by_id(course_id, int(cohort_id))
+
+ paginator = Paginator(cohort.users.all(), 100)
+ page = request.GET.get('page')
+ try:
+ users = paginator.page(page)
+ except PageNotAnInteger:
+ # return the first page
+ page = 1
+ users = paginator.page(page)
+ except EmptyPage:
+ # Page is out of range. Return last page
+ page = paginator.num_pages
+        users = paginator.page(page)
+
+ user_info = [{'username': u.username,
+ 'email': u.email,
+ 'name': '{0} {1}'.format(u.first_name, u.last_name)}
+ for u in users]
+
+ return json_http_response({'success': True,
+ 'page': page,
+ 'num_pages': paginator.num_pages,
+ 'users': user_info})
+
+
+@ensure_csrf_cookie
+@require_POST
+def add_users_to_cohort(request, course_id, cohort_id):
+ """
+ Return json dict of:
+
+ {'success': True,
+ 'added': [{'username': username,
+ 'name': name,
+ 'email': email}, ...],
+ 'conflict': [{'username_or_email': ...,
+ 'msg': ...}], # in another cohort
+ 'present': [str1, str2, ...], # already there
+ 'unknown': [str1, str2, ...]}
+ """
+ get_course_with_access(request.user, course_id, 'staff')
+
+ cohort = cohorts.get_cohort_by_id(course_id, cohort_id)
+
+ users = request.POST.get('users', '')
+ added = []
+ present = []
+ conflict = []
+ unknown = []
+ for username_or_email in split_by_comma_and_whitespace(users):
+ try:
+ user = cohorts.add_user_to_cohort(cohort, username_or_email)
+ added.append({'username': user.username,
+ 'name': "{0} {1}".format(user.first_name, user.last_name),
+ 'email': user.email,
+ })
+ except ValueError:
+ present.append(username_or_email)
+ except User.DoesNotExist:
+ unknown.append(username_or_email)
+ except cohorts.CohortConflict as err:
+ conflict.append({'username_or_email': username_or_email,
+ 'msg': str(err)})
+
+
+ return json_http_response({'success': True,
+ 'added': added,
+ 'present': present,
+ 'conflict': conflict,
+ 'unknown': unknown})
+
+@ensure_csrf_cookie
+@require_POST
+def remove_user_from_cohort(request, course_id, cohort_id):
+ """
+ Expects 'username': username in POST data.
+
+ Return json dict of:
+
+ {'success': True} or
+ {'success': False,
+ 'msg': error_msg}
+ """
+ get_course_with_access(request.user, course_id, 'staff')
+
+ username = request.POST.get('username')
+ if username is None:
+ return json_http_response({'success': False,
+ 'msg': 'No username specified'})
+
+ cohort = cohorts.get_cohort_by_id(course_id, cohort_id)
+ try:
+ user = User.objects.get(username=username)
+ cohort.users.remove(user)
+ return json_http_response({'success': True})
+ except User.DoesNotExist:
+ log.debug('no user')
+ return json_http_response({'success': False,
+ 'msg': "No user '{0}'".format(username)})
+
+
+def debug_cohort_mgmt(request, course_id):
+ """
+ Debugging view for dev.
+ """
+ # add staff check to make sure it's safe if it's accidentally deployed.
+ get_course_with_access(request.user, course_id, 'staff')
+
+ context = {'cohorts_ajax_url': reverse('cohorts',
+ kwargs={'course_id': course_id})}
+ return render_to_response('/course_groups/debug.html', context)
diff --git a/common/djangoapps/student/management/commands/pearson_export_cdd.py b/common/djangoapps/student/management/commands/pearson_export_cdd.py
index 67230c7f74..463eec6b70 100644
--- a/common/djangoapps/student/management/commands/pearson_export_cdd.py
+++ b/common/djangoapps/student/management/commands/pearson_export_cdd.py
@@ -1,15 +1,17 @@
import csv
+import os
from collections import OrderedDict
from datetime import datetime
-from os.path import isdir
from optparse import make_option
-from django.core.management.base import BaseCommand
+from django.conf import settings
+from django.core.management.base import BaseCommand, CommandError
from student.models import TestCenterUser
+
class Command(BaseCommand):
-
+
CSV_TO_MODEL_FIELDS = OrderedDict([
# Skipping optional field CandidateID
("ClientCandidateID", "client_candidate_id"),
@@ -34,43 +36,52 @@ class Command(BaseCommand):
("FAXCountryCode", "fax_country_code"),
("CompanyName", "company_name"),
# Skipping optional field CustomQuestion
- ("LastUpdate", "user_updated_at"), # in UTC, so same as what we store
+ ("LastUpdate", "user_updated_at"), # in UTC, so same as what we store
])
+    # define defaults, even though 'store_true' shouldn't need them.
+ # (call_command will set None as default value for all options that don't have one,
+ # so one cannot rely on presence/absence of flags in that world.)
option_list = BaseCommand.option_list + (
- make_option(
- '--dump_all',
- action='store_true',
- dest='dump_all',
- ),
+ make_option('--dest-from-settings',
+ action='store_true',
+ dest='dest-from-settings',
+ default=False,
+ help='Retrieve the destination to export to from django.'),
+ make_option('--destination',
+ action='store',
+ dest='destination',
+ default=None,
+ help='Where to store the exported files')
)
-
- args = ''
- help = """
- Export user demographic information from TestCenterUser model into a tab delimited
- text file with a format that Pearson expects.
- """
- def handle(self, *args, **kwargs):
- if len(args) < 1:
- print Command.help
- return
+ def handle(self, **options):
# update time should use UTC in order to be comparable to the user_updated_at
# field
uploaded_at = datetime.utcnow()
- # if specified destination is an existing directory, then
+ # if specified destination is an existing directory, then
# create a filename for it automatically. If it doesn't exist,
- # or exists as a file, then we will just write to it.
+ # then we will create the directory.
# Name will use timestamp -- this is UTC, so it will look funny,
- # but it should at least be consistent with the other timestamps
+ # but it should at least be consistent with the other timestamps
# used in the system.
- dest = args[0]
- if isdir(dest):
- destfile = os.path.join(dest, uploaded_at.strftime("cdd-%Y%m%d-%H%M%S.dat"))
+ if 'dest-from-settings' in options and options['dest-from-settings']:
+ if 'LOCAL_EXPORT' in settings.PEARSON:
+ dest = settings.PEARSON['LOCAL_EXPORT']
+ else:
+                raise CommandError('--dest-from-settings was enabled but the '
+ 'PEARSON[LOCAL_EXPORT] setting was not set.')
+ elif 'destination' in options and options['destination']:
+ dest = options['destination']
else:
- destfile = dest
-
+ raise CommandError('--destination or --dest-from-settings must be used')
+
+ if not os.path.isdir(dest):
+ os.makedirs(dest)
+
+ destfile = os.path.join(dest, uploaded_at.strftime("cdd-%Y%m%d-%H%M%S.dat"))
+
# strings must be in latin-1 format. CSV parser will
# otherwise convert unicode objects to ascii.
def ensure_encoding(value):
@@ -78,8 +89,8 @@ class Command(BaseCommand):
return value.encode('iso-8859-1')
else:
return value
-
- dump_all = kwargs['dump_all']
+
+# dump_all = options['dump_all']
with open(destfile, "wb") as outfile:
writer = csv.DictWriter(outfile,
@@ -89,7 +100,7 @@ class Command(BaseCommand):
extrasaction='ignore')
writer.writeheader()
for tcu in TestCenterUser.objects.order_by('id'):
- if dump_all or tcu.needs_uploading:
+ if tcu.needs_uploading: # or dump_all
record = dict((csv_field, ensure_encoding(getattr(tcu, model_field)))
for csv_field, model_field
in Command.CSV_TO_MODEL_FIELDS.items())
@@ -97,6 +108,3 @@ class Command(BaseCommand):
writer.writerow(record)
tcu.uploaded_at = uploaded_at
tcu.save()
-
-
-
diff --git a/common/djangoapps/student/management/commands/pearson_export_ead.py b/common/djangoapps/student/management/commands/pearson_export_ead.py
index de3bfc04ee..03dbce0024 100644
--- a/common/djangoapps/student/management/commands/pearson_export_ead.py
+++ b/common/djangoapps/student/management/commands/pearson_export_ead.py
@@ -1,15 +1,17 @@
import csv
+import os
from collections import OrderedDict
from datetime import datetime
-from os.path import isdir, join
from optparse import make_option
-from django.core.management.base import BaseCommand
+from django.conf import settings
+from django.core.management.base import BaseCommand, CommandError
+
+from student.models import TestCenterRegistration, ACCOMMODATION_REJECTED_CODE
-from student.models import TestCenterRegistration
class Command(BaseCommand):
-
+
CSV_TO_MODEL_FIELDS = OrderedDict([
('AuthorizationTransactionType', 'authorization_transaction_type'),
('AuthorizationID', 'authorization_id'),
@@ -20,51 +22,60 @@ class Command(BaseCommand):
('Accommodations', 'accommodation_code'),
('EligibilityApptDateFirst', 'eligibility_appointment_date_first'),
('EligibilityApptDateLast', 'eligibility_appointment_date_last'),
- ("LastUpdate", "user_updated_at"), # in UTC, so same as what we store
+ ("LastUpdate", "user_updated_at"), # in UTC, so same as what we store
])
- args = ''
- help = """
- Export user registration information from TestCenterRegistration model into a tab delimited
- text file with a format that Pearson expects.
- """
-
option_list = BaseCommand.option_list + (
- make_option(
- '--dump_all',
- action='store_true',
- dest='dump_all',
- ),
- make_option(
- '--force_add',
- action='store_true',
- dest='force_add',
- ),
+ make_option('--dest-from-settings',
+ action='store_true',
+ dest='dest-from-settings',
+ default=False,
+ help='Retrieve the destination to export to from django.'),
+ make_option('--destination',
+ action='store',
+ dest='destination',
+ default=None,
+ help='Where to store the exported files'),
+ make_option('--dump_all',
+ action='store_true',
+ dest='dump_all',
+ default=False,
+ ),
+ make_option('--force_add',
+ action='store_true',
+ dest='force_add',
+ default=False,
+ ),
)
-
-
- def handle(self, *args, **kwargs):
- if len(args) < 1:
- print Command.help
- return
- # update time should use UTC in order to be comparable to the user_updated_at
+ def handle(self, **options):
+ # update time should use UTC in order to be comparable to the user_updated_at
# field
uploaded_at = datetime.utcnow()
- # if specified destination is an existing directory, then
+ # if specified destination is an existing directory, then
# create a filename for it automatically. If it doesn't exist,
- # or exists as a file, then we will just write to it.
+ # then we will create the directory.
# Name will use timestamp -- this is UTC, so it will look funny,
- # but it should at least be consistent with the other timestamps
+ # but it should at least be consistent with the other timestamps
# used in the system.
- dest = args[0]
- if isdir(dest):
- destfile = join(dest, uploaded_at.strftime("ead-%Y%m%d-%H%M%S.dat"))
+ if 'dest-from-settings' in options and options['dest-from-settings']:
+ if 'LOCAL_EXPORT' in settings.PEARSON:
+ dest = settings.PEARSON['LOCAL_EXPORT']
+ else:
+                raise CommandError('--dest-from-settings was enabled but the '
+ 'PEARSON[LOCAL_EXPORT] setting was not set.')
+ elif 'destination' in options and options['destination']:
+ dest = options['destination']
else:
- destfile = dest
+ raise CommandError('--destination or --dest-from-settings must be used')
- dump_all = kwargs['dump_all']
+ if not os.path.isdir(dest):
+ os.makedirs(dest)
+
+ destfile = os.path.join(dest, uploaded_at.strftime("ead-%Y%m%d-%H%M%S.dat"))
+
+ dump_all = options['dump_all']
with open(destfile, "wb") as outfile:
writer = csv.DictWriter(outfile,
@@ -81,13 +92,11 @@ class Command(BaseCommand):
record["LastUpdate"] = record["LastUpdate"].strftime("%Y/%m/%d %H:%M:%S")
record["EligibilityApptDateFirst"] = record["EligibilityApptDateFirst"].strftime("%Y/%m/%d")
record["EligibilityApptDateLast"] = record["EligibilityApptDateLast"].strftime("%Y/%m/%d")
- if kwargs['force_add']:
+ if record["Accommodations"] == ACCOMMODATION_REJECTED_CODE:
+ record["Accommodations"] = ""
+ if options['force_add']:
record['AuthorizationTransactionType'] = 'Add'
writer.writerow(record)
tcr.uploaded_at = uploaded_at
tcr.save()
-
-
-
-
diff --git a/common/djangoapps/student/management/commands/pearson_import_conf_zip.py b/common/djangoapps/student/management/commands/pearson_import_conf_zip.py
new file mode 100644
index 0000000000..9c3a34a90c
--- /dev/null
+++ b/common/djangoapps/student/management/commands/pearson_import_conf_zip.py
@@ -0,0 +1,119 @@
+import csv
+
+from zipfile import ZipFile, is_zipfile
+from time import strptime, strftime
+
+from collections import OrderedDict
+from datetime import datetime
+from os.path import isdir
+from optparse import make_option
+from dogapi import dog_http_api, dog_stats_api
+
+from django.core.management.base import BaseCommand, CommandError
+from django.conf import settings
+
+from student.models import TestCenterUser, TestCenterRegistration
+
+
+class Command(BaseCommand):
+
+ dog_http_api.api_key = settings.DATADOG_API
+ args = ''
+ help = """
+ Import Pearson confirmation files and update TestCenterUser
+ and TestCenterRegistration tables with status.
+ """
+
+ @staticmethod
+ def datadog_error(string, tags):
+ dog_http_api.event("Pearson Import", string, alert_type='error', tags=[tags])
+
+ def handle(self, *args, **kwargs):
+ if len(args) < 1:
+ print Command.help
+ return
+
+ source_zip = args[0]
+ if not is_zipfile(source_zip):
+ error = "Input file is not a zipfile: \"{}\"".format(source_zip)
+ Command.datadog_error(error, source_zip)
+ raise CommandError(error)
+
+ # loop through all files in zip, and process them based on filename prefix:
+ with ZipFile(source_zip, 'r') as zipfile:
+ for fileinfo in zipfile.infolist():
+ with zipfile.open(fileinfo) as zipentry:
+ if fileinfo.filename.startswith("eac-"):
+ self.process_eac(zipentry)
+ elif fileinfo.filename.startswith("vcdc-"):
+ self.process_vcdc(zipentry)
+ else:
+                    error = "Unrecognized confirmation file type \"{}\" in confirmation zip file \"{}\"".format(fileinfo.filename, zipfile)
+ Command.datadog_error(error, source_zip)
+ raise CommandError(error)
+
+ def process_eac(self, eacfile):
+ print "processing eac"
+ reader = csv.DictReader(eacfile, delimiter="\t")
+ for row in reader:
+ client_authorization_id = row['ClientAuthorizationID']
+ if not client_authorization_id:
+ if row['Status'] == 'Error':
+ Command.datadog_error("Error in EAD file processing ({}): {}".format(row['Date'], row['Message']), eacfile.name)
+ else:
+ Command.datadog_error("Encountered bad record: {}".format(row), eacfile.name)
+ else:
+ try:
+ registration = TestCenterRegistration.objects.get(client_authorization_id=client_authorization_id)
+ Command.datadog_error("Found authorization record for user {}".format(registration.testcenter_user.user.username), eacfile)
+ # now update the record:
+ registration.upload_status = row['Status']
+ registration.upload_error_message = row['Message']
+ try:
+ registration.processed_at = strftime('%Y-%m-%d %H:%M:%S', strptime(row['Date'], '%Y/%m/%d %H:%M:%S'))
+ except ValueError as ve:
+ Command.datadog_error("Bad Date value found for {}: message {}".format(client_authorization_id, ve), eacfile.name)
+ # store the authorization Id if one is provided. (For debugging)
+ if row['AuthorizationID']:
+ try:
+ registration.authorization_id = int(row['AuthorizationID'])
+ except ValueError as ve:
+ Command.datadog_error("Bad AuthorizationID value found for {}: message {}".format(client_authorization_id, ve), eacfile.name)
+
+ registration.confirmed_at = datetime.utcnow()
+ registration.save()
+ except TestCenterRegistration.DoesNotExist:
+ Command.datadog_error("Failed to find record for client_auth_id {}".format(client_authorization_id), eacfile.name)
+
+ def process_vcdc(self, vcdcfile):
+ print "processing vcdc"
+ reader = csv.DictReader(vcdcfile, delimiter="\t")
+ for row in reader:
+ client_candidate_id = row['ClientCandidateID']
+ if not client_candidate_id:
+ if row['Status'] == 'Error':
+ Command.datadog_error("Error in CDD file processing ({}): {}".format(row['Date'], row['Message']), vcdcfile.name)
+ else:
+ Command.datadog_error("Encountered bad record: {}".format(row), vcdcfile.name)
+ else:
+ try:
+ tcuser = TestCenterUser.objects.get(client_candidate_id=client_candidate_id)
+ Command.datadog_error("Found demographics record for user {}".format(tcuser.user.username), vcdcfile.name)
+ # now update the record:
+ tcuser.upload_status = row['Status']
+ tcuser.upload_error_message = row['Message']
+ try:
+ tcuser.processed_at = strftime('%Y-%m-%d %H:%M:%S', strptime(row['Date'], '%Y/%m/%d %H:%M:%S'))
+ except ValueError as ve:
+ Command.datadog_error("Bad Date value found for {}: message {}".format(client_candidate_id, ve), vcdcfile.name)
+ # store the candidate Id if one is provided. (For debugging)
+ if row['CandidateID']:
+ try:
+ tcuser.candidate_id = int(row['CandidateID'])
+ except ValueError as ve:
+ Command.datadog_error("Bad CandidateID value found for {}: message {}".format(client_candidate_id, ve), vcdcfile.name)
+ tcuser.confirmed_at = datetime.utcnow()
+ tcuser.save()
+ except TestCenterUser.DoesNotExist:
+ Command.datadog_error(" Failed to find record for client_candidate_id {}".format(client_candidate_id), vcdcfile.name)
+
diff --git a/common/djangoapps/student/management/commands/pearson_make_tc_registration.py b/common/djangoapps/student/management/commands/pearson_make_tc_registration.py
index 81a478d19d..2fcfa9ae48 100644
--- a/common/djangoapps/student/management/commands/pearson_make_tc_registration.py
+++ b/common/djangoapps/student/management/commands/pearson_make_tc_registration.py
@@ -71,6 +71,12 @@ class Command(BaseCommand):
dest='ignore_registration_dates',
help='find exam info for course based on exam_series_code, even if the exam is not active.'
),
+ make_option(
+ '--create_dummy_exam',
+ action='store_true',
+ dest='create_dummy_exam',
+ help='create dummy exam info for course, even if course exists'
+ ),
)
args = ""
help = "Create or modify a TestCenterRegistration entry for a given Student"
@@ -98,15 +104,20 @@ class Command(BaseCommand):
except TestCenterUser.DoesNotExist:
raise CommandError("User \"{}\" does not have an existing demographics record".format(username))
- # check to see if a course_id was specified, and use information from that:
- try:
- course = course_from_id(course_id)
- if 'ignore_registration_dates' in our_options:
- examlist = [exam for exam in course.test_center_exams if exam.exam_series_code == our_options.get('exam_series_code')]
- exam = examlist[0] if len(examlist) > 0 else None
- else:
- exam = course.current_test_center_exam
- except ItemNotFoundError:
+ # get an "exam" object. Check to see if a course_id was specified, and use information from that:
+ exam = None
+ create_dummy_exam = 'create_dummy_exam' in our_options and our_options['create_dummy_exam']
+ if not create_dummy_exam:
+ try:
+ course = course_from_id(course_id)
+ if 'ignore_registration_dates' in our_options:
+ examlist = [exam for exam in course.test_center_exams if exam.exam_series_code == our_options.get('exam_series_code')]
+ exam = examlist[0] if len(examlist) > 0 else None
+ else:
+ exam = course.current_test_center_exam
+ except ItemNotFoundError:
+ pass
+ else:
# otherwise use explicit values (so we don't have to define a course):
exam_name = "Dummy Placeholder Name"
exam_info = { 'Exam_Series_Code': our_options['exam_series_code'],
@@ -120,7 +131,7 @@ class Command(BaseCommand):
our_options['eligibility_appointment_date_last'] = strftime("%Y-%m-%d", exam.last_eligible_appointment_date)
if exam is None:
- raise CommandError("Exam for course_id {%s} does not exist".format(course_id))
+ raise CommandError("Exam for course_id {} does not exist".format(course_id))
exam_code = exam.exam_series_code
diff --git a/common/djangoapps/student/management/commands/pearson_make_tc_user.py b/common/djangoapps/student/management/commands/pearson_make_tc_user.py
index da9bfc3bd0..87e0b4dadd 100644
--- a/common/djangoapps/student/management/commands/pearson_make_tc_user.py
+++ b/common/djangoapps/student/management/commands/pearson_make_tc_user.py
@@ -1,7 +1,7 @@
from optparse import make_option
from django.contrib.auth.models import User
-from django.core.management.base import BaseCommand
+from django.core.management.base import BaseCommand, CommandError
from student.models import TestCenterUser, TestCenterUserForm
@@ -161,15 +161,16 @@ class Command(BaseCommand):
if form.is_valid():
form.update_and_save()
else:
+ errorlist = []
if (len(form.errors) > 0):
- print "Field Form errors encountered:"
- for fielderror in form.errors:
- print "Field Form Error: %s" % fielderror
- if (len(form.non_field_errors()) > 0):
- print "Non-field Form errors encountered:"
- for nonfielderror in form.non_field_errors:
- print "Non-field Form Error: %s" % nonfielderror
-
+ errorlist.append("Field Form errors encountered:")
+ for fielderror in form.errors:
+ errorlist.append("Field Form Error: {}".format(fielderror))
+ if (len(form.non_field_errors()) > 0):
+ errorlist.append("Non-field Form errors encountered:")
+ for nonfielderror in form.non_field_errors:
+ errorlist.append("Non-field Form Error: {}".format(nonfielderror))
+ raise CommandError("\n".join(errorlist))
else:
print "No changes necessary to make to existing user's demographics."
diff --git a/common/djangoapps/student/management/commands/pearson_transfer.py b/common/djangoapps/student/management/commands/pearson_transfer.py
new file mode 100644
index 0000000000..6811e1833d
--- /dev/null
+++ b/common/djangoapps/student/management/commands/pearson_transfer.py
@@ -0,0 +1,163 @@
+import os
+from optparse import make_option
+from stat import S_ISDIR
+
+from django.conf import settings
+from django.core.management.base import BaseCommand, CommandError
+from django.core.management import call_command
+from dogapi import dog_http_api, dog_stats_api
+import paramiko
+import boto
+
+dog_http_api.api_key = settings.DATADOG_API
+
+
+class Command(BaseCommand):
+ help = """
+ This command handles the importing and exporting of student records for
+ Pearson. It uses some other Django commands to export and import the
+ files and then uploads over SFTP to Pearson and stuffs the entry in an
+ S3 bucket for archive purposes.
+
+ Usage: django-admin.py pearson_transfer --mode [import|export|both]
+ """
+
+ option_list = BaseCommand.option_list + (
+ make_option('--mode',
+ action='store',
+ dest='mode',
+ default='both',
+ choices=('import', 'export', 'both'),
+ help='mode is import, export, or both'),
+ )
+
+ def handle(self, **options):
+
+ if not hasattr(settings, 'PEARSON'):
+ raise CommandError('No PEARSON entries in auth/env.json.')
+
+ # check settings needed for either import or export:
+ for value in ['SFTP_HOSTNAME', 'SFTP_USERNAME', 'SFTP_PASSWORD', 'S3_BUCKET']:
+ if value not in settings.PEARSON:
+ raise CommandError('No entry in the PEARSON settings '
+ '(env/auth.json) for {0}'.format(value))
+
+ for value in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']:
+ if not hasattr(settings, value):
+ raise CommandError('No entry in the AWS settings '
+ '(env/auth.json) for {0}'.format(value))
+
+ # check additional required settings for import and export:
+ if options['mode'] in ('export', 'both'):
+ for value in ['LOCAL_EXPORT','SFTP_EXPORT']:
+ if value not in settings.PEARSON:
+ raise CommandError('No entry in the PEARSON settings '
+ '(env/auth.json) for {0}'.format(value))
+ # make sure that the import directory exists or can be created:
+ source_dir = settings.PEARSON['LOCAL_EXPORT']
+ if not os.path.isdir(source_dir):
+ os.makedirs(source_dir)
+
+ if options['mode'] in ('import', 'both'):
+ for value in ['LOCAL_IMPORT','SFTP_IMPORT']:
+ if value not in settings.PEARSON:
+ raise CommandError('No entry in the PEARSON settings '
+ '(env/auth.json) for {0}'.format(value))
+ # make sure that the import directory exists or can be created:
+ dest_dir = settings.PEARSON['LOCAL_IMPORT']
+ if not os.path.isdir(dest_dir):
+ os.makedirs(dest_dir)
+
+
+ def sftp(files_from, files_to, mode, deleteAfterCopy=False):
+ with dog_stats_api.timer('pearson.{0}'.format(mode), tags='sftp'):
+ try:
+ t = paramiko.Transport((settings.PEARSON['SFTP_HOSTNAME'], 22))
+ t.connect(username=settings.PEARSON['SFTP_USERNAME'],
+ password=settings.PEARSON['SFTP_PASSWORD'])
+ sftp = paramiko.SFTPClient.from_transport(t)
+
+ if mode == 'export':
+ try:
+ sftp.chdir(files_to)
+ except IOError:
+ raise CommandError('SFTP destination path does not exist: {}'.format(files_to))
+ for filename in os.listdir(files_from):
+ sftp.put(files_from + '/' + filename, filename)
+ if deleteAfterCopy:
+ os.remove(os.path.join(files_from, filename))
+ else:
+ try:
+ sftp.chdir(files_from)
+ except IOError:
+ raise CommandError('SFTP source path does not exist: {}'.format(files_from))
+ for filename in sftp.listdir('.'):
+ # skip subdirectories
+ if not S_ISDIR(sftp.stat(filename).st_mode):
+ sftp.get(filename, files_to + '/' + filename)
+ # delete files from sftp server once they are successfully pulled off:
+ if deleteAfterCopy:
+ sftp.remove(filename)
+ except:
+ dog_http_api.event('pearson {0}'.format(mode),
+ 'sftp uploading failed',
+ alert_type='error')
+ raise
+ finally:
+ sftp.close()
+ t.close()
+
+ def s3(files_from, bucket, mode, deleteAfterCopy=False):
+ with dog_stats_api.timer('pearson.{0}'.format(mode), tags='s3'):
+ try:
+ for filename in os.listdir(files_from):
+ source_file = os.path.join(files_from, filename)
+ # use mode as name of directory into which to write files
+ dest_file = os.path.join(mode, filename)
+ upload_file_to_s3(bucket, source_file, dest_file)
+ if deleteAfterCopy:
+ os.remove(files_from + '/' + filename)
+ except:
+ dog_http_api.event('pearson {0}'.format(mode),
+ 's3 archiving failed')
+ raise
+
+ def upload_file_to_s3(bucket, source_file, dest_file):
+ """
+ Upload file to S3
+ """
+ s3 = boto.connect_s3(settings.AWS_ACCESS_KEY_ID,
+ settings.AWS_SECRET_ACCESS_KEY)
+ from boto.s3.key import Key
+ b = s3.get_bucket(bucket)
+ k = Key(b)
+ k.key = "{filename}".format(filename=dest_file)
+ k.set_contents_from_filename(source_file)
+
+ def export_pearson():
+ options = { 'dest-from-settings' : True }
+ call_command('pearson_export_cdd', **options)
+ call_command('pearson_export_ead', **options)
+ mode = 'export'
+ sftp(settings.PEARSON['LOCAL_EXPORT'], settings.PEARSON['SFTP_EXPORT'], mode, deleteAfterCopy = False)
+ s3(settings.PEARSON['LOCAL_EXPORT'], settings.PEARSON['S3_BUCKET'], mode, deleteAfterCopy=True)
+
+ def import_pearson():
+ mode = 'import'
+ try:
+ sftp(settings.PEARSON['SFTP_IMPORT'], settings.PEARSON['LOCAL_IMPORT'], mode, deleteAfterCopy = True)
+ s3(settings.PEARSON['LOCAL_IMPORT'], settings.PEARSON['S3_BUCKET'], mode, deleteAfterCopy=False)
+ except Exception as e:
+ dog_http_api.event('Pearson Import failure', str(e))
+ raise e
+ else:
+ for filename in os.listdir(settings.PEARSON['LOCAL_IMPORT']):
+ filepath = os.path.join(settings.PEARSON['LOCAL_IMPORT'], filename)
+ call_command('pearson_import_conf_zip', filepath)
+ os.remove(filepath)
+
+ # actually do the work!
+ if options['mode'] in ('export', 'both'):
+ export_pearson()
+ if options['mode'] in ('import', 'both'):
+ import_pearson()
diff --git a/common/djangoapps/student/management/commands/tests/__init__.py b/common/djangoapps/student/management/commands/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/common/djangoapps/student/management/commands/tests/test_pearson.py b/common/djangoapps/student/management/commands/tests/test_pearson.py
new file mode 100644
index 0000000000..199557bf87
--- /dev/null
+++ b/common/djangoapps/student/management/commands/tests/test_pearson.py
@@ -0,0 +1,382 @@
+'''
+Created on Jan 17, 2013
+
+@author: brian
+'''
+import logging
+import os
+from tempfile import mkdtemp
+import cStringIO
+import sys
+
+from django.test import TestCase
+from django.core.management import call_command
+from nose.plugins.skip import SkipTest
+
+from student.models import User, TestCenterRegistration, TestCenterUser, get_testcenter_registration
+
+log = logging.getLogger(__name__)
+
+def create_tc_user(username):
+ user = User.objects.create_user(username, '{}@edx.org'.format(username), 'fakepass')
+ options = {
+ 'first_name' : 'TestFirst',
+ 'last_name' : 'TestLast',
+ 'address_1' : 'Test Address',
+ 'city' : 'TestCity',
+ 'state' : 'Alberta',
+ 'postal_code' : 'A0B 1C2',
+ 'country' : 'CAN',
+ 'phone' : '252-1866',
+ 'phone_country_code' : '1',
+ }
+ call_command('pearson_make_tc_user', username, **options)
+ return TestCenterUser.objects.get(user=user)
+
+
+def create_tc_registration(username, course_id = 'org1/course1/term1', exam_code = 'exam1', accommodation_code = None):
+
+ options = { 'exam_series_code' : exam_code,
+ 'eligibility_appointment_date_first' : '2013-01-01T00:00',
+ 'eligibility_appointment_date_last' : '2013-12-31T23:59',
+ 'accommodation_code' : accommodation_code,
+ }
+
+ call_command('pearson_make_tc_registration', username, course_id, **options)
+ user = User.objects.get(username=username)
+ registrations = get_testcenter_registration(user, course_id, exam_code)
+ return registrations[0]
+
+def create_multiple_registrations(prefix='test'):
+ username1 = '{}_multiple1'.format(prefix)
+ create_tc_user(username1)
+ create_tc_registration(username1)
+ create_tc_registration(username1, course_id = 'org1/course2/term1')
+ create_tc_registration(username1, exam_code = 'exam2')
+ username2 = '{}_multiple2'.format(prefix)
+ create_tc_user(username2)
+ create_tc_registration(username2)
+ username3 = '{}_multiple3'.format(prefix)
+ create_tc_user(username3)
+ create_tc_registration(username3, course_id = 'org1/course2/term1')
+ username4 = '{}_multiple4'.format(prefix)
+ create_tc_user(username4)
+ create_tc_registration(username4, exam_code = 'exam2')
+
+def get_command_error_text(*args, **options):
+ stderr_string = None
+ old_stderr = sys.stderr
+ sys.stderr = cStringIO.StringIO()
+ try:
+ call_command(*args, **options)
+ except SystemExit, why1:
+ # The goal here is to catch CommandError calls.
+ # But these are actually translated into nice messages,
+ # and sys.exit(1) is then called. For testing, we
+ # want to catch what sys.exit throws, and get the
+ # relevant text either from stdout or stderr.
+ if (why1.message > 0):
+ stderr_string = sys.stderr.getvalue()
+ else:
+ raise why1
+ except Exception, why:
+ raise why
+
+ finally:
+ sys.stderr = old_stderr
+
+ if stderr_string is None:
+ raise Exception("Expected call to {} to fail, but it succeeded!".format(args[0]))
+ return stderr_string
+
+def get_error_string_for_management_call(*args, **options):
+ stdout_string = None
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ sys.stdout = cStringIO.StringIO()
+ sys.stderr = cStringIO.StringIO()
+ try:
+ call_command(*args, **options)
+ except SystemExit, why1:
+ # The goal here is to catch CommandError calls.
+ # But these are actually translated into nice messages,
+ # and sys.exit(1) is then called. For testing, we
+ # want to catch what sys.exit throws, and get the
+ # relevant text either from stdout or stderr.
+ if (why1.message == 1):
+ stdout_string = sys.stdout.getvalue()
+ stderr_string = sys.stderr.getvalue()
+ else:
+ raise why1
+ except Exception, why:
+ raise why
+
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+
+ if stdout_string is None:
+ raise Exception("Expected call to {} to fail, but it succeeded!".format(args[0]))
+ return stdout_string, stderr_string
+
+
+def get_file_info(dirpath):
+ filelist = os.listdir(dirpath)
+ print 'Files found: {}'.format(filelist)
+ numfiles = len(filelist)
+ if numfiles == 1:
+ filepath = os.path.join(dirpath, filelist[0])
+ with open(filepath, 'r') as cddfile:
+ filecontents = cddfile.readlines()
+ numlines = len(filecontents)
+ return filepath, numlines
+ else:
+ raise Exception("Expected to find a single file in {}, but found {}".format(dirpath,filelist))
+
+class PearsonTestCase(TestCase):
+ '''
+ Base class for tests running Pearson-related commands
+ '''
+ import_dir = mkdtemp(prefix="import")
+ export_dir = mkdtemp(prefix="export")
+
+ def assertErrorContains(self, error_message, expected):
+ self.assertTrue(error_message.find(expected) >= 0, 'error message "{}" did not contain "{}"'.format(error_message, expected))
+
+ def tearDown(self):
+ def delete_temp_dir(dirname):
+ if os.path.exists(dirname):
+ for filename in os.listdir(dirname):
+ os.remove(os.path.join(dirname, filename))
+ os.rmdir(dirname)
+
+ # clean up after any test data was dumped to temp directory
+ delete_temp_dir(self.import_dir)
+ delete_temp_dir(self.export_dir)
+
+ # and clean up the database:
+# TestCenterUser.objects.all().delete()
+# TestCenterRegistration.objects.all().delete()
+
+class PearsonCommandTestCase(PearsonTestCase):
+
+ def test_missing_demographic_fields(self):
+ # We won't bother to test all details of form validation here.
+ # It is enough to show that it works here, but deal with test cases for the form
+ # validation in the student tests, not these management tests.
+ username = 'baduser'
+ User.objects.create_user(username, '{}@edx.org'.format(username), 'fakepass')
+ options = {}
+ error_string = get_command_error_text('pearson_make_tc_user', username, **options)
+ self.assertTrue(error_string.find('Field Form errors encountered:') >= 0)
+ self.assertTrue(error_string.find('Field Form Error: city') >= 0)
+ self.assertTrue(error_string.find('Field Form Error: first_name') >= 0)
+ self.assertTrue(error_string.find('Field Form Error: last_name') >= 0)
+ self.assertTrue(error_string.find('Field Form Error: country') >= 0)
+ self.assertTrue(error_string.find('Field Form Error: phone_country_code') >= 0)
+ self.assertTrue(error_string.find('Field Form Error: phone') >= 0)
+ self.assertTrue(error_string.find('Field Form Error: address_1') >= 0)
+ self.assertErrorContains(error_string, 'Field Form Error: address_1')
+
+ def test_create_good_testcenter_user(self):
+ testcenter_user = create_tc_user("test1")
+ self.assertIsNotNone(testcenter_user)
+
+ def test_create_good_testcenter_registration(self):
+ username = 'test1'
+ create_tc_user(username)
+ registration = create_tc_registration(username)
+ self.assertIsNotNone(registration)
+
+ def test_cdd_missing_option(self):
+ error_string = get_command_error_text('pearson_export_cdd', **{})
+ self.assertErrorContains(error_string, 'Error: --destination or --dest-from-settings must be used')
+
+ def test_ead_missing_option(self):
+ error_string = get_command_error_text('pearson_export_ead', **{})
+ self.assertErrorContains(error_string, 'Error: --destination or --dest-from-settings must be used')
+
+ def test_export_single_cdd(self):
+ # before we generate any tc_users, we expect there to be nothing to output:
+ options = { 'dest-from-settings' : True }
+ with self.settings(PEARSON={ 'LOCAL_EXPORT' : self.export_dir }):
+ call_command('pearson_export_cdd', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 1, "Expect cdd file to have no non-header lines")
+ os.remove(filepath)
+
+ # generating a tc_user should result in a line in the output
+ username = 'test_single_cdd'
+ create_tc_user(username)
+ call_command('pearson_export_cdd', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 2, "Expect cdd file to have one non-header line")
+ os.remove(filepath)
+
+ # output after registration should not have any entries again.
+ call_command('pearson_export_cdd', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 1, "Expect cdd file to have no non-header lines")
+ os.remove(filepath)
+
+ # if we modify the record, then it should be output again:
+ user_options = { 'first_name' : 'NewTestFirst', }
+ call_command('pearson_make_tc_user', username, **user_options)
+ call_command('pearson_export_cdd', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 2, "Expect cdd file to have one non-header line")
+ os.remove(filepath)
+
+ def test_export_single_ead(self):
+ # before we generate any registrations, we expect there to be nothing to output:
+ options = { 'dest-from-settings' : True }
+ with self.settings(PEARSON={ 'LOCAL_EXPORT' : self.export_dir }):
+ call_command('pearson_export_ead', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 1, "Expect ead file to have no non-header lines")
+ os.remove(filepath)
+
+ # generating a registration should result in a line in the output
+ username = 'test_single_ead'
+ create_tc_user(username)
+ create_tc_registration(username)
+ call_command('pearson_export_ead', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 2, "Expect ead file to have one non-header line")
+ os.remove(filepath)
+
+ # output after registration should not have any entries again.
+ call_command('pearson_export_ead', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 1, "Expect ead file to have no non-header lines")
+ os.remove(filepath)
+
+ # if we modify the record, then it should be output again:
+ create_tc_registration(username, accommodation_code='EQPMNT')
+ call_command('pearson_export_ead', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 2, "Expect ead file to have one non-header line")
+ os.remove(filepath)
+
+ def test_export_multiple(self):
+ create_multiple_registrations("export")
+ with self.settings(PEARSON={ 'LOCAL_EXPORT' : self.export_dir }):
+ options = { 'dest-from-settings' : True }
+ call_command('pearson_export_cdd', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 5, "Expect cdd file to have four non-header lines: total was {}".format(numlines))
+ os.remove(filepath)
+
+ call_command('pearson_export_ead', **options)
+ (filepath, numlines) = get_file_info(self.export_dir)
+ self.assertEquals(numlines, 7, "Expect ead file to have six non-header lines: total was {}".format(numlines))
+ os.remove(filepath)
+
+
+# def test_bad_demographic_option(self):
+# username = 'nonuser'
+# output_string, stderrmsg = get_error_string_for_management_call('pearson_make_tc_user', username, **{'--garbage' : None })
+# print stderrmsg
+# self.assertErrorContains(stderrmsg, 'Unexpected option')
+#
+# def test_missing_demographic_user(self):
+# username = 'nonuser'
+# output_string, error_string = get_error_string_for_management_call('pearson_make_tc_user', username, **{})
+# self.assertErrorContains(error_string, 'User matching query does not exist')
+
+# credentials for a test SFTP site:
+SFTP_HOSTNAME = 'ec2-23-20-150-101.compute-1.amazonaws.com'
+SFTP_USERNAME = 'pearsontest'
+SFTP_PASSWORD = 'password goes here'
+
+S3_BUCKET = 'edx-pearson-archive'
+AWS_ACCESS_KEY_ID = 'put yours here'
+AWS_SECRET_ACCESS_KEY = 'put yours here'
+
+class PearsonTransferTestCase(PearsonTestCase):
+ '''
+ Class for tests running Pearson transfers
+ '''
+
+ def test_transfer_config(self):
+ with self.settings(DATADOG_API='FAKE_KEY'):
+ # TODO: why is this failing with the wrong error message?!
+ stderrmsg = get_command_error_text('pearson_transfer', **{'mode' : 'garbage'})
+ self.assertErrorContains(stderrmsg, 'Error: No PEARSON entries')
+ with self.settings(DATADOG_API='FAKE_KEY'):
+ stderrmsg = get_command_error_text('pearson_transfer')
+ self.assertErrorContains(stderrmsg, 'Error: No PEARSON entries')
+ with self.settings(DATADOG_API='FAKE_KEY',
+ PEARSON={'LOCAL_EXPORT' : self.export_dir,
+ 'LOCAL_IMPORT' : self.import_dir }):
+ stderrmsg = get_command_error_text('pearson_transfer')
+ self.assertErrorContains(stderrmsg, 'Error: No entry in the PEARSON settings')
+
+ def test_transfer_export_missing_dest_dir(self):
+ raise SkipTest()
+ create_multiple_registrations('export_missing_dest')
+ with self.settings(DATADOG_API='FAKE_KEY',
+ PEARSON={'LOCAL_EXPORT' : self.export_dir,
+ 'SFTP_EXPORT' : 'this/does/not/exist',
+ 'SFTP_HOSTNAME' : SFTP_HOSTNAME,
+ 'SFTP_USERNAME' : SFTP_USERNAME,
+ 'SFTP_PASSWORD' : SFTP_PASSWORD,
+ 'S3_BUCKET' : S3_BUCKET,
+ },
+ AWS_ACCESS_KEY_ID = AWS_ACCESS_KEY_ID,
+ AWS_SECRET_ACCESS_KEY = AWS_SECRET_ACCESS_KEY):
+ options = { 'mode' : 'export'}
+ stderrmsg = get_command_error_text('pearson_transfer', **options)
+ self.assertErrorContains(stderrmsg, 'Error: SFTP destination path does not exist')
+
+ def test_transfer_export(self):
+ raise SkipTest()
+ create_multiple_registrations("transfer_export")
+ with self.settings(DATADOG_API='FAKE_KEY',
+ PEARSON={'LOCAL_EXPORT' : self.export_dir,
+ 'SFTP_EXPORT' : 'results/topvue',
+ 'SFTP_HOSTNAME' : SFTP_HOSTNAME,
+ 'SFTP_USERNAME' : SFTP_USERNAME,
+ 'SFTP_PASSWORD' : SFTP_PASSWORD,
+ 'S3_BUCKET' : S3_BUCKET,
+ },
+ AWS_ACCESS_KEY_ID = AWS_ACCESS_KEY_ID,
+ AWS_SECRET_ACCESS_KEY = AWS_SECRET_ACCESS_KEY):
+ options = { 'mode' : 'export'}
+# call_command('pearson_transfer', **options)
+# # confirm that the export directory is still empty:
+# self.assertEqual(len(os.listdir(self.export_dir)), 0, "expected export directory to be empty")
+
+ def test_transfer_import_missing_source_dir(self):
+ raise SkipTest()
+ create_multiple_registrations('import_missing_src')
+ with self.settings(DATADOG_API='FAKE_KEY',
+ PEARSON={'LOCAL_IMPORT' : self.import_dir,
+ 'SFTP_IMPORT' : 'this/does/not/exist',
+ 'SFTP_HOSTNAME' : SFTP_HOSTNAME,
+ 'SFTP_USERNAME' : SFTP_USERNAME,
+ 'SFTP_PASSWORD' : SFTP_PASSWORD,
+ 'S3_BUCKET' : S3_BUCKET,
+ },
+ AWS_ACCESS_KEY_ID = AWS_ACCESS_KEY_ID,
+ AWS_SECRET_ACCESS_KEY = AWS_SECRET_ACCESS_KEY):
+ options = { 'mode' : 'import'}
+ stderrmsg = get_command_error_text('pearson_transfer', **options)
+ self.assertErrorContains(stderrmsg, 'Error: SFTP source path does not exist')
+
+ def test_transfer_import(self):
+ raise SkipTest()
+ create_multiple_registrations('import_missing_src')
+ with self.settings(DATADOG_API='FAKE_KEY',
+ PEARSON={'LOCAL_IMPORT' : self.import_dir,
+ 'SFTP_IMPORT' : 'results',
+ 'SFTP_HOSTNAME' : SFTP_HOSTNAME,
+ 'SFTP_USERNAME' : SFTP_USERNAME,
+ 'SFTP_PASSWORD' : SFTP_PASSWORD,
+ 'S3_BUCKET' : S3_BUCKET,
+ },
+ AWS_ACCESS_KEY_ID = AWS_ACCESS_KEY_ID,
+ AWS_SECRET_ACCESS_KEY = AWS_SECRET_ACCESS_KEY):
+ options = { 'mode' : 'import'}
+ call_command('pearson_transfer', **options)
+ self.assertEqual(len(os.listdir(self.import_dir)), 0, "expected import directory to be empty")
diff --git a/common/djangoapps/student/models.py b/common/djangoapps/student/models.py
index f13a691215..44b947c045 100644
--- a/common/djangoapps/student/models.py
+++ b/common/djangoapps/student/models.py
@@ -1,30 +1,5 @@
"""
-Models for Student Information
-
-Replication Notes
-
-TODO: Update this to be consistent with reality (no portal servers, no more askbot)
-
-In our live deployment, we intend to run in a scenario where there is a pool of
-Portal servers that hold the canoncial user information and that user
-information is replicated to slave Course server pools. Each Course has a set of
-servers that serves only its content and has users that are relevant only to it.
-
-We replicate the following tables into the Course DBs where the user is
-enrolled. Only the Portal servers should ever write to these models.
-* UserProfile
-* CourseEnrollment
-
-We do a partial replication of:
-* User -- Askbot extends this and uses the extra fields, so we replicate only
- the stuff that comes with basic django_auth and ignore the rest.)
-
-There are a couple different scenarios:
-
-1. There's an update of User or UserProfile -- replicate it to all Course DBs
- that the user is enrolled in (found via CourseEnrollment).
-2. There's a change in CourseEnrollment. We need to push copies of UserProfile,
- CourseEnrollment, and the base fields in User
+Models for User Information (students, staff, etc)
Migration Notes
@@ -146,8 +121,8 @@ class TestCenterUser(models.Model):
The field names and lengths are modeled on the conventions and constraints
of Pearson's data import system, including oddities such as suffix having
a limit of 255 while last_name only gets 50.
-
- Also storing here the confirmation information received from Pearson (if any)
+
+ Also storing here the confirmation information received from Pearson (if any)
as to the success or failure of the upload. (VCDC file)
"""
# Our own record keeping...
@@ -197,7 +172,7 @@ class TestCenterUser(models.Model):
uploaded_at = models.DateTimeField(null=True, blank=True, db_index=True)
# confirmation back from the test center, as well as timestamps
- # on when they processed the request, and when we received
+ # on when they processed the request, and when we received
# confirmation back.
processed_at = models.DateTimeField(null=True, db_index=True)
upload_status = models.CharField(max_length=20, blank=True, db_index=True) # 'Error' or 'Accepted'
@@ -211,52 +186,52 @@ class TestCenterUser(models.Model):
@property
def needs_uploading(self):
return self.uploaded_at is None or self.uploaded_at < self.user_updated_at
-
+
@staticmethod
def user_provided_fields():
- return [ 'first_name', 'middle_name', 'last_name', 'suffix', 'salutation',
- 'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country',
+ return [ 'first_name', 'middle_name', 'last_name', 'suffix', 'salutation',
+ 'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country',
'phone', 'extension', 'phone_country_code', 'fax', 'fax_country_code', 'company_name']
-
+
@property
def email(self):
return self.user.email
-
+
def needs_update(self, fields):
for fieldname in TestCenterUser.user_provided_fields():
if fieldname in fields and getattr(self, fieldname) != fields[fieldname]:
return True
-
- return False
-
+
+ return False
+
@staticmethod
def _generate_edx_id(prefix):
NUM_DIGITS = 12
return u"{}{:012}".format(prefix, randint(1, 10**NUM_DIGITS-1))
-
+
@staticmethod
def _generate_candidate_id():
return TestCenterUser._generate_edx_id("edX")
-
+
@classmethod
def create(cls, user):
testcenter_user = cls(user=user)
- # testcenter_user.candidate_id remains unset
+ # testcenter_user.candidate_id remains unset
# assign an ID of our own:
cand_id = cls._generate_candidate_id()
while TestCenterUser.objects.filter(client_candidate_id=cand_id).exists():
cand_id = cls._generate_candidate_id()
- testcenter_user.client_candidate_id = cand_id
+ testcenter_user.client_candidate_id = cand_id
return testcenter_user
@property
def is_accepted(self):
return self.upload_status == TEST_CENTER_STATUS_ACCEPTED
-
+
@property
def is_rejected(self):
return self.upload_status == TEST_CENTER_STATUS_ERROR
-
+
@property
def is_pending(self):
return not self.is_accepted and not self.is_rejected
@@ -264,26 +239,26 @@ class TestCenterUser(models.Model):
class TestCenterUserForm(ModelForm):
class Meta:
model = TestCenterUser
- fields = ( 'first_name', 'middle_name', 'last_name', 'suffix', 'salutation',
- 'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country',
+ fields = ( 'first_name', 'middle_name', 'last_name', 'suffix', 'salutation',
+ 'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country',
'phone', 'extension', 'phone_country_code', 'fax', 'fax_country_code', 'company_name')
-
+
def update_and_save(self):
new_user = self.save(commit=False)
# create additional values here:
new_user.user_updated_at = datetime.utcnow()
new_user.upload_status = ''
new_user.save()
- log.info("Updated demographic information for user's test center exam registration: username \"{}\" ".format(new_user.user.username))
-
+ log.info("Updated demographic information for user's test center exam registration: username \"{}\" ".format(new_user.user.username))
+
# add validation:
-
+
def clean_country(self):
code = self.cleaned_data['country']
if code and len(code) != 3:
raise forms.ValidationError(u'Must be three characters (ISO 3166-1): e.g. USA, CAN, MNG')
return code
-
+
def clean(self):
def _can_encode_as_latin(fieldvalue):
try:
@@ -291,40 +266,40 @@ class TestCenterUserForm(ModelForm):
except UnicodeEncodeError:
return False
return True
-
+
cleaned_data = super(TestCenterUserForm, self).clean()
-
+
# check for interactions between fields:
if 'country' in cleaned_data:
country = cleaned_data.get('country')
if country == 'USA' or country == 'CAN':
if 'state' in cleaned_data and len(cleaned_data['state']) == 0:
- self._errors['state'] = self.error_class([u'Required if country is USA or CAN.'])
+ self._errors['state'] = self.error_class([u'Required if country is USA or CAN.'])
del cleaned_data['state']
if 'postal_code' in cleaned_data and len(cleaned_data['postal_code']) == 0:
- self._errors['postal_code'] = self.error_class([u'Required if country is USA or CAN.'])
+ self._errors['postal_code'] = self.error_class([u'Required if country is USA or CAN.'])
del cleaned_data['postal_code']
-
+
if 'fax' in cleaned_data and len(cleaned_data['fax']) > 0 and 'fax_country_code' in cleaned_data and len(cleaned_data['fax_country_code']) == 0:
- self._errors['fax_country_code'] = self.error_class([u'Required if fax is specified.'])
+ self._errors['fax_country_code'] = self.error_class([u'Required if fax is specified.'])
del cleaned_data['fax_country_code']
# check encoding for all fields:
cleaned_data_fields = [fieldname for fieldname in cleaned_data]
for fieldname in cleaned_data_fields:
if not _can_encode_as_latin(cleaned_data[fieldname]):
- self._errors[fieldname] = self.error_class([u'Must only use characters in Latin-1 (iso-8859-1) encoding'])
+ self._errors[fieldname] = self.error_class([u'Must only use characters in Latin-1 (iso-8859-1) encoding'])
del cleaned_data[fieldname]
# Always return the full collection of cleaned data.
return cleaned_data
-
-# our own code to indicate that a request has been rejected.
-ACCOMMODATION_REJECTED_CODE = 'NONE'
-
+
+# our own code to indicate that a request has been rejected.
+ACCOMMODATION_REJECTED_CODE = 'NONE'
+
ACCOMMODATION_CODES = (
- (ACCOMMODATION_REJECTED_CODE, 'No Accommodation Granted'),
+ (ACCOMMODATION_REJECTED_CODE, 'No Accommodation Granted'),
('EQPMNT', 'Equipment'),
('ET12ET', 'Extra Time - 1/2 Exam Time'),
('ET30MN', 'Extra Time - 30 Minutes'),
@@ -334,11 +309,11 @@ ACCOMMODATION_CODES = (
('SRRERC', 'Separate Room and Reader/Recorder'),
('SRRECR', 'Separate Room and Recorder'),
('SRSEAN', 'Separate Room and Service Animal'),
- ('SRSGNR', 'Separate Room and Sign Language Interpreter'),
+ ('SRSGNR', 'Separate Room and Sign Language Interpreter'),
)
ACCOMMODATION_CODE_DICT = { code : name for (code, name) in ACCOMMODATION_CODES }
-
+
class TestCenterRegistration(models.Model):
"""
This is our representation of a user's registration for in-person testing,
@@ -353,20 +328,20 @@ class TestCenterRegistration(models.Model):
of Pearson's data import system.
"""
# to find an exam registration, we key off of the user and course_id.
- # If multiple exams per course are possible, we would also need to add the
+ # If multiple exams per course are possible, we would also need to add the
# exam_series_code.
testcenter_user = models.ForeignKey(TestCenterUser, default=None)
course_id = models.CharField(max_length=128, db_index=True)
-
+
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True, db_index=True)
# user_updated_at happens only when the user makes a change to their data,
# and is something Pearson needs to know to manage updates. Unlike
# updated_at, this will not get incremented when we do a batch data import.
- # The appointment dates, the exam count, and the accommodation codes can be updated,
+ # The appointment dates, the exam count, and the accommodation codes can be updated,
# but hopefully this won't happen often.
user_updated_at = models.DateTimeField(db_index=True)
- # "client_authorization_id" is our unique identifier for the authorization.
+ # "client_authorization_id" is our unique identifier for the authorization.
# This must be present for an update or delete to be sent to Pearson.
client_authorization_id = models.CharField(max_length=20, unique=True, db_index=True)
@@ -376,10 +351,10 @@ class TestCenterRegistration(models.Model):
eligibility_appointment_date_last = models.DateField(db_index=True)
# this is really a list of codes, using an '*' as a delimiter.
- # So it's not a choice list. We use the special value of ACCOMMODATION_REJECTED_CODE
+ # So it's not a choice list. We use the special value of ACCOMMODATION_REJECTED_CODE
# to indicate the rejection of an accommodation request.
accommodation_code = models.CharField(max_length=64, blank=True)
-
+
# store the original text of the accommodation request.
accommodation_request = models.CharField(max_length=1024, blank=True, db_index=True)
@@ -387,7 +362,7 @@ class TestCenterRegistration(models.Model):
uploaded_at = models.DateTimeField(null=True, db_index=True)
# confirmation back from the test center, as well as timestamps
- # on when they processed the request, and when we received
+ # on when they processed the request, and when we received
# confirmation back.
processed_at = models.DateTimeField(null=True, db_index=True)
upload_status = models.CharField(max_length=20, blank=True, db_index=True) # 'Error' or 'Accepted'
@@ -397,11 +372,11 @@ class TestCenterRegistration(models.Model):
# (However, it may never be set if we are always initiating such candidate creation.)
authorization_id = models.IntegerField(null=True, db_index=True)
confirmed_at = models.DateTimeField(null=True, db_index=True)
-
+
@property
def candidate_id(self):
return self.testcenter_user.candidate_id
-
+
@property
def client_candidate_id(self):
return self.testcenter_user.client_candidate_id
@@ -414,20 +389,24 @@ class TestCenterRegistration(models.Model):
return 'Add'
else:
# TODO: decide what to send when we have uploaded an initial version,
- # but have not received confirmation back from that upload. If the
+ # but have not received confirmation back from that upload. If the
# registration here has been changed, then we don't know if this changed
- # registration should be submitted as an 'add' or an 'update'.
+ # registration should be submitted as an 'add' or an 'update'.
#
- # If the first registration were lost or in error (e.g. bad code),
+ # If the first registration were lost or in error (e.g. bad code),
# the second should be an "Add". If the first were processed successfully,
# then the second should be an "Update". We just don't know....
return 'Update'
-
+
@property
def exam_authorization_count(self):
# TODO: figure out if this should really go in the database (with a default value).
return 1
+ @property
+ def needs_uploading(self):
+ return self.uploaded_at is None or self.uploaded_at < self.user_updated_at
+
@classmethod
def create(cls, testcenter_user, exam, accommodation_request):
registration = cls(testcenter_user = testcenter_user)
@@ -443,7 +422,7 @@ class TestCenterRegistration(models.Model):
@staticmethod
def _generate_authorization_id():
return TestCenterUser._generate_edx_id("edXexam")
-
+
@staticmethod
def _create_client_authorization_id():
"""
@@ -455,8 +434,8 @@ class TestCenterRegistration(models.Model):
while TestCenterRegistration.objects.filter(client_authorization_id=auth_id).exists():
auth_id = TestCenterRegistration._generate_authorization_id()
return auth_id
-
- # methods for providing registration status details on registration page:
+
+ # methods for providing registration status details on registration page:
@property
def demographics_is_accepted(self):
return self.testcenter_user.is_accepted
@@ -464,7 +443,7 @@ class TestCenterRegistration(models.Model):
@property
def demographics_is_rejected(self):
return self.testcenter_user.is_rejected
-
+
@property
def demographics_is_pending(self):
return self.testcenter_user.is_pending
@@ -476,7 +455,7 @@ class TestCenterRegistration(models.Model):
@property
def accommodation_is_rejected(self):
return len(self.accommodation_request) > 0 and self.accommodation_code == ACCOMMODATION_REJECTED_CODE
-
+
@property
def accommodation_is_pending(self):
return len(self.accommodation_request) > 0 and len(self.accommodation_code) == 0
@@ -488,20 +467,20 @@ class TestCenterRegistration(models.Model):
@property
def registration_is_accepted(self):
return self.upload_status == TEST_CENTER_STATUS_ACCEPTED
-
+
@property
def registration_is_rejected(self):
return self.upload_status == TEST_CENTER_STATUS_ERROR
-
+
@property
def registration_is_pending(self):
return not self.registration_is_accepted and not self.registration_is_rejected
- # methods for providing registration status summary on dashboard page:
+ # methods for providing registration status summary on dashboard page:
@property
def is_accepted(self):
return self.registration_is_accepted and self.demographics_is_accepted
-
+
@property
def is_rejected(self):
return self.registration_is_rejected or self.demographics_is_rejected
@@ -509,17 +488,17 @@ class TestCenterRegistration(models.Model):
@property
def is_pending(self):
return not self.is_accepted and not self.is_rejected
-
+
def get_accommodation_codes(self):
return self.accommodation_code.split('*')
def get_accommodation_names(self):
- return [ ACCOMMODATION_CODE_DICT.get(code, "Unknown code " + code) for code in self.get_accommodation_codes() ]
+ return [ ACCOMMODATION_CODE_DICT.get(code, "Unknown code " + code) for code in self.get_accommodation_codes() ]
@property
def registration_signup_url(self):
return settings.PEARSONVUE_SIGNINPAGE_URL
-
+
class TestCenterRegistrationForm(ModelForm):
class Meta:
model = TestCenterRegistration
@@ -530,33 +509,37 @@ class TestCenterRegistrationForm(ModelForm):
if code and len(code) > 0:
return code.strip()
return code
-
+
def update_and_save(self):
registration = self.save(commit=False)
# create additional values here:
registration.user_updated_at = datetime.utcnow()
registration.upload_status = ''
registration.save()
- log.info("Updated registration information for user's test center exam registration: username \"{}\" course \"{}\", examcode \"{}\"".format(registration.testcenter_user.user.username, registration.course_id, registration.exam_series_code))
+ log.info("Updated registration information for user's test center exam registration: username \"{}\" course \"{}\", examcode \"{}\"".format(registration.testcenter_user.user.username, registration.course_id, registration.exam_series_code))
# TODO: add validation code for values added to accommodation_code field.
-
-
-
+
+
+
def get_testcenter_registration(user, course_id, exam_series_code):
try:
tcu = TestCenterUser.objects.get(user=user)
except TestCenterUser.DoesNotExist:
return []
return TestCenterRegistration.objects.filter(testcenter_user=tcu, course_id=course_id, exam_series_code=exam_series_code)
-
+
+# nosetests thinks that anything with _test_ in the name is a test.
+# Correct this (https://nose.readthedocs.org/en/latest/finding_tests.html)
+get_testcenter_registration.__test__ = False
+
def unique_id_for_user(user):
"""
Return a unique id for a user, suitable for inserting into
e.g. personalized survey links.
"""
# include the secret key as a salt, and to make the ids unique across
- # different LMS installs.
+ # different LMS installs.
h = hashlib.md5()
h.update(settings.SECRET_KEY)
h.update(str(user.id))
@@ -638,7 +621,20 @@ class CourseEnrollmentAllowed(models.Model):
#cache_relation(User.profile)
-#### Helper methods for use from python manage.py shell.
+#### Helper methods for use from python manage.py shell and other classes.
+
+def get_user_by_username_or_email(username_or_email):
+ """
+ Return a User object, looking up by email if username_or_email contains a
+ '@', otherwise by username.
+
+ Raises:
+    User.DoesNotExist if lookup fails.
+ """
+ if '@' in username_or_email:
+ return User.objects.get(email=username_or_email)
+ else:
+ return User.objects.get(username=username_or_email)
def get_user(email):
@@ -729,167 +725,3 @@ def update_user_information(sender, instance, created, **kwargs):
log.error(unicode(e))
log.error("update user info to discussion failed for user with id: " + str(instance.id))
-
-########################## REPLICATION SIGNALS #################################
-# @receiver(post_save, sender=User)
-def replicate_user_save(sender, **kwargs):
- user_obj = kwargs['instance']
- if not should_replicate(user_obj):
- return
- for course_db_name in db_names_to_replicate_to(user_obj.id):
- replicate_user(user_obj, course_db_name)
-
-
-# @receiver(post_save, sender=CourseEnrollment)
-def replicate_enrollment_save(sender, **kwargs):
- """This is called when a Student enrolls in a course. It has to do the
- following:
-
- 1. Make sure the User is copied into the Course DB. It may already exist
- (someone deleting and re-adding a course). This has to happen first or
- the foreign key constraint breaks.
- 2. Replicate the CourseEnrollment.
- 3. Replicate the UserProfile.
- """
- if not is_portal():
- return
-
- enrollment_obj = kwargs['instance']
- log.debug("Replicating user because of new enrollment")
- for course_db_name in db_names_to_replicate_to(enrollment_obj.user.id):
- replicate_user(enrollment_obj.user, course_db_name)
-
- log.debug("Replicating enrollment because of new enrollment")
- replicate_model(CourseEnrollment.save, enrollment_obj, enrollment_obj.user_id)
-
- log.debug("Replicating user profile because of new enrollment")
- user_profile = UserProfile.objects.get(user_id=enrollment_obj.user_id)
- replicate_model(UserProfile.save, user_profile, enrollment_obj.user_id)
-
-
-# @receiver(post_delete, sender=CourseEnrollment)
-def replicate_enrollment_delete(sender, **kwargs):
- enrollment_obj = kwargs['instance']
- return replicate_model(CourseEnrollment.delete, enrollment_obj, enrollment_obj.user_id)
-
-
-# @receiver(post_save, sender=UserProfile)
-def replicate_userprofile_save(sender, **kwargs):
- """We just updated the UserProfile (say an update to the name), so push that
- change to all Course DBs that we're enrolled in."""
- user_profile_obj = kwargs['instance']
- return replicate_model(UserProfile.save, user_profile_obj, user_profile_obj.user_id)
-
-
-######### Replication functions #########
-USER_FIELDS_TO_COPY = ["id", "username", "first_name", "last_name", "email",
- "password", "is_staff", "is_active", "is_superuser",
- "last_login", "date_joined"]
-
-
-def replicate_user(portal_user, course_db_name):
- """Replicate a User to the correct Course DB. This is more complicated than
- it should be because Askbot extends the auth_user table and adds its own
- fields. So we need to only push changes to the standard fields and leave
- the rest alone so that Askbot changes at the Course DB level don't get
- overridden.
- """
- try:
- course_user = User.objects.using(course_db_name).get(id=portal_user.id)
- log.debug("User {0} found in Course DB, replicating fields to {1}"
- .format(course_user, course_db_name))
- except User.DoesNotExist:
- log.debug("User {0} not found in Course DB, creating copy in {1}"
- .format(portal_user, course_db_name))
- course_user = User()
-
- for field in USER_FIELDS_TO_COPY:
- setattr(course_user, field, getattr(portal_user, field))
-
- mark_handled(course_user)
- course_user.save(using=course_db_name)
- unmark(course_user)
-
-
-def replicate_model(model_method, instance, user_id):
- """
- model_method is the model action that we want replicated. For instance,
- UserProfile.save
- """
- if not should_replicate(instance):
- return
-
- course_db_names = db_names_to_replicate_to(user_id)
- log.debug("Replicating {0} for user {1} to DBs: {2}"
- .format(model_method, user_id, course_db_names))
-
- mark_handled(instance)
- for db_name in course_db_names:
- model_method(instance, using=db_name)
- unmark(instance)
-
-
-######### Replication Helpers #########
-
-
-def is_valid_course_id(course_id):
- """Right now, the only database that's not a course database is 'default'.
- I had nicer checking in here originally -- it would scan the courses that
- were in the system and only let you choose that. But it was annoying to run
- tests with, since we don't have course data for some for our course test
- databases. Hence the lazy version.
- """
- return course_id != 'default'
-
-
-def is_portal():
- """Are we in the portal pool? Only Portal servers are allowed to replicate
- their changes. For now, only Portal servers see multiple DBs, so we use
- that to decide."""
- return len(settings.DATABASES) > 1
-
-
-def db_names_to_replicate_to(user_id):
- """Return a list of DB names that this user_id is enrolled in."""
- return [c.course_id
- for c in CourseEnrollment.objects.filter(user_id=user_id)
- if is_valid_course_id(c.course_id)]
-
-
-def marked_handled(instance):
- """Have we marked this instance as being handled to avoid infinite loops
- caused by saving models in post_save hooks for the same models?"""
- return hasattr(instance, '_do_not_copy_to_course_db') and instance._do_not_copy_to_course_db
-
-
-def mark_handled(instance):
- """You have to mark your instance with this function or else we'll go into
- an infinite loop since we're putting listeners on Model saves/deletes and
- the act of replication requires us to call the same model method.
-
- We create a _replicated attribute to differentiate the first save of this
- model vs. the duplicate save we force on to the course database. Kind of
- a hack -- suggestions welcome.
- """
- instance._do_not_copy_to_course_db = True
-
-
-def unmark(instance):
- """If we don't unmark a model after we do replication, then consecutive
- save() calls won't be properly replicated."""
- instance._do_not_copy_to_course_db = False
-
-
-def should_replicate(instance):
- """Should this instance be replicated? We need to be a Portal server and
- the instance has to not have been marked_handled."""
- if marked_handled(instance):
- # Basically, avoid an infinite loop. You should
- log.debug("{0} should not be replicated because it's been marked"
- .format(instance))
- return False
- if not is_portal():
- log.debug("{0} should not be replicated because we're not a portal."
- .format(instance))
- return False
- return True
diff --git a/common/djangoapps/student/tests.py b/common/djangoapps/student/tests.py
index 4c7c9e2592..8ce407bcd1 100644
--- a/common/djangoapps/student/tests.py
+++ b/common/djangoapps/student/tests.py
@@ -5,16 +5,11 @@ when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import logging
-from datetime import datetime
-from hashlib import sha1
from django.test import TestCase
-from mock import patch, Mock
-from nose.plugins.skip import SkipTest
+from mock import Mock
-from .models import (User, UserProfile, CourseEnrollment,
- replicate_user, USER_FIELDS_TO_COPY,
- unique_id_for_user)
+from .models import unique_id_for_user
from .views import process_survey_link, _cert_info
COURSE_1 = 'edX/toy/2012_Fall'
@@ -22,185 +17,6 @@ COURSE_2 = 'edx/full/6.002_Spring_2012'
log = logging.getLogger(__name__)
-class ReplicationTest(TestCase):
-
- multi_db = True
-
- def test_user_replication(self):
- """Test basic user replication."""
- raise SkipTest()
- portal_user = User.objects.create_user('rusty', 'rusty@edx.org', 'fakepass')
- portal_user.first_name='Rusty'
- portal_user.last_name='Skids'
- portal_user.is_staff=True
- portal_user.is_active=True
- portal_user.is_superuser=True
- portal_user.last_login=datetime(2012, 1, 1)
- portal_user.date_joined=datetime(2011, 1, 1)
- # This is an Askbot field and will break if askbot is not included
-
- if hasattr(portal_user, 'seen_response_count'):
- portal_user.seen_response_count = 10
-
- portal_user.save(using='default')
-
- # We replicate this user to Course 1, then pull the same user and verify
- # that the fields copied over properly.
- replicate_user(portal_user, COURSE_1)
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
-
- # Make sure the fields we care about got copied over for this user.
- for field in USER_FIELDS_TO_COPY:
- self.assertEqual(getattr(portal_user, field),
- getattr(course_user, field),
- "{0} not copied from {1} to {2}".format(
- field, portal_user, course_user
- ))
-
- # This hasattr lameness is here because we don't want this test to be
- # triggered when we're being run by CMS tests (Askbot doesn't exist
- # there, so the test will fail).
- #
- # seen_response_count isn't a field we care about, so it shouldn't have
- # been copied over.
- if hasattr(portal_user, 'seen_response_count'):
- portal_user.seen_response_count = 20
- replicate_user(portal_user, COURSE_1)
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
- self.assertEqual(portal_user.seen_response_count, 20)
- self.assertEqual(course_user.seen_response_count, 0)
-
- # Another replication should work for an email change however, since
- # it's a field we care about.
- portal_user.email = "clyde@edx.org"
- replicate_user(portal_user, COURSE_1)
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
- self.assertEqual(portal_user.email, course_user.email)
-
- # During this entire time, the user data should never have made it over
- # to COURSE_2
- self.assertRaises(User.DoesNotExist,
- User.objects.using(COURSE_2).get,
- id=portal_user.id)
-
-
- def test_enrollment_for_existing_user_info(self):
- """Test the effect of Enrolling in a class if you've already got user
- data to be copied over."""
- raise SkipTest()
- # Create our User
- portal_user = User.objects.create_user('jack', 'jack@edx.org', 'fakepass')
- portal_user.first_name = "Jack"
- portal_user.save()
-
- # Set up our UserProfile info
- portal_user_profile = UserProfile.objects.create(
- user=portal_user,
- name="Jack Foo",
- level_of_education=None,
- gender='m',
- mailing_address=None,
- goals="World domination",
- )
- portal_user_profile.save()
-
- # Now let's see if creating a CourseEnrollment copies all the relevant
- # data.
- portal_enrollment = CourseEnrollment.objects.create(user=portal_user,
- course_id=COURSE_1)
- portal_enrollment.save()
-
- # Grab all the copies we expect
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
- self.assertEquals(portal_user, course_user)
- self.assertRaises(User.DoesNotExist,
- User.objects.using(COURSE_2).get,
- id=portal_user.id)
-
- course_enrollment = CourseEnrollment.objects.using(COURSE_1).get(id=portal_enrollment.id)
- self.assertEquals(portal_enrollment, course_enrollment)
- self.assertRaises(CourseEnrollment.DoesNotExist,
- CourseEnrollment.objects.using(COURSE_2).get,
- id=portal_enrollment.id)
-
- course_user_profile = UserProfile.objects.using(COURSE_1).get(id=portal_user_profile.id)
- self.assertEquals(portal_user_profile, course_user_profile)
- self.assertRaises(UserProfile.DoesNotExist,
- UserProfile.objects.using(COURSE_2).get,
- id=portal_user_profile.id)
-
- log.debug("Make sure our seen_response_count is not replicated.")
- if hasattr(portal_user, 'seen_response_count'):
- portal_user.seen_response_count = 200
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
- self.assertEqual(portal_user.seen_response_count, 200)
- self.assertEqual(course_user.seen_response_count, 0)
- portal_user.save()
-
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
- self.assertEqual(portal_user.seen_response_count, 200)
- self.assertEqual(course_user.seen_response_count, 0)
-
- portal_user.email = 'jim@edx.org'
- portal_user.save()
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
- self.assertEqual(portal_user.email, 'jim@edx.org')
- self.assertEqual(course_user.email, 'jim@edx.org')
-
-
-
- def test_enrollment_for_user_info_after_enrollment(self):
- """Test the effect of modifying User data after you've enrolled."""
- raise SkipTest()
-
- # Create our User
- portal_user = User.objects.create_user('patty', 'patty@edx.org', 'fakepass')
- portal_user.first_name = "Patty"
- portal_user.save()
-
- # Set up our UserProfile info
- portal_user_profile = UserProfile.objects.create(
- user=portal_user,
- name="Patty Foo",
- level_of_education=None,
- gender='f',
- mailing_address=None,
- goals="World peace",
- )
- portal_user_profile.save()
-
- # Now let's see if creating a CourseEnrollment copies all the relevant
- # data when things are saved.
- portal_enrollment = CourseEnrollment.objects.create(user=portal_user,
- course_id=COURSE_1)
- portal_enrollment.save()
-
- portal_user.last_name = "Bar"
- portal_user.save()
- portal_user_profile.gender = 'm'
- portal_user_profile.save()
-
- # Grab all the copies we expect, and make sure it doesn't end up in
- # places we don't expect.
- course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
- self.assertEquals(portal_user, course_user)
- self.assertRaises(User.DoesNotExist,
- User.objects.using(COURSE_2).get,
- id=portal_user.id)
-
- course_enrollment = CourseEnrollment.objects.using(COURSE_1).get(id=portal_enrollment.id)
- self.assertEquals(portal_enrollment, course_enrollment)
- self.assertRaises(CourseEnrollment.DoesNotExist,
- CourseEnrollment.objects.using(COURSE_2).get,
- id=portal_enrollment.id)
-
- course_user_profile = UserProfile.objects.using(COURSE_1).get(id=portal_user_profile.id)
- self.assertEquals(portal_user_profile, course_user_profile)
- self.assertRaises(UserProfile.DoesNotExist,
- UserProfile.objects.using(COURSE_2).get,
- id=portal_user_profile.id)
-
-
class CourseEndingTest(TestCase):
"""Test things related to course endings: certificates, surveys, etc"""
diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py
index 7f97ca69dc..6e3e2cfa39 100644
--- a/common/lib/xmodule/xmodule/course_module.py
+++ b/common/lib/xmodule/xmodule/course_module.py
@@ -223,7 +223,7 @@ class CourseDescriptor(SequenceDescriptor):
return policy_str
-
+
@classmethod
def from_xml(cls, xml_data, system, org=None, course=None):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, org, course)
@@ -248,7 +248,7 @@ class CourseDescriptor(SequenceDescriptor):
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = None
-
+
# cdodge: import the grading policy information that is on disk and put into the
# descriptor 'definition' bucket as a dictionary so that it is persisted in the DB
instance.definition['data']['grading_policy'] = policy
@@ -303,28 +303,28 @@ class CourseDescriptor(SequenceDescriptor):
@property
def enrollment_start(self):
return self._try_parse_time("enrollment_start")
-
+
@enrollment_start.setter
def enrollment_start(self, value):
if isinstance(value, time.struct_time):
self.metadata['enrollment_start'] = stringify_time(value)
@property
- def enrollment_end(self):
+ def enrollment_end(self):
return self._try_parse_time("enrollment_end")
-
+
@enrollment_end.setter
def enrollment_end(self, value):
if isinstance(value, time.struct_time):
self.metadata['enrollment_end'] = stringify_time(value)
-
+
@property
def grader(self):
return self._grading_policy['GRADER']
-
+
@property
def raw_grader(self):
return self._grading_policy['RAW_GRADER']
-
+
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
@@ -334,12 +334,12 @@ class CourseDescriptor(SequenceDescriptor):
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
-
+
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
self.definition['data'].setdefault('grading_policy',{})['GRADE_CUTOFFS'] = value
-
+
@property
def lowest_passing_grade(self):
@@ -360,6 +360,41 @@ class CourseDescriptor(SequenceDescriptor):
def show_calculator(self):
return self.metadata.get("show_calculator", None) == "Yes"
+ @property
+ def is_cohorted(self):
+ """
+ Return whether the course is cohorted.
+ """
+ config = self.metadata.get("cohort_config")
+ if config is None:
+ return False
+
+ return bool(config.get("cohorted"))
+
+ @property
+ def top_level_discussion_topic_ids(self):
+ """
+ Return list of topic ids defined in course policy.
+ """
+ topics = self.metadata.get("discussion_topics", {})
+ return [d["id"] for d in topics.values()]
+
+
+ @property
+ def cohorted_discussions(self):
+ """
+ Return the set of discussions that is explicitly cohorted. It may be
+ the empty set. Note that all inline discussions are automatically
+ cohorted based on the course's is_cohorted setting.
+ """
+ config = self.metadata.get("cohort_config")
+ if config is None:
+ return set()
+
+ return set(config.get("cohorted_discussions", []))
+
+
+
@property
def is_new(self):
"""
diff --git a/common/lib/xmodule/xmodule/discussion_module.py b/common/lib/xmodule/xmodule/discussion_module.py
index 1deceac5d0..57d7780d95 100644
--- a/common/lib/xmodule/xmodule/discussion_module.py
+++ b/common/lib/xmodule/xmodule/discussion_module.py
@@ -18,8 +18,10 @@ class DiscussionModule(XModule):
}
return self.system.render_template('discussion/_discussion_module.html', context)
- def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, **kwargs):
- XModule.__init__(self, system, location, definition, descriptor, instance_state, shared_state, **kwargs)
+ def __init__(self, system, location, definition, descriptor,
+ instance_state=None, shared_state=None, **kwargs):
+ XModule.__init__(self, system, location, definition, descriptor,
+ instance_state, shared_state, **kwargs)
if isinstance(instance_state, str):
instance_state = json.loads(instance_state)
diff --git a/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee b/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee
index 93f90d9248..22308a5568 100644
--- a/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee
+++ b/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee
@@ -45,6 +45,7 @@ class @VideoPlayer extends Subview
modestbranding: 1
if @video.start
@playerVars.start = @video.start
+ @playerVars.wmode = 'window'
if @video.end
# work in AS3, not HMLT5. but iframe use AS3
@playerVars.end = @video.end
diff --git a/common/lib/xmodule/xmodule/tests/test_import.py b/common/lib/xmodule/xmodule/tests/test_import.py
index 554e89ac74..7cd91223e3 100644
--- a/common/lib/xmodule/xmodule/tests/test_import.py
+++ b/common/lib/xmodule/xmodule/tests/test_import.py
@@ -45,13 +45,24 @@ class DummySystem(ImportSystem):
raise Exception("Shouldn't be called")
-class ImportTestCase(unittest.TestCase):
+class BaseCourseTestCase(unittest.TestCase):
'''Make sure module imports work properly, including for malformed inputs'''
@staticmethod
def get_system(load_error_modules=True):
'''Get a dummy system'''
return DummySystem(load_error_modules)
+ def get_course(self, name):
+ """Get a test course by directory name. If there's more than one, error."""
+ print "Importing {0}".format(name)
+
+ modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])
+ courses = modulestore.get_courses()
+ self.assertEquals(len(courses), 1)
+ return courses[0]
+
+class ImportTestCase(BaseCourseTestCase):
+
def test_fallback(self):
'''Check that malformed xml loads as an ErrorDescriptor.'''
@@ -207,11 +218,7 @@ class ImportTestCase(unittest.TestCase):
"""Make sure that metadata is inherited properly"""
print "Starting import"
- initial_import = XMLModuleStore(DATA_DIR, course_dirs=['toy'])
-
- courses = initial_import.get_courses()
- self.assertEquals(len(courses), 1)
- course = courses[0]
+ course = self.get_course('toy')
def check_for_key(key, node):
"recursive check for presence of key"
@@ -227,16 +234,8 @@ class ImportTestCase(unittest.TestCase):
"""Make sure that when two courses share content with the same
org and course names, policy applies to the right one."""
- def get_course(name):
- print "Importing {0}".format(name)
-
- modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])
- courses = modulestore.get_courses()
- self.assertEquals(len(courses), 1)
- return courses[0]
-
- toy = get_course('toy')
- two_toys = get_course('two_toys')
+ toy = self.get_course('toy')
+ two_toys = self.get_course('two_toys')
self.assertEqual(toy.url_name, "2012_Fall")
self.assertEqual(two_toys.url_name, "TT_2012_Fall")
@@ -279,8 +278,8 @@ class ImportTestCase(unittest.TestCase):
"""Ensure that colons in url_names convert to file paths properly"""
print "Starting import"
+ # Not using get_courses because we need the modulestore object too afterward
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'])
-
courses = modulestore.get_courses()
self.assertEquals(len(courses), 1)
course = courses[0]
@@ -317,7 +316,7 @@ class ImportTestCase(unittest.TestCase):
toy_id = "edX/toy/2012_Fall"
- course = modulestore.get_courses()[0]
+ course = modulestore.get_course(toy_id)
chapters = course.get_children()
ch1 = chapters[0]
sections = ch1.get_children()
@@ -355,3 +354,30 @@ class ImportTestCase(unittest.TestCase):
\
""".strip()
self.assertEqual(gst_sample.definition['render'], render_string_from_sample_gst_xml)
+
+ def test_cohort_config(self):
+ """
+ Check that cohort config parsing works right.
+ """
+ modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'])
+
+ toy_id = "edX/toy/2012_Fall"
+
+ course = modulestore.get_course(toy_id)
+
+ # No config -> False
+ self.assertFalse(course.is_cohorted)
+
+ # empty config -> False
+ course.metadata['cohort_config'] = {}
+ self.assertFalse(course.is_cohorted)
+
+ # false config -> False
+ course.metadata['cohort_config'] = {'cohorted': False}
+ self.assertFalse(course.is_cohorted)
+
+ # and finally...
+ course.metadata['cohort_config'] = {'cohorted': True}
+ self.assertTrue(course.is_cohorted)
+
+
diff --git a/common/static/js/course_groups/cohorts.js b/common/static/js/course_groups/cohorts.js
new file mode 100644
index 0000000000..aa3ce34b5b
--- /dev/null
+++ b/common/static/js/course_groups/cohorts.js
@@ -0,0 +1,256 @@
+// structure stolen from http://briancray.com/posts/javascript-module-pattern
+
+var CohortManager = (function ($) {
+ // private variables and functions
+
+ // using jQuery
+ function getCookie(name) {
+ var cookieValue = null;
+ if (document.cookie && document.cookie != '') {
+ var cookies = document.cookie.split(';');
+ for (var i = 0; i < cookies.length; i++) {
+ var cookie = $.trim(cookies[i]);
+ // Does this cookie string begin with the name we want?
+ if (cookie.substring(0, name.length + 1) == (name + '=')) {
+ cookieValue = decodeURIComponent(cookie.substring(name.length + 1));
+ break;
+ }
+ }
+ }
+ return cookieValue;
+ }
+ var csrftoken = getCookie('csrftoken');
+
+ function csrfSafeMethod(method) {
+ // these HTTP methods do not require CSRF protection
+ return (/^(GET|HEAD|OPTIONS|TRACE)$/.test(method));
+ }
+ $.ajaxSetup({
+ crossDomain: false, // obviates need for sameOrigin test
+ beforeSend: function(xhr, settings) {
+ if (!csrfSafeMethod(settings.type)) {
+ xhr.setRequestHeader("X-CSRFToken", csrftoken);
+ }
+ }
+ });
+
+ // constructor
+ var module = function () {
+ var el = $(".cohort_manager");
+ // localized jquery
+ var $$ = function (selector) {
+ return $(selector, el)
+ }
+ var state_init = "init";
+ var state_summary = "summary";
+ var state_detail = "detail";
+ var state = state_init;
+
+ var url = el.data('ajax_url');
+ var self = this;
+
+ // Pull out the relevant parts of the html
+ // global stuff
+ var errors = $$(".errors");
+
+ // cohort summary display
+ var summary = $$(".summary");
+ var cohorts = $$(".cohorts");
+ var show_cohorts_button = $$(".controls .show_cohorts");
+ var add_cohort_input = $$(".cohort_name");
+ var add_cohort_button = $$(".add_cohort");
+
+ // single cohort user display
+ var detail = $$(".detail");
+ var detail_header = $(".header", detail);
+ var detail_users = $$(".users");
+ var detail_page_num = $$(".page_num");
+ var users_area = $$(".users_area");
+ var add_members_button = $$(".add_members");
+ var op_results = $$(".op_results");
+ var cohort_id = null;
+ var cohort_title = null;
+ var detail_url = null;
+ var page = null;
+
+ // *********** Summary view methods
+
+ function show_cohort(item) {
+ // item is a li that has a data-href link to the cohort base url
+ var el = $(this);
+ cohort_title = el.text();
+ detail_url = el.data('href');
+ cohort_id = el.data('id');
+ state = state_detail;
+ render();
+ }
+
+ function add_to_cohorts_list(item) {
+        var li = $('<li><a href="#"></a></li>');
+ $("a", li).text(item.name)
+ .data('href', url + '/' + item.id)
+ .addClass('link')
+ .click(show_cohort);
+ cohorts.append(li);
+ };
+
+ function log_error(msg) {
+ errors.empty();
+        errors.append($("<li />").text(msg).addClass("error"));
+ };
+
+ function load_cohorts(response) {
+ cohorts.empty();
+ if (response && response.success) {
+ response.cohorts.forEach(add_to_cohorts_list);
+ } else {
+ log_error(response.msg || "There was an error loading cohorts");
+ }
+ summary.show();
+ };
+
+
+ function added_cohort(response) {
+ if (response && response.success) {
+ add_to_cohorts_list(response.cohort);
+ } else {
+ log_error(response.msg || "There was an error adding a cohort");
+ }
+ }
+
+ // *********** Detail view methods
+
+ function remove_user_from_cohort(username, cohort_id, row) {
+ var delete_url = detail_url + '/delete';
+ var data = {'username': username}
+ $.post(delete_url, data).done(function() {row.remove()})
+ .fail(function(jqXHR, status, error) {
+ log_error('Error removing user ' + username +
+ ' from cohort. ' + status + ' ' + error);
+ });
+ }
+
+ function add_to_users_list(item) {
+ var tr = $('